-rw-r--r--  .mailmap | 5
-rw-r--r--  Documentation/PCI/index.rst | 2
-rw-r--r--  Documentation/PCI/pciebus-howto.rst (renamed from Documentation/PCI/picebus-howto.rst) | 0
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 7
-rw-r--r--  Documentation/admin-guide/sysctl/net.rst | 29
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/ksz.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/net/macb.txt | 4
-rw-r--r--  Documentation/networking/tls-offload.rst | 18
-rw-r--r--  Documentation/networking/tuntap.txt | 4
-rw-r--r--  Documentation/process/embargoed-hardware-issues.rst | 279
-rw-r--r--  Documentation/process/index.rst | 1
-rw-r--r--  MAINTAINERS | 59
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/boot/dts/Makefile | 3
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h | 2
-rw-r--r--  arch/arc/include/asm/linkage.h | 8
-rw-r--r--  arch/arc/include/asm/mach_desc.h | 3
-rw-r--r--  arch/arc/kernel/mcip.c | 60
-rw-r--r--  arch/arc/kernel/unwind.c | 5
-rw-r--r--  arch/arc/mm/dma.c | 2
-rw-r--r--  arch/arc/plat-hsdk/platform.c | 87
-rw-r--r--  arch/arm/Kconfig | 4
-rw-r--r--  arch/arm/boot/dts/am33xx-l4.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 32
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 32
-rw-r--r--  arch/arm/boot/dts/am437x-l4.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/am571x-idk.dts | 7
-rw-r--r--  arch/arm/boot/dts/am572x-idk.dts | 7
-rw-r--r--  arch/arm/boot/dts/am574x-idk.dts | 7
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts | 7
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15-revc.dts | 7
-rw-r--r--  arch/arm/boot/dts/dra7-evm.dts | 2
-rw-r--r--  arch/arm/boot/dts/dra7-l4.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi | 50
-rw-r--r--  arch/arm/boot/dts/vf610-bk4.dts | 4
-rw-r--r--  arch/arm/lib/backtrace.S | 2
-rw-r--r--  arch/arm/mach-omap1/ams-delta-fiq-handler.S | 3
-rw-r--r--  arch/arm/mach-omap1/ams-delta-fiq.c | 4
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c | 3
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 3
-rw-r--r--  arch/arm/mach-rpc/riscpc.c | 1
-rw-r--r--  arch/arm/mm/Kconfig | 4
-rw-r--r--  arch/arm/mm/init.c | 8
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts | 6
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12a.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts | 1
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-sli-defs.h | 1
-rw-r--r--  arch/nds32/kernel/signal.c | 2
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 3
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c | 6
-rw-r--r--  arch/riscv/include/asm/fixmap.h | 4
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 12
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 12
-rw-r--r--  arch/um/include/shared/timer-internal.h | 14
-rw-r--r--  arch/um/kernel/process.c | 2
-rw-r--r--  arch/um/kernel/time.c | 16
-rw-r--r--  arch/x86/Makefile | 1
-rw-r--r--  arch/x86/boot/compressed/pgtable_64.c | 13
-rw-r--r--  arch/x86/events/amd/ibs.c | 13
-rw-r--r--  arch/x86/events/core.c | 2
-rw-r--r--  arch/x86/events/intel/core.c | 6
-rw-r--r--  arch/x86/include/asm/bootparam_utils.h | 2
-rw-r--r--  arch/x86/include/asm/ftrace.h | 1
-rw-r--r--  arch/x86/include/asm/intel-family.h | 15
-rw-r--r--  arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 2
-rw-r--r--  arch/x86/include/asm/perf_event.h | 12
-rw-r--r--  arch/x86/kernel/apic/apic.c | 72
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c | 24
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 8
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 66
-rw-r--r--  arch/x86/kernel/uprobes.c | 17
-rw-r--r--  arch/x86/kvm/hyperv.c | 5
-rw-r--r--  arch/x86/kvm/lapic.c | 5
-rw-r--r--  arch/x86/kvm/mmu.c | 33
-rw-r--r--  arch/x86/kvm/svm.c | 9
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 1
-rw-r--r--  arch/x86/kvm/x86.c | 9
-rw-r--r--  arch/x86/mm/pageattr.c | 26
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 9
-rw-r--r--  arch/x86/power/cpu.c | 86
-rw-r--r--  drivers/atm/Kconfig | 2
-rw-r--r--  drivers/auxdisplay/ht16k33.c | 4
-rw-r--r--  drivers/block/drbd/drbd_main.c | 2
-rw-r--r--  drivers/block/rbd.c | 11
-rw-r--r--  drivers/bluetooth/btqca.c | 29
-rw-r--r--  drivers/bluetooth/btqca.h | 7
-rw-r--r--  drivers/bluetooth/btusb.c | 4
-rw-r--r--  drivers/bluetooth/hci_qca.c | 9
-rw-r--r--  drivers/bus/hisi_lpc.c | 47
-rw-r--r--  drivers/bus/ti-sysc.c | 24
-rw-r--r--  drivers/clk/clk.c | 49
-rw-r--r--  drivers/clk/samsung/clk-exynos5-subcmu.c | 16
-rw-r--r--  drivers/clk/samsung/clk-exynos5-subcmu.h | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c | 7
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 162
-rw-r--r--  drivers/clk/socfpga/clk-periph-s10.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 8
-rw-r--r--  drivers/dma/fsldma.c | 1
-rw-r--r--  drivers/fpga/altera-ps-spi.c | 11
-rw-r--r--  drivers/fsi/fsi-scom.c | 8
-rw-r--r--  drivers/gpio/gpiolib-of.c | 2
-rw-r--r--  drivers/gpio/gpiolib.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 66
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 11
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 11
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c | 19
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h | 3
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 5
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 30
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h | 1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 55
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 24
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/output.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 20
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_lvds.c | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 10
-rw-r--r--  drivers/hid/hid-cp2112.c | 8
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 22
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 7
-rw-r--r--  drivers/hv/channel.c | 2
-rw-r--r--  drivers/hv/hyperv_vmbus.h | 2
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 10
-rw-r--r--  drivers/hwtracing/stm/core.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-bcm-iproc.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-designware-slave.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-mt65xx.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 12
-rw-r--r--  drivers/i2c/i2c-core-base.c | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 6
-rw-r--r--  drivers/infiniband/core/counters.c | 10
-rw-r--r--  drivers/infiniband/core/nldev.c | 3
-rw-r--r--  drivers/infiniband/core/restrack.c | 15
-rw-r--r--  drivers/infiniband/core/umem.c | 7
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 8
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 11
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.c | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 76
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 14
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 17
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 24
-rw-r--r--  drivers/infiniband/sw/siw/siw.h | 8
-rw-r--r--  drivers/infiniband/sw/siw/siw_cm.c | 113
-rw-r--r--  drivers/infiniband/sw/siw/siw_cq.c | 5
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c | 14
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.h | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp.c | 2
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_rx.c | 26
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_tx.c | 80
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 40
-rw-r--r--  drivers/input/serio/hyperv-keyboard.c | 35
-rw-r--r--  drivers/iommu/dma-iommu.c | 3
-rw-r--r--  drivers/md/dm-bufio.c | 4
-rw-r--r--  drivers/md/dm-dust.c | 11
-rw-r--r--  drivers/md/dm-integrity.c | 15
-rw-r--r--  drivers/md/dm-kcopyd.c | 5
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-table.c | 5
-rw-r--r--  drivers/md/dm-zoned-metadata.c | 68
-rw-r--r--  drivers/md/dm-zoned-reclaim.c | 47
-rw-r--r--  drivers/md/dm-zoned-target.c | 68
-rw-r--r--  drivers/md/dm-zoned.h | 11
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 31
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.c | 2
-rw-r--r--  drivers/mfd/rk808.c | 6
-rw-r--r--  drivers/misc/lkdtm/bugs.c | 4
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 2
-rw-r--r--  drivers/misc/mei/pci-me.c | 2
-rw-r--r--  drivers/misc/vmw_balloon.c | 10
-rw-r--r--  drivers/misc/vmw_vmci/vmci_doorbell.c | 6
-rw-r--r--  drivers/mmc/core/sd.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-cadence.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 3
-rw-r--r--  drivers/mmc/host/sdhci-sprd.c | 30
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 14
-rw-r--r--  drivers/mtd/hyperbus/Kconfig | 1
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 10
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 1
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 97
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 138
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ace.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 12
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 17
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 1
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 8
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 5
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 9
-rw-r--r--  drivers/net/ieee802154/mac802154_hwsim.c | 8
-rw-r--r--  drivers/net/netdevsim/dev.c | 63
-rw-r--r--  drivers/net/netdevsim/fib.c | 102
-rw-r--r--  drivers/net/netdevsim/netdev.c | 9
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 10
-rw-r--r--  drivers/net/phy/at803x.c | 32
-rw-r--r--  drivers/net/phy/phy-c45.c | 40
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 12
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 3
-rw-r--r--  drivers/net/usb/kalmia.c | 6
-rw-r--r--  drivers/net/usb/lan78xx.c | 8
-rw-r--r--  drivers/net/usb/r8152.c | 10
-rw-r--r--  drivers/net/wimax/i2400m/fw.c | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 33
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 10
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 21
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 20
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 8
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 9
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 13
-rw-r--r--  drivers/net/xen-netback/netback.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 14
-rw-r--r--  drivers/nvme/host/multipath.c | 1
-rw-r--r--  drivers/nvme/host/nvme.h | 5
-rw-r--r--  drivers/nvme/host/pci.c | 3
-rw-r--r--  drivers/pci/quirks.c | 2
-rw-r--r--  drivers/platform/chrome/cros_ec_ishtp.c | 4
-rw-r--r--  drivers/power/supply/ab8500_charger.c | 1
-rw-r--r--  drivers/s390/net/qeth_core.h | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 24
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 11
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 3
-rw-r--r--  drivers/soc/ixp4xx/Kconfig | 4
-rw-r--r--  drivers/soc/ti/pm33xx.c | 19
-rw-r--r--  drivers/target/target_core_user.c | 9
-rw-r--r--  drivers/usb/chipidea/udc.c | 32
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 16
-rw-r--r--  drivers/usb/class/usbtmc.c | 3
-rw-r--r--  drivers/usb/core/hcd-pci.c | 30
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 15
-rw-r--r--  drivers/usb/host/xhci-rcar.c | 2
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 10
-rw-r--r--  drivers/usb/storage/realtek_cr.c | 15
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 2
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 2
-rw-r--r--  drivers/video/fbdev/acornfb.c | 1
-rw-r--r--  drivers/watchdog/wdt285.c | 2
-rw-r--r--  fs/afs/cell.c | 4
-rw-r--r--  fs/afs/dir.c | 3
-rw-r--r--  fs/afs/yfsclient.c | 2
-rw-r--r--  fs/ceph/addr.c | 5
-rw-r--r--  fs/ceph/caps.c | 5
-rw-r--r--  fs/ceph/inode.c | 7
-rw-r--r--  fs/ceph/locks.c | 3
-rw-r--r--  fs/ceph/snap.c | 4
-rw-r--r--  fs/ceph/super.h | 2
-rw-r--r--  fs/ceph/xattr.c | 19
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifsproto.h | 1
-rw-r--r--  fs/cifs/cifssmb.c | 197
-rw-r--r--  fs/cifs/connect.c | 31
-rw-r--r--  fs/cifs/dir.c | 5
-rw-r--r--  fs/cifs/misc.c | 22
-rw-r--r--  fs/cifs/sess.c | 26
-rw-r--r--  fs/io_uring.c | 66
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfs/direct.c | 27
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 28
-rw-r--r--  fs/nfs/inode.c | 33
-rw-r--r--  fs/nfs/internal.h | 10
-rw-r--r--  fs/nfs/nfs4file.c | 12
-rw-r--r--  fs/nfs/pagelist.c | 19
-rw-r--r--  fs/nfs/pnfs_nfs.c | 15
-rw-r--r--  fs/nfs/proc.c | 7
-rw-r--r--  fs/nfs/read.c | 35
-rw-r--r--  fs/nfs/write.c | 38
-rw-r--r--  fs/nfsd/nfscache.c | 2
-rw-r--r--  fs/nfsd/nfsctl.c | 19
-rw-r--r--  fs/read_write.c | 49
-rw-r--r--  fs/ubifs/budget.c | 2
-rw-r--r--  fs/ubifs/orphan.c | 2
-rw-r--r--  fs/ubifs/super.c | 4
-rw-r--r--  fs/userfaultfd.c | 25
-rw-r--r--  fs/xfs/xfs_ioctl32.c | 56
-rw-r--r--  fs/xfs/xfs_iops.c | 1
-rw-r--r--  fs/xfs/xfs_pnfs.c | 2
-rw-r--r--  fs/xfs/xfs_reflink.c | 63
-rw-r--r--  include/linux/ceph/buffer.h | 3
-rw-r--r--  include/linux/dma-contiguous.h | 5
-rw-r--r--  include/linux/gpio.h | 24
-rw-r--r--  include/linux/key.h | 8
-rw-r--r--  include/linux/logic_pio.h | 1
-rw-r--r--  include/linux/mlx5/device.h | 4
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r--  include/linux/mmzone.h | 5
-rw-r--r--  include/linux/netfilter/nf_conntrack_h323_types.h | 5
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/signal.h | 15
-rw-r--r--  include/linux/skbuff.h | 8
-rw-r--r--  include/linux/socket.h | 3
-rw-r--r--  include/linux/sunrpc/sched.h | 1
-rw-r--r--  include/linux/timekeeper_internal.h | 5
-rw-r--r--  include/linux/trace_events.h | 1
-rw-r--r--  include/math-emu/op-common.h | 5
-rw-r--r--  include/net/act_api.h | 4
-rw-r--r--  include/net/addrconf.h | 2
-rw-r--r--  include/net/bluetooth/hci_core.h | 1
-rw-r--r--  include/net/inet_frag.h | 2
-rw-r--r--  include/net/net_namespace.h | 3
-rw-r--r--  include/net/netfilter/nf_tables.h | 9
-rw-r--r--  include/net/netfilter/nf_tables_offload.h | 2
-rw-r--r--  include/net/netlink.h | 5
-rw-r--r--  include/net/nexthop.h | 6
-rw-r--r--  include/net/pkt_cls.h | 2
-rw-r--r--  include/net/psample.h | 1
-rw-r--r--  include/net/route.h | 2
-rw-r--r--  include/net/sock.h | 10
-rw-r--r--  include/rdma/restrack.h | 3
-rw-r--r--  include/soc/arc/mcip.h | 11
-rw-r--r--  include/trace/events/rxrpc.h | 65
-rw-r--r--  include/uapi/linux/bpf.h | 4
-rw-r--r--  include/uapi/linux/jffs2.h | 5
-rw-r--r--  include/uapi/linux/netfilter/xt_nfacct.h | 5
-rw-r--r--  include/uapi/linux/rds.h | 2
-rw-r--r--  kernel/bpf/core.c | 8
-rw-r--r--  kernel/bpf/syscall.c | 30
-rw-r--r--  kernel/bpf/verifier.c | 9
-rw-r--r--  kernel/dma/contiguous.c | 8
-rw-r--r--  kernel/dma/direct.c | 10
-rw-r--r--  kernel/irq/irqdesc.c | 15
-rw-r--r--  kernel/kallsyms.c | 6
-rw-r--r--  kernel/kprobes.c | 8
-rw-r--r--  kernel/module.c | 4
-rw-r--r--  kernel/sched/core.c | 5
-rw-r--r--  kernel/sched/psi.c | 8
-rw-r--r--  kernel/signal.c | 5
-rw-r--r--  kernel/time/timekeeping.c | 5
-rw-r--r--  kernel/time/vsyscall.c | 22
-rw-r--r--  kernel/trace/ftrace.c | 17
-rw-r--r--  kernel/trace/trace.c | 26
-rw-r--r--  kernel/trace/trace_events.c | 2
-rw-r--r--  kernel/trace/trace_probe.c | 3
-rw-r--r--  lib/kfifo.c | 3
-rw-r--r--  lib/logic_pio.c | 73
-rw-r--r--  mm/huge_memory.c | 4
-rw-r--r--  mm/kasan/common.c | 10
-rw-r--r--  mm/memcontrol.c | 87
-rw-r--r--  mm/page_alloc.c | 19
-rw-r--r--  mm/vmscan.c | 5
-rw-r--r--  mm/z3fold.c | 90
-rw-r--r--  mm/zsmalloc.c | 80
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 20
-rw-r--r--  net/batman-adv/bat_v_ogm.c | 18
-rw-r--r--  net/batman-adv/multicast.c | 8
-rw-r--r--  net/batman-adv/netlink.c | 2
-rw-r--r--  net/bluetooth/hci_core.c | 1
-rw-r--r--  net/bluetooth/hci_debugfs.c | 31
-rw-r--r--  net/bluetooth/hidp/core.c | 9
-rw-r--r--  net/bluetooth/l2cap_core.c | 2
-rw-r--r--  net/bridge/netfilter/ebtables.c | 8
-rw-r--r--  net/bridge/netfilter/nft_meta_bridge.c | 2
-rw-r--r--  net/ceph/crypto.c | 6
-rw-r--r--  net/ceph/osd_client.c | 9
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/flow_dissector.c | 2
-rw-r--r--  net/core/netpoll.c | 6
-rw-r--r--  net/core/sock.c | 50
-rw-r--r--  net/core/sock_diag.c | 3
-rw-r--r--  net/core/stream.c | 16
-rw-r--r--  net/dsa/switch.c | 3
-rw-r--r--  net/dsa/tag_8021q.c | 2
-rw-r--r--  net/ieee802154/6lowpan/reassembly.c | 2
-rw-r--r--  net/ieee802154/socket.c | 2
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/icmp.c | 10
-rw-r--r--  net/ipv4/igmp.c | 4
-rw-r--r--  net/ipv4/inet_fragment.c | 39
-rw-r--r--  net/ipv4/ip_fragment.c | 8
-rw-r--r--  net/ipv4/route.c | 17
-rw-r--r--  net/ipv4/tcp.c | 33
-rw-r--r--  net/ipv4/tcp_bpf.c | 6
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 19
-rw-r--r--  net/ipv6/mcast.c | 5
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r--  net/ipv6/reassembly.c | 2
-rw-r--r--  net/mac80211/cfg.c | 9
-rw-r--r--  net/mac80211/rx.c | 6
-rw-r--r--  net/mpls/mpls_iptunnel.c | 8
-rw-r--r--  net/ncsi/ncsi-cmd.c | 13
-rw-r--r--  net/ncsi/ncsi-rsp.c | 9
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 5
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 43
-rw-r--r--  net/netfilter/nf_flow_table_ip.c | 44
-rw-r--r--  net/netfilter/nf_tables_api.c | 19
-rw-r--r--  net/netfilter/nf_tables_offload.c | 17
-rw-r--r--  net/netfilter/nft_flow_offload.c | 15
-rw-r--r--  net/netfilter/xt_nfacct.c | 36
-rw-r--r--  net/netfilter/xt_physdev.c | 6
-rw-r--r--  net/openvswitch/conntrack.c | 20
-rw-r--r--  net/openvswitch/flow.c | 160
-rw-r--r--  net/openvswitch/flow.h | 1
-rw-r--r--  net/packet/af_packet.c | 7
-rw-r--r--  net/psample/psample.c | 2
-rw-r--r--  net/rds/ib.c | 16
-rw-r--r--  net/rds/ib.h | 1
-rw-r--r--  net/rds/ib_cm.c | 3
-rw-r--r--  net/rds/rdma_transport.c | 10
-rw-r--r--  net/rds/recv.c | 5
-rw-r--r--  net/rxrpc/af_rxrpc.c | 9
-rw-r--r--  net/rxrpc/ar-internal.h | 25
-rw-r--r--  net/rxrpc/call_event.c | 23
-rw-r--r--  net/rxrpc/call_object.c | 33
-rw-r--r--  net/rxrpc/conn_client.c | 44
-rw-r--r--  net/rxrpc/conn_event.c | 6
-rw-r--r--  net/rxrpc/conn_object.c | 2
-rw-r--r--  net/rxrpc/input.c | 359
-rw-r--r--  net/rxrpc/local_event.c | 4
-rw-r--r--  net/rxrpc/local_object.c | 104
-rw-r--r--  net/rxrpc/output.c | 9
-rw-r--r--  net/rxrpc/peer_event.c | 10
-rw-r--r--  net/rxrpc/protocol.h | 9
-rw-r--r--  net/rxrpc/recvmsg.c | 53
-rw-r--r--  net/rxrpc/rxkad.c | 32
-rw-r--r--  net/rxrpc/sendmsg.c | 13
-rw-r--r--  net/rxrpc/skbuff.c | 40
-rw-r--r--  net/sched/act_bpf.c | 2
-rw-r--r--  net/sched/act_connmark.c | 2
-rw-r--r--  net/sched/act_csum.c | 2
-rw-r--r--  net/sched/act_ct.c | 2
-rw-r--r--  net/sched/act_ctinfo.c | 2
-rw-r--r--  net/sched/act_gact.c | 2
-rw-r--r--  net/sched/act_ife.c | 2
-rw-r--r--  net/sched/act_ipt.c | 11
-rw-r--r--  net/sched/act_mirred.c | 2
-rw-r--r--  net/sched/act_mpls.c | 2
-rw-r--r--  net/sched/act_nat.c | 2
-rw-r--r--  net/sched/act_pedit.c | 2
-rw-r--r--  net/sched/act_police.c | 2
-rw-r--r--  net/sched/act_sample.c | 8
-rw-r--r--  net/sched/act_simple.c | 2
-rw-r--r--  net/sched/act_skbedit.c | 14
-rw-r--r--  net/sched/act_skbmod.c | 2
-rw-r--r--  net/sched/act_tunnel_key.c | 2
-rw-r--r--  net/sched/act_vlan.c | 2
-rw-r--r--  net/sched/sch_cbs.c | 19
-rw-r--r--  net/sched/sch_generic.c | 19
-rw-r--r--  net/sched/sch_taprio.c | 34
-rw-r--r--  net/sctp/sm_sideeffect.c | 2
-rw-r--r--  net/sctp/stream.c | 1
-rw-r--r--  net/smc/smc_tx.c | 6
-rw-r--r--  net/sunrpc/clnt.c | 47
-rw-r--r--  net/sunrpc/xprt.c | 7
-rw-r--r--  net/tipc/addr.c | 1
-rw-r--r--  net/tipc/link.c | 92
-rw-r--r--  net/tipc/msg.h | 8
-rw-r--r--  net/tls/tls_device.c | 9
-rw-r--r--  net/tls/tls_main.c | 2
-rw-r--r--  net/wireless/reg.c | 2
-rw-r--r--  net/wireless/util.c | 23
-rw-r--r--  net/xdp/xdp_umem.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 4
-rw-r--r--  security/keys/request_key.c | 2
-rw-r--r--  sound/core/seq/seq_clientmgr.c | 3
-rw-r--r--  sound/core/seq/seq_fifo.c | 17
-rw-r--r--  sound/core/seq/seq_fifo.h | 2
-rw-r--r--  sound/firewire/oxfw/oxfw-pcm.c | 2
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 1
-rw-r--r--  sound/pci/hda/patch_conexant.c | 17
-rw-r--r--  sound/usb/line6/pcm.c | 18
-rw-r--r--  sound/usb/mixer.c | 36
-rw-r--r--  sound/usb/mixer_quirks.c | 8
-rw-r--r--  sound/usb/pcm.c | 1
-rw-r--r--  tools/bpf/bpftool/common.c | 8
-rw-r--r--  tools/bpf/bpftool/prog.c | 4
-rw-r--r--  tools/hv/hv_kvp_daemon.c | 2
-rw-r--r--  tools/include/uapi/linux/bpf.h | 11
-rw-r--r--  tools/lib/bpf/libbpf.c | 33
-rw-r--r--  tools/power/x86/turbostat/Makefile | 3
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 101
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/Makefile | 3
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 | 2
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c | 28
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 6
-rw-r--r--  tools/testing/selftests/bpf/config | 1
-rw-r--r--  tools/testing/selftests/bpf/test_btf_dump.c | 7
-rw-r--r--  tools/testing/selftests/bpf/test_cgroup_storage.c | 6
-rw-r--r--  tools/testing/selftests/bpf/test_sock.c | 7
-rw-r--r--  tools/testing/selftests/bpf/verifier/loops1.c | 28
-rw-r--r--  tools/testing/selftests/kvm/include/evmcs.h | 2
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c | 16
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/vmx.c | 20
-rw-r--r--  tools/testing/selftests/kvm/x86_64/evmcs_test.c | 15
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 12
-rw-r--r--  tools/testing/selftests/kvm/x86_64/platform_info_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c | 32
-rwxr-xr-x  tools/testing/selftests/net/tcp_fastopen_backup_key.sh | 2
-rwxr-xr-x  tools/testing/selftests/netfilter/nft_flowtable.sh | 48
-rw-r--r--  tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py | 22
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json | 47
-rw-r--r--  virt/kvm/arm/mmio.c | 7
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c | 30
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c | 18
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c | 5
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c | 5
-rw-r--r--  virt/kvm/arm/vgic/vgic.c | 7
591 files changed, 5905 insertions, 3029 deletions
diff --git a/.mailmap b/.mailmap
index acba1a6163f1..afaad605284a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -64,6 +64,9 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
@@ -160,6 +163,8 @@ Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.co
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst
index f4c6121868c3..6768305e4c26 100644
--- a/Documentation/PCI/index.rst
+++ b/Documentation/PCI/index.rst
@@ -9,7 +9,7 @@ Linux PCI Bus Subsystem
    :numbered:
 
    pci
-   picebus-howto
+   pciebus-howto
    pci-iov-howto
    msi-howto
    acpi-info
diff --git a/Documentation/PCI/picebus-howto.rst b/Documentation/PCI/pciebus-howto.rst
index f882ff62c51f..f882ff62c51f 100644
--- a/Documentation/PCI/picebus-howto.rst
+++ b/Documentation/PCI/pciebus-howto.rst
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 47d981a86e2f..4c1971960afa 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4090,6 +4090,13 @@
 			Run specified binary instead of /init from the ramdisk,
 			used for early userspace startup. See initrd.
 
+	rdrand=		[X86]
+			force - Override the decision by the kernel to hide the
+			advertisement of RDRAND support (this affects
+			certain AMD processors because of buggy BIOS
+			support, specifically around the suspend/resume
+			path).
+
 	rdt=		[HW,X86,RDT]
 			Turn on/off individual RDT features. List is:
 			cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index a7d44e71019d..287b98708a40 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -39,7 +39,6 @@ Table : Subdirectories in /proc/sys/net
 802       E802 protocol       ax25       AX25
 ethernet  Ethernet protocol   rose       X.25 PLP layer
 ipv4      IP version 4        x25        X.25 protocol
-ipx       IPX                 token-ring IBM token ring
 bridge    Bridging            decnet     DEC net
 ipv6      IP version 6        tipc       TIPC
 ========= =================== = ========== ==================
@@ -401,33 +400,7 @@ interface.
 (network) that the route leads to, the router (may be directly connected), the
 route flags, and the device the route is using.
 
-
-5. IPX
-------
-
-The IPX protocol has no tunable values in proc/sys/net.
-
-The IPX protocol does, however, provide proc/net/ipx. This lists each IPX
-socket giving the local and remote addresses in Novell format (that is
-network:node:port). In accordance with the strange Novell tradition,
-everything but the port is in hex. Not_Connected is displayed for sockets that
-are not tied to a specific remote address. The Tx and Rx queue sizes indicate
-the number of bytes pending for transmission and reception. The state
-indicates the state the socket is in and the uid is the owning uid of the
-socket.
-
-The /proc/net/ipx_interface file lists all IPX interfaces. For each interface
-it gives the network number, the node number, and indicates if the network is
-the primary network. It also indicates which device it is bound to (or
-Internal for internal networks) and the Frame Type if appropriate. Linux
-supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for
-IPX.
-
-The /proc/net/ipx_route table holds a list of IPX routes. For each route it
-gives the destination network, the router node (or Directly) and the network
-address of the router (or Connected) for internal networks.
-
-6. TIPC
+5. TIPC
 -------
 
 tipc_rmem
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 09fc02b99845..a5c1db95b3ec 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -1,20 +1,30 @@
 * ARC-HS Interrupt Distribution Unit
 
-This optional 2nd level interrupt controller can be used in SMP configurations for
-dynamic IRQ routing, load balancing of common/external IRQs towards core intc.
+This optional 2nd level interrupt controller can be used in SMP configurations
+for dynamic IRQ routing, load balancing of common/external IRQs towards core
+intc.
 
 Properties:
 
 - compatible: "snps,archs-idu-intc"
 - interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
+- #interrupt-cells: Must be <1> or <2>.
 
-  Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
-  of the particular interrupt line of IDU corresponds to the line N+24 of the
-  core interrupt controller.
+  Value of the first cell specifies the "common" IRQ from peripheral to IDU.
+  Number N of the particular interrupt line of IDU corresponds to the line N+24
+  of the core interrupt controller.
 
-  intc accessed via the special ARC AUX register interface, hence "reg" property
-  is not specified.
+  The (optional) second cell specifies any of the following flags:
+    - bits[3:0] trigger type and level flags
+        1 = low-to-high edge triggered
+        2 = NOT SUPPORTED (high-to-low edge triggered)
+        4 = active high level-sensitive <<< DEFAULT
+        8 = NOT SUPPORTED (active low level-sensitive)
+    When no second cell is specified, the interrupt is assumed to be level
+    sensitive.
+
+  The interrupt controller is accessed via the special ARC AUX register
+  interface, hence "reg" property is not specified.
 
 Example:
 	core_intc: core-interrupt-controller {
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
index 4ac21cef370e..113e7ac79aad 100644
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -12,6 +12,7 @@ Required properties:
12 - "microchip,ksz8565" 12 - "microchip,ksz8565"
13 - "microchip,ksz9893" 13 - "microchip,ksz9893"
14 - "microchip,ksz9563" 14 - "microchip,ksz9563"
15 - "microchip,ksz8563"
15 16
16Optional properties: 17Optional properties:
17 18
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 63c73fafe26d..0b61a90f1592 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -15,10 +15,10 @@ Required properties:
15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. 15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. 16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
17 Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. 17 Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
18 Use "sifive,fu540-macb" for SiFive FU540-C000 SoC. 18 Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
19 Or the generic form: "cdns,emac". 19 Or the generic form: "cdns,emac".
20- reg: Address and length of the register set for the device 20- reg: Address and length of the register set for the device
21 For "sifive,fu540-macb", second range is required to specify the 21 For "sifive,fu540-c000-gem", second range is required to specify the
22 address and length of the registers for GEMGXL Management block. 22 address and length of the registers for GEMGXL Management block.
23- interrupts: Should contain macb interrupt 23- interrupts: Should contain macb interrupt
24- phy-mode: See ethernet.txt file in the same directory. 24- phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index b70b70dc4524..0dd3f748239f 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -506,21 +506,3 @@ Drivers should ignore the changes to TLS the device feature flags.
 These flags will be acted upon accordingly by the core ``ktls`` code.
 TLS device feature flags only control adding of new TLS connection
 offloads, old connections will remain active after flags are cleared.
-
-Known bugs
-==========
-
-skb_orphan() leaks clear text
------------------------------
-
-Currently drivers depend on the :c:member:`sk` member of
-:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
-encryption. Any operation which removes or does not preserve the socket
-association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
-will cause the driver to miss the packets and lead to clear text leaks.
-
-Redirects leak clear text
--------------------------
-
-In the RX direction, if segment has already been decrypted by the device
-and it gets redirected or mirrored - clear text will be transmitted out.
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index 949d5dcdd9a3..0104830d5075 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -204,8 +204,8 @@ Ethernet device, which instead of receiving packets from a physical
204media, receives them from user space program and instead of sending 204media, receives them from user space program and instead of sending
205packets via physical media sends them to the user space program. 205packets via physical media sends them to the user space program.
206 206
207Let's say that you configured IPX on the tap0, then whenever 207Let's say that you configured IPv6 on the tap0, then whenever
208the kernel sends an IPX packet to tap0, it is passed to the application 208the kernel sends an IPv6 packet to tap0, it is passed to the application
209(VTun for example). The application encrypts, compresses and sends it to 209(VTun for example). The application encrypts, compresses and sends it to
210the other side over TCP or UDP. The application on the other side decompresses 210the other side over TCP or UDP. The application on the other side decompresses
211and decrypts the data received and writes the packet to the TAP device, 211and decrypts the data received and writes the packet to the TAP device,
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
new file mode 100644
index 000000000000..d37cbc502936
--- /dev/null
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -0,0 +1,279 @@
+Embargoed hardware issues
+=========================
+
+Scope
+-----
+
+Hardware issues which result in security problems are a different category
+of security bugs than pure software bugs which only affect the Linux
+kernel.
+
+Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
+differently because they usually affect all Operating Systems ("OS") and
+therefore need coordination across different OS vendors, distributions,
+hardware vendors and other parties. For some of the issues, software
+mitigations can depend on microcode or firmware updates, which need further
+coordination.
+
+.. _Contact:
+
+Contact
+-------
+
+The Linux kernel hardware security team is separate from the regular Linux
+kernel security team.
+
+The team only handles the coordination of embargoed hardware security
+issues. Reports of pure software security bugs in the Linux kernel are not
+handled by this team and the reporter will be guided to contact the regular
+Linux kernel security team (:ref:`Documentation/admin-guide/
+<securitybugs>`) instead.
+
+The team can be contacted by email at <hardware-security@kernel.org>. This
+is a private list of security officers who will help you to coordinate an
+issue according to our documented process.
+
+The list is encrypted and email to the list can be sent by either PGP or
+S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME
+certificate. The list's PGP key and S/MIME certificate are available from
+https://www.kernel.org/....
+
+While hardware security issues are often handled by the affected hardware
+vendor, we welcome contact from researchers or individuals who have
+identified a potential hardware flaw.
+
+Hardware security officers
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The current team of hardware security officers:
+
+  - Linus Torvalds (Linux Foundation Fellow)
+  - Greg Kroah-Hartman (Linux Foundation Fellow)
+  - Thomas Gleixner (Linux Foundation Fellow)
+
+Operation of mailing-lists
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The encrypted mailing-lists which are used in our process are hosted on
+Linux Foundation's IT infrastructure. By providing this service Linux
+Foundation's director of IT Infrastructure security technically has the
+ability to access the embargoed information, but is obliged to
+confidentiality by his employment contract. Linux Foundation's director of
+IT Infrastructure security is also responsible for the kernel.org
+infrastructure.
+
+The Linux Foundation's current director of IT Infrastructure security is
+Konstantin Ryabitsev.
+
+
+Non-disclosure agreements
+-------------------------
+
+The Linux kernel hardware security team is not a formal body and therefore
+unable to enter into any non-disclosure agreements. The kernel community
+is aware of the sensitive nature of such issues and offers a Memorandum of
+Understanding instead.
+
+
+Memorandum of Understanding
+---------------------------
+
+The Linux kernel community has a deep understanding of the requirement to
+keep hardware security issues under embargo for coordination between
+different OS vendors, distributors, hardware vendors and other parties.
+
+The Linux kernel community has successfully handled hardware security
+issues in the past and has the necessary mechanisms in place to allow
+community compliant development under embargo restrictions.
+
+The Linux kernel community has a dedicated hardware security team for
+initial contact, which oversees the process of handling such issues under
+embargo rules.
+
+The hardware security team identifies the developers (domain experts) who
+will form the initial response team for a particular issue. The initial
+response team can bring in further developers (domain experts) to address
+the issue in the best technical way.
+
+All involved developers pledge to adhere to the embargo rules and to keep
+the received information confidential. Violation of the pledge will lead to
+immediate exclusion from the current issue and removal from all related
+mailing-lists. In addition, the hardware security team will also exclude
+the offender from future issues. The impact of this consequence is a highly
+effective deterrent in our community. In case a violation happens the
+hardware security team will inform the involved parties immediately. If you
+or anyone becomes aware of a potential violation, please report it
+immediately to the Hardware security officers.
+
+
+Process
+^^^^^^^
+
+Due to the globally distributed nature of Linux kernel development,
+face-to-face meetings are almost impossible to address hardware security
+issues. Phone conferences are hard to coordinate due to time zones and
+other factors and should be only used when absolutely necessary. Encrypted
+email has been proven to be the most effective and secure communication
+method for these types of issues.
+
+Start of Disclosure
+"""""""""""""""""""
+
+Disclosure starts by contacting the Linux kernel hardware security team by
+email. This initial contact should contain a description of the problem and
+a list of any known affected hardware. If your organization builds or
+distributes the affected hardware, we encourage you to also consider what
+other hardware could be affected.
+
+The hardware security team will provide an incident-specific encrypted
+mailing-list which will be used for initial discussion with the reporter,
+further disclosure and coordination.
+
+The hardware security team will provide the disclosing party a list of
+developers (domain experts) who should be informed initially about the
+issue after confirming with the developers that they will adhere to this
+Memorandum of Understanding and the documented process. These developers
+form the initial response team and will be responsible for handling the
+issue after initial contact. The hardware security team is supporting the
+response team, but is not necessarily involved in the mitigation
+development process.
+
+While individual developers might be covered by a non-disclosure agreement
+via their employer, they cannot enter individual non-disclosure agreements
+in their role as Linux kernel developers. They will, however, agree to
+adhere to this documented process and the Memorandum of Understanding.
+
+
+Disclosure
+""""""""""
+
+The disclosing party provides detailed information to the initial response
+team via the specific encrypted mailing-list.
+
+From our experience the technical documentation of these issues is usually
+a sufficient starting point and further technical clarification is best
+done via email.
+
+Mitigation development
+""""""""""""""""""""""
+
+The initial response team sets up an encrypted mailing-list or repurposes
+an existing one if appropriate. The disclosing party should provide a list
+of contacts for all other parties who have already been, or should be
+informed about the issue. The response team contacts these parties so they
+can name experts who should be subscribed to the mailing-list.
+
+Using a mailing-list is close to the normal Linux development process and
+has been successfully used in developing mitigations for various hardware
+security issues in the past.
+
+The mailing-list operates in the same way as normal Linux development.
+Patches are posted, discussed and reviewed and if agreed on applied to a
+non-public git repository which is only accessible to the participating
+developers via a secure connection. The repository contains the main
+development branch against the mainline kernel and backport branches for
+stable kernel versions as necessary.
+
+The initial response team will identify further experts from the Linux
+kernel developer community as needed and inform the disclosing party about
+their participation. Bringing in experts can happen at any time of the
+development process and often needs to be handled in a timely manner.
+
+Coordinated release
+"""""""""""""""""""
+
+The involved parties will negotiate the date and time where the embargo
+ends. At that point the prepared mitigations are integrated into the
+relevant kernel trees and published.
+
+While we understand that hardware security issues need coordinated embargo
+time, the embargo time should be constrained to the minimum time which is
+required for all involved parties to develop, test and prepare the
+mitigations. Extending embargo time artificially to meet conference talk
+dates or other non-technical reasons is creating more work and burden for
+the involved developers and response teams as the patches need to be kept
+up to date in order to follow the ongoing upstream kernel development,
+which might create conflicting changes.
+
+CVE assignment
+""""""""""""""
+
+Neither the hardware security team nor the initial response team assign
+CVEs, nor are CVEs required for the development process. If CVEs are
+provided by the disclosing party they can be used for documentation
+purposes.
+
+Process ambassadors
+-------------------
+
+For assistance with this process we have established ambassadors in various
+organizations, who can answer questions about or provide guidance on the
+reporting process and further handling. Ambassadors are not involved in the
+disclosure of a particular issue, unless requested by a response team or by
+an involved disclosed party. The current ambassadors list:
+
+  ============= ========================================================
+  ARM
+  AMD
+  IBM
+  Intel
+  Qualcomm
+
+  Microsoft
+  VMware
+  XEN
+
+  Canonical     Tyler Hicks <tyhicks@canonical.com>
+  Debian        Ben Hutchings <ben@decadent.org.uk>
+  Oracle        Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+  Red Hat       Josh Poimboeuf <jpoimboe@redhat.com>
+  SUSE          Jiri Kosina <jkosina@suse.cz>
+
+  Amazon
+  Google
+  ============= ========================================================
+
+If you want your organization to be added to the ambassadors list, please
+contact the hardware security team. The nominated ambassador has to
+understand and support our process fully and is ideally well connected in
+the Linux kernel community.
+
+Encrypted mailing-lists
+-----------------------
+
+We use encrypted mailing-lists for communication. The operating principle
+of these lists is that email sent to the list is encrypted either with the
+list's PGP key or with the list's S/MIME certificate. The mailing-list
+software decrypts the email and re-encrypts it individually for each
+subscriber with the subscriber's PGP key or S/MIME certificate. Details
+about the mailing-list software and the setup which is used to ensure the
+security of the lists and protection of the data can be found here:
+https://www.kernel.org/....
+
+List keys
+^^^^^^^^^
+
+For initial contact see :ref:`Contact`. For incident specific mailing-lists
+the key and S/MIME certificate are conveyed to the subscribers by email
+sent from the specific list.
+
+Subscription to incident specific lists
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Subscription is handled by the response teams. Disclosed parties who want
+to participate in the communication send a list of potential subscribers to
+the response team so the response team can validate subscription requests.
+
+Each subscriber needs to send a subscription request to the response team
+by email. The email must be signed with the subscriber's PGP key or S/MIME
+certificate. If a PGP key is used, it must be available from a public key
+server and is ideally connected to the Linux kernel's PGP web of trust. See
+also: https://www.kernel.org/signature.html.
+
+The response team verifies that the subscriber request is valid and adds
+the subscriber to the list. After subscription the subscriber will receive
+email from the mailing-list which is signed either with the list's PGP key
+or the list's S/MIME certificate. The subscriber's email client can extract
+the PGP key or the S/MIME certificate from the signature so the subscriber
+can send encrypted email to the list.
+
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 878ebfda7eef..e2c9ffc682c5 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -45,6 +45,7 @@ Other guides to the community that are of interest to most developers are:
    submit-checklist
    kernel-docs
    deprecated
+   embargoed-hardware-issues
 
 These are some overall technical guides that have been put here for now for
 lack of a better place.
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c343c2f4ce1..d18e4a2d366a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -183,7 +183,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	drivers/net/ethernet/realtek/r8169.c
+F:	drivers/net/ethernet/realtek/r8169*
 
 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -683,7 +683,7 @@ S: Maintained
 F:	drivers/crypto/sunxi-ss/
 
 ALLWINNER VPU DRIVER
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
@@ -1408,7 +1408,7 @@ S: Maintained
 F:	drivers/clk/sunxi/
 
 ARM/Allwinner sunXi SoC support
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Chen-Yu Tsai <wens@csie.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -3577,7 +3577,7 @@ F: Documentation/filesystems/caching/cachefiles.txt
 F:	fs/cachefiles/
 
 CADENCE MIPI-CSI2 BRIDGES
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/cdns,*.txt
@@ -5290,7 +5290,7 @@ F: include/linux/vga*
 
 DRM DRIVERS AND MISC GPU PATCHES
 M:	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Sean Paul <sean@poorly.run>
 W:	https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 S:	Maintained
@@ -5303,7 +5303,7 @@ F: include/uapi/drm/drm*
 F:	include/linux/vga*
 
 DRM DRIVERS FOR ALLWINNER A10
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
 F:	drivers/gpu/drm/sun4i/
@@ -6060,7 +6060,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
 M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	Documentation/ABI/testing/sysfs-bus-mdio
+F:	Documentation/ABI/testing/sysfs-class-net-phydev
 F:	Documentation/devicetree/bindings/net/ethernet-phy.yaml
 F:	Documentation/devicetree/bindings/net/mdio*
 F:	Documentation/networking/phy.rst
@@ -7508,7 +7508,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER
 M:	Gregory CLEMENT <gregory.clement@bootlin.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+F:	Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
 F:	drivers/i2c/busses/i2c-mv64xxx.c
 
 I2C OVER PARALLEL PORT
@@ -8449,11 +8449,6 @@ S: Maintained
 F:	fs/io_uring.c
 F:	include/uapi/linux/io_uring.h
 
-IP MASQUERADING
-M:	Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
-S:	Maintained
-F:	net/ipv4/netfilter/ipt_MASQUERADE.c
-
 IPMI SUBSYSTEM
 M:	Corey Minyard <minyard@acm.org>
 L:	openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -8827,14 +8822,6 @@ F: virt/kvm/*
 F:	tools/kvm/
 F:	tools/testing/selftests/kvm/
 
-KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
-M:	Joerg Roedel <joro@8bytes.org>
-L:	kvm@vger.kernel.org
-W:	http://www.linux-kvm.org/
-S:	Maintained
-F:	arch/x86/include/asm/svm.h
-F:	arch/x86/kvm/svm.c
-
 KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M:	Marc Zyngier <maz@kernel.org>
 R:	James Morse <james.morse@arm.com>
@@ -8877,7 +8864,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com>
 M:	Janosch Frank <frankja@linux.ibm.com>
 R:	David Hildenbrand <david@redhat.com>
 R:	Cornelia Huck <cohuck@redhat.com>
-L:	linux-s390@vger.kernel.org
+L:	kvm@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 S:	Supported
@@ -8892,6 +8879,11 @@ F: tools/testing/selftests/kvm/*/s390x/
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M:	Paolo Bonzini <pbonzini@redhat.com>
 M:	Radim Krčmář <rkrcmar@redhat.com>
+R:	Sean Christopherson <sean.j.christopherson@intel.com>
+R:	Vitaly Kuznetsov <vkuznets@redhat.com>
+R:	Wanpeng Li <wanpengli@tencent.com>
+R:	Jim Mattson <jmattson@google.com>
+R:	Joerg Roedel <joro@8bytes.org>
 L:	kvm@vger.kernel.org
 W:	http://www.linux-kvm.org
 T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -8899,8 +8891,12 @@ S: Supported
 F:	arch/x86/kvm/
 F:	arch/x86/kvm/*/
 F:	arch/x86/include/uapi/asm/kvm*
+F:	arch/x86/include/uapi/asm/vmx.h
+F:	arch/x86/include/uapi/asm/svm.h
 F:	arch/x86/include/asm/kvm*
 F:	arch/x86/include/asm/pvclock-abi.h
+F:	arch/x86/include/asm/svm.h
+F:	arch/x86/include/asm/vmx.h
 F:	arch/x86/kernel/kvm.c
 F:	arch/x86/kernel/kvmclock.c
 
@@ -9228,6 +9224,18 @@ F: include/linux/nd.h
 F:	include/linux/libnvdimm.h
 F:	include/uapi/linux/ndctl.h
 
+LICENSES and SPDX stuff
+M:	Thomas Gleixner <tglx@linutronix.de>
+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+L:	linux-spdx@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
+F:	COPYING
+F:	Documentation/process/license-rules.rst
+F:	LICENSES/
+F:	scripts/spdxcheck-test.sh
+F:	scripts/spdxcheck.py
+
 LIGHTNVM PLATFORM SUPPORT
 M:	Matias Bjorling <mb@lightnvm.io>
 W:	http://github/OpenChannelSSD
@@ -11080,7 +11088,7 @@ NET_FAILOVER MODULE
 M:	Sridhar Samudrala <sridhar.samudrala@intel.com>
 L:	netdev@vger.kernel.org
 S:	Supported
-F:	driver/net/net_failover.c
+F:	drivers/net/net_failover.c
 F:	include/net/net_failover.h
 F:	Documentation/networking/net_failover.rst
 
@@ -14472,6 +14480,7 @@ F: drivers/net/phy/phylink.c
 F:	drivers/net/phy/sfp*
 F:	include/linux/phylink.h
 F:	include/linux/sfp.h
+K:	phylink
 
 SGI GRU DRIVER
 M:	Dimitri Sivanich <sivanich@sgi.com>
@@ -14877,9 +14886,9 @@ F: include/linux/arm_sdei.h
 F:	include/uapi/linux/arm_sdei.h
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
-M:	Shaohua Li <shli@kernel.org>
+M:	Song Liu <song@kernel.org>
 L:	linux-raid@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
 S:	Supported
 F:	drivers/md/Makefile
 F:	drivers/md/Kconfig
diff --git a/Makefile b/Makefile
index 9fa18613566f..0cbe8717bdb3 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Bobtail Squid
 
 # *DOCUMENTATION*
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index a83c4f5e928b..8483a86c743d 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -12,3 +12,6 @@ dtb-y := $(builtindtb-y).dtb
 # for CONFIG_OF_ALL_DTBS test
 dtstree	:= $(srctree)/$(src)
 dtb-	:= $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+
+# board-specific dtc flags
+DTC_FLAGS_hsdk += --pad 20
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index f5ae394ebe06..41b16f21beec 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -256,7 +256,7 @@
 
 .macro FAKE_RET_FROM_EXCPN
 	lr	r9, [status32]
-	bic	r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+	bic	r9, r9, STATUS_AE_MASK
 	or	r9, r9, STATUS_IE_MASK
 	kflag	r9
 .endm
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index a0eeb9f8f0a9..d9ee43c6b7db 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -62,15 +62,15 @@
 #else	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARC_HAS_ICCM
-#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#define __arcfp_code __section(.text.arcfp)
 #else
-#define __arcfp_code __attribute__((__section__(".text")))
+#define __arcfp_code __section(.text)
 #endif
 
 #ifdef CONFIG_ARC_HAS_DCCM
-#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#define __arcfp_data __section(.data.arcfp)
 #else
-#define __arcfp_data __attribute__((__section__(".data")))
+#define __arcfp_data __section(.data)
 #endif
 
 #endif	/* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 8ac0e2ac3e70..73746ed5b834 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
  */
 #define MACHINE_START(_type, _name)			\
 static const struct machine_desc __mach_desc_##_type	\
-__used							\
-__attribute__((__section__(".arch.info.init"))) = {	\
+__used __section(.arch.info.init) = {			\
 	.name		= _name,
 
 #define MACHINE_END				\
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 18b493dfb3a8..abf9398cc333 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -202,8 +202,8 @@ static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
 	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
 }
 
-static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
-			 unsigned int distr)
+static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
+			 bool set_distr, unsigned int distr)
 {
 	union {
 		unsigned int word;
@@ -212,8 +212,11 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
 		};
 	} data;
 
-	data.distr = distr;
-	data.lvl = lvl;
+	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
+	if (set_distr)
+		data.distr = distr;
+	if (set_lvl)
+		data.lvl = lvl;
 	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
 }
 
@@ -240,6 +243,25 @@ static void idu_irq_unmask(struct irq_data *data)
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
+static void idu_irq_ack(struct irq_data *data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
+static void idu_irq_mask_ack(struct irq_data *data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
+	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+}
+
 static int
 idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 		     bool force)
@@ -263,13 +285,36 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
 	else
 		distribution_mode = IDU_M_DISTRI_RR;
 
-	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode);
+	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
 
 	raw_spin_unlock_irqrestore(&mcip_lock, flags);
 
 	return IRQ_SET_MASK_OK;
 }
 
+static int idu_irq_set_type(struct irq_data *data, u32 type)
+{
+	unsigned long flags;
+
+	/*
+	 * ARCv2 IDU HW does not support inverse polarity, so these are the
+	 * only interrupt types supported.
+	 */
+	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&mcip_lock, flags);
+
+	idu_set_mode(data->hwirq, true,
+		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
+						   IDU_M_TRIG_LEVEL,
+		     false, 0);
+
+	raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
+	return 0;
+}
+
 static void idu_irq_enable(struct irq_data *data)
 {
 	/*
@@ -289,7 +334,10 @@ static struct irq_chip idu_irq_chip = {
 	.name			= "MCIP IDU Intc",
 	.irq_mask		= idu_irq_mask,
 	.irq_unmask		= idu_irq_unmask,
+	.irq_ack		= idu_irq_ack,
+	.irq_mask_ack		= idu_irq_mask_ack,
 	.irq_enable		= idu_irq_enable,
+	.irq_set_type		= idu_irq_set_type,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= idu_irq_set_affinity,
 #endif
@@ -317,7 +365,7 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t
 }
 
 static const struct irq_domain_ops idu_irq_ops = {
-	.xlate	= irq_domain_xlate_onecell,
+	.xlate	= irq_domain_xlate_onetwocell,
 	.map	= idu_irq_map,
 };
 
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index c2663fce7f6c..dc05a63516f5 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -572,6 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
 #else
 		BUILD_BUG_ON(sizeof(u32) != sizeof(value));
 #endif
+		/* Fall through */
 	case DW_EH_PE_native:
 		if (end < (const void *)(ptr.pul + 1))
 			return 0;
@@ -826,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
 		case DW_CFA_def_cfa:
 			state->cfa.reg = get_uleb128(&ptr.p8, end);
 			unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
-			/*nobreak*/
+			/* fall through */
 		case DW_CFA_def_cfa_offset:
 			state->cfa.offs = get_uleb128(&ptr.p8, end);
 			unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -834,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
 			break;
 		case DW_CFA_def_cfa_sf:
 			state->cfa.reg = get_uleb128(&ptr.p8, end);
-			/*nobreak */
+			/* fall through */
 		case DW_CFA_def_cfa_offset_sf:
 			state->cfa.offs = get_sleb128(&ptr.p8, end)
 							* state->dataAlign;
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 62c210e7ee4c..70a3fbe79fba 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -101,7 +101,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (is_isa_arcv2() && ioc_enable && coherent)
 		dev->dma_coherent = true;
 
-	dev_info(dev, "use %sncoherent DMA ops\n",
+	dev_info(dev, "use %scoherent DMA ops\n",
 		 dev->dma_coherent ? "" : "non");
 }
 
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 7dd2dd335cf6..0b961a2a10b8 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -6,11 +6,15 @@
  */
 
 #include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
 #include <linux/smp.h>
 #include <asm/arcregs.h>
 #include <asm/io.h>
 #include <asm/mach_desc.h>
 
+int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
+
 #define ARC_CCM_UNUSED_ADDR	0x60000000
 
 static void __init hsdk_init_per_cpu(unsigned int cpu)
@@ -97,6 +101,42 @@ static void __init hsdk_enable_gpio_intc_wire(void)
 	iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
 }
 
+static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
+{
+	void *fdt = initial_boot_params;
+	const void *prop;
+	int node, ret;
+	bool dt_coh_set;
+
+	node = fdt_path_offset(fdt, path);
+	if (node < 0)
+		goto tweak_fail;
+
+	prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
+	if (!prop && ret != -FDT_ERR_NOTFOUND)
+		goto tweak_fail;
+
+	dt_coh_set = ret != -FDT_ERR_NOTFOUND;
+	ret = 0;
+
+	/* need to remove "dma-coherent" property */
+	if (dt_coh_set && !coherent)
+		ret = fdt_delprop(fdt, node, "dma-coherent");
+
+	/* need to set "dma-coherent" property */
+	if (!dt_coh_set && coherent)
+		ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
+
+	if (ret < 0)
+		goto tweak_fail;
+
+	return 0;
+
+tweak_fail:
+	pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
+	return -EFAULT;
+}
+
 enum hsdk_axi_masters {
 	M_HS_CORE = 0,
 	M_HS_RTT,
@@ -162,6 +202,39 @@ enum hsdk_axi_masters {
 #define CREG_PAE	((void __iomem *)(CREG_BASE + 0x180))
 #define CREG_PAE_UPDT	((void __iomem *)(CREG_BASE + 0x194))
 
+static void __init hsdk_init_memory_bridge_axi_dmac(void)
+{
+	bool coherent = !!arc_hsdk_axi_dmac_coherent;
+	u32 axi_m_slv1, axi_m_oft1;
+
+	/*
+	 * Don't tweak memory bridge configuration if we failed to tweak DTB
+	 * as we will end up in a inconsistent state.
+	 */
+	if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
+		return;
+
+	if (coherent) {
+		axi_m_slv1 = 0x77999999;
+		axi_m_oft1 = 0x76DCBA98;
+	} else {
+		axi_m_slv1 = 0x77777777;
+		axi_m_oft1 = 0x76543210;
+	}
+
+	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
+	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
+	writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
+	writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
+	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
+
+	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
+	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
+	writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
+	writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
+	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
+}
+
 static void __init hsdk_init_memory_bridge(void)
 {
 	u32 reg;
@@ -227,24 +300,14 @@ static void __init hsdk_init_memory_bridge(void)
 	writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
 	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
 
-	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
-	writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
-	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
-	writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
-	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
-
-	writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
-	writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
-	writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
-	writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
-	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
-
 	writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
 	writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
 	writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
 	writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
 	writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
 
+	hsdk_init_memory_bridge_axi_dmac();
+
 	/*
 	 * PAE remapping for DMA clients does not work due to an RTL bug, so
 	 * CREG_PAE register must be programmed to all zeroes, otherwise it
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 33b00579beff..24360211534a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,6 +7,8 @@ config ARM
 	select ARCH_HAS_BINFMT_FLAT
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+	select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index ced1a19d5f89..46849d6ecb3e 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -185,7 +185,7 @@
 			uart0: serial@0 {
 				compatible = "ti,am3352-uart", "ti,omap3-uart";
 				clock-frequency = <48000000>;
-				reg = <0x0 0x2000>;
+				reg = <0x0 0x1000>;
 				interrupts = <72>;
 				status = "disabled";
 				dmas = <&edma 26 0>, <&edma 27 0>;
@@ -934,7 +934,7 @@
 			uart1: serial@0 {
 				compatible = "ti,am3352-uart", "ti,omap3-uart";
 				clock-frequency = <48000000>;
-				reg = <0x0 0x2000>;
+				reg = <0x0 0x1000>;
 				interrupts = <73>;
 				status = "disabled";
 				dmas = <&edma 28 0>, <&edma 29 0>;
@@ -966,7 +966,7 @@
 			uart2: serial@0 {
 				compatible = "ti,am3352-uart", "ti,omap3-uart";
 				clock-frequency = <48000000>;
-				reg = <0x0 0x2000>;
+				reg = <0x0 0x1000>;
 				interrupts = <74>;
 				status = "disabled";
 				dmas = <&edma 30 0>, <&edma 31 0>;
@@ -1614,7 +1614,7 @@
 			uart3: serial@0 {
 				compatible = "ti,am3352-uart", "ti,omap3-uart";
 				clock-frequency = <48000000>;
-				reg = <0x0 0x2000>;
+				reg = <0x0 0x1000>;
 				interrupts = <44>;
 				status = "disabled";
 			};
@@ -1644,7 +1644,7 @@
 			uart4: serial@0 {
 				compatible = "ti,am3352-uart", "ti,omap3-uart";
 				clock-frequency = <48000000>;
-				reg = <0x0 0x2000>;
+				reg = <0x0 0x1000>;
 				interrupts = <45>;
 				status = "disabled";
 			};
@@ -1674,7 +1674,7 @@
 			uart5: serial@0 {
 				compatible = "ti,am3352-uart", "ti,omap3-uart";
 				clock-frequency = <48000000>;
-				reg = <0x0 0x2000>;
+				reg = <0x0 0x1000>;
 				interrupts = <46>;
 				status = "disabled";
 			};
@@ -1758,6 +1758,8 @@
 
 		target-module@cc000 {			/* 0x481cc000, ap 60 46.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0xcc020 0x4>;
+			reg-names = "rev";
 			ti,hwmods = "d_can0";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
@@ -1780,6 +1782,8 @@
 
 		target-module@d0000 {			/* 0x481d0000, ap 62 42.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0xd0020 0x4>;
+			reg-names = "rev";
 			ti,hwmods = "d_can1";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index e5c2f71a7c77..fb6b8aa12cc5 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -234,13 +234,33 @@
 			interrupt-names = "edma3_tcerrint";
 		};
 
-		mmc3: mmc@47810000 {
-			compatible = "ti,omap4-hsmmc";
+		target-module@47810000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
 			ti,hwmods = "mmc3";
-			ti,needs-special-reset;
-			interrupts = <29>;
-			reg = <0x47810000 0x1000>;
-			status = "disabled";
+			reg = <0x478102fc 0x4>,
+			      <0x47810110 0x4>,
+			      <0x47810114 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_ENAWAKEUP |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x47810000 0x1000>;
+
+			mmc3: mmc@0 {
+				compatible = "ti,omap4-hsmmc";
+				ti,needs-special-reset;
+				interrupts = <29>;
+				reg = <0x0 0x1000>;
+			};
 		};
 
 		usb: usb@47400000 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 55aff4db9c7c..848e2a8884e2 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -228,13 +228,33 @@
 			interrupt-names = "edma3_tcerrint";
 		};
 
-		mmc3: mmc@47810000 {
-			compatible = "ti,omap4-hsmmc";
-			reg = <0x47810000 0x1000>;
+		target-module@47810000 {
+			compatible = "ti,sysc-omap2", "ti,sysc";
 			ti,hwmods = "mmc3";
-			ti,needs-special-reset;
-			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
-			status = "disabled";
+			reg = <0x478102fc 0x4>,
+			      <0x47810110 0x4>,
+			      <0x47810114 0x4>;
+			reg-names = "rev", "sysc", "syss";
+			ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+					 SYSC_OMAP2_ENAWAKEUP |
+					 SYSC_OMAP2_SOFTRESET |
+					 SYSC_OMAP2_AUTOIDLE)>;
+			ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+					<SYSC_IDLE_NO>,
+					<SYSC_IDLE_SMART>;
+			ti,syss-mask = <1>;
+			clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
+			clock-names = "fck";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0x0 0x47810000 0x1000>;
+
+			mmc3: mmc@0 {
+				compatible = "ti,omap4-hsmmc";
+				ti,needs-special-reset;
+				interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+				reg = <0x0 0x1000>;
+			};
 		};
 
 		sham: sham@53100000 {
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index 989cb60b9029..04bee4ff9dcb 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -1574,6 +1574,8 @@
 
 		target-module@cc000 {			/* 0x481cc000, ap 50 46.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0xcc020 0x4>;
+			reg-names = "rev";
 			ti,hwmods = "d_can0";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
@@ -1593,6 +1595,8 @@
 
 		target-module@d0000 {			/* 0x481d0000, ap 52 3a.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
+			reg = <0xd0020 0x4>;
+			reg-names = "rev";
 			ti,hwmods = "d_can1";
 			/* Domains (P, C): per_pwrdm, l4ls_clkdm */
 			clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index 1d5e99964bbf..0aaacea1d887 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -175,14 +175,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index c65d7f6d3b5a..ea1c119feaa5 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -16,14 +16,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index dc5141c35610..7935d70874ce 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -24,14 +24,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_default>;
-	pinctrl-3 = <&mmc1_pins_hs>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
-	pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index d02f5fa61e5f..bc76f1705c0f 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -379,7 +379,7 @@
 	};
 };
 
-&gpio7 {
+&gpio7_target {
 	ti,no-reset-on-init;
 	ti,no-idle-on-init;
 };
@@ -430,6 +430,7 @@
 
 	bus-width = <4>;
 	cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
+	no-1-8-v;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
index a374b5cd6db0..7b113b52c3fb 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
@@ -16,14 +16,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
 	vmmc-supply = <&vdd_3v3>;
 	vqmmc-supply = <&ldo1_reg>;
 };
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
index 4badd2144db9..30c500b15b21 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
@@ -16,14 +16,9 @@
 };
 
 &mmc1 {
-	pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
+	pinctrl-names = "default", "hs";
 	pinctrl-0 = <&mmc1_pins_default>;
 	pinctrl-1 = <&mmc1_pins_hs>;
-	pinctrl-2 = <&mmc1_pins_sdr12>;
-	pinctrl-3 = <&mmc1_pins_sdr25>;
-	pinctrl-4 = <&mmc1_pins_sdr50>;
-	pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
-	pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
 	vmmc-supply = <&vdd_3v3>;
 	vqmmc-supply = <&ldo1_reg>;
 };
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 714e971b912a..de7f85efaa51 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -498,7 +498,7 @@
 	phy-supply = <&ldousb_reg>;
 };
 
-&gpio7 {
+&gpio7_target {
 	ti,no-reset-on-init;
 	ti,no-idle-on-init;
 };
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 23faedec08ab..21e5914fdd62 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -1261,7 +1261,7 @@
 			};
 		};
 
-		target-module@51000 {			/* 0x48051000, ap 45 2e.0 */
+		gpio7_target: target-module@51000 {	/* 0x48051000, ap 45 2e.0 */
 			compatible = "ti,sysc-omap2", "ti,sysc";
 			ti,hwmods = "gpio7";
 			reg = <0x51000 0x4>,
@@ -3025,7 +3025,7 @@
 
 		target-module@80000 {			/* 0x48480000, ap 31 16.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			reg = <0x80000 0x4>;
+			reg = <0x80020 0x4>;
 			reg-names = "rev";
 			clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
 			clock-names = "fck";
@@ -4577,7 +4577,7 @@
 
 		target-module@c000 {			/* 0x4ae3c000, ap 30 04.0 */
 			compatible = "ti,sysc-omap4", "ti,sysc";
-			reg = <0xc000 0x4>;
+			reg = <0xc020 0x4>;
 			reg-names = "rev";
 			clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
 			clock-names = "fck";
diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
index 28ebb4eb884a..214b9e6de2c3 100644
--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
+++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
@@ -32,7 +32,7 @@
  *
  * Datamanual Revisions:
  *
- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016
+ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
  * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
  *
  */
@@ -229,45 +229,45 @@
 
 	mmc3_pins_default: mmc3_pins_default {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
 	mmc3_pins_hs: mmc3_pins_hs {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
 	mmc3_pins_sdr12: mmc3_pins_sdr12 {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
 	mmc3_pins_sdr25: mmc3_pins_sdr25 {
 		pinctrl-single,pins = <
-			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
-			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
-			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
-			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
-			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
-			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+			DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+			DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+			DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+			DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+			DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+			DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts
index 3fa0cbe456db..0f3870d3b099 100644
--- a/arch/arm/boot/dts/vf610-bk4.dts
+++ b/arch/arm/boot/dts/vf610-bk4.dts
@@ -246,13 +246,13 @@
 		reg = <0>;
 	};
 
-	n25q128a13_2: flash@1 {
+	n25q128a13_2: flash@2 {
 		compatible = "n25q128a13", "jedec,spi-nor";
 		#address-cells = <1>;
 		#size-cells = <1>;
 		spi-max-frequency = <66000000>;
 		spi-rx-bus-width = <2>;
-		reg = <1>;
+		reg = <2>;
 	};
 };
 
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 1d5210eb4776..582925238d65 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -66,7 +66,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
 
 1003:		ldr	r2, [sv_pc, #-4]	@ if stmfd sp!, {args} exists,
 		ldr	r3, .Ldsi+4		@ adjust saved 'pc' back one
-		teq	r3, r2, lsr #10		@ instruction
+		teq	r3, r2, lsr #11		@ instruction
 		subne	r0, sv_pc, #4		@ allow for mov
 		subeq	r0, sv_pc, #8		@ allow for mov + stmia
 
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 81159af44862..14a6c3eb3298 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -126,6 +126,8 @@ restart:
 	orr r11, r11, r13			@ mask all requested interrupts
 	str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
+	str r13, [r12, #OMAP1510_GPIO_INT_STATUS]	@ ack all requested interrupts
+
 	ands r10, r13, #KEYBRD_CLK_MASK		@ extract keyboard status - set?
 	beq hksw				@ no - try next source
 
@@ -133,7 +135,6 @@ restart:
 	@@@@@@@@@@@@@@@@@@@@@@
 	@ Keyboard clock FIQ mode interrupt handler
 	@ r10 now contains KEYBRD_CLK_MASK, use it
-	str r10, [r12, #OMAP1510_GPIO_INT_STATUS]	@ ack the interrupt
 	bic r11, r11, r10			@ unmask it
 	str r11, [r12, #OMAP1510_GPIO_INT_MASK]
 
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index 43899fa56674..0254eb9cf8c6 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -70,9 +70,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
 			 * interrupts default to since commit 80ac93c27441
 			 * requires interrupt already acked and unmasked.
 			 */
-			if (irq_chip->irq_ack)
-				irq_chip->irq_ack(d);
-			if (irq_chip->irq_unmask)
+			if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
 				irq_chip->irq_unmask(d);
 		}
 		for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index f9c02f9f1c92..5c3845730dbf 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void)
 	struct device_node *np;
 	struct gen_pool *sram_pool;
 
+	if (!soc_is_omap44xx() && !soc_is_omap54xx())
+		return 0;
+
 	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
 	if (!np)
 		pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 4a5b4aee6615..1ec21e9ba1e9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
 static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
 	.rev_offs	= 0x0,
 	.sysc_offs	= 0x4,
-	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			  SYSC_HAS_RESET_STATUS,
 	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 	.sysc_fields	= &omap_hwmod_sysc_type2,
 };
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 0ce56ad754ce..ea2c84214bac 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,6 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
 	switch (tag->u.acorn.vram_pages) {
 	case 512:
 		vram_size += PAGE_SIZE * 256;
+		/* Fall through - ??? */
 	case 256:
 		vram_size += PAGE_SIZE * 256;
 	default:
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c54cd7ed90ba..c1222c0e9fd3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -664,10 +664,6 @@ config ARM_LPAE
 		!CPU_32v4 && !CPU_32v3
 	select PHYS_ADDR_T_64BIT
 	select SWIOTLB
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
-	select ARCH_HAS_DMA_MMAP_PGPROT
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	help
 	  Say Y if you have an ARMv7 processor supporting the LPAE page
 	  table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 16d373d587c4..b4be3baa83d4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -175,6 +175,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
+	phys_addr_t addr = __pfn_to_phys(pfn);
+
+	if (__phys_to_pfn(addr) != pfn)
+		return 0;
+
 	return memblock_is_map_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
@@ -628,7 +633,8 @@ static void update_sections_early(struct section_perm perms[], int n)
 		if (t->flags & PF_KTHREAD)
 			continue;
 		for_each_thread(t, s)
-			set_section_perms(perms, n, true, s->mm);
+			if (s->mm)
+				set_section_perms(perms, n, true, s->mm);
 	}
 	set_section_perms(perms, n, true, current->active_mm);
 	set_section_perms(perms, n, true, &init_mm);
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index c7a87368850b..12aa7eaeaf68 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -339,6 +339,12 @@
 	pinctrl-names = "default";
 };
 
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
 &pwm_ef {
 	status = "okay";
 	pinctrl-0 = <&pwm_e_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index f8d43e3dcf20..1785552d450c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -2386,6 +2386,7 @@
 				clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
 				clock-names = "ddr";
 				phys = <&usb2_phy1>;
+				phy-names = "usb2-phy";
 				dr_mode = "peripheral";
 				g-rx-fifo-size = <192>;
 				g-np-tx-fifo-size = <128>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
index 81780ffcc7f0..4e916e1f71f7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
@@ -53,6 +53,7 @@
 
 		gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>;
 		enable-active-high;
+		regulator-always-on;
 	};
 
 	tf_io: gpio-regulator-tf_io {
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index 52cf96ea43e5..cbc7cdae1c6a 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
 	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
 		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
 			return 0x0000000000003CB0ull;
+		/* Else, fall through */
 	default:
 		return 0x0000000000023CB0ull;
 	}
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index fe61513982b4..330b19fcd990 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -316,6 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 			regs->uregs[0] = -EINTR;
 			break;
 		}
+		/* Else, fall through */
 	case -ERESTARTNOINTR:
 		regs->uregs[0] = regs->orig_r0;
 		regs->ipc -= 4;
@@ -360,6 +361,7 @@ static void do_signal(struct pt_regs *regs)
 		switch (regs->uregs[0]) {
 		case -ERESTART_RESTARTBLOCK:
 			regs->uregs[15] = __NR_restart_syscall;
+			/* Fall through */
 		case -ERESTARTNOHAND:
 		case -ERESTARTSYS:
 		case -ERESTARTNOINTR:
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a39b079e73f2..6d58c1739b42 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -2,6 +2,7 @@
 #ifndef _PARISC_PGTABLE_H
 #define _PARISC_PGTABLE_H
 
+#include <asm/page.h>
 #include <asm-generic/4level-fixup.h>
 
 #include <asm/fixmap.h>
@@ -98,8 +99,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 
 #endif /* !__ASSEMBLY__ */
 
-#include <asm/page.h>
-
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index e99a14798ab0..c4b606fe73eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -660,8 +660,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		}
 		tce = be64_to_cpu(tce);
 
-		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
-			return H_PARAMETER;
+		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
+			ret = H_PARAMETER;
+			goto unlock_exit;
+		}
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
 			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index f50bbeedfc66..b4f20f13b860 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -556,8 +556,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
 		ua = 0;
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
-			return H_PARAMETER;
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+			ret = H_PARAMETER;
+			goto unlock_exit;
+		}
 
 		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
 			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 9c66033c3a54..161f28d04a07 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -30,10 +30,6 @@ enum fixed_addresses {
 	__end_of_fixed_addresses
 };
 
-#define FIXADDR_SIZE	(__end_of_fixed_addresses * PAGE_SIZE)
-#define FIXADDR_TOP	(VMALLOC_START)
-#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
-
 #define FIXMAP_PAGE_IO	PAGE_KERNEL
 
 #define __early_set_fixmap	__set_fixmap
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index a364aba23d55..c24a083b3e12 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -420,14 +420,22 @@ static inline void pgtable_cache_init(void)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
 
+#define FIXADDR_TOP      VMALLOC_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+
 /*
- * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32.
+ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
  */
 #ifdef CONFIG_64BIT
 #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
 #else
-#define TASK_SIZE VMALLOC_START
+#define TASK_SIZE FIXADDR_START
 #endif
 
 #include <asm-generic/pgtable.h>
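The two RISC-V hunks above move the fixmap out of the vmalloc area and carve it from just below VMALLOC_START, which on RV32 also lowers TASK_SIZE. A hedged arithmetic sketch of the resulting 32-bit layout — the constants are invented for illustration, the real values come from the kernel configuration:

```c
#include <stdio.h>

/* Illustrative RV32 layout only; all sizes are assumptions. */
#define PAGE_OFFSET   0xc0000000UL
#define VMALLOC_SIZE  0x10000000UL		/* assumption */
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
#define PGDIR_SIZE    0x00400000UL		/* 4 MiB, assumption */

#define FIXADDR_TOP   VMALLOC_START
#define FIXADDR_SIZE  PGDIR_SIZE		/* RV32 case in the hunk */
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#define TASK_SIZE     FIXADDR_START		/* user space ends below the fixmap */

int main(void)
{
	printf("fixmap %#lx-%#lx, user space ends at %#lx\n",
	       FIXADDR_START, FIXADDR_TOP, TASK_SIZE);
	return 0;
}
```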
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e636728ab452..955eb355c2fd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		break;
 	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 		/* lcgr %dst,%dst */
-		EMIT4(0xb9130000, dst_reg, dst_reg);
+		EMIT4(0xb9030000, dst_reg, dst_reg);
 		break;
 	/*
 	 * BPF_FROM_BE/LE
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		/* llgf %w1,map.max_entries(%b2) */
 		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
 			      offsetof(struct bpf_array, map.max_entries));
-		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+		/* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+		EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
 				  REG_W1, 0, 0xa);
 
 		/*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		 * goto out;
 		 */
 
-		/* sllg %r1,%b3,3: %r1 = index * 8 */
-		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+		/* llgfr %r1,%b3: %r1 = (u32) index */
+		EMIT4(0xb9160000, REG_1, BPF_REG_3);
+		/* sllg %r1,%r1,3: %r1 *= 8 */
+		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
 		/* lg %r1,prog(%b2,%r1) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
 			      REG_1, offsetof(struct bpf_array, ptrs));
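The tail-call hunks make the JIT match the interpreter, which treats the index as 32 bits: the compare becomes a 32-bit `clrj` and the index is explicitly zero-extended (`llgfr`) before being scaled by 8. A userspace model of the semantics those instructions implement — a sketch, not the emitter itself:

```c
#include <stdint.h>
#include <stddef.h>

/* Model of the corrected tail-call bounds check: the index is taken
 * as u32 (llgfr = zero-extend) before the compare and the *8 scaling. */
void *tail_call_target(void **prog_array, uint32_t max_entries,
		       uint64_t index_reg)
{
	uint32_t index = (uint32_t)index_reg;	/* llgfr %r1,%b3 */

	if (index >= max_entries)		/* clrj: 32-bit compare */
		return NULL;			/* goto out */

	return prog_array[index];		/* sllg by 3, then lg */
}
```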
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
index 8574338bf23b..9991ec2371e4 100644
--- a/arch/um/include/shared/timer-internal.h
+++ b/arch/um/include/shared/timer-internal.h
@@ -34,10 +34,13 @@ static inline void time_travel_set_time(unsigned long long ns)
 	time_travel_time = ns;
 }
 
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
 {
 	time_travel_timer_mode = mode;
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
+{
 	time_travel_timer_expiry = expiry;
 }
 #else
@@ -50,8 +53,11 @@ static inline void time_travel_set_time(unsigned long long ns)
 {
 }
 
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
+{
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
 {
 }
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 67c0d1a860e9..6bede7888fc2 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -213,7 +213,7 @@ static void time_travel_sleep(unsigned long long duration)
 	if (time_travel_timer_mode != TT_TMR_DISABLED ||
 	    time_travel_timer_expiry < next) {
 		if (time_travel_timer_mode == TT_TMR_ONESHOT)
-			time_travel_set_timer(TT_TMR_DISABLED, 0);
+			time_travel_set_timer_mode(TT_TMR_DISABLED);
 		/*
 		 * time_travel_time will be adjusted in the timer
 		 * IRQ handler so it works even when the signal
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 6a051b078359..234757233355 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 static int itimer_shutdown(struct clock_event_device *evt)
 {
 	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_DISABLED, 0);
+		time_travel_set_timer_mode(TT_TMR_DISABLED);
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		os_timer_disable();
@@ -62,9 +62,10 @@ static int itimer_set_periodic(struct clock_event_device *evt)
 {
 	unsigned long long interval = NSEC_PER_SEC / HZ;
 
-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_PERIODIC,
-				      time_travel_time + interval);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_PERIODIC);
+		time_travel_set_timer_expiry(time_travel_time + interval);
+	}
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		os_timer_set_interval(interval);
@@ -77,9 +78,10 @@ static int itimer_next_event(unsigned long delta,
 {
 	delta += 1;
 
-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_ONESHOT,
-				      time_travel_time + delta);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_ONESHOT);
+		time_travel_set_timer_expiry(time_travel_time + delta);
+	}
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		return os_timer_one_shot(delta);
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 56e748a7679f..94df0868804b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
 
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
 export REALMODE_CFLAGS
 
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 5f2d03067ae5..c8862696a47b 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -72,6 +72,8 @@ static unsigned long find_trampoline_placement(void)
 
 	/* Find the first usable memory region under bios_start. */
 	for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+		unsigned long new = bios_start;
+
 		entry = &boot_params->e820_table[i];
 
 		/* Skip all entries above bios_start. */
@@ -84,15 +86,20 @@ static unsigned long find_trampoline_placement(void)
 
 		/* Adjust bios_start to the end of the entry if needed. */
 		if (bios_start > entry->addr + entry->size)
-			bios_start = entry->addr + entry->size;
+			new = entry->addr + entry->size;
 
 		/* Keep bios_start page-aligned. */
-		bios_start = round_down(bios_start, PAGE_SIZE);
+		new = round_down(new, PAGE_SIZE);
 
 		/* Skip the entry if it's too small. */
-		if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+		if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
 			continue;
 
+		/* Protect against underflow. */
+		if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
+			break;
+
+		bios_start = new;
 		break;
 	}
 
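The `find_trampoline_placement()` fix computes each candidate into a local `new` and only commits it to `bios_start` once it is known not to have wrapped around. A compact standalone model of the guard (constants and the helper name are assumptions for illustration):

```c
#include <stdint.h>

#define TRAMPOLINE_SIZE	0x2000UL
#define PAGE_MASK	(~0xfffUL)

/* Returns the adjusted placement, or 0 if the entry must be skipped.
 * Mirrors the hunk: work on a copy, guard against unsigned underflow,
 * then commit. */
unsigned long try_entry(unsigned long bios_start,
			unsigned long addr, unsigned long size)
{
	unsigned long new = bios_start;

	if (bios_start > addr + size)
		new = addr + size;

	new &= PAGE_MASK;			/* round_down(new, PAGE_SIZE) */

	if (new - TRAMPOLINE_SIZE < addr)	/* entry too small */
		return 0;

	if (new - TRAMPOLINE_SIZE > bios_start)	/* underflow wrapped past start */
		return 0;

	return new;
}
```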
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 62f317c9113a..5b35b7ea5d72 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -661,10 +661,17 @@ fail:
 
 	throttle = perf_event_overflow(event, &data, &regs);
 out:
-	if (throttle)
+	if (throttle) {
 		perf_ibs_stop(event, 0);
-	else
-		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+	} else {
+		period >>= 4;
+
+		if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+		    (*config & IBS_OP_CNT_CTL))
+			period |= *config & IBS_OP_CUR_CNT_RAND;
+
+		perf_ibs_enable_event(perf_ibs, hwc, period);
+	}
 
 	perf_event_update_userpage(event);
 
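With `IBS_OP_CNT_CTL` set the hardware preloads the low bits of the current count with random values; the fix re-seeds those bits from the sampled `IBS_OP_CUR_CNT_RAND` field instead of restarting from a truncated period. A hedged arithmetic sketch of that restart computation:

```c
#include <stdint.h>

#define IBS_OP_CUR_CNT_RAND	(0x0007FULL << 32)

/* Model: rebuild the restart period so the randomized low bits from
 * the previous sample are preserved. cnt_ctl stands in for "RDWROPCNT
 * supported and IBS_OP_CNT_CTL set". */
uint64_t next_period(uint64_t period, uint64_t config, int cnt_ctl)
{
	period >>= 4;

	if (cnt_ctl)
		period |= config & IBS_OP_CUR_CNT_RAND;

	return period;
}
```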
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 81b005e4c7d9..325959d19d9a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1236,7 +1236,7 @@ void x86_pmu_enable_event(struct perf_event *event)
  * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
- * but only if it can be scehduled with existing events.
+ * but only if it can be scheduled with existing events.
  */
 static int x86_pmu_add(struct perf_event *event, int flags)
 {
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 648260b5f367..e4c2cb65ea50 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
 	return left;
 }
 
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+	return max(left, 32ULL);
+}
+
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
@@ -4606,6 +4611,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
+		x86_pmu.limit_period = nhm_limit_period;
 
 		mem_attr = nhm_mem_events_attrs;
 
```
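`limit_period` lets a PMU clamp the minimum distance to the next overflow; the Nehalem hook above enforces at least 32 counts. A one-line portable model of the callback, shown only to make the clamp explicit:

```c
#include <stdint.h>

/* Model of the Nehalem limit_period callback: never program fewer
 * than 32 counts before the next PMI. */
static uint64_t nhm_limit_period_model(uint64_t left)
{
	return left > 32 ? left : 32;	/* max(left, 32ULL) */
}
```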
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index f5e90a849bca..9e5f3c722c33 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -59,7 +59,6 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 			BOOT_PARAM_PRESERVE(apm_bios_info),
 			BOOT_PARAM_PRESERVE(tboot_addr),
 			BOOT_PARAM_PRESERVE(ist_info),
-			BOOT_PARAM_PRESERVE(acpi_rsdp_addr),
 			BOOT_PARAM_PRESERVE(hd0_info),
 			BOOT_PARAM_PRESERVE(hd1_info),
 			BOOT_PARAM_PRESERVE(sys_desc_table),
@@ -71,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 			BOOT_PARAM_PRESERVE(eddbuf_entries),
 			BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
 			BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+			BOOT_PARAM_PRESERVE(hdr),
 			BOOT_PARAM_PRESERVE(e820_table),
 			BOOT_PARAM_PRESERVE(eddbuf),
 		};
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 287f1f7b2e52..c38a66661576 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -16,7 +16,6 @@
 #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 
 #ifndef __ASSEMBLY__
-extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
 extern void __fentry__(void);
 
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0278aa66ef62..fe7c205233f1 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -11,6 +11,21 @@
  * While adding a new CPUID for a new microarchitecture, add a new
  * group to keep logically sorted out in chronological order. Within
  * that group keep the CPUID for the variants sorted by model number.
+ *
+ * The defined symbol names have the following form:
+ *	INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF}
+ * where:
+ * OPTFAMILY	Describes the family of CPUs that this belongs to. Default
+ *		is assumed to be "_CORE" (and should be omitted). Other values
+ *		currently in use are _ATOM and _XEON_PHI
+ * MICROARCH	Is the code name for the micro-architecture for this core.
+ *		N.B. Not the platform name.
+ * OPTDIFF	If needed, a short string to differentiate by market segment.
+ *		Exact strings here will vary over time. _DESKTOP, _MOBILE, and
+ *		_X (short for Xeon server) should be used when they are
+ *		appropriate.
+ *
+ * The #define line may optionally include a comment including platform names.
  */
 
 #define INTEL_FAM6_CORE_YONAH		0x0E
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 6b4fc2788078..271d837d69a8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -381,6 +381,7 @@
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_TSC_RATIO		0xc0000104
 #define MSR_AMD64_NB_CFG		0xc001001f
+#define MSR_AMD64_CPUID_FN_1		0xc0011004
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 109f974f9835..80bc209c0708 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -192,7 +192,7 @@
 	"       lfence;\n"				\
 	"       jmp    902b;\n"				\
 	"       .align 16\n"				\
-	"903:	addl   $4, %%esp;\n"			\
+	"903:	lea    4(%%esp), %%esp;\n"		\
 	"       pushl  %[thunk_target];\n"		\
 	"       ret;\n"					\
 	"       .align 16\n"				\
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1392d5e6e8d6..ee26e9215f18 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -252,16 +252,20 @@ struct pebs_lbr {
252#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) 252#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
253#define IBSCTL_LVT_OFFSET_MASK 0x0F 253#define IBSCTL_LVT_OFFSET_MASK 0x0F
254 254
255/* ibs fetch bits/masks */ 255/* IBS fetch bits/masks */
256#define IBS_FETCH_RAND_EN (1ULL<<57) 256#define IBS_FETCH_RAND_EN (1ULL<<57)
257#define IBS_FETCH_VAL (1ULL<<49) 257#define IBS_FETCH_VAL (1ULL<<49)
258#define IBS_FETCH_ENABLE (1ULL<<48) 258#define IBS_FETCH_ENABLE (1ULL<<48)
259#define IBS_FETCH_CNT 0xFFFF0000ULL 259#define IBS_FETCH_CNT 0xFFFF0000ULL
260#define IBS_FETCH_MAX_CNT 0x0000FFFFULL 260#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
261 261
262/* ibs op bits/masks */ 262/*
263/* lower 4 bits of the current count are ignored: */ 263 * IBS op bits/masks
264#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32) 264 * The lower 7 bits of the current count are random bits
265 * preloaded by hardware and ignored in software
266 */
267#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
268#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
265#define IBS_OP_CNT_CTL (1ULL<<19) 269#define IBS_OP_CNT_CTL (1ULL<<19)
266#define IBS_OP_VAL (1ULL<<18) 270#define IBS_OP_VAL (1ULL<<18)
267#define IBS_OP_ENABLE (1ULL<<17) 271#define IBS_OP_ENABLE (1ULL<<17)
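The new masks split the 20-bit current-count field at bit 32: bits [38:32] are hardware-randomized and excluded from `IBS_OP_CUR_CNT`. A quick standalone check that the two masks are disjoint and together tile the whole field:

```c
#include <assert.h>
#include <stdint.h>

#define IBS_OP_CUR_CNT      (0xFFF80ULL << 32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL << 32)

int main(void)
{
	/* The two masks do not overlap... */
	assert((IBS_OP_CUR_CNT & IBS_OP_CUR_CNT_RAND) == 0);
	/* ...and their union is the full 20-bit field at bit 32. */
	assert((IBS_OP_CUR_CNT | IBS_OP_CUR_CNT_RAND) == (0xFFFFFULL << 32));
	return 0;
}
```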
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f5291362da1a..dba2828b779a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
 
 /*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
  */
 static void __init lapic_cal_handler(struct clock_event_device *dev)
 {
@@ -851,7 +851,8 @@ bool __init apic_needs_pit(void)
 static int __init calibrate_APIC_clock(void)
 {
 	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
-	void (*real_handler)(struct clock_event_device *dev);
+	u64 tsc_perj = 0, tsc_start = 0;
+	unsigned long jif_start;
 	unsigned long deltaj;
 	long delta, deltatsc;
 	int pm_referenced = 0;
@@ -878,28 +879,64 @@ static int __init calibrate_APIC_clock(void)
 	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
 		    "calibrating APIC timer ...\n");
 
+	/*
+	 * There are platforms w/o global clockevent devices. Instead of
+	 * making the calibration conditional on that, use a polling based
+	 * approach everywhere.
+	 */
 	local_irq_disable();
 
-	/* Replace the global interrupt handler */
-	real_handler = global_clock_event->event_handler;
-	global_clock_event->event_handler = lapic_cal_handler;
-
 	/*
 	 * Setup the APIC counter to maximum. There is no way the lapic
 	 * can underflow in the 100ms detection time frame
 	 */
 	__setup_APIC_LVTT(0xffffffff, 0, 0);
 
-	/* Let the interrupts run */
+	/*
+	 * Methods to terminate the calibration loop:
+	 *  1) Global clockevent if available (jiffies)
+	 *  2) TSC if available and frequency is known
+	 */
+	jif_start = READ_ONCE(jiffies);
+
+	if (tsc_khz) {
+		tsc_start = rdtsc();
+		tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+	}
+
+	/*
+	 * Enable interrupts so the tick can fire, if a global
+	 * clockevent device is available
+	 */
 	local_irq_enable();
 
-	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-		cpu_relax();
+	while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+		/* Wait for a tick to elapse */
+		while (1) {
+			if (tsc_khz) {
+				u64 tsc_now = rdtsc();
+				if ((tsc_now - tsc_start) >= tsc_perj) {
+					tsc_start += tsc_perj;
+					break;
+				}
+			} else {
+				unsigned long jif_now = READ_ONCE(jiffies);
 
-	local_irq_disable();
+				if (time_after(jif_now, jif_start)) {
+					jif_start = jif_now;
+					break;
+				}
+			}
+			cpu_relax();
+		}
 
-	/* Restore the real event handler */
-	global_clock_event->event_handler = real_handler;
+		/* Invoke the calibration routine */
+		local_irq_disable();
+		lapic_cal_handler(NULL);
+		local_irq_enable();
+	}
+
+	local_irq_disable();
 
 	/* Build delta t1-t2 as apic timer counts down */
 	delta = lapic_cal_t1 - lapic_cal_t2;
@@ -943,10 +980,11 @@ static int __init calibrate_APIC_clock(void)
 	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
 
 	/*
-	 * PM timer calibration failed or not turned on
-	 * so lets try APIC timer based calibration
+	 * PM timer calibration failed or not turned on so lets try APIC
+	 * timer based calibration, if a global clockevent device is
+	 * available.
 	 */
-	if (!pm_referenced) {
+	if (!pm_referenced && global_clock_event) {
 		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
 
 		/*
@@ -1141,6 +1179,10 @@ void clear_local_APIC(void)
 	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
 	v = apic_read(APIC_LVT1);
 	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
+	if (!x2apic_enabled()) {
+		v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+		apic_write(APIC_LDR, v);
+	}
 	if (maxlvt >= 4) {
 		v = apic_read(APIC_LVTPC);
 		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
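The calibration loop no longer hijacks `global_clock_event`: it polls the TSC (when `tsc_khz` is known) or `jiffies` to detect that one tick has elapsed, then calls the handler directly. A userspace model of the tick wait, with the clock source faked — everything here is an illustrative sketch:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for rdtsc(): a fake, monotonically increasing counter. */
static uint64_t read_tsc(void)
{
	static uint64_t fake;
	return fake += 1000;
}

/* Model of the polled tick wait: advance tsc_start by exactly one
 * tick period per iteration so rounding error does not accumulate. */
static void wait_one_tick(uint64_t *tsc_start, uint64_t tsc_per_tick)
{
	for (;;) {
		uint64_t now = read_tsc();

		if (now - *tsc_start >= tsc_per_tick) {
			*tsc_start += tsc_per_tick;	/* not: *tsc_start = now */
			break;
		}
		/* cpu_relax() would go here in the kernel */
	}
}

int main(void)
{
	uint64_t start = read_tsc();
	int i;

	for (i = 0; i < 100; i++)	/* stands in for LAPIC_CAL_LOOPS */
		wait_one_tick(&start, 100000);
	printf("calibration window elapsed, start=%llu\n",
	       (unsigned long long)start);
	return 0;
}
```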
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index afee386ff711..caedd8d60d36 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
 	return early_per_cpu(x86_cpu_to_apicid, cpu);
 }
 
-static inline unsigned long calculate_ldr(int cpu)
-{
-	unsigned long val, id;
-
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	id = per_cpu(x86_bios_cpu_apicid, cpu);
-	val |= SET_APIC_LOGICAL_ID(id);
-
-	return val;
-}
-
 /*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
  */
 static void bigsmp_init_apic_ldr(void)
 {
-	unsigned long val;
-	int cpu = smp_processor_id();
-
-	apic_write(APIC_DFR, APIC_DFR_FLAT);
-	val = calculate_ldr(cpu);
-	apic_write(APIC_LDR, val);
 }
 
 static void bigsmp_setup_apic_routing(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c7bb6c69f21c..d6af97fd170a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2438,7 +2438,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
 	 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
 	 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
 	 */
-	return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+	if (!ioapic_initialized)
+		return gsi_top;
+	/*
+	 * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+	 * updated. So simply return @from if ioapic_dynirq_base == 0.
+	 */
+	return ioapic_dynirq_base ? : from;
 }
 
 #ifdef CONFIG_X86_32
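`x ? : y` is the GNU "elvis" extension: it evaluates `x` once and yields it when non-zero, otherwise `y`. The portable equivalent of the new return statement, for reference:

```c
/* GNU extension:       return ioapic_dynirq_base ? : from;
 * portable equivalent: */
unsigned int lower_bound(unsigned int ioapic_dynirq_base, unsigned int from)
{
	return ioapic_dynirq_base ? ioapic_dynirq_base : from;
}
```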
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8d4e50428b68..68c363c341bf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
 	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 }
 
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "force"))
+		rdrand_force = true;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Saving of the MSR used to hide the RDRAND support during
+	 * suspend/resume is done by arch/x86/power/cpu.c, which is
+	 * dependent on CONFIG_PM_SLEEP.
+	 */
+	if (!IS_ENABLED(CONFIG_PM_SLEEP))
+		return;
+
+	/*
+	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+	 * RDRAND support using the CPUID function directly.
+	 */
+	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+		return;
+
+	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+	/*
+	 * Verify that the CPUID change has occurred in case the kernel is
+	 * running virtualized and the hypervisor doesn't support the MSR.
+	 */
+	if (cpuid_ecx(1) & BIT(30)) {
+		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+		return;
+	}
+
+	clear_cpu_cap(c, X86_FEATURE_RDRAND);
+	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check on whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
 	u64 value;
@@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 			wrmsrl_safe(MSR_F15H_IC_CFG, value);
 		}
 	}
+
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check on whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
 }
 
 static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	case 0x10: init_amd_gh(c); break;
 	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
+	case 0x16: init_amd_jg(c); break;
 	case 0x17: init_amd_zn(c); break;
 	}
 
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index d8359ebeea70..8cd745ef8c7b 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -508,9 +508,12 @@ struct uprobe_xol_ops {
 	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
 };
 
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
 {
-	return in_ia32_syscall() ? 4 : 8;
+	/*
+	 * Check registers for mode as in_xxx_syscall() does not apply here.
+	 */
+	return user_64bit_mode(regs) ? 8 : 4;
 }
 
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
 {
-	unsigned long new_sp = regs->sp - sizeof_long();
+	unsigned long new_sp = regs->sp - sizeof_long(regs);
 
-	if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
+	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
 		return -EFAULT;
 
 	regs->sp = new_sp;
@@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
 		long correction = utask->vaddr - utask->xol_vaddr;
 		regs->ip += correction;
 	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
-		regs->sp += sizeof_long(); /* Pop incorrect return address */
+		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
 		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
 			return -ERESTART;
 	}
@@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	 * "call" insn was executed out-of-line. Just restore ->sp and restart.
 	 * We could also restore ->ip and try to call branch_emulate_op() again.
 	 */
-	regs->sp += sizeof_long();
+	regs->sp += sizeof_long(regs);
 	return -ERESTART;
 }
 
@@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-	int rasize = sizeof_long(), nleft;
+	int rasize = sizeof_long(regs), nleft;
 	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
 	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
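`in_ia32_syscall()` is only meaningful while executing a syscall, whereas uprobes fire on arbitrary user instructions, so the operand width must come from the saved registers. A hedged model of the idea, using a plain boolean where the kernel calls `user_64bit_mode(regs)`:

```c
#include <stdbool.h>

/* Model: the width of a user stack slot is a property of the
 * interrupted context (the saved regs), not of the current syscall. */
static inline int sizeof_long_model(bool regs_in_64bit_mode)
{
	return regs_in_64bit_mode ? 8 : 4;
}

/* e.g. popping a return address: sp += sizeof_long_model(mode); */
```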
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c10a8b10b203..fff790a3f4ee 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1781,7 +1781,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
 int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 				struct kvm_cpuid_entry2 __user *entries)
 {
-	uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+	uint16_t evmcs_ver = 0;
 	struct kvm_cpuid_entry2 cpuid_entries[] = {
 		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
 		{ .function = HYPERV_CPUID_INTERFACE },
@@ -1793,6 +1793,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 	};
 	int i, nent = ARRAY_SIZE(cpuid_entries);
 
+	if (kvm_x86_ops->nested_get_evmcs_version)
+		evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+
 	/* Skip NESTED_FEATURES if eVMCS is not supported */
 	if (!evmcs_ver)
 		--nent;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 685d17c11461..e904ff06a83d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm)
 		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
 			new->phys_map[xapic_id] = apic;
 
+		if (!kvm_apic_sw_enabled(apic))
+			continue;
+
 		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
 		if (apic_x2apic_mode(apic)) {
@@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 			static_key_slow_dec_deferred(&apic_sw_disabled);
 		else
 			static_key_slow_inc(&apic_sw_disabled.key);
+
+		recalculate_apic_map(apic->vcpu->kvm);
 	}
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24843cf49579..218b277bfda3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 			struct kvm_memory_slot *slot,
 			struct kvm_page_track_notifier_node *node)
 {
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	unsigned long i;
-	bool flush;
-	gfn_t gfn;
-
-	spin_lock(&kvm->mmu_lock);
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		goto out_unlock;
-
-	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-	for (i = 0; i < slot->npages; i++) {
-		gfn = slot->base_gfn + i;
-
-		for_each_valid_sp(kvm, sp, gfn) {
-			if (sp->gfn != gfn)
-				continue;
-
-			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		}
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-			flush = false;
-			cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-	spin_unlock(&kvm->mmu_lock);
+	kvm_mmu_zap_all(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d685491fce4d..e0368076a1ef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	if (!entry)
 		return -EINVAL;
 
-	new_entry = READ_ONCE(*entry);
 	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
 			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
 			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
@@ -7129,12 +7128,6 @@ failed:
 	return ret;
 }
 
-static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
-{
-	/* Not supported */
-	return 0;
-}
-
 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			       uint16_t *vmcs_version)
 {
@@ -7333,7 +7326,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.mem_enc_unreg_region = svm_unregister_enc_region,
 
 	.nested_enable_evmcs = nested_enable_evmcs,
-	.nested_get_evmcs_version = nested_get_evmcs_version,
+	.nested_get_evmcs_version = NULL,
 
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 };
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 42ed3faa6af8..c030c96fc81a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7797,6 +7797,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.set_nested_state = NULL,
 	.get_vmcs12_pages = NULL,
 	.nested_enable_evmcs = NULL,
+	.nested_get_evmcs_version = NULL,
 	.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
 };
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 93b0bd45ac73..290c3c3efb87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6594,12 +6594,13 @@ restart:
 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE && ctxt->tf)
-			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
-		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+			kvm_rip_write(vcpu, ctxt->eip);
+			if (r == EMULATE_DONE && ctxt->tf)
+				kvm_vcpu_do_singlestep(vcpu, &r);
 			__kvm_set_rflags(vcpu, ctxt->eflags);
+		}
 
 		/*
 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6a9a77a403c9..e14e95ea7338 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -516,7 +516,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
516 */ 516 */
517static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, 517static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
518 unsigned long pfn, unsigned long npg, 518 unsigned long pfn, unsigned long npg,
519 int warnlvl) 519 unsigned long lpsize, int warnlvl)
520{ 520{
521 pgprotval_t forbidden, res; 521 pgprotval_t forbidden, res;
522 unsigned long end; 522 unsigned long end;
@@ -535,9 +535,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
535 check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX"); 535 check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
536 forbidden = res; 536 forbidden = res;
537 537
538 res = protect_kernel_text_ro(start, end); 538 /*
539 check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); 539 * Special case to preserve a large page. If the change spawns the
540 forbidden |= res; 540 * full large page mapping then there is no point to split it
541 * up. Happens with ftrace and is going to be removed once ftrace
542 * switched to text_poke().
543 */
544 if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
545 res = protect_kernel_text_ro(start, end);
546 check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
547 forbidden |= res;
548 }
541 549
542 /* Check the PFN directly */ 550 /* Check the PFN directly */
543 res = protect_pci_bios(pfn, pfn + npg - 1); 551 res = protect_pci_bios(pfn, pfn + npg - 1);
@@ -819,7 +827,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
819 * extra conditional required here. 827 * extra conditional required here.
820 */ 828 */
821 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages, 829 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
822 CPA_CONFLICT); 830 psize, CPA_CONFLICT);
823 831
824 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) { 832 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
825 /* 833 /*
@@ -855,7 +863,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
855 * protection requirement in the large page. 863 * protection requirement in the large page.
856 */ 864 */
857 new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages, 865 new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
858 CPA_DETECT); 866 psize, CPA_DETECT);
859 867
860 /* 868 /*
861 * If there is a conflict, split the large page. 869 * If there is a conflict, split the large page.
@@ -906,7 +914,8 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
906 if (!cpa->force_static_prot) 914 if (!cpa->force_static_prot)
907 goto set; 915 goto set;
908 916
909 prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT); 917 /* Hand in lpsize = 0 to enforce the protection mechanism */
918 prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
910 919
911 if (pgprot_val(prot) == pgprot_val(ref_prot)) 920 if (pgprot_val(prot) == pgprot_val(ref_prot))
912 goto set; 921 goto set;
@@ -1503,7 +1512,8 @@ repeat:
1503 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); 1512 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
1504 1513
1505 cpa_inc_4k_install(); 1514 cpa_inc_4k_install();
1506 new_prot = static_protections(new_prot, address, pfn, 1, 1515 /* Hand in lpsize = 0 to enforce the protection mechanism */
1516 new_prot = static_protections(new_prot, address, pfn, 1, 0,
1507 CPA_PROTECT); 1517 CPA_PROTECT);
1508 1518
1509 new_prot = pgprot_clear_protnone_bits(new_prot); 1519 new_prot = pgprot_clear_protnone_bits(new_prot);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index eaaed5bfc4a4..991549a1c5f3 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 
 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
 		      bpf_prog_was_classic(bpf_prog));
+	addrs[0] = prog - temp;
 
-	for (i = 0; i < insn_cnt; i++, insn++) {
+	for (i = 1; i <= insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
 		u32 dst_reg = insn->dst_reg;
 		u32 src_reg = insn->src_reg;
@@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		extra_pass = true;
 		goto skip_init_addrs;
 	}
-	addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
+	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
 		goto out_addrs;
@@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	 * Before first pass, make a rough estimation of addrs[]
 	 * each BPF instruction is translated to less than 64 bytes
 	 */
-	for (proglen = 0, i = 0; i < prog->len; i++) {
+	for (proglen = 0, i = 0; i <= prog->len; i++) {
 		proglen += 64;
 		addrs[i] = proglen;
 	}
@@ -1180,7 +1181,7 @@ out_image:
 
 	if (!image || !prog->is_func || extra_pass) {
 		if (image)
-			bpf_prog_fill_jited_linfo(prog, addrs);
+			bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
 	kfree(addrs);
 	kfree(jit_data);
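Shifting `addrs[]` by one slot lets `addrs[0]` record the prologue length, so slot `i` (1-based) holds the end offset of BPF instruction `i-1` and line-info consumers can be handed `addrs + 1`. A self-contained model of the new indexing — `insn_sizes[]` stands in for the emitter's per-instruction byte counts:

```c
#include <stdlib.h>

/* addrs[0] = prologue size; addrs[i] = end offset of insn i-1. */
int *build_addrs(const int *insn_sizes, int insn_cnt, int prologue_len)
{
	int *addrs = malloc(sizeof(*addrs) * (insn_cnt + 1));
	int i;

	if (!addrs)
		return NULL;

	addrs[0] = prologue_len;
	for (i = 1; i <= insn_cnt; i++)
		addrs[i] = addrs[i - 1] + insn_sizes[i - 1];

	return addrs;	/* line-info consumers start at addrs + 1 */
}
```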
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 24b079e94bc2..c9ef6a7a4a1a 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -12,6 +12,7 @@
 #include <linux/smp.h>
 #include <linux/perf_event.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -23,7 +24,7 @@
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>
 
 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -397,15 +398,14 @@ static int __init bsp_pm_check_init(void)
 
 core_initcall(bsp_pm_check_init);
 
-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
 {
-	int i = 0;
+	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
 	struct saved_msr *msr_array;
+	int total_num;
+	int i, j;
 
-	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
-		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
-		return -EINVAL;
-	}
+	total_num = saved_msrs->num + num;
 
 	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
 	if (!msr_array) {
@@ -413,19 +413,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < total_num; i++) {
-		msr_array[i].info.msr_no	= msr_id[i];
+	if (saved_msrs->array) {
+		/*
+		 * Multiple callbacks can invoke this function, so copy any
+		 * MSR save requests from previous invocations.
+		 */
+		memcpy(msr_array, saved_msrs->array,
+		       sizeof(struct saved_msr) * saved_msrs->num);
+
+		kfree(saved_msrs->array);
+	}
+
+	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+		msr_array[i].info.msr_no	= msr_id[j];
 		msr_array[i].valid		= false;
 		msr_array[i].info.reg.q		= 0;
 	}
-	saved_context.saved_msrs.num	= total_num;
-	saved_context.saved_msrs.array	= msr_array;
+	saved_msrs->num   = total_num;
+	saved_msrs->array = msr_array;
 
 	return 0;
 }
 
 /*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
  * Sometimes MSRs are modified by the BIOSen after suspended to
  * RAM, this might cause unexpected behavior after wakeup.
  * Thus we save/restore these specified MSRs across suspend/resume
@@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
 	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
 
 	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
-	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
 }
 
 static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
 	{}
 };
 
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+	u32 cpuid_msr_id[] = {
+		MSR_AMD64_CPUID_FN_1,
+	};
+
+	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+		c->family);
+
+	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x15,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x16,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+	const struct x86_cpu_id *m;
+	int ret = 0;
+
+	m = x86_match_cpu(msr_save_cpu_table);
+	if (m) {
+		pm_cpu_match_t fn;
+
+		fn = (pm_cpu_match_t)m->driver_data;
+		ret = fn(m);
+	}
+
+	return ret;
+}
+
 static int pm_check_save_msr(void)
 {
 	dmi_check_system(msr_save_dmi_table);
+	pm_cpu_check(msr_save_cpu_table);
+
 	return 0;
 }
 
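`msr_build_context()` now appends to any MSR list installed by an earlier quirk instead of refusing to run twice: allocate the combined array, copy the old entries, free them, then fill the tail. A standalone model of that grow-and-append step — the struct definitions here are simplified stand-ins, not the kernel's:

```c
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct saved_msr  { uint32_t msr_no; int valid; };
struct saved_msrs { int num; struct saved_msr *array; };

/* Append `num` new MSR ids; earlier entries are preserved, mirroring
 * the memcpy+kfree sequence in the hunk. */
int msr_append(struct saved_msrs *s, const uint32_t *msr_id, int num)
{
	int total = s->num + num, i, j;
	struct saved_msr *arr = malloc(sizeof(*arr) * total);

	if (!arr)
		return -1;

	if (s->array) {
		memcpy(arr, s->array, sizeof(*arr) * s->num);
		free(s->array);
	}
	for (i = s->num, j = 0; i < total; i++, j++) {
		arr[i].msr_no = msr_id[j];
		arr[i].valid = 0;
	}
	s->num = total;
	s->array = arr;
	return 0;
}
```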
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 2e2efa577437..8c37294f1d1e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
 	  make the card work).
 
 config ATM_NICSTAR_USE_IDT77105
-	bool "Use IDT77015 PHY driver (25Mbps)"
+	bool "Use IDT77105 PHY driver (25Mbps)"
 	depends on ATM_NICSTAR
 	help
 	  Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 9c0bb771751d..a2fcde582e2a 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -74,7 +74,7 @@ struct ht16k33_priv {
 	struct ht16k33_fbdev fbdev;
 };
 
-static struct fb_fix_screeninfo ht16k33_fb_fix = {
+static const struct fb_fix_screeninfo ht16k33_fb_fix = {
 	.id		= DRIVER_NAME,
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.visual		= FB_VISUAL_MONO10,
@@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = {
 	.accel		= FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo ht16k33_fb_var = {
+static const struct fb_var_screeninfo ht16k33_fb_var = {
 	.xres		= HT16K33_MATRIX_LED_MAX_ROWS,
 	.yres		= HT16K33_MATRIX_LED_MAX_COLS,
 	.xres_virtual	= HT16K33_MATRIX_LED_MAX_ROWS,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9bd4ddd12b25..5b248763a672 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg)
 		 thi->name[0],
 		 resource->name);
 
+	allow_kernel_signal(DRBD_SIGKILL);
+	allow_kernel_signal(SIGXCPU);
 restart:
 	retval = thi->function(thi);
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3327192bb71f..c8fb886aebd4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3038,6 +3038,17 @@ again:
 		}
 		return true;
 	case RBD_OBJ_READ_PARENT:
+		/*
+		 * The parent image is read only up to the overlap -- zero-fill
+		 * from the overlap to the end of the request.
+		 */
+		if (!*result) {
+			u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
+
+			if (obj_overlap < obj_req->ex.oe_len)
+				rbd_obj_zero_range(obj_req, obj_overlap,
+					    obj_req->ex.oe_len - obj_overlap);
+		}
 		return true;
 	default:
 		BUG();
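On a parent read the child object only inherits data up to the overlap point; everything past it must read back as zeroes. The range math reduces to a small helper, sketched here standalone:

```c
#include <stdint.h>

/* Returns how many trailing bytes of the request must be zero-filled
 * after a successful parent read: everything past the parent overlap. */
static uint32_t parent_read_zero_len(uint32_t obj_overlap, uint32_t oe_len)
{
	return obj_overlap < oe_len ? oe_len - obj_overlap : 0;
}
```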
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8b33128dccee..0875470a7806 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
 	return 0;
 }
 
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	int err;
+
+	bt_dev_dbg(hdev, "QCA pre shutdown cmd");
+
+	skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
+			     NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+
 static void qca_tlv_check_data(struct rome_config *config,
 				const struct firmware *fw)
 {
@@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config,
 	BT_DBG("Length\t\t : %d bytes", length);
 
 	config->dnld_mode = ROME_SKIP_EVT_NONE;
+	config->dnld_type = ROME_SKIP_EVT_NONE;
 
 	switch (config->type) {
 	case TLV_TYPE_PATCH:
@@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
 
 	evt = skb_put(skb, sizeof(*evt));
 	evt->ncmd = 1;
-	evt->opcode = QCA_HCI_CC_OPCODE;
+	evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);
 
 	skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
 
@@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
 	 */
 	if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
 	    config->dnld_type == ROME_SKIP_EVT_VSE)
-		return qca_inject_cmd_complete_event(hdev);
+		ret = qca_inject_cmd_complete_event(hdev);
 
 out:
 	release_firmware(fw);
@@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
 		return err;
 	}
 
+	/* Give the controller some time to get ready to receive the NVM */
+	msleep(10);
+
 	/* Download NVM configuration */
 	config.type = TLV_TYPE_NVM;
 	if (firmware_name)
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 6a291a7a5d96..69c5315a65fd 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -13,6 +13,7 @@
13#define EDL_PATCH_TLV_REQ_CMD (0x1E) 13#define EDL_PATCH_TLV_REQ_CMD (0x1E)
14#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) 14#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
15#define MAX_SIZE_PER_TLV_SEGMENT (243) 15#define MAX_SIZE_PER_TLV_SEGMENT (243)
16#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
16 17
17#define EDL_CMD_REQ_RES_EVT (0x00) 18#define EDL_CMD_REQ_RES_EVT (0x00)
18#define EDL_PATCH_VER_RES_EVT (0x19) 19#define EDL_PATCH_VER_RES_EVT (0x19)
@@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
135 const char *firmware_name); 136 const char *firmware_name);
136int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); 137int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
137int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); 138int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
139int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
138static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) 140static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
139{ 141{
140 return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998; 142 return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
@@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
167{ 169{
168 return false; 170 return false;
169} 171}
172
173static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
174{
175 return -EOPNOTSUPP;
176}
170#endif 177#endif
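
Note the shape of the header change: alongside the real prototype, a static inline stub returns -EOPNOTSUPP when the QCA helper code is compiled out, so callers such as hci_qca.c need no #ifdef of their own. A toy version of the same pattern, where CONFIG_FOO and foo_send_pre_shutdown_cmd() are illustrative names rather than the real Kconfig symbol:

#include <stdio.h>
#include <errno.h>

/* Hypothetical config switch standing in for CONFIG_BT_QCA. */
#define CONFIG_FOO 0

#if CONFIG_FOO
int foo_send_pre_shutdown_cmd(void) { return 0; }
#else
static inline int foo_send_pre_shutdown_cmd(void)
{
        return -EOPNOTSUPP;   /* callers stay #ifdef-free */
}
#endif

int main(void)
{
        int err = foo_send_pre_shutdown_cmd();

        if (err)
                printf("not supported: %d\n", err);
        return 0;
}
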
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3876fee6ad13..5cf0734eb31b 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
2762 fw_size = fw->size; 2762 fw_size = fw->size;
2763 2763
2764 /* The size of the patch header is 30 bytes, it should be skipped */ 2764 /* The size of the patch header is 30 bytes, it should be skipped */
2765 if (fw_size < 30) 2765 if (fw_size < 30) {
2766 err = -EINVAL;
2766 goto err_release_fw; 2767 goto err_release_fw;
2768 }
2767 2769
2768 fw_size -= 30; 2770 fw_size -= 30;
2769 fw_ptr += 30; 2771 fw_ptr += 30;
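
The btusb change fixes a classic unwind-label bug: jumping to err_release_fw without assigning err let the function return a stale success value for undersized firmware. A compact userspace reconstruction of the pattern, where load_blob() is hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Every failure path must set 'err' before jumping to the unwind label,
 * or the function can report success while bailing out. */
static int load_blob(size_t size)
{
        char *buf;
        int err = 0;

        buf = malloc(size ? size : 1);
        if (!buf)
                return -ENOMEM;

        if (size < 30) {            /* header must be present */
                err = -EINVAL;      /* the assignment the fix adds */
                goto err_free;
        }

        /* ... consume buf ... */

err_free:
        free(buf);
        return err;
}

int main(void)
{
        printf("%d\n", load_blob(10));  /* prints -22 (-EINVAL) */
        return 0;
}
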
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 82a0a3691a63..9a970fd1975a 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -705,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
705 unsigned long flags; 705 unsigned long flags;
706 struct qca_data *qca = hu->priv; 706 struct qca_data *qca = hu->priv;
707 707
708 BT_DBG("hu %p want to sleep", hu); 708 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
709 709
710 spin_lock_irqsave(&qca->hci_ibs_lock, flags); 710 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
711 711
@@ -720,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
720 break; 720 break;
721 721
722 case HCI_IBS_RX_ASLEEP: 722 case HCI_IBS_RX_ASLEEP:
723 /* Fall through */ 723 break;
724 724
725 default: 725 default:
726 /* Any other state is illegal */ 726 /* Any other state is illegal */
@@ -912,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
912 if (hdr->evt == HCI_EV_VENDOR) 912 if (hdr->evt == HCI_EV_VENDOR)
913 complete(&qca->drop_ev_comp); 913 complete(&qca->drop_ev_comp);
914 914
915 kfree(skb); 915 kfree_skb(skb);
916 916
917 return 0; 917 return 0;
918 } 918 }
@@ -1386,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev)
1386{ 1386{
1387 struct hci_uart *hu = hci_get_drvdata(hdev); 1387 struct hci_uart *hu = hci_get_drvdata(hdev);
1388 1388
1389 /* Perform pre shutdown command */
1390 qca_send_pre_shutdown_cmd(hdev);
1391
1389 qca_power_shutdown(hu); 1392 qca_power_shutdown(hu);
1390 return 0; 1393 return 0;
1391} 1394}
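
The one-line kfree() to kfree_skb() change above represents a whole bug class: sk_buffs carry their own state and must be released through their destructor, or that state leaks. A toy analogue with an object that owns extra allocations:

#include <stdlib.h>

/* Objects with a destructor of their own must be freed through it;
 * a bare free(b) would leak everything the object points at. */
struct buf {
        void *data;
};

static struct buf *buf_alloc(size_t n)
{
        struct buf *b = malloc(sizeof(*b));

        if (b)
                b->data = malloc(n);
        return b;
}

static void buf_free(struct buf *b)
{
        if (!b)
                return;
        free(b->data);   /* the step a plain free(b) would skip */
        free(b);
}

int main(void)
{
        struct buf *b = buf_alloc(64);

        buf_free(b);     /* not free(b) */
        return 0;
}
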
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 19d7b6ff2f17..20c957185af2 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
456 size_t pdata_size; 456 size_t pdata_size;
457}; 457};
458 458
459static void hisi_lpc_acpi_remove(struct device *hostdev)
460{
461 struct acpi_device *adev = ACPI_COMPANION(hostdev);
462 struct acpi_device *child;
463
464 device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
465
466 list_for_each_entry(child, &adev->children, node)
467 acpi_device_clear_enumerated(child);
468}
469
459/* 470/*
460 * hisi_lpc_acpi_probe - probe children for ACPI FW 471 * hisi_lpc_acpi_probe - probe children for ACPI FW
461 * @hostdev: LPC host device pointer 472 * @hostdev: LPC host device pointer
@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
555 return 0; 566 return 0;
556 567
557fail: 568fail:
558 device_for_each_child(hostdev, NULL, 569 hisi_lpc_acpi_remove(hostdev);
559 hisi_lpc_acpi_remove_subdev);
560 return ret; 570 return ret;
561} 571}
562 572
@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
569{ 579{
570 return -ENODEV; 580 return -ENODEV;
571} 581}
582
583static void hisi_lpc_acpi_remove(struct device *hostdev)
584{
585}
572#endif // CONFIG_ACPI 586#endif // CONFIG_ACPI
573 587
574/* 588/*
@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
606 range->fwnode = dev->fwnode; 620 range->fwnode = dev->fwnode;
607 range->flags = LOGIC_PIO_INDIRECT; 621 range->flags = LOGIC_PIO_INDIRECT;
608 range->size = PIO_INDIRECT_SIZE; 622 range->size = PIO_INDIRECT_SIZE;
623 range->hostdata = lpcdev;
624 range->ops = &hisi_lpc_ops;
625 lpcdev->io_host = range;
609 626
610 ret = logic_pio_register_range(range); 627 ret = logic_pio_register_range(range);
611 if (ret) { 628 if (ret) {
612 dev_err(dev, "register IO range failed (%d)!\n", ret); 629 dev_err(dev, "register IO range failed (%d)!\n", ret);
613 return ret; 630 return ret;
614 } 631 }
615 lpcdev->io_host = range;
616 632
617 /* register the LPC host PIO resources */ 633 /* register the LPC host PIO resources */
618 if (acpi_device) 634 if (acpi_device)
619 ret = hisi_lpc_acpi_probe(dev); 635 ret = hisi_lpc_acpi_probe(dev);
620 else 636 else
621 ret = of_platform_populate(dev->of_node, NULL, NULL, dev); 637 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
622 if (ret) 638 if (ret) {
639 logic_pio_unregister_range(range);
623 return ret; 640 return ret;
641 }
624 642
625 lpcdev->io_host->hostdata = lpcdev; 643 dev_set_drvdata(dev, lpcdev);
626 lpcdev->io_host->ops = &hisi_lpc_ops;
627 644
628 io_end = lpcdev->io_host->io_start + lpcdev->io_host->size; 645 io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
629 dev_info(dev, "registered range [%pa - %pa]\n", 646 dev_info(dev, "registered range [%pa - %pa]\n",
@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
632 return ret; 649 return ret;
633} 650}
634 651
652static int hisi_lpc_remove(struct platform_device *pdev)
653{
654 struct device *dev = &pdev->dev;
655 struct acpi_device *acpi_device = ACPI_COMPANION(dev);
656 struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
657 struct logic_pio_hwaddr *range = lpcdev->io_host;
658
659 if (acpi_device)
660 hisi_lpc_acpi_remove(dev);
661 else
662 of_platform_depopulate(dev);
663
664 logic_pio_unregister_range(range);
665
666 return 0;
667}
668
635static const struct of_device_id hisi_lpc_of_match[] = { 669static const struct of_device_id hisi_lpc_of_match[] = {
636 { .compatible = "hisilicon,hip06-lpc", }, 670 { .compatible = "hisilicon,hip06-lpc", },
637 { .compatible = "hisilicon,hip07-lpc", }, 671 { .compatible = "hisilicon,hip07-lpc", },
@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
645 .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match), 679 .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
646 }, 680 },
647 .probe = hisi_lpc_probe, 681 .probe = hisi_lpc_probe,
682 .remove = hisi_lpc_remove,
648}; 683};
649builtin_platform_driver(hisi_lpc_driver); 684builtin_platform_driver(hisi_lpc_driver);
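
A sketch of the probe/remove symmetry the hisi_lpc patch introduces: drvdata is set once setup succeeds, probe() unwinds its own partial state on failure, and remove() releases resources in reverse order of acquisition (depopulate children, then unregister the PIO range). All names below are illustrative:

#include <stdio.h>

static int register_range(void) { puts("range registered"); return 0; }
static void unregister_range(void) { puts("range unregistered"); }

static int populate_children(int fail)
{
        if (fail)
                return -1;
        puts("children populated");
        return 0;
}

static void depopulate_children(void) { puts("children depopulated"); }

static int drv_probe(int fail)
{
        int err = register_range();

        if (err)
                return err;

        err = populate_children(fail);
        if (err) {
                unregister_range();  /* same unwind the fixed probe() adds */
                return err;
        }
        return 0;
}

static void drv_remove(void)
{
        depopulate_children();   /* children first ... */
        unregister_range();      /* ... then the PIO range */
}

int main(void)
{
        if (drv_probe(0) == 0)
                drv_remove();
        return 0;
}
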
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e6deabd8305d..2db474ab4c6b 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
949 *best_mode = SYSC_IDLE_SMART_WKUP; 949 *best_mode = SYSC_IDLE_SMART_WKUP;
950 else if (idlemodes & BIT(SYSC_IDLE_SMART)) 950 else if (idlemodes & BIT(SYSC_IDLE_SMART))
951 *best_mode = SYSC_IDLE_SMART; 951 *best_mode = SYSC_IDLE_SMART;
952 else if (idlemodes & SYSC_IDLE_FORCE) 952 else if (idlemodes & BIT(SYSC_IDLE_FORCE))
953 *best_mode = SYSC_IDLE_FORCE; 953 *best_mode = SYSC_IDLE_FORCE;
954 else 954 else
955 return -EINVAL; 955 return -EINVAL;
@@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1267 SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), 1267 SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
1268 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 1268 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
1269 0xffff00f0, 0), 1269 0xffff00f0, 0),
1270 SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0), 1270 SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
1271 SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
1271 SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), 1272 SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
1272 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), 1273 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
1273 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), 1274 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
@@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
1692 if (error) 1693 if (error)
1693 return 0; 1694 return 0;
1694 1695
1695 if (val) 1696 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
1696 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
1697 else
1698 ddata->cfg.sysc_val = ddata->cap->sysc_mask;
1699 1697
1700 return 0; 1698 return 0;
1701} 1699}
@@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev)
2385 2383
2386 error = sysc_init_dts_quirks(ddata); 2384 error = sysc_init_dts_quirks(ddata);
2387 if (error) 2385 if (error)
2388 goto unprepare; 2386 return error;
2389 2387
2390 error = sysc_map_and_check_registers(ddata); 2388 error = sysc_map_and_check_registers(ddata);
2391 if (error) 2389 if (error)
2392 goto unprepare; 2390 return error;
2393 2391
2394 error = sysc_init_sysc_mask(ddata); 2392 error = sysc_init_sysc_mask(ddata);
2395 if (error) 2393 if (error)
2396 goto unprepare; 2394 return error;
2397 2395
2398 error = sysc_init_idlemodes(ddata); 2396 error = sysc_init_idlemodes(ddata);
2399 if (error) 2397 if (error)
2400 goto unprepare; 2398 return error;
2401 2399
2402 error = sysc_init_syss_mask(ddata); 2400 error = sysc_init_syss_mask(ddata);
2403 if (error) 2401 if (error)
2404 goto unprepare; 2402 return error;
2405 2403
2406 error = sysc_init_pdata(ddata); 2404 error = sysc_init_pdata(ddata);
2407 if (error) 2405 if (error)
2408 goto unprepare; 2406 return error;
2409 2407
2410 sysc_init_early_quirks(ddata); 2408 sysc_init_early_quirks(ddata);
2411 2409
@@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev)
2415 2413
2416 error = sysc_init_resets(ddata); 2414 error = sysc_init_resets(ddata);
2417 if (error) 2415 if (error)
2418 return error; 2416 goto unprepare;
2419 2417
2420 error = sysc_init_module(ddata); 2418 error = sysc_init_module(ddata);
2421 if (error) 2419 if (error)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c0990703ce54..1c46babeb093 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
324 return NULL; 324 return NULL;
325} 325}
326 326
327#ifdef CONFIG_OF
328static int of_parse_clkspec(const struct device_node *np, int index,
329 const char *name, struct of_phandle_args *out_args);
330static struct clk_hw *
331of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
332#else
333static inline int of_parse_clkspec(const struct device_node *np, int index,
334 const char *name,
335 struct of_phandle_args *out_args)
336{
337 return -ENOENT;
338}
339static inline struct clk_hw *
340of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
341{
342 return ERR_PTR(-ENOENT);
343}
344#endif
345
327/** 346/**
328 * clk_core_get - Find the clk_core parent of a clk 347 * clk_core_get - Find the clk_core parent of a clk
329 * @core: clk to find parent of 348 * @core: clk to find parent of
@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
355 * }; 374 * };
356 * 375 *
357 * Returns: -ENOENT when the provider can't be found or the clk doesn't 376 * Returns: -ENOENT when the provider can't be found or the clk doesn't
358 * exist in the provider. -EINVAL when the name can't be found. NULL when the 377 * exist in the provider or the name can't be found in the DT node or
359 * provider knows about the clk but it isn't provided on this system. 378 * in a clkdev lookup. NULL when the provider knows about the clk but it
379 * isn't provided on this system.
360 * A valid clk_core pointer when the clk can be found in the provider. 380 * A valid clk_core pointer when the clk can be found in the provider.
361 */ 381 */
362static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) 382static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
367 struct device *dev = core->dev; 387 struct device *dev = core->dev;
368 const char *dev_id = dev ? dev_name(dev) : NULL; 388 const char *dev_id = dev ? dev_name(dev) : NULL;
369 struct device_node *np = core->of_node; 389 struct device_node *np = core->of_node;
390 struct of_phandle_args clkspec;
370 391
371 if (np && (name || index >= 0)) 392 if (np && (name || index >= 0) &&
372 hw = of_clk_get_hw(np, index, name); 393 !of_parse_clkspec(np, index, name, &clkspec)) {
373 394 hw = of_clk_get_hw_from_clkspec(&clkspec);
374 /* 395 of_node_put(clkspec.np);
375 * If the DT search above couldn't find the provider or the provider 396 } else if (name) {
376 * didn't know about this clk, fallback to looking up via clkdev based 397 /*
377 * clk_lookups 398 * If the DT search above couldn't find the provider fallback to
378 */ 399 * looking up via clkdev based clk_lookups.
379 if (PTR_ERR(hw) == -ENOENT && name) 400 */
380 hw = clk_find_hw(dev_id, name); 401 hw = clk_find_hw(dev_id, name);
402 }
381 403
382 if (IS_ERR(hw)) 404 if (IS_ERR(hw))
383 return ERR_CAST(hw); 405 return ERR_CAST(hw);
@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
401 parent = ERR_PTR(-EPROBE_DEFER); 423 parent = ERR_PTR(-EPROBE_DEFER);
402 } else { 424 } else {
403 parent = clk_core_get(core, index); 425 parent = clk_core_get(core, index);
404 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT) 426 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
405 parent = clk_core_lookup(entry->name); 427 parent = clk_core_lookup(entry->name);
406 } 428 }
407 429
@@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
1632 break; 1654 break;
1633 1655
1634 /* Fallback to comparing globally unique names */ 1656 /* Fallback to comparing globally unique names */
1635 if (!strcmp(parent->name, core->parents[i].name)) 1657 if (core->parents[i].name &&
1658 !strcmp(parent->name, core->parents[i].name))
1636 break; 1659 break;
1637 } 1660 }
1638 1661
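
Two of the clk.c hunks share one theme: a parent entry may legitimately have a NULL .name, so both the global-lookup fallback in clk_core_fill_parent_index() and the comparison in clk_fetch_parent_index() now check the pointer before using it (strcmp() with a NULL argument is undefined behavior). A minimal illustration:

#include <stdio.h>
#include <string.h>

struct parent { const char *name; };

int main(void)
{
        struct parent parents[] = { { NULL }, { "osc" }, { "pll" } };
        const char *want = "pll";

        for (size_t i = 0; i < sizeof(parents) / sizeof(parents[0]); i++) {
                /* The added guard: skip entries without a global name
                 * instead of handing NULL to strcmp(). */
                if (parents[i].name && !strcmp(parents[i].name, want)) {
                        printf("matched index %zu\n", i);
                        break;
                }
        }
        return 0;
}
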
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 91db7894125d..65c82d922b05 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -14,7 +14,7 @@
14#include "clk-exynos5-subcmu.h" 14#include "clk-exynos5-subcmu.h"
15 15
16static struct samsung_clk_provider *ctx; 16static struct samsung_clk_provider *ctx;
17static const struct exynos5_subcmu_info *cmu; 17static const struct exynos5_subcmu_info **cmu;
18static int nr_cmus; 18static int nr_cmus;
19 19
20static void exynos5_subcmu_clk_save(void __iomem *base, 20static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
56 * when OF-core populates all device-tree nodes. 56 * when OF-core populates all device-tree nodes.
57 */ 57 */
58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus, 58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
59 const struct exynos5_subcmu_info *_cmu) 59 const struct exynos5_subcmu_info **_cmu)
60{ 60{
61 ctx = _ctx; 61 ctx = _ctx;
62 cmu = _cmu; 62 cmu = _cmu;
63 nr_cmus = _nr_cmus; 63 nr_cmus = _nr_cmus;
64 64
65 for (; _nr_cmus--; _cmu++) { 65 for (; _nr_cmus--; _cmu++) {
66 exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks, 66 exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
67 _cmu->nr_gate_clks); 67 (*_cmu)->nr_gate_clks);
68 exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs, 68 exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
69 _cmu->nr_suspend_regs); 69 (*_cmu)->nr_suspend_regs);
70 } 70 }
71} 71}
72 72
@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
163 if (of_property_read_string(np, "label", &name) < 0) 163 if (of_property_read_string(np, "label", &name) < 0)
164 continue; 164 continue;
165 for (i = 0; i < nr_cmus; i++) 165 for (i = 0; i < nr_cmus; i++)
166 if (strcmp(cmu[i].pd_name, name) == 0) 166 if (strcmp(cmu[i]->pd_name, name) == 0)
167 exynos5_clk_register_subcmu(&pdev->dev, 167 exynos5_clk_register_subcmu(&pdev->dev,
168 &cmu[i], np); 168 cmu[i], np);
169 } 169 }
170 return 0; 170 return 0;
171} 171}
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
index 755ee8aaa3de..9ae5356f25aa 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
21}; 21};
22 22
23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus, 23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
24 const struct exynos5_subcmu_info *cmu); 24 const struct exynos5_subcmu_info **cmu);
25 25
26#endif 26#endif
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f2b896881768..931c70a4da19 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
681 .pd_name = "DISP1", 681 .pd_name = "DISP1",
682}; 682};
683 683
684static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
685 &exynos5250_disp_subcmu,
686};
687
684static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = { 688static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
685 /* sorted in descending order */ 689 /* sorted in descending order */
686 /* PLL_36XX_RATE(rate, m, p, s, k) */ 690 /* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
843 847
844 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs, 848 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
845 ARRAY_SIZE(exynos5250_clk_regs)); 849 ARRAY_SIZE(exynos5250_clk_regs));
846 exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu); 850 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
851 exynos5250_subcmus);
847 852
848 samsung_clk_of_add_provider(np, ctx); 853 samsung_clk_of_add_provider(np, ctx);
849 854
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 01bca5a498b2..7670cc596c74 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
534 GATE_BUS_TOP, 24, 0, 0), 534 GATE_BUS_TOP, 24, 0, 0),
535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", 535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), 536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
537 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
538 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
539}; 537};
540 538
541static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { 539static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
577 575
578static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { 576static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
579 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), 577 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
578 /* Maudio Block */
580 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", 579 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
581 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), 580 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
581 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
582 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
583 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
584 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
582}; 585};
583 586
584static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { 587static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
890 /* GSCL Block */ 893 /* GSCL Block */
891 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2), 894 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
892 895
893 /* MSCL Block */
894 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
895
896 /* PSGEN */ 896 /* PSGEN */
897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1), 897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1), 898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1", 1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), 1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
1019 1019
1020 /* Maudio Block */
1021 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1022 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1023 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1024 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1025
1026 /* FSYS Block */ 1020 /* FSYS Block */
1027 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0), 1021 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
1028 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0), 1022 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1162 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", 1156 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
1163 GATE_IP_GSCL1, 17, 0, 0), 1157 GATE_IP_GSCL1, 17, 0, 0),
1164 1158
1165 /* MSCL Block */
1166 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1167 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1168 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1169 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1170 GATE_IP_MSCL, 8, 0, 0),
1171 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1172 GATE_IP_MSCL, 9, 0, 0),
1173 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1174 GATE_IP_MSCL, 10, 0, 0),
1175
1176 /* ISP */ 1159 /* ISP */
1177 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp", 1160 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
1178 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0), 1161 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
1281 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */ 1264 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
1282}; 1265};
1283 1266
1284static const struct exynos5_subcmu_info exynos5x_subcmus[] = { 1267static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
1285 { 1268 /* MSCL Block */
1286 .div_clks = exynos5x_disp_div_clks, 1269 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1287 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), 1270 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1288 .gate_clks = exynos5x_disp_gate_clks, 1271 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1289 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), 1272 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1290 .suspend_regs = exynos5x_disp_suspend_regs, 1273 GATE_IP_MSCL, 8, 0, 0),
1291 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), 1274 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1292 .pd_name = "DISP", 1275 GATE_IP_MSCL, 9, 0, 0),
1293 }, { 1276 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1294 .div_clks = exynos5x_gsc_div_clks, 1277 GATE_IP_MSCL, 10, 0, 0),
1295 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), 1278};
1296 .gate_clks = exynos5x_gsc_gate_clks, 1279
1297 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), 1280static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
1298 .suspend_regs = exynos5x_gsc_suspend_regs, 1281 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
1299 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), 1282};
1300 .pd_name = "GSC", 1283
1301 }, { 1284static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
1302 .div_clks = exynos5x_mfc_div_clks, 1285 { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
1303 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), 1286 { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
1304 .gate_clks = exynos5x_mfc_gate_clks, 1287 { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
1305 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), 1288};
1306 .suspend_regs = exynos5x_mfc_suspend_regs, 1289
1307 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), 1290static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
1308 .pd_name = "MFC", 1291 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
1309 }, 1292 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
1293 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1294 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1295 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1296 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1297};
1298
1299static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
1300 { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
1301};
1302
1303static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
1304 .div_clks = exynos5x_disp_div_clks,
1305 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
1306 .gate_clks = exynos5x_disp_gate_clks,
1307 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
1308 .suspend_regs = exynos5x_disp_suspend_regs,
1309 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
1310 .pd_name = "DISP",
1311};
1312
1313static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
1314 .div_clks = exynos5x_gsc_div_clks,
1315 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
1316 .gate_clks = exynos5x_gsc_gate_clks,
1317 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
1318 .suspend_regs = exynos5x_gsc_suspend_regs,
1319 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
1320 .pd_name = "GSC",
1321};
1322
1323static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
1324 .div_clks = exynos5x_mfc_div_clks,
1325 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
1326 .gate_clks = exynos5x_mfc_gate_clks,
1327 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
1328 .suspend_regs = exynos5x_mfc_suspend_regs,
1329 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
1330 .pd_name = "MFC",
1331};
1332
1333static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
1334 .div_clks = exynos5x_mscl_div_clks,
1335 .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
1336 .gate_clks = exynos5x_mscl_gate_clks,
1337 .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
1338 .suspend_regs = exynos5x_mscl_suspend_regs,
1339 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
1340 .pd_name = "MSC",
1341};
1342
1343static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
1344 .gate_clks = exynos5800_mau_gate_clks,
1345 .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
1346 .suspend_regs = exynos5800_mau_suspend_regs,
1347 .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
1348 .pd_name = "MAU",
1349};
1350
1351static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
1352 &exynos5x_disp_subcmu,
1353 &exynos5x_gsc_subcmu,
1354 &exynos5x_mfc_subcmu,
1355 &exynos5x_mscl_subcmu,
1356};
1357
1358static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
1359 &exynos5x_disp_subcmu,
1360 &exynos5x_gsc_subcmu,
1361 &exynos5x_mfc_subcmu,
1362 &exynos5x_mscl_subcmu,
1363 &exynos5800_mau_subcmu,
1310}; 1364};
1311 1365
1312static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = { 1366static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
@@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
1539 samsung_clk_extended_sleep_init(reg_base, 1593 samsung_clk_extended_sleep_init(reg_base,
1540 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs), 1594 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
1541 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc)); 1595 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
1542 if (soc == EXYNOS5800) 1596
1597 if (soc == EXYNOS5800) {
1543 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs, 1598 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
1544 ARRAY_SIZE(exynos5800_clk_regs)); 1599 ARRAY_SIZE(exynos5800_clk_regs));
1545 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus), 1600
1546 exynos5x_subcmus); 1601 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
1602 exynos5800_subcmus);
1603 } else {
1604 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
1605 exynos5x_subcmus);
1606 }
1547 1607
1548 samsung_clk_of_add_provider(np, ctx); 1608 samsung_clk_of_add_provider(np, ctx);
1549} 1609}
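
Why the subCMU API now takes arrays of pointers: two SoC variants can share the common descriptors, and the Exynos5800 table simply appends one more entry instead of duplicating the whole list. A stripped-down model of that layout, with illustrative names rather than the driver's:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct blk { const char *pd_name; };

static const struct blk disp = { "DISP" };
static const struct blk mau  = { "MAU" };

/* Both tables reference the same descriptor objects. */
static const struct blk *exynos5x[]   = { &disp };
static const struct blk *exynos5800[] = { &disp, &mau };

static void register_all(const struct blk **t, size_t n)
{
        for (size_t i = 0; i < n; i++)
                printf("register %s\n", t[i]->pd_name);
}

int main(void)
{
        register_all(exynos5x, ARRAY_SIZE(exynos5x));
        register_all(exynos5800, ARRAY_SIZE(exynos5800));
        return 0;
}
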
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 5c50e723ecae..1a191eeeebba 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
38 if (socfpgaclk->fixed_div) { 38 if (socfpgaclk->fixed_div) {
39 div = socfpgaclk->fixed_div; 39 div = socfpgaclk->fixed_div;
40 } else { 40 } else {
41 if (!socfpgaclk->bypass_reg) 41 if (socfpgaclk->hw.reg)
42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); 42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
43 } 43 }
44 44
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index f79eede71c62..edefa669153f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
540 unsigned long flags; 540 unsigned long flags;
541 unsigned int i; 541 unsigned int i;
542 542
543 /* If there's no device there's nothing to do */
544 if (!ccp)
545 return 0;
546
543 spin_lock_irqsave(&ccp->cmd_lock, flags); 547 spin_lock_irqsave(&ccp->cmd_lock, flags);
544 548
545 ccp->suspending = 1; 549 ccp->suspending = 1;
@@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp)
564 unsigned long flags; 568 unsigned long flags;
565 unsigned int i; 569 unsigned int i;
566 570
571 /* If there's no device there's nothing to do */
572 if (!ccp)
573 return 0;
574
567 spin_lock_irqsave(&ccp->cmd_lock, flags); 575 spin_lock_irqsave(&ccp->cmd_lock, flags);
568 576
569 ccp->suspending = 0; 577 ccp->suspending = 0;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 23e0a356f167..ad72b3f42ffa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1163 switch (chan->feature & FSL_DMA_IP_MASK) { 1163 switch (chan->feature & FSL_DMA_IP_MASK) {
1164 case FSL_DMA_IP_85XX: 1164 case FSL_DMA_IP_85XX:
1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
1166 /* Fall through */
1166 case FSL_DMA_IP_83XX: 1167 case FSL_DMA_IP_83XX:
1167 chan->toggle_ext_start = fsl_chan_toggle_ext_start; 1168 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1168 chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1169 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
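
The fsldma hunk only adds a comment, but it matters: an annotated fall-through tells both reviewers and -Wimplicit-fallthrough that the 85XX case intentionally continues into the 83XX setup. In miniature:

#include <stdio.h>

static void setup(int ip_block)
{
        switch (ip_block) {
        case 85:
                puts("enable ext pause");
                /* fall through - 85XX gets everything 83XX gets, too */
        case 83:
                puts("enable ext start");
                break;
        default:
                puts("unknown block");
                break;
        }
}

int main(void)
{
        setup(85);
        return 0;
}
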
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index a13f224303c6..0221dee8dd4c 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
210 return -EIO; 210 return -EIO;
211 } 211 }
212 212
213 if (!IS_ERR(conf->confd)) { 213 if (conf->confd) {
214 if (!gpiod_get_raw_value_cansleep(conf->confd)) { 214 if (!gpiod_get_raw_value_cansleep(conf->confd)) {
215 dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); 215 dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
216 return -EIO; 216 return -EIO;
@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
289 return PTR_ERR(conf->status); 289 return PTR_ERR(conf->status);
290 } 290 }
291 291
292 conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); 292 conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
293 if (IS_ERR(conf->confd)) { 293 if (IS_ERR(conf->confd)) {
294 dev_warn(&spi->dev, "Not using confd gpio: %ld\n", 294 dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
295 PTR_ERR(conf->confd)); 295 PTR_ERR(conf->confd));
296 return PTR_ERR(conf->confd);
297 } else if (!conf->confd) {
298 dev_warn(&spi->dev, "Not using confd gpio");
296 } 299 }
297 300
298 /* Register manager with unique name */ 301 /* Register manager with unique name */
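
altera-ps-spi switches to devm_gpiod_get_optional(), whose contract separates three outcomes: a valid descriptor, NULL for "not wired up" (tolerated with a warning), and ERR_PTR for a real failure (propagated). A userspace model, with simplified err.h macros and a hypothetical gpio_get_optional():

#include <stdio.h>

#define MAX_ERRNO 4095
#define EPROBE_DEFER 517   /* kernel-internal errno; not in userspace errno.h */

static inline void *ERR_PTR(long e) { return (void *)e; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Toy *_get_optional(): NULL means absent (fine, keep going),
 * ERR_PTR means a real failure that must be propagated. */
static void *gpio_get_optional(int present, int broken)
{
        static int line;

        if (broken)
                return ERR_PTR(-EPROBE_DEFER);
        return present ? (void *)&line : NULL;
}

int main(void)
{
        void *confd = gpio_get_optional(0, 0);

        if (IS_ERR(confd)) {
                printf("hard error: %ld\n", PTR_ERR(confd));
                return 1;
        }
        if (!confd)
                printf("confd absent, continuing without it\n");
        return 0;
}
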
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 343153d47e5b..004dc03ccf09 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -38,8 +38,7 @@
38#define SCOM_STATUS_PIB_RESP_MASK 0x00007000 38#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
39#define SCOM_STATUS_PIB_RESP_SHIFT 12 39#define SCOM_STATUS_PIB_RESP_SHIFT 12
40 40
41#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \ 41#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
42 SCOM_STATUS_PROTECTION | \
43 SCOM_STATUS_PARITY | \ 42 SCOM_STATUS_PARITY | \
44 SCOM_STATUS_PIB_ABORT | \ 43 SCOM_STATUS_PIB_ABORT | \
45 SCOM_STATUS_PIB_RESP_MASK) 44 SCOM_STATUS_PIB_RESP_MASK)
@@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
251 /* Return -EBUSY on PIB abort to force a retry */ 250 /* Return -EBUSY on PIB abort to force a retry */
252 if (status & SCOM_STATUS_PIB_ABORT) 251 if (status & SCOM_STATUS_PIB_ABORT)
253 return -EBUSY; 252 return -EBUSY;
254 if (status & SCOM_STATUS_ERR_SUMMARY) {
255 fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
256 sizeof(uint32_t));
257 return -EIO;
258 }
259 return 0; 253 return 0;
260} 254}
261 255
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 567fb98c0892..9762dd6d99fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
363 /* Special handling for SPI GPIOs if used */ 363 /* Special handling for SPI GPIOs if used */
364 if (IS_ERR(desc)) 364 if (IS_ERR(desc))
365 desc = of_find_spi_gpio(dev, con_id, &of_flags); 365 desc = of_find_spi_gpio(dev, con_id, &of_flags);
366 if (IS_ERR(desc)) { 366 if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
367 /* This quirk looks up flags and all */ 367 /* This quirk looks up flags and all */
368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); 368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
369 if (!IS_ERR(desc)) 369 if (!IS_ERR(desc))
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f497003f119c..cca749010cd0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; 1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
1094 lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; 1094 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
1095 GPIOLINE_FLAG_IS_OUT);
1095 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) 1096 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
1096 lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; 1097 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
1098 GPIOLINE_FLAG_IS_OUT);
1097 1099
1098 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) 1100 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
1099 return -EFAULT; 1101 return -EFAULT;
@@ -1371,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1371 if (status) 1373 if (status)
1372 goto err_remove_from_list; 1374 goto err_remove_from_list;
1373 1375
1374 status = gpiochip_irqchip_init_valid_mask(chip);
1375 if (status)
1376 goto err_remove_from_list;
1377
1378 status = gpiochip_alloc_valid_mask(chip); 1376 status = gpiochip_alloc_valid_mask(chip);
1379 if (status) 1377 if (status)
1380 goto err_remove_irqchip_mask; 1378 goto err_remove_from_list;
1381
1382 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1383 if (status)
1384 goto err_free_gpiochip_mask;
1385 1379
1386 status = of_gpiochip_add(chip); 1380 status = of_gpiochip_add(chip);
1387 if (status) 1381 if (status)
1388 goto err_remove_chip; 1382 goto err_free_gpiochip_mask;
1389 1383
1390 status = gpiochip_init_valid_mask(chip); 1384 status = gpiochip_init_valid_mask(chip);
1391 if (status) 1385 if (status)
@@ -1411,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1411 1405
1412 machine_gpiochip_add(chip); 1406 machine_gpiochip_add(chip);
1413 1407
1408 status = gpiochip_irqchip_init_valid_mask(chip);
1409 if (status)
1410 goto err_remove_acpi_chip;
1411
1412 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1413 if (status)
1414 goto err_remove_irqchip_mask;
1415
1414 /* 1416 /*
1415 * By first adding the chardev, and then adding the device, 1417 * By first adding the chardev, and then adding the device,
1416 * we get a device node entry in sysfs under 1418 * we get a device node entry in sysfs under
@@ -1422,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1422 if (gpiolib_initialized) { 1424 if (gpiolib_initialized) {
1423 status = gpiochip_setup_dev(gdev); 1425 status = gpiochip_setup_dev(gdev);
1424 if (status) 1426 if (status)
1425 goto err_remove_acpi_chip; 1427 goto err_remove_irqchip;
1426 } 1428 }
1427 return 0; 1429 return 0;
1428 1430
1431err_remove_irqchip:
1432 gpiochip_irqchip_remove(chip);
1433err_remove_irqchip_mask:
1434 gpiochip_irqchip_free_valid_mask(chip);
1429err_remove_acpi_chip: 1435err_remove_acpi_chip:
1430 acpi_gpiochip_remove(chip); 1436 acpi_gpiochip_remove(chip);
1431err_remove_of_chip: 1437err_remove_of_chip:
1432 gpiochip_free_hogs(chip); 1438 gpiochip_free_hogs(chip);
1433 of_gpiochip_remove(chip); 1439 of_gpiochip_remove(chip);
1434err_remove_chip:
1435 gpiochip_irqchip_remove(chip);
1436err_free_gpiochip_mask: 1440err_free_gpiochip_mask:
1437 gpiochip_free_valid_mask(chip); 1441 gpiochip_free_valid_mask(chip);
1438err_remove_irqchip_mask:
1439 gpiochip_irqchip_free_valid_mask(chip);
1440err_remove_from_list: 1442err_remove_from_list:
1441 spin_lock_irqsave(&gpio_lock, flags); 1443 spin_lock_irqsave(&gpio_lock, flags);
1442 list_del(&gdev->list); 1444 list_del(&gdev->list);
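
The gpiolib reshuffle is all about keeping the error ladder a mirror image of the setup sequence: the irqchip pieces now initialize later, so their unwind labels move correspondingly earlier in the ladder, and a failure at step N undoes exactly steps N-1 down to 1. A skeleton of that discipline:

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("init %s\n", name);
        return fail ? -1 : 0;
}

static void undo(const char *name) { printf("undo %s\n", name); }

static int probe(int fail_at)
{
        int err;

        err = step("list", fail_at == 1);
        if (err)
                return err;
        err = step("valid_mask", fail_at == 2);
        if (err)
                goto err_list;
        err = step("irqchip", fail_at == 3);
        if (err)
                goto err_mask;
        return 0;

        /* labels run in exact reverse order of the setup calls */
err_mask:
        undo("valid_mask");
err_list:
        undo("list");
        return err;
}

int main(void)
{
        return probe(3) ? 1 : 0;
}
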
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 9b384a94d2f3..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
574 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, 574 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
575 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 575 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
576 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, 576 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, 578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, 579 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0, 0, 0, 0, 0 }, 580 { 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e4094f842e7..8b26c970a3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1143 num_deps = chunk->length_dw * 4 / 1143 num_deps = chunk->length_dw * 4 /
1144 sizeof(struct drm_amdgpu_cs_chunk_sem); 1144 sizeof(struct drm_amdgpu_cs_chunk_sem);
1145 1145
1146 if (p->post_deps)
1147 return -EINVAL;
1148
1146 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1149 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1147 GFP_KERNEL); 1150 GFP_KERNEL);
1148 p->num_post_deps = 0; 1151 p->num_post_deps = 0;
@@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1166 1169
1167 1170
1168static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, 1171static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1169 struct amdgpu_cs_chunk 1172 struct amdgpu_cs_chunk *chunk)
1170 *chunk)
1171{ 1173{
1172 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; 1174 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1173 unsigned num_deps; 1175 unsigned num_deps;
@@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
1177 num_deps = chunk->length_dw * 4 / 1179 num_deps = chunk->length_dw * 4 /
1178 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 1180 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1179 1181
1182 if (p->post_deps)
1183 return -EINVAL;
1184
1180 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1185 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1181 GFP_KERNEL); 1186 GFP_KERNEL);
1182 p->num_post_deps = 0; 1187 p->num_post_deps = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f539a2a92774..7398b4850649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
534 struct drm_sched_entity *entity) 534 struct drm_sched_entity *entity)
535{ 535{
536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); 536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
537 unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); 537 struct dma_fence *other;
538 struct dma_fence *other = centity->fences[idx]; 538 unsigned idx;
539 long r;
539 540
540 if (other) { 541 spin_lock(&ctx->ring_lock);
541 signed long r; 542 idx = centity->sequence & (amdgpu_sched_jobs - 1);
542 r = dma_fence_wait(other, true); 543 other = dma_fence_get(centity->fences[idx]);
543 if (r < 0) { 544 spin_unlock(&ctx->ring_lock);
544 if (r != -ERESTARTSYS)
545 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
546 545
547 return r; 546 if (!other)
548 } 547 return 0;
549 }
550 548
551 return 0; 549 r = dma_fence_wait(other, true);
550 if (r < 0 && r != -ERESTARTSYS)
551 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
552
553 dma_fence_put(other);
554 return r;
552} 555}
553 556
554void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) 557void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
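
The amdgpu_ctx change is a standard race fix: look up the fence and take a reference while holding the ring lock, drop the lock, and only then sleep in dma_fence_wait(), so the fence can neither be freed under the waiter nor waited on with a spinlock held. In miniature, refcounting only, with the locks shown as comments:

#include <stdio.h>

struct fence { int refs; };

static struct fence *fence_get(struct fence *f)
{
        if (f)
                f->refs++;
        return f;
}

static void fence_put(struct fence *f)
{
        if (f && --f->refs == 0)
                printf("fence freed\n");
}

static struct fence slot = { .refs = 1 };
static struct fence *ring[1] = { &slot };

int main(void)
{
        struct fence *other;

        /* spin_lock(&ring_lock); */
        other = fence_get(ring[0]);   /* pin it while the table is stable */
        /* spin_unlock(&ring_lock); */

        if (other) {
                printf("waiting on fence (refs=%d)\n", other->refs);
                fence_put(other);     /* drop our reference after the wait */
        }
        fence_put(&slot);             /* table's reference, for the demo */
        return 0;
}
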
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 04b8ac4432c7..c066e1d3f981 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -596,14 +596,18 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
596 case CHIP_VEGA20: 596 case CHIP_VEGA20:
597 break; 597 break;
598 case CHIP_RAVEN: 598 case CHIP_RAVEN:
599 if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) 599 if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
600 break; 600 &&((adev->gfx.rlc_fw_version != 106 &&
601 if ((adev->gfx.rlc_fw_version != 106 && 601 adev->gfx.rlc_fw_version < 531) ||
602 adev->gfx.rlc_fw_version < 531) || 602 (adev->gfx.rlc_fw_version == 53815) ||
603 (adev->gfx.rlc_fw_version == 53815) || 603 (adev->gfx.rlc_feature_version < 1) ||
604 (adev->gfx.rlc_feature_version < 1) || 604 !adev->gfx.rlc.is_rlc_v2_1))
605 !adev->gfx.rlc.is_rlc_v2_1)
606 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 605 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
606
607 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
608 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
609 AMD_PG_SUPPORT_CP |
610 AMD_PG_SUPPORT_RLC_SMU_HS;
607 break; 611 break;
608 default: 612 default:
609 break; 613 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 662612f89c70..9922bce3fd89 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle)
552 AMD_CG_SUPPORT_BIF_LS; 552 AMD_CG_SUPPORT_BIF_LS;
553 adev->pg_flags = AMD_PG_SUPPORT_VCN | 553 adev->pg_flags = AMD_PG_SUPPORT_VCN |
554 AMD_PG_SUPPORT_VCN_DPG | 554 AMD_PG_SUPPORT_VCN_DPG |
555 AMD_PG_SUPPORT_MMHUB |
556 AMD_PG_SUPPORT_ATHUB; 555 AMD_PG_SUPPORT_ATHUB;
557 adev->external_rev_id = adev->rev_id + 0x1; 556 adev->external_rev_id = adev->rev_id + 0x1;
558 break; 557 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 23265414d448..04fbf05d7176 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle)
992 992
993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; 993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
994 } 994 }
995
996 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
997 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
998 AMD_PG_SUPPORT_CP |
999 AMD_PG_SUPPORT_RLC_SMU_HS;
1000 break; 995 break;
1001 default: 996 default:
1002 /* FIXME: not supported yet */ 997 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4a29f72334d0..45be7a2132bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3131,13 +3131,25 @@ static enum dc_color_depth
3131convert_color_depth_from_display_info(const struct drm_connector *connector, 3131convert_color_depth_from_display_info(const struct drm_connector *connector,
3132 const struct drm_connector_state *state) 3132 const struct drm_connector_state *state)
3133{ 3133{
3134 uint32_t bpc = connector->display_info.bpc; 3134 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3135
3136 /* Assume 8 bpc by default if no bpc is specified. */
3137 bpc = bpc ? bpc : 8;
3135 3138
3136 if (!state) 3139 if (!state)
3137 state = connector->state; 3140 state = connector->state;
3138 3141
3139 if (state) { 3142 if (state) {
3140 bpc = state->max_bpc; 3143 /*
3144 * Cap display bpc based on the user requested value.
3145 *
 3146 * The value for state->max_bpc may not be correctly updated
3147 * depending on when the connector gets added to the state
3148 * or if this was called outside of atomic check, so it
3149 * can't be used directly.
3150 */
3151 bpc = min(bpc, state->max_requested_bpc);
3152
3141 /* Round down to the nearest even number. */ 3153 /* Round down to the nearest even number. */
3142 bpc = bpc - (bpc & 1); 3154 bpc = bpc - (bpc & 1);
3143 } 3155 }
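
The amdgpu_dm hunk replaces a blind assignment with a clamp: default to 8 bpc when the EDID reports nothing, cap at the user's max_requested_bpc, then round down to an even value. The arithmetic in isolation:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned int display_bpc = 0;      /* EDID gave nothing */
        unsigned int requested = 10;       /* user's max_requested_bpc */
        unsigned int bpc;

        bpc = display_bpc ? display_bpc : 8;  /* assume 8 bpc by default */
        bpc = min(bpc, requested);            /* cap at the user request */
        bpc -= bpc & 1;                       /* round down to even */
        printf("%u bpc\n", bpc);              /* prints 8 */
        return 0;
}
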
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f27c6fbb192e..90c4e87ac5ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
2101 if (ret) 2101 if (ret)
2102 return ret; 2102 return ret;
2103 2103
2104 *query = metrics_table.CurrSocketPower << 8; 2104 /* For the 40.46 release, they changed the value name */
2105 if (hwmgr->smu_version == 0x282e00)
2106 *query = metrics_table.AverageSocketPower << 8;
2107 else
2108 *query = metrics_table.CurrSocketPower << 8;
2105 2109
2106 return ret; 2110 return ret;
2107} 2111}
@@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2349 data->dpm_table.soc_table.dpm_state.soft_max_level = 2353 data->dpm_table.soc_table.dpm_state.soft_max_level =
2350 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2354 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2351 2355
2352 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2356 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2357 FEATURE_DPM_UCLK_MASK |
2358 FEATURE_DPM_SOCCLK_MASK);
2353 PP_ASSERT_WITH_CODE(!ret, 2359 PP_ASSERT_WITH_CODE(!ret,
2354 "Failed to upload boot level to highest!", 2360 "Failed to upload boot level to highest!",
2355 return ret); 2361 return ret);
2356 2362
2357 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2363 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2364 FEATURE_DPM_UCLK_MASK |
2365 FEATURE_DPM_SOCCLK_MASK);
2358 PP_ASSERT_WITH_CODE(!ret, 2366 PP_ASSERT_WITH_CODE(!ret,
2359 "Failed to upload dpm max level to highest!", 2367 "Failed to upload dpm max level to highest!",
2360 return ret); 2368 return ret);
@@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2387 data->dpm_table.soc_table.dpm_state.soft_max_level = 2395 data->dpm_table.soc_table.dpm_state.soft_max_level =
2388 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2396 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2389 2397
2390 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2398 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2399 FEATURE_DPM_UCLK_MASK |
2400 FEATURE_DPM_SOCCLK_MASK);
2391 PP_ASSERT_WITH_CODE(!ret, 2401 PP_ASSERT_WITH_CODE(!ret,
2392 "Failed to upload boot level to highest!", 2402 "Failed to upload boot level to highest!",
2393 return ret); 2403 return ret);
2394 2404
2395 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2405 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2406 FEATURE_DPM_UCLK_MASK |
2407 FEATURE_DPM_SOCCLK_MASK);
2396 PP_ASSERT_WITH_CODE(!ret, 2408 PP_ASSERT_WITH_CODE(!ret,
2397 "Failed to upload dpm max level to highest!", 2409 "Failed to upload dpm max level to highest!",
2398 return ret); 2410 return ret);
@@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2403 2415
2404static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2416static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2405{ 2417{
2418 struct vega20_hwmgr *data =
2419 (struct vega20_hwmgr *)(hwmgr->backend);
2420 uint32_t soft_min_level, soft_max_level;
2406 int ret = 0; 2421 int ret = 0;
2407 2422
2408 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2423 /* gfxclk soft min/max settings */
2424 soft_min_level =
2425 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2426 soft_max_level =
2427 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2428
2429 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2430 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2431 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2432 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2433
2434 /* uclk soft min/max settings */
2435 soft_min_level =
2436 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2437 soft_max_level =
2438 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2439
2440 data->dpm_table.mem_table.dpm_state.soft_min_level =
2441 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2442 data->dpm_table.mem_table.dpm_state.soft_max_level =
2443 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2444
2445 /* socclk soft min/max settings */
2446 soft_min_level =
2447 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2448 soft_max_level =
2449 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2450
2451 data->dpm_table.soc_table.dpm_state.soft_min_level =
2452 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2453 data->dpm_table.soc_table.dpm_state.soft_max_level =
2454 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2455
2456 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2457 FEATURE_DPM_UCLK_MASK |
2458 FEATURE_DPM_SOCCLK_MASK);
2409 PP_ASSERT_WITH_CODE(!ret, 2459 PP_ASSERT_WITH_CODE(!ret,
2410 "Failed to upload DPM Bootup Levels!", 2460 "Failed to upload DPM Bootup Levels!",
2411 return ret); 2461 return ret);
2412 2462
2413 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2463 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2464 FEATURE_DPM_UCLK_MASK |
2465 FEATURE_DPM_SOCCLK_MASK);
2414 PP_ASSERT_WITH_CODE(!ret, 2466 PP_ASSERT_WITH_CODE(!ret,
2415 "Failed to upload DPM Max Levels!", 2467 "Failed to upload DPM Max Levels!",
2416 return ret); 2468 return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index a0f52c86d8c7..a78b2e295895 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -907,8 +907,6 @@ struct smu_funcs
907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) 907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
908#define smu_set_azalia_d3_pme(smu) \ 908#define smu_set_azalia_d3_pme(smu) \
909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) 909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
910#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
911 ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
912#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ 910#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
913 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) 911 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
914#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ 912#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 5fde5cf65b42..53097961bf2b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
326 struct amdgpu_device *adev = smu->adev; 326 struct amdgpu_device *adev = smu->adev;
327 const struct smc_firmware_header_v1_0 *hdr; 327 const struct smc_firmware_header_v1_0 *hdr;
328 int ret, index; 328 int ret, index;
329 uint32_t size; 329 uint32_t size = 0;
330 uint16_t atom_table_size;
330 uint8_t frev, crev; 331 uint8_t frev, crev;
331 void *table; 332 void *table;
332 uint16_t version_major, version_minor; 333 uint16_t version_major, version_minor;
@@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
354 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 355 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
355 powerplayinfo); 356 powerplayinfo);
356 357
357 ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev, 358 ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
358 (uint8_t **)&table); 359 (uint8_t **)&table);
359 if (ret) 360 if (ret)
360 return ret; 361 return ret;
362 size = atom_table_size;
361 } 363 }
362 364
363 if (!smu->smu_table.power_play_table) 365 if (!smu->smu_table.power_play_table)
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index dd6fd1c8bf24..6a14497257e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3050,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
 
 static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
 {
+	uint32_t smu_version;
 	int ret = 0;
 	SmuMetrics_t metrics;
 
@@ -3060,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
 	if (ret)
 		return ret;
 
-	*value = metrics.CurrSocketPower << 8;
+	ret = smu_get_smc_version(smu, NULL, &smu_version);
+	if (ret)
+		return ret;
+
+	/* For the 40.46 release, they changed the value name */
+	if (smu_version == 0x282e00)
+		*value = metrics.AverageSocketPower << 8;
+	else
+		*value = metrics.CurrSocketPower << 8;
 
 	return 0;
 }
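The constant in that firmware check is easier to audit once decoded. Assuming the usual byte-packed major.minor.patch layout for SMU versions, 0x282e00 splits into 0x28 = 40, 0x2e = 46, 0x00 = 0, i.e. exactly the "40.46 release" the comment mentions. A hedged sketch of the decode:

	/* assumed layout: 0xMMmmpp -> major.minor.patch */
	u32 major = (smu_version >> 16) & 0xff;	/* 0x28 = 40 */
	u32 minor = (smu_version >> 8) & 0xff;	/* 0x2e = 46 */
	u32 patch = smu_version & 0xff;		/* 0x00 = 0 */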
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..9d4d5075cc64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
 #include <linux/iommu.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #ifdef CONFIG_DEBUG_FS
@@ -126,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
 	pipe->of_output_port =
 		of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
 
-	pipe->of_node = np;
+	pipe->of_node = of_node_get(np);
 
 	return 0;
 }
@@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
 		return mdev->irq;
 	}
 
+	/* Get the optional framebuffer memory resource */
+	ret = of_reserved_mem_device_init(dev);
+	if (ret && ret != -ENODEV)
+		return ret;
+	ret = 0;
+
 	for_each_available_child_of_node(np, child) {
 		if (of_node_cmp(child->name, "pipeline") == 0) {
 			ret = komeda_parse_pipe_dt(mdev, child);
@@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
 
 	mdev->n_pipelines = 0;
 
+	of_reserved_mem_device_release(dev);
+
 	if (funcs && funcs->cleanup)
 		funcs->cleanup(mdev);
 
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
index cd4d9f53ddef..c9a1edb9a000 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
 	return NULL;
 }
 
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
+{
+	u32 bpp;
+
+	switch (info->format) {
+	case DRM_FORMAT_YUV420_8BIT:
+		bpp = 12;
+		break;
+	case DRM_FORMAT_YUV420_10BIT:
+		bpp = 15;
+		break;
+	default:
+		bpp = info->cpp[0] * 8;
+		break;
+	}
+
+	return bpp;
+}
+
 /* Two assumptions
  * 1. RGB always has YTR
  * 2. Tiled RGB always has SC
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 3631910d33b5..32273cf18f7c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -97,6 +97,9 @@ const struct komeda_format_caps *
 komeda_get_format_caps(struct komeda_format_caps_table *table,
 		       u32 fourcc, u64 modifier);
 
+u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
+			       u64 modifier);
+
 u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
 				  u32 layer_type, u32 *n_fmts);
 
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3b0a70ed6aa0..1b01a625f40e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
 	struct drm_framebuffer *fb = &kfb->base;
 	const struct drm_format_info *info = fb->format;
 	struct drm_gem_object *obj;
-	u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks;
+	u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
 	u64 min_size;
 
 	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
@@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
 	kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
 				    alignment_header);
 
+	bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
 	kfb->afbc_size = kfb->offset_payload + n_blocks *
-			 ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS,
+			 ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
 			       AFBC_SUPERBLK_ALIGNMENT);
 	min_size = kfb->afbc_size + fb->offsets[0];
 	if (min_size > obj->size) {
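The bpp helper exists because info->cpp[0] only expresses whole bytes per pixel, while AFBC-packed YUV420 averages 12 or 15 bits. Assuming AFBC_SUPERBLK_PIXELS is the 16x16 = 256-pixel superblock komeda uses, the payload term above works out, for DRM_FORMAT_YUV420_8BIT, to ALIGN(12 * 256 / 8, AFBC_SUPERBLK_ALIGNMENT) = ALIGN(384, ...) bytes per superblock — a size the old cpp-based formula could not represent.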
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..69d9e26c60c8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
 
 #include "komeda_dev.h"
@@ -146,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
 	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
 	struct komeda_plane_state *kplane_st;
 	struct drm_plane_state *plane_st;
-	struct drm_framebuffer *fb;
 	struct drm_plane *plane;
 	struct list_head zorder_list;
 	int order = 0, err;
@@ -172,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
 
 	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
 		plane_st = &kplane_st->base;
-		fb = plane_st->fb;
 		plane = plane_st->plane;
 
 		plane_st->normalized_zpos = order++;
@@ -205,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev,
 			    struct drm_atomic_state *state)
 {
 	struct drm_crtc *crtc;
-	struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+	struct drm_crtc_state *new_crtc_st;
 	int i, err;
 
 	err = drm_atomic_helper_check_modeset(dev, state);
@@ -216,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev,
 	 * so need to add all affected_planes (even unchanged) to
 	 * drm_atomic_state.
 	 */
-	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+	for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
 		err = drm_atomic_add_affected_planes(state, crtc);
 		if (err)
 			return err;
@@ -307,24 +306,33 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
 			       komeda_kms_irq_handler, IRQF_SHARED,
 			       drm->driver->name, drm);
 	if (err)
-		goto cleanup_mode_config;
+		goto free_component_binding;
 
 	err = mdev->funcs->enable_irq(mdev);
 	if (err)
-		goto cleanup_mode_config;
+		goto free_component_binding;
 
 	drm->irq_enabled = true;
 
+	drm_kms_helper_poll_init(drm);
+
 	err = drm_dev_register(drm, 0);
 	if (err)
-		goto cleanup_mode_config;
+		goto free_interrupts;
 
 	return kms;
 
-cleanup_mode_config:
+free_interrupts:
+	drm_kms_helper_poll_fini(drm);
 	drm->irq_enabled = false;
+	mdev->funcs->disable_irq(mdev);
+free_component_binding:
+	component_unbind_all(mdev->dev, drm);
+cleanup_mode_config:
 	drm_mode_config_cleanup(drm);
 	komeda_kms_cleanup_private_objs(kms);
+	drm->dev_private = NULL;
+	drm_dev_put(drm);
 free_kms:
 	kfree(kms);
 	return ERR_PTR(err);
@@ -335,12 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
 	struct drm_device *drm = &kms->base;
 	struct komeda_dev *mdev = drm->dev_private;
 
+	drm_dev_unregister(drm);
+	drm_kms_helper_poll_fini(drm);
+	drm_atomic_helper_shutdown(drm);
 	drm->irq_enabled = false;
 	mdev->funcs->disable_irq(mdev);
-	drm_dev_unregister(drm);
 	component_unbind_all(mdev->dev, drm);
-	komeda_kms_cleanup_private_objs(kms);
 	drm_mode_config_cleanup(drm);
+	komeda_kms_cleanup_private_objs(kms);
 	drm->dev_private = NULL;
 	drm_dev_put(drm);
 }
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index a90bcbb3cb23..14b683164544 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
 			   struct seq_file *sf);
 
 /* component APIs */
+extern __printf(10, 11)
 struct komeda_component *
 komeda_component_add(struct komeda_pipeline *pipe,
 		     size_t comp_sz, u32 id, u32 hw_id,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 617e1f7b8472..2851cac94d86 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
 	if (!kcrtc->master->wb_layer)
 		return 0;
 
-	kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL);
+	kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
 	if (!kwb_conn)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..1cb1fa74cfbc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 	else if (intel_crtc_has_dp_encoder(pipe_config))
 		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
 						    &pipe_config->dp_m_n);
-	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
-		dotclock = pipe_config->port_clock * 2 / 3;
+	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+		dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
 	else
 		dotclock = pipe_config->port_clock;
 
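The replacement is a strict generalization of the deleted special case: for deep-color HDMI the link clock runs pipe_bpp / 24 times faster than the dot clock, so dotclock = port_clock * 24 / pipe_bpp. At the previously hard-coded 36 bpp that gives 24/36 = 2/3, reproducing the old `* 2 / 3`, and the same formula now also covers 30 bpp (24/30 = 4/5).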
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..18e4cba76720 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -539,7 +539,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
-	drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+	/*
+	 * Reuse the prop from the SST connector because we're
+	 * not allowed to create new props after device registration.
+	 */
+	connector->max_bpc_property =
+		intel_dp->attached_connector->base.max_bpc_property;
+	if (connector->max_bpc_property)
+		drm_connector_attach_max_bpc_property(connector, 6, 12);
 
 	return connector;
 
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..f413904a3e96 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
 		DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
 	DRM_INFO("PPS2 = 0x%08x\n", pps_val);
-	if (encoder->type == INTEL_OUTPUT_EDP) {
+	if (cpu_transcoder == TRANSCODER_EDP) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e3397d936..bac1ee94f63f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	pci_set_master(pdev);
 
+	/*
+	 * We don't have a max segment size, so set it to the max so sg's
+	 * debugging layer doesn't complain
+	 */
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
 	/* overlay on gen2 is broken and can't address above 1G */
 	if (IS_GEN(dev_priv, 2)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..724627afdedc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info;
 static void vgt_deballoon_space(struct i915_ggtt *ggtt,
 				struct drm_mm_node *node)
 {
+	if (!drm_mm_node_allocated(node))
+		return;
+
 	DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
 			 node->start,
 			 node->start + node->size,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7ec5d84..f11979879e7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -829,7 +829,7 @@ struct intel_crtc_state {
 
 	/*
 	 * Frequence the dpll for the port should run at. Differs from the
-	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+	 * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
 	 * already multiplied by pixel_multiplier.
 	 */
 	int port_clock;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..945bc20f1d33 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -17,6 +17,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp.h"
@@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	struct mtk_drm_private *private = drm->dev_private;
 	struct platform_device *pdev;
 	struct device_node *np;
+	struct device *dma_dev;
 	int ret;
 
 	if (!iommu_present(&platform_bus_type))
@@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 		goto err_component_unbind;
 	}
 
-	private->dma_dev = &pdev->dev;
+	dma_dev = &pdev->dev;
+	private->dma_dev = dma_dev;
+
+	/*
+	 * Configure the DMA segment size to make sure we get contiguous IOVA
+	 * when importing PRIME buffers.
+	 */
+	if (!dma_dev->dma_parms) {
+		private->dma_parms_allocated = true;
+		dma_dev->dma_parms =
+			devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+				     GFP_KERNEL);
+	}
+	if (!dma_dev->dma_parms) {
+		ret = -ENOMEM;
+		goto err_component_unbind;
+	}
+
+	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dma_dev, "Failed to set DMA segment size\n");
+		goto err_unset_dma_parms;
+	}
 
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	drm->irq_enabled = true;
 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret < 0)
-		goto err_component_unbind;
+		goto err_unset_dma_parms;
 
 	drm_kms_helper_poll_init(drm);
 	drm_mode_config_reset(drm);
 
 	return 0;
 
+err_unset_dma_parms:
+	if (private->dma_parms_allocated)
+		dma_dev->dma_parms = NULL;
 err_component_unbind:
 	component_unbind_all(drm->dev, drm);
 err_config_cleanup:
@@ -302,9 +329,14 @@ err_config_cleanup:
 
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
+	struct mtk_drm_private *private = drm->dev_private;
+
 	drm_kms_helper_poll_fini(drm);
 	drm_atomic_helper_shutdown(drm);
 
+	if (private->dma_parms_allocated)
+		private->dma_dev->dma_parms = NULL;
+
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
 }
@@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = {
 	.compat_ioctl = drm_compat_ioctl,
 };
 
+/*
+ * We need to override this because the device used to import the memory is
+ * not dev->dev, as drm_gem_prime_import() expects.
+ */
+struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
+						struct dma_buf *dma_buf)
+{
+	struct mtk_drm_private *private = dev->dev_private;
+
+	return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
+}
+
 static struct drm_driver mtk_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
@@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = drm_gem_prime_export,
-	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_import = mtk_drm_gem_prime_import,
 	.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
 	.gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
 		comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
 		if (!comp) {
 			ret = -ENOMEM;
+			of_node_put(node);
 			goto err_node;
 		}
 
 		ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
-		if (ret)
+		if (ret) {
+			of_node_put(node);
 			goto err_node;
+		}
 
 		private->ddp_comp[comp_id] = comp;
 	}
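The dma_parms handling above reflects a general constraint rather than anything MediaTek-specific: dma_set_max_seg_size() only stores the limit into dev->dma_parms, and platform devices, unlike PCI devices, are not given that structure by default. A minimal sketch of the pattern, assuming a device-managed allocation is acceptable:

	/* sketch: provide storage for the segment limit, then set it */
	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;
	return dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));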
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 598ff3e70446..e03fea12ff59 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -51,6 +51,8 @@ struct mtk_drm_private {
 	} commit;
 
 	struct drm_atomic_state *suspend_state;
+
+	bool dma_parms_allocated;
 };
 
 extern struct platform_driver mtk_ddp_driver;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404fe660..a11637b0f6cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		u8 *ptr = msg->buf;
 
 		while (remaining) {
-			u8 cnt = (remaining > 16) ? 16 : remaining;
-			u8 cmd;
+			u8 cnt, retries, cmd;
 
 			if (msg->flags & I2C_M_RD)
 				cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 			if (mcnt || remaining > 16)
 				cmd |= 4; /* MOT */
 
-			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
-			if (ret < 0) {
-				nvkm_i2c_aux_release(aux);
-				return ret;
+			for (retries = 0, cnt = 0;
+			     retries < 32 && !cnt;
+			     retries++) {
+				cnt = min_t(u8, remaining, 16);
+				ret = aux->func->xfer(aux, true, cmd,
+						      msg->addr, ptr, &cnt);
+				if (ret < 0)
+					goto out;
+			}
+			if (!cnt) {
+				AUX_TRACE(aux, "no data after 32 retries");
+				ret = -EIO;
+				goto out;
 			}
 
 			ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		msg++;
 	}
 
+	ret = num;
+out:
 	nvkm_i2c_aux_release(aux);
-	return num;
+	return ret;
 }
 
 static u32
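The nouveau change is an instance of a general AUX rule: a transfer can succeed while moving zero bytes, so checking only the return code can spin forever on a slow sink. A condensed sketch of the bounded-retry shape, with try_once() as a hypothetical stand-in for the hardware transfer:

	int done = 0, retries;

	for (retries = 0; retries < 32 && !done; retries++) {
		done = try_once();	/* hypothetical: <0 error, 0 no progress */
		if (done < 0)
			return done;	/* hard failure, stop immediately */
	}
	if (!done)
		return -EIO;		/* 32 attempts without progress */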
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index de0f882f0f7b..14b41de44ebc 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,6 +4,7 @@
  * Author: Archit Taneja <archit@ti.com>
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
 {
 	struct device_node *remote_node;
 
-	remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+	remote_node = of_graph_get_remote_node(out->dev->of_node,
+					       ffs(out->of_ports) - 1, 0);
 	if (!remote_node) {
 		dev_dbg(out->dev, "failed to find video sink\n");
 		return 0;
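ffs() returns the 1-based position of the least significant set bit (0 when no bit is set), so ffs(out->of_ports) - 1 converts the output's port bitmask into the number of its lowest port. For an output whose only port is port 2, of_ports = BIT(2) = 0x4 and ffs(0x4) - 1 = 3 - 1 = 2, where the old code unconditionally looked at port 0.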
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..1bad0a2cc5c6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev)
 	if (omapdss_is_initialized() == false)
 		return -EPROBE_DEFER;
 
-	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
 		return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index f33e349c4ec5..952201c6d821 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400);
 static struct drm_driver qxl_driver;
 static struct pci_driver qxl_pci_driver;
 
+static bool is_vga(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
+}
+
 static int
 qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto disable_pci;
 
+	if (is_vga(pdev)) {
+		ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
+		if (ret) {
+			DRM_ERROR("can't get legacy vga ioports\n");
+			goto disable_pci;
+		}
+	}
+
 	ret = qxl_device_init(qdev, &qxl_driver, pdev);
 	if (ret)
-		goto disable_pci;
+		goto put_vga;
 
 	ret = qxl_modeset_init(qdev);
 	if (ret)
@@ -105,6 +118,9 @@ modeset_cleanup:
 	qxl_modeset_fini(qdev);
 unload:
 	qxl_device_fini(qdev);
+put_vga:
+	if (is_vga(pdev))
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 disable_pci:
 	pci_disable_device(pdev);
 free_dev:
@@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev)
 
 	qxl_modeset_fini(qdev);
 	qxl_device_fini(qdev);
+	if (is_vga(pdev))
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
 	dev->dev_private = NULL;
 	kfree(qdev);
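The shift in is_vga() is load-bearing: pci_dev->class holds the full 24-bit class word (base class, subclass, programming interface), while PCI_CLASS_DISPLAY_VGA is the 16-bit value 0x0300. 0x0300 << 8 = 0x030000, so the comparison matches only VGA-compatible controllers with programming interface 0x00 and lets other display devices skip the legacy I/O arbitration entirely.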
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..082d02c84024 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
 
 	/* Locate the companion LVDS encoder for dual-link operation, if any. */
 	companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
-	if (!companion) {
-		dev_err(dev, "Companion LVDS encoder not found\n");
-		return -ENXIO;
-	}
+	if (!companion)
+		return 0;
 
 	/*
 	 * Sanity check: the companion encoder must have the same compatible
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..df0cc8f46d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
 		/* R and B components are only 5 bits deep */
 		val |= SUN4I_TCON0_FRM_CTL_MODE_R;
 		val |= SUN4I_TCON0_FRM_CTL_MODE_B;
+		/* Fall through */
 	case MEDIA_BUS_FMT_RGB666_1X18:
 	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
 		/* Fall through: enable dithering */
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..b889ad3e86e1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
 			ret = sun6i_dsi_dcs_read(dsi, msg);
 			break;
 		}
+		/* Else, fall through */
 
 	default:
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index b2da31310d24..09b526518f5a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 		.interruptible = false,
 		.no_wait_gpu = false
 	};
+	size_t max_segment;
 
 	/* wtf swapping */
 	if (bo->pages)
@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 	if (!bo->pages)
 		goto out;
 
-	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
-					nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	max_segment = virtio_max_dma_size(qdev->vdev);
+	max_segment &= PAGE_MASK;
+	if (max_segment > SCATTERLIST_MAX_SEGMENT)
+		max_segment = SCATTERLIST_MAX_SEGMENT;
+	ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+					  nr_pages << PAGE_SHIFT,
+					  max_segment, GFP_KERNEL);
 	if (ret)
 		goto out;
 	return 0;
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 2310c96ccf4a..db1b55df0d13 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
 
 	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
 
-	cp2112_gpio_direction_input(gc, d->hwirq);
-
 	if (!dev->gpio_poll) {
 		dev->gpio_poll = true;
 		schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
 		return PTR_ERR(dev->desc[pin]);
 	}
 
+	ret = cp2112_gpio_direction_input(&dev->gc, pin);
+	if (ret < 0) {
+		dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
+		goto err_desc;
+	}
+
 	ret = gpiochip_lock_as_irq(&dev->gc, pin);
 	if (ret) {
 		dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 21268c9fa71a..0179f7ed77e5 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3749,30 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = {
 
 	{ L27MHZ_DEVICE(HID_ANY_ID) },
 
-	{ /* Logitech G203/Prodigy Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
-	{ /* Logitech G302 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
-	{ /* Logitech G303 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
-	{ /* Logitech G400 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
 	{ /* Logitech G403 Wireless Gaming Mouse over USB */
 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
-	{ /* Logitech G403 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
-	{ /* Logitech G403 Hero Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
-	{ /* Logitech G502 Proteus Core Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
-	{ /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
-	{ /* Logitech G502 Hero Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
-	{ /* Logitech G700 Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
-	{ /* Logitech G700s Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
 	{ /* Logitech G703 Gaming Mouse over USB */
 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
 	{ /* Logitech G703 Hero Gaming Mouse over USB */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1065692f90e2..5792a104000a 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,6 +24,7 @@
 #define ICL_MOBILE_DEVICE_ID	0x34FC
 #define SPT_H_DEVICE_ID		0xA135
 #define CML_LP_DEVICE_ID	0x02FC
+#define EHL_Ax_DEVICE_ID	0x4BB3
 
 #define REVISION_ID_CHT_A0	0x6
 #define REVISION_ID_CHT_Ax_SI	0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index aa80b4d3b740..279567baca3d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 7a8ddc999a8e..1713235d28cb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
 		y >>= 1;
 		distance >>= 1;
 	}
+	if (features->type == INTUOSHT2)
+		distance = features->distance_max - distance;
 	input_report_abs(input, ABS_X, x);
 	input_report_abs(input, ABS_Y, y);
 	input_report_abs(input, ABS_DISTANCE, distance);
@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
 	input_report_key(input, BTN_BASE2, (data[11] & 0x02));
 
 	if (data[12] & 0x80)
-		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
 	else
 		input_report_abs(input, ABS_WHEEL, 0);
 
@@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 	}
 	if (wacom->tool[0]) {
 		input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
-		if (wacom->features.type == INTUOSP2_BT) {
+		if (wacom->features.type == INTUOSP2_BT ||
+		    wacom->features.type == INTUOSP2S_BT) {
 			input_report_abs(pen_input, ABS_DISTANCE,
 					 range ? frame[13] : wacom->features.distance_max);
 		} else {
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5f9505a087f6..23f358cb7f49 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -26,7 +26,7 @@
 
 static unsigned long virt_to_hvpfn(void *addr)
 {
-	unsigned long paddr;
+	phys_addr_t paddr;
 
 	if (is_vmalloc_addr(addr))
 		paddr = page_to_phys(vmalloc_to_page(addr)) +
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 362e70e9d145..fb16a622e8ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -146,8 +146,6 @@ struct hv_context {
 	 */
 	u64 guestid;
 
-	void *tsc_page;
-
 	struct hv_per_cpu_context __percpu *cpu_context;
 
 	/*
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c0378c3de9a4..91dfeba62485 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		.driver_data = (kernel_ulong_t)0,
 	},
 	{
+		/* Lewisburg PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+		.driver_data = (kernel_ulong_t)0,
+	},
+	{
 		/* Gemini Lake */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Tiger Lake PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{ 0 },
 };
 
203}; 213};
204 214
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index e55b902560de..181e7ff1ec4f 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1276,7 +1276,6 @@ int stm_source_register_device(struct device *parent,
 
 err:
 	put_device(&src->dev);
-	kfree(src);
 
 	return err;
 }
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index d7fd76baec92..19ef2b0c682a 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -790,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
 
 static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
 {
-	u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	u32 val;
+
+	/* We do not support the SMBUS Quick command */
+	val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 
 	if (adap->algo->reg_slave)
 		val |= I2C_FUNC_SLAVE;
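I2C_FUNC_SMBUS_EMUL is a composite mask of the SMBus operations an I2C-only master can emulate, and I2C_FUNC_SMBUS_QUICK is one bit within it, so the `& ~` keeps every other emulated command while withdrawing Quick. The practical effect, presumably, is on probing tools such as i2cdetect, which default to Quick-based probing and must fall back to a read-byte probe on this adapter.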
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index e7f9305b2dd9..f5f001738df5 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 
 	dev->disable_int(dev);
 	dev->disable(dev);
+	synchronize_irq(dev->irq);
 	dev->slave = NULL;
 	pm_runtime_put(dev->dev);
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f2956936c3f2..2e08b4722dc4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
 	int i;
 
 	status = acpi_get_object_info(obj_handle, &info);
-	if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID))
+	if (ACPI_FAILURE(status))
 		return AE_OK;
 
+	if (!(info->valid & ACPI_VALID_HID))
+		goto smo88xx_not_found;
+
 	hid = info->hardware_id.string;
 	if (!hid)
-		return AE_OK;
+		goto smo88xx_not_found;
 
 	i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
 	if (i < 0)
-		return AE_OK;
+		goto smo88xx_not_found;
+
+	kfree(info);
 
 	*((bool *)return_value) = true;
 	return AE_CTRL_TERMINATE;
+
+smo88xx_not_found:
+	kfree(info);
+	return AE_OK;
 }
 
 static bool is_dell_system_with_lis3lv02d(void)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 252edb433fdf..29eae1bf4f86 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
 	.max_num_msgs = 255,
 };
 
+static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
+	.flags = I2C_AQ_NO_ZERO_LEN,
+};
+
 static const struct mtk_i2c_compatible mt2712_compat = {
 	.regs = mt_i2c_regs_v1,
 	.pmic_i2c = 0,
@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
 };
 
 static const struct mtk_i2c_compatible mt8183_compat = {
+	.quirks = &mt8183_i2c_quirks,
 	.regs = mt_i2c_regs_v2,
 	.pmic_i2c = 0,
 	.dcm = 0,
@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
 
 static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+		return I2C_FUNC_I2C |
+			(I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+	else
+		return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm mtk_i2c_algorithm = {
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c46c4bddc7ca..cba325eb852f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -91,7 +91,7 @@
 #define SB800_PIIX4_PORT_IDX_MASK	0x06
 #define SB800_PIIX4_PORT_IDX_SHIFT	1
 
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
 #define SB800_PIIX4_PORT_IDX_KERNCZ		0x02
 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ	0x18
 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ	3
@@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 	/* Find which register is used for port selection */
 	if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
 	    PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
-		switch (PIIX4_dev->device) {
-		case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+		if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+		    (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+		     PIIX4_dev->revision >= 0x1F)) {
 			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
 			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
 			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
-			break;
-		case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
-		default:
+		} else {
 			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
 			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
 			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
-			break;
 		}
 	} else {
 		if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f26ed495d384..9c440fa6a3dd 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
  */
 void i2c_unregister_device(struct i2c_client *client)
 {
-	if (!client)
+	if (IS_ERR_OR_NULL(client))
 		return;
 
 	if (client->dev.of_node) {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 19f1730a4f24..a68d0ccf67a4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
 	if (ret)
 		goto err;
 
-	cma_configfs_init();
+	ret = cma_configfs_init();
+	if (ret)
+		goto err_ib;
 
 	return 0;
 
+err_ib:
+	ib_unregister_client(&cma_client);
 err:
 	unregister_netdevice_notifier(&cma_nb);
 	ib_sa_unregister_client(&sa_client);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index b79890739a2c..af8c85d18e62 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
 	struct auto_mode_param *param = &counter->mode.param;
 	bool match = true;
 
-	if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res))
+	if (!rdma_is_visible_in_pid_ns(&qp->res))
 		return false;
 
-	/* Ensure that counter belong to right PID */
-	if (!rdma_is_kernel_res(&counter->res) &&
-	    !rdma_is_kernel_res(&qp->res) &&
-	    (task_pid_vnr(counter->res.task) != current->pid))
+	/* Ensure that counter belongs to the right PID */
+	if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
 		return false;
 
 	if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
 	return qp;
 
 err:
-	rdma_restrack_put(&qp->res);
+	rdma_restrack_put(res);
 	return NULL;
 }
 
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 87d40d1ecdde..020c26976558 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
 		if (!names[i])
 			continue;
-		curr = rdma_restrack_count(device, i,
-					   task_active_pid_ns(current));
+		curr = rdma_restrack_count(device, i);
 		ret = fill_res_info_entry(msg, names[i], curr);
 		if (ret)
 			goto err;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index bddff426ee0f..a07665f7ef8c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
  * rdma_restrack_count() - the current usage of specific object
  * @dev: IB device
  * @type: actual type of object to operate
- * @ns: PID namespace
  */
-int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
-			struct pid_namespace *ns)
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
 {
 	struct rdma_restrack_root *rt = &dev->res[type];
 	struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
 
 	xa_lock(&rt->xa);
 	xas_for_each(&xas, e, U32_MAX) {
-		if (ns == &init_pid_ns ||
-		    (!rdma_is_kernel_res(e) &&
-		     ns == task_active_pid_ns(e->task)))
-			cnt++;
+		if (!rdma_is_visible_in_pid_ns(e))
+			continue;
+		cnt++;
 	}
 	xa_unlock(&rt->xa);
 	return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
 	 */
 	if (rdma_is_kernel_res(res))
 		return task_active_pid_ns(current) == &init_pid_ns;
-	return task_active_pid_ns(current) == task_active_pid_ns(res->task);
+
+	/* PID 0 means that resource is not found in current namespace */
+	return task_pid_vnr(res->task);
 }
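task_pid_vnr() returns the task's PID as seen from the caller's PID namespace, and 0 when the task is not mapped into that namespace at all, so the bare return doubles as the visibility predicate: nonzero means visible, which is exactly what the new comment about PID 0 relies on.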
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 08da840ed7ee..56553668256f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	int i;
-	int n;
+	int i, n = 0;
 	struct scatterlist *sg;
 
-	if (umem->is_odp)
-		return ib_umem_num_pages(umem);
-
-	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
 		n += sg_dma_len(sg) >> PAGE_SHIFT;
 
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f..60c8f76aab33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		spin_unlock_irqrestore(&cmdq->lock, flags);
 		return -EBUSY;
 	}
+
+	size = req->cmd_size;
+	/* change the cmd_size to the number of 16byte cmdq unit.
+	 * req->cmd_size is modified here
+	 */
+	bnxt_qplib_set_cmd_slots(req);
+
 	memset(resp, 0, sizeof(*resp));
 	crsqe->resp = (struct creq_qp_event *)resp;
 	crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 
 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
 	preq = (u8 *)req;
-	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
 	do {
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb642..dfeadc192e17 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
 	do {								\
 		memset(&(req), 0, sizeof((req)));			\
 		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
-		(req).cmd_size = (sizeof((req)) +			\
-				  BNXT_QPLIB_CMDQE_UNITS - 1) /		\
-				 BNXT_QPLIB_CMDQE_UNITS;		\
+		(req).cmd_size = sizeof((req));				\
 		(req).flags = cpu_to_le16(cmd_flags);			\
 	} while (0)
 
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
95 BNXT_QPLIB_CMDQE_UNITS); 93 BNXT_QPLIB_CMDQE_UNITS);
96} 94}
97 95
96/* Set the cmd_size to a factor of CMDQE unit */
97static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
98{
99 req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
100 BNXT_QPLIB_CMDQE_UNITS;
101}
102
98#define MAX_CMDQ_IDX(depth) ((depth) - 1) 103#define MAX_CMDQ_IDX(depth) ((depth) - 1)
99 104
100static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth) 105static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
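
Taken together, the two bnxt_re hunks make cmd_size carry plain bytes until the last moment: the request-init macro stores sizeof(req), and __send_message() converts to 16-byte slots exactly once via bnxt_qplib_set_cmd_slots(), instead of converting at init time and multiplying back before the copy loop. The conversion itself is ordinary round-up division, sketched here with an assumed 16-byte unit:

#include <assert.h>

#define CMDQE_UNITS 16  /* one cmdq slot is 16 bytes */

/* Round a byte count up to whole 16-byte cmdq slots. */
static unsigned int cmd_slots(unsigned int bytes)
{
	return (bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
}

int main(void)
{
	assert(cmd_slots(16) == 1);
	assert(cmd_slots(17) == 2);  /* a partial slot still occupies a full one */
	assert(cmd_slots(32) == 2);
	return 0;
}
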
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 93613e5def9b..986c12153e62 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
141 if (!data) 141 if (!data)
142 return -ENOMEM; 142 return -ENOMEM;
143 copy = min(len, datalen - 1); 143 copy = min(len, datalen - 1);
144 if (copy_from_user(data, buf, copy)) 144 if (copy_from_user(data, buf, copy)) {
145 return -EFAULT; 145 ret = -EFAULT;
146 goto free_data;
147 }
146 148
147 ret = debugfs_file_get(file->f_path.dentry); 149 ret = debugfs_file_get(file->f_path.dentry);
148 if (unlikely(ret)) 150 if (unlikely(ret))
149 return ret; 151 goto free_data;
150 ptr = data; 152 ptr = data;
151 token = ptr; 153 token = ptr;
152 for (ptr = data; *ptr; ptr = end + 1, token = ptr) { 154 for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
195 ret = len; 197 ret = len;
196 198
197 debugfs_file_put(file->f_path.dentry); 199 debugfs_file_put(file->f_path.dentry);
200free_data:
198 kfree(data); 201 kfree(data);
199 return ret; 202 return ret;
200} 203}
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
214 return -ENOMEM; 217 return -ENOMEM;
215 ret = debugfs_file_get(file->f_path.dentry); 218 ret = debugfs_file_get(file->f_path.dentry);
216 if (unlikely(ret)) 219 if (unlikely(ret))
217 return ret; 220 goto free_data;
218 bit = find_first_bit(fault->opcodes, bitsize); 221 bit = find_first_bit(fault->opcodes, bitsize);
219 while (bit < bitsize) { 222 while (bit < bitsize) {
220 zero = find_next_zero_bit(fault->opcodes, bitsize, bit); 223 zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
232 data[size - 1] = '\n'; 235 data[size - 1] = '\n';
233 data[size] = '\0'; 236 data[size] = '\0';
234 ret = simple_read_from_buffer(buf, len, pos, data, size); 237 ret = simple_read_from_buffer(buf, len, pos, data, size);
238free_data:
235 kfree(data); 239 kfree(data);
236 return ret; 240 return ret;
237} 241}
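
Both fault.c fixes close the same hole: data is heap-allocated, and the early returns on copy_from_user() or debugfs_file_get() failure leaked it. Routing every post-allocation failure through a free_data label restores the single-exit cleanup idiom, roughly as in this userspace sketch (error codes and names are illustrative):

#include <stdlib.h>
#include <string.h>

static int parse_input(const char *src, size_t len)
{
	char *data;
	int ret = 0;

	data = malloc(len + 1);
	if (!data)
		return -1;            /* nothing allocated yet: plain return is fine */

	if (!src) {
		ret = -2;             /* error after allocation ...          */
		goto free_data;       /* ... must flow through the cleanup label */
	}
	memcpy(data, src, len);
	data[len] = '\0';
	/* ... use data ... */

free_data:
	free(data);                   /* single exit: every path releases data */
	return ret;
}

int main(void)
{
	return parse_input("opcode", 6) == 0 ? 0 : 1;
}
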
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 996fc298207e..6141f4edc6bf 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2574 hfi1_kern_clear_hw_flow(priv->rcd, qp); 2574 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2575} 2575}
2576 2576
2577static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, 2577static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
2578 struct hfi1_packet *packet, u8 rcv_type,
2579 u8 opcode)
2580{ 2578{
2581 struct rvt_qp *qp = packet->qp; 2579 struct rvt_qp *qp = packet->qp;
2582 struct hfi1_qp_priv *qpriv = qp->priv;
2583 u32 ipsn;
2584 struct ib_other_headers *ohdr = packet->ohdr;
2585 struct rvt_ack_entry *e;
2586 struct tid_rdma_request *req;
2587 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2588 u32 i;
2589 2580
2590 if (rcv_type >= RHF_RCV_TYPE_IB) 2581 if (rcv_type >= RHF_RCV_TYPE_IB)
2591 goto done; 2582 goto done;
@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2602 if (rcv_type == RHF_RCV_TYPE_EAGER) { 2593 if (rcv_type == RHF_RCV_TYPE_EAGER) {
2603 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); 2594 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2604 hfi1_schedule_send(qp); 2595 hfi1_schedule_send(qp);
2605 goto done_unlock;
2606 }
2607
2608 /*
2609 * For TID READ response, error out QP after freeing the tid
2610 * resources.
2611 */
2612 if (opcode == TID_OP(READ_RESP)) {
2613 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2614 if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2615 cmp_psn(ipsn, qp->s_psn) < 0) {
2616 hfi1_kern_read_tid_flow_free(qp);
2617 spin_unlock(&qp->s_lock);
2618 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2619 goto done;
2620 }
2621 goto done_unlock;
2622 }
2623
2624 /*
2625 * Error out the qp for TID RDMA WRITE
2626 */
2627 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2628 for (i = 0; i < rvt_max_atomic(rdi); i++) {
2629 e = &qp->s_ack_queue[i];
2630 if (e->opcode == TID_OP(WRITE_REQ)) {
2631 req = ack_to_tid_req(e);
2632 hfi1_kern_exp_rcv_clear_all(req);
2633 }
2634 } 2596 }
2635 spin_unlock(&qp->s_lock);
2636 rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2637 goto done;
2638 2597
2639done_unlock: 2598 /* Since no payload is delivered, just drop the packet */
2640 spin_unlock(&qp->s_lock); 2599 spin_unlock(&qp->s_lock);
2641done: 2600done:
2642 return true; 2601 return true;
@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2687 u32 fpsn; 2646 u32 fpsn;
2688 2647
2689 lockdep_assert_held(&qp->r_lock); 2648 lockdep_assert_held(&qp->r_lock);
2649 spin_lock(&qp->s_lock);
2690 /* If the psn is out of valid range, drop the packet */ 2650 /* If the psn is out of valid range, drop the packet */
2691 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || 2651 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2692 cmp_psn(ibpsn, qp->s_psn) > 0) 2652 cmp_psn(ibpsn, qp->s_psn) > 0)
2693 return ret; 2653 goto s_unlock;
2694 2654
2695 spin_lock(&qp->s_lock);
2696 /* 2655 /*
2697 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 2656 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2698 * requests and implicitly NAK RDMA read and atomic requests issued 2657 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2740 2699
2741 wqe = do_rc_completion(qp, wqe, ibp); 2700 wqe = do_rc_completion(qp, wqe, ibp);
2742 if (qp->s_acked == qp->s_tail) 2701 if (qp->s_acked == qp->s_tail)
2743 break; 2702 goto s_unlock;
2744 } 2703 }
2745 2704
2705 if (qp->s_acked == qp->s_tail)
2706 goto s_unlock;
2707
2746 /* Handle the eflags for the request */ 2708 /* Handle the eflags for the request */
2747 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) 2709 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2748 goto s_unlock; 2710 goto s_unlock;
@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2922 if (lnh == HFI1_LRH_GRH) 2884 if (lnh == HFI1_LRH_GRH)
2923 goto r_unlock; 2885 goto r_unlock;
2924 2886
2925 if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) 2887 if (tid_rdma_tid_err(packet, rcv_type))
2926 goto r_unlock; 2888 goto r_unlock;
2927 } 2889 }
2928 2890
@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2942 */ 2904 */
2943 spin_lock(&qp->s_lock); 2905 spin_lock(&qp->s_lock);
2944 qpriv = qp->priv; 2906 qpriv = qp->priv;
2907 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
2908 qpriv->r_tid_tail == qpriv->r_tid_head)
2909 goto unlock;
2945 e = &qp->s_ack_queue[qpriv->r_tid_tail]; 2910 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2911 if (e->opcode != TID_OP(WRITE_REQ))
2912 goto unlock;
2946 req = ack_to_tid_req(e); 2913 req = ack_to_tid_req(e);
2914 if (req->comp_seg == req->cur_seg)
2915 goto unlock;
2947 flow = &req->flows[req->clear_tail]; 2916 flow = &req->flows[req->clear_tail];
2948 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); 2917 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2949 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); 2918 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4509 struct rvt_swqe *wqe; 4478 struct rvt_swqe *wqe;
4510 struct tid_rdma_request *req; 4479 struct tid_rdma_request *req;
4511 struct tid_rdma_flow *flow; 4480 struct tid_rdma_flow *flow;
4512 u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn; 4481 u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
4513 unsigned long flags; 4482 unsigned long flags;
4514 u16 fidx; 4483 u16 fidx;
4515 4484
@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4538 ack_kpsn--; 4507 ack_kpsn--;
4539 } 4508 }
4540 4509
4510 if (unlikely(qp->s_acked == qp->s_tail))
4511 goto ack_op_err;
4512
4541 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 4513 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4542 4514
4543 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 4515 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4550 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); 4522 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4551 4523
4552 /* Drop stale ACK/NAK */ 4524 /* Drop stale ACK/NAK */
4553 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0) 4525 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4526 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
4554 goto ack_op_err; 4527 goto ack_op_err;
4555 4528
4556 while (cmp_psn(ack_kpsn, 4529 while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ done:
4712 switch ((aeth >> IB_AETH_CREDIT_SHIFT) & 4685 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
4713 IB_AETH_CREDIT_MASK) { 4686 IB_AETH_CREDIT_MASK) {
4714 case 0: /* PSN sequence error */ 4687 case 0: /* PSN sequence error */
4688 if (!req->flows)
4689 break;
4715 flow = &req->flows[req->acked_tail]; 4690 flow = &req->flows[req->acked_tail];
4691 flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
4692 if (cmp_psn(psn, flpsn) > 0)
4693 break;
4716 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, 4694 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4717 flow); 4695 flow);
4718 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); 4696 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
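
The tid_rdma.c changes are mostly defensive ordering: take s_lock before reading the PSN bounds, bail out when the ACK queue is empty (s_acked == s_tail), verify the queue entry really is a TID WRITE request with flows allocated before indexing into them, and drop stale or out-of-window ACKs instead of processing them. The common shape, validating every precondition before the dereference, in a small sketch with illustrative fields:

#include <stddef.h>
#include <stdio.h>

struct request {
	int *flows;            /* may be NULL until resources are allocated */
	unsigned head, tail;
};

/* Return the next flow entry, or NULL when it is unsafe to dereference. */
static int *next_flow(struct request *req)
{
	if (req->head == req->tail)   /* nothing outstanding: empty queue */
		return NULL;
	if (!req->flows)              /* resources not allocated yet */
		return NULL;
	return &req->flows[req->tail % 4];
}

int main(void)
{
	struct request req = { .flows = NULL, .head = 1, .tail = 0 };

	printf("%s\n", next_flow(&req) ? "process" : "drop");  /* drop */
	return 0;
}
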
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
1677 tx_buf_size, DMA_TO_DEVICE); 1677 tx_buf_size, DMA_TO_DEVICE);
1678 kfree(tun_qp->tx_ring[i].buf.addr); 1678 kfree(tun_qp->tx_ring[i].buf.addr);
1679 } 1679 }
1680 kfree(tun_qp->tx_ring);
1681 tun_qp->tx_ring = NULL;
1682 i = MLX4_NUM_TUNNEL_BUFS; 1680 i = MLX4_NUM_TUNNEL_BUFS;
1683err: 1681err:
1684 while (i > 0) { 1682 while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
1687 rx_buf_size, DMA_FROM_DEVICE); 1685 rx_buf_size, DMA_FROM_DEVICE);
1688 kfree(tun_qp->ring[i].addr); 1686 kfree(tun_qp->ring[i].addr);
1689 } 1687 }
1688 kfree(tun_qp->tx_ring);
1689 tun_qp->tx_ring = NULL;
1690 kfree(tun_qp->ring); 1690 kfree(tun_qp->ring);
1691 tun_qp->ring = NULL; 1691 tun_qp->ring = NULL;
1692 return -ENOMEM; 1692 return -ENOMEM;
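
The mad.c fix moves the kfree() of tx_ring out of the tx_err path and into the shared err: unwind, so the rx-failure path frees it as well and neither path can free it twice (the pointer is NULLed after the free). A compact model of unwinding both allocations through one label:

#include <stdlib.h>

static int setup_rings(void **tx_ring, void **rx_ring)
{
	*tx_ring = calloc(8, 64);
	if (!*tx_ring)
		return -1;           /* nothing else to unwind yet */
	*rx_ring = calloc(8, 64);
	if (!*rx_ring)
		goto err;            /* tx allocated, rx failed */
	return 0;

err:
	free(*tx_ring);              /* shared unwind frees tx_ring on every failure */
	*tx_ring = NULL;
	return -1;
}

int main(void)
{
	void *tx, *rx;

	if (setup_rings(&tx, &rx))
		return 1;
	free(rx);
	free(tx);
	return 0;
}
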
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e12a4404096b..0569bcab02d4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1024 1024
1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1026 if (MLX5_CAP_GEN(mdev, pg)) 1026 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1028 props->odp_caps = dev->odp_caps; 1028 props->odp_caps = dev->odp_caps;
1029 } 1029 }
@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN; 6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6140 } 6140 }
6141 6141
6142 mlx5_ib_internal_fill_odp_caps(dev);
6143
6142 err = mlx5_ib_init_multiport_master(dev); 6144 err = mlx5_ib_init_multiport_master(dev);
6143 if (err) 6145 if (err)
6144 return err; 6146 return err;
@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6563 6565
6564static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) 6566static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6565{ 6567{
6566 mlx5_ib_internal_fill_odp_caps(dev);
6567
6568 return mlx5_ib_odp_init_one(dev); 6568 return mlx5_ib_odp_init_one(dev);
6569} 6569}
6570 6570
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index fe1a76d8531c..a40e0abf2338 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
57 int entry; 57 int entry;
58 58
59 if (umem->is_odp) { 59 if (umem->is_odp) {
60 unsigned int page_shift = to_ib_umem_odp(umem)->page_shift; 60 struct ib_umem_odp *odp = to_ib_umem_odp(umem);
61 unsigned int page_shift = odp->page_shift;
61 62
62 *ncont = ib_umem_page_count(umem); 63 *ncont = ib_umem_odp_num_pages(odp);
63 *count = *ncont << (page_shift - PAGE_SHIFT); 64 *count = *ncont << (page_shift - PAGE_SHIFT);
64 *shift = page_shift; 65 *shift = page_shift;
65 if (order) 66 if (order)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f6a53455bf8b..9ae587b74b12 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1475 bool dyn_bfreg); 1475 bool dyn_bfreg);
1476 1476
1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); 1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
1478
1479static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
1480 bool do_modify_atomic)
1481{
1482 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1483 return false;
1484
1485 if (do_modify_atomic &&
1486 MLX5_CAP_GEN(dev->mdev, atomic) &&
1487 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
1488 return false;
1489
1490 return true;
1491}
1478#endif /* MLX5_IB_H */ 1492#endif /* MLX5_IB_H */
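
mlx5_ib_can_use_umr() gives every call site (reg_user_mr, rereg_user_mr, set_reg_wr, the ODP caps fill) one answer to "may this operation go through UMR?", instead of each re-deriving it from raw capability bits. A hedged stand-alone model with stubbed capabilities:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for MLX5_CAP_GEN() bits; real values come from firmware. */
struct caps {
	bool umr_modify_entity_size_disabled;
	bool atomic;
	bool umr_modify_atomic_disabled;
};

static bool can_use_umr(const struct caps *c, bool do_modify_atomic)
{
	if (c->umr_modify_entity_size_disabled)
		return false;
	/* Atomic-capable HW may still forbid UMR updates of atomic MRs. */
	if (do_modify_atomic && c->atomic && c->umr_modify_atomic_disabled)
		return false;
	return true;
}

int main(void)
{
	struct caps c = { false, true, true };

	printf("%d %d\n", can_use_umr(&c, false), can_use_umr(&c, true)); /* 1 0 */
	return 0;
}
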
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b74fad08412f..3401f5f6792e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1293 if (err < 0) 1293 if (err < 0)
1294 return ERR_PTR(err); 1294 return ERR_PTR(err);
1295 1295
1296 use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && 1296 use_umr = mlx5_ib_can_use_umr(dev, true);
1297 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
1298 !MLX5_CAP_GEN(dev->mdev, atomic));
1299 1297
1300 if (order <= mr_cache_max_order(dev) && use_umr) { 1298 if (order <= mr_cache_max_order(dev) && use_umr) {
1301 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, 1299 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1448 goto err; 1446 goto err;
1449 } 1447 }
1450 1448
1451 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { 1449 if (!mlx5_ib_can_use_umr(dev, true) ||
1450 (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
1452 /* 1451 /*
1453 * UMR can't be used - MKey needs to be replaced. 1452 * UMR can't be used - MKey needs to be replaced.
1454 */ 1453 */
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 1d257d1b3b0d..0a59912a4cef 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
301 301
302 memset(caps, 0, sizeof(*caps)); 302 memset(caps, 0, sizeof(*caps));
303 303
304 if (!MLX5_CAP_GEN(dev->mdev, pg)) 304 if (!MLX5_CAP_GEN(dev->mdev, pg) ||
305 !mlx5_ib_can_use_umr(dev, true))
305 return; 306 return;
306 307
307 caps->general_caps = IB_ODP_SUPPORT; 308 caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
355 356
356 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && 357 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
357 MLX5_CAP_GEN(dev->mdev, null_mkey) && 358 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
358 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 359 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
360 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
359 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT; 361 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
360 362
361 return; 363 return;
@@ -1622,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1622{ 1624{
1623 int ret = 0; 1625 int ret = 0;
1624 1626
1625 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) 1627 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1626 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); 1628 return ret;
1629
1630 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1627 1631
1628 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) { 1632 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1629 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey); 1633 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1633 } 1637 }
1634 } 1638 }
1635 1639
1636 if (!MLX5_CAP_GEN(dev->mdev, pg))
1637 return ret;
1638
1639 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq); 1640 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1640 1641
1641 return ret; 1642 return ret;
@@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1643 1644
1644void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) 1645void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1645{ 1646{
1646 if (!MLX5_CAP_GEN(dev->mdev, pg)) 1647 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1647 return; 1648 return;
1648 1649
1649 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq); 1650 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
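
The odp.c and main.c hunks work as a pair: the ODP capability word is filled once during the init stage, and every later decision (advertising IB_DEVICE_ON_DEMAND_PAGING, registering the ODP ops, creating and destroying the page-fault EQ) keys off dev->odp_caps rather than re-reading the raw pg firmware bit. That keeps init and cleanup symmetric, as this sketch with made-up names illustrates:

#include <stdbool.h>

#define ODP_SUPPORT 0x1

struct dev {
	unsigned int odp_caps;   /* derived once, early in init */
	bool pf_eq_created;
};

static int odp_init_one(struct dev *d)
{
	if (!(d->odp_caps & ODP_SUPPORT))
		return 0;            /* feature off: register nothing */
	d->pf_eq_created = true;     /* create the page-fault event queue */
	return 0;
}

static void odp_cleanup_one(struct dev *d)
{
	if (!(d->odp_caps & ODP_SUPPORT))
		return;              /* same test as init: symmetric teardown */
	d->pf_eq_created = false;
}

int main(void)
{
	struct dev d = { .odp_caps = ODP_SUPPORT };

	odp_init_one(&d);
	odp_cleanup_one(&d);
	return d.pf_eq_created;      /* 0 on success */
}
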
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 379328b2598f..72869ff4a334 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
4162 MLX5_IB_UMR_OCTOWORD; 4162 MLX5_IB_UMR_OCTOWORD;
4163} 4163}
4164 4164
4165static __be64 frwr_mkey_mask(void) 4165static __be64 frwr_mkey_mask(bool atomic)
4166{ 4166{
4167 u64 result; 4167 u64 result;
4168 4168
@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
4175 MLX5_MKEY_MASK_LW | 4175 MLX5_MKEY_MASK_LW |
4176 MLX5_MKEY_MASK_RR | 4176 MLX5_MKEY_MASK_RR |
4177 MLX5_MKEY_MASK_RW | 4177 MLX5_MKEY_MASK_RW |
4178 MLX5_MKEY_MASK_A |
4179 MLX5_MKEY_MASK_SMALL_FENCE | 4178 MLX5_MKEY_MASK_SMALL_FENCE |
4180 MLX5_MKEY_MASK_FREE; 4179 MLX5_MKEY_MASK_FREE;
4181 4180
4181 if (atomic)
4182 result |= MLX5_MKEY_MASK_A;
4183
4182 return cpu_to_be64(result); 4184 return cpu_to_be64(result);
4183} 4185}
4184 4186
@@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
4204} 4206}
4205 4207
4206static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, 4208static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4207 struct mlx5_ib_mr *mr, u8 flags) 4209 struct mlx5_ib_mr *mr, u8 flags, bool atomic)
4208{ 4210{
4209 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4211 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4210 4212
@@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4212 4214
4213 umr->flags = flags; 4215 umr->flags = flags;
4214 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 4216 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4215 umr->mkey_mask = frwr_mkey_mask(); 4217 umr->mkey_mask = frwr_mkey_mask(atomic);
4216} 4218}
4217 4219
4218static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) 4220static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4811{ 4813{
4812 struct mlx5_ib_mr *mr = to_mmr(wr->mr); 4814 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
4813 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 4815 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
4816 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
4814 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4817 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4815 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; 4818 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
4819 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
4816 u8 flags = 0; 4820 u8 flags = 0;
4817 4821
4822 if (!mlx5_ib_can_use_umr(dev, atomic)) {
4823 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4824 "Fast update of %s for MR is disabled\n",
4825 (MLX5_CAP_GEN(dev->mdev,
4826 umr_modify_entity_size_disabled)) ?
4827 "entity size" :
4828 "atomic access");
4829 return -EINVAL;
4830 }
4831
4818 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 4832 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
4819 mlx5_ib_warn(to_mdev(qp->ibqp.device), 4833 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4820 "Invalid IB_SEND_INLINE send flag\n"); 4834 "Invalid IB_SEND_INLINE send flag\n");
@@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4826 if (umr_inline) 4840 if (umr_inline)
4827 flags |= MLX5_UMR_INLINE; 4841 flags |= MLX5_UMR_INLINE;
4828 4842
4829 set_reg_umr_seg(*seg, mr, flags); 4843 set_reg_umr_seg(*seg, mr, flags, atomic);
4830 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4844 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4831 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4845 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4832 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 4846 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
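
frwr_mkey_mask() now takes the atomic requirement from the work request and only then sets MLX5_MKEY_MASK_A; together with the new set_reg_wr() check, fast registration either proceeds without the atomic bit or fails cleanly when the HW forbids UMR atomic updates. The conditional-mask idiom, reduced to a runnable toy with assumed bit values:

#include <stdint.h>
#include <stdio.h>

#define MASK_LR  (1ULL << 0)
#define MASK_RW  (1ULL << 1)
#define MASK_A   (1ULL << 2)   /* atomic enable: only when requested */

static uint64_t frwr_mask(int atomic)
{
	uint64_t result = MASK_LR | MASK_RW;  /* always-needed bits */

	if (atomic)
		result |= MASK_A;
	return result;
}

int main(void)
{
	printf("0x%llx 0x%llx\n",
	       (unsigned long long)frwr_mask(0),
	       (unsigned long long)frwr_mask(1));  /* 0x3 0x7 */
	return 0;
}
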
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 77b1aabf6ff3..dba4535494ab 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -138,9 +138,9 @@ struct siw_umem {
138}; 138};
139 139
140struct siw_pble { 140struct siw_pble {
141 u64 addr; /* Address of assigned user buffer */ 141 dma_addr_t addr; /* Address of assigned buffer */
142 u64 size; /* Size of this entry */ 142 unsigned int size; /* Size of this entry */
143 u64 pbl_off; /* Total offset from start of PBL */ 143 unsigned long pbl_off; /* Total offset from start of PBL */
144}; 144};
145 145
146struct siw_pbl { 146struct siw_pbl {
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) 734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
735 735
736#define siw_dbg_cep(cep, fmt, ...) \ 736#define siw_dbg_cep(cep, fmt, ...) \
737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ 737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
738 cep, __func__, ##__VA_ARGS__) 738 cep, __func__, ##__VA_ARGS__)
739 739
740void siw_cq_flush(struct siw_cq *cq); 740void siw_cq_flush(struct siw_cq *cq);
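
Two independent hygiene fixes in siw.h: the PBL entry fields get the widths their contents actually need (a DMA address is dma_addr_t, which the kernel sizes per architecture, rather than an unconditional u64), and CEP debug output moves from %p to %pK so raw kernel addresses are hashed unless kptr_restrict allows them. The size difference is easy to see in a toy layout; the 32-bit dma_addr_t below is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

/* Assumed 32-bit DMA address, as on many 32-bit platforms. */
typedef uint32_t dma_addr_t;

struct pble_wide  { uint64_t addr; uint64_t size; uint64_t pbl_off; };
struct pble_tight { dma_addr_t addr; unsigned int size; unsigned long pbl_off; };

int main(void)
{
	/* On a 32-bit build: 24 bytes vs 12, and no u64-to-pointer casts. */
	printf("wide=%zu tight=%zu\n",
	       sizeof(struct pble_wide), sizeof(struct pble_tight));
	return 0;
}
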
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 9ce8a1b925d2..8c1931a57f4a 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
355 getname_local(cep->sock, &event.local_addr); 355 getname_local(cep->sock, &event.local_addr);
356 getname_peer(cep->sock, &event.remote_addr); 356 getname_peer(cep->sock, &event.remote_addr);
357 } 357 }
358 siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n", 358 siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
359 cep->qp ? qp_id(cep->qp) : -1, id, reason, status); 359 cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
360 360
361 return id->event_handler(id, &event); 361 return id->event_handler(id, &event);
362} 362}
@@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
947 siw_cep_get(new_cep); 947 siw_cep_get(new_cep);
948 new_s->sk->sk_user_data = new_cep; 948 new_s->sk->sk_user_data = new_cep;
949 949
950 siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
951
952 if (siw_tcp_nagle == false) { 950 if (siw_tcp_nagle == false) {
953 int val = 1; 951 int val = 1;
954 952
@@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
1011 cep = work->cep; 1009 cep = work->cep;
1012 1010
1013 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", 1011 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
1014 cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state); 1012 cep->qp ? qp_id(cep->qp) : UINT_MAX,
1013 work->type, cep->state);
1015 1014
1016 siw_cep_set_inuse(cep); 1015 siw_cep_set_inuse(cep);
1017 1016
@@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
1145 } 1144 }
1146 if (release_cep) { 1145 if (release_cep) {
1147 siw_dbg_cep(cep, 1146 siw_dbg_cep(cep,
1148 "release: timer=%s, QP[%u], id 0x%p\n", 1147 "release: timer=%s, QP[%u]\n",
1149 cep->mpa_timer ? "y" : "n", 1148 cep->mpa_timer ? "y" : "n",
1150 cep->qp ? qp_id(cep->qp) : -1, cep->cm_id); 1149 cep->qp ? qp_id(cep->qp) : UINT_MAX);
1151 1150
1152 siw_cancel_mpatimer(cep); 1151 siw_cancel_mpatimer(cep);
1153 1152
@@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
1211 else 1210 else
1212 delay = MPAREP_TIMEOUT; 1211 delay = MPAREP_TIMEOUT;
1213 } 1212 }
1214 siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n", 1213 siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
1215 cep->qp ? qp_id(cep->qp) : -1, type, work, delay); 1214 cep->qp ? qp_id(cep->qp) : -1, type, delay);
1216 1215
1217 queue_delayed_work(siw_cm_wq, &work->work, delay); 1216 queue_delayed_work(siw_cm_wq, &work->work, delay);
1218 1217
@@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1376 } 1375 }
1377 if (v4) 1376 if (v4)
1378 siw_dbg_qp(qp, 1377 siw_dbg_qp(qp,
1379 "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", 1378 "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
1380 id, pd_len, 1379 pd_len,
1381 &((struct sockaddr_in *)(laddr))->sin_addr, 1380 &((struct sockaddr_in *)(laddr))->sin_addr,
1382 ntohs(((struct sockaddr_in *)(laddr))->sin_port), 1381 ntohs(((struct sockaddr_in *)(laddr))->sin_port),
1383 &((struct sockaddr_in *)(raddr))->sin_addr, 1382 &((struct sockaddr_in *)(raddr))->sin_addr,
1384 ntohs(((struct sockaddr_in *)(raddr))->sin_port)); 1383 ntohs(((struct sockaddr_in *)(raddr))->sin_port));
1385 else 1384 else
1386 siw_dbg_qp(qp, 1385 siw_dbg_qp(qp,
1387 "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", 1386 "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
1388 id, pd_len, 1387 pd_len,
1389 &((struct sockaddr_in6 *)(laddr))->sin6_addr, 1388 &((struct sockaddr_in6 *)(laddr))->sin6_addr,
1390 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port), 1389 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
1391 &((struct sockaddr_in6 *)(raddr))->sin6_addr, 1390 &((struct sockaddr_in6 *)(raddr))->sin6_addr,
@@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1508 if (rv >= 0) { 1507 if (rv >= 0) {
1509 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); 1508 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
1510 if (!rv) { 1509 if (!rv) {
1511 siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id, 1510 siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
1512 qp_id(qp));
1513 siw_cep_set_free(cep); 1511 siw_cep_set_free(cep);
1514 return 0; 1512 return 0;
1515 } 1513 }
1516 } 1514 }
1517error: 1515error:
1518 siw_dbg_qp(qp, "failed: %d\n", rv); 1516 siw_dbg(id->device, "failed: %d\n", rv);
1519 1517
1520 if (cep) { 1518 if (cep) {
1521 siw_socket_disassoc(s); 1519 siw_socket_disassoc(s);
@@ -1540,7 +1538,8 @@ error:
1540 } else if (s) { 1538 } else if (s) {
1541 sock_release(s); 1539 sock_release(s);
1542 } 1540 }
1543 siw_qp_put(qp); 1541 if (qp)
1542 siw_qp_put(qp);
1544 1543
1545 return rv; 1544 return rv;
1546} 1545}
@@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1580 siw_cancel_mpatimer(cep); 1579 siw_cancel_mpatimer(cep);
1581 1580
1582 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1581 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1583 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1582 siw_dbg_cep(cep, "out of state\n");
1584 1583
1585 siw_cep_set_free(cep); 1584 siw_cep_set_free(cep);
1586 siw_cep_put(cep); 1585 siw_cep_put(cep);
@@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1601 up_write(&qp->state_lock); 1600 up_write(&qp->state_lock);
1602 goto error; 1601 goto error;
1603 } 1602 }
1604 siw_dbg_cep(cep, "id 0x%p\n", id); 1603 siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
1605 1604
1606 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { 1605 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
1607 siw_dbg_cep(cep, "peer allows GSO on TX\n"); 1606 siw_dbg_cep(cep, "peer allows GSO on TX\n");
@@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1611 params->ird > sdev->attrs.max_ird) { 1610 params->ird > sdev->attrs.max_ird) {
1612 siw_dbg_cep( 1611 siw_dbg_cep(
1613 cep, 1612 cep,
1614 "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n", 1613 "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
1615 id, qp_id(qp), params->ord, sdev->attrs.max_ord, 1614 qp_id(qp), params->ord, sdev->attrs.max_ord,
1616 params->ird, sdev->attrs.max_ird); 1615 params->ird, sdev->attrs.max_ird);
1617 rv = -EINVAL; 1616 rv = -EINVAL;
1618 up_write(&qp->state_lock); 1617 up_write(&qp->state_lock);
@@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1624 if (params->private_data_len > max_priv_data) { 1623 if (params->private_data_len > max_priv_data) {
1625 siw_dbg_cep( 1624 siw_dbg_cep(
1626 cep, 1625 cep,
1627 "id 0x%p, [QP %u]: private data length: %d (max %d)\n", 1626 "[QP %u]: private data length: %d (max %d)\n",
1628 id, qp_id(qp), params->private_data_len, max_priv_data); 1627 qp_id(qp), params->private_data_len, max_priv_data);
1629 rv = -EINVAL; 1628 rv = -EINVAL;
1630 up_write(&qp->state_lock); 1629 up_write(&qp->state_lock);
1631 goto error; 1630 goto error;
@@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1679 qp_attrs.flags = SIW_MPA_CRC; 1678 qp_attrs.flags = SIW_MPA_CRC;
1680 qp_attrs.state = SIW_QP_STATE_RTS; 1679 qp_attrs.state = SIW_QP_STATE_RTS;
1681 1680
1682 siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp)); 1681 siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
1683 1682
1684 /* Associate QP with CEP */ 1683 /* Associate QP with CEP */
1685 siw_cep_get(cep); 1684 siw_cep_get(cep);
@@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1700 if (rv) 1699 if (rv)
1701 goto error; 1700 goto error;
1702 1701
1703 siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n", 1702 siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
1704 id, qp_id(qp), params->private_data_len); 1703 qp_id(qp), params->private_data_len);
1705 1704
1706 rv = siw_send_mpareqrep(cep, params->private_data, 1705 rv = siw_send_mpareqrep(cep, params->private_data,
1707 params->private_data_len); 1706 params->private_data_len);
@@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
1759 siw_cancel_mpatimer(cep); 1758 siw_cancel_mpatimer(cep);
1760 1759
1761 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1760 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1762 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1761 siw_dbg_cep(cep, "out of state\n");
1763 1762
1764 siw_cep_set_free(cep); 1763 siw_cep_set_free(cep);
1765 siw_cep_put(cep); /* put last reference */ 1764 siw_cep_put(cep); /* put last reference */
1766 1765
1767 return -ECONNRESET; 1766 return -ECONNRESET;
1768 } 1767 }
1769 siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state, 1768 siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
1770 pd_len); 1769 pd_len);
1771 1770
1772 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { 1771 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
@@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1804 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, 1803 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
1805 sizeof(s_val)); 1804 sizeof(s_val));
1806 if (rv) { 1805 if (rv) {
1807 siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv); 1806 siw_dbg(id->device, "setsockopt error: %d\n", rv);
1808 goto error; 1807 goto error;
1809 } 1808 }
1810 rv = s->ops->bind(s, laddr, addr_family == AF_INET ? 1809 rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
1811 sizeof(struct sockaddr_in) : 1810 sizeof(struct sockaddr_in) :
1812 sizeof(struct sockaddr_in6)); 1811 sizeof(struct sockaddr_in6));
1813 if (rv) { 1812 if (rv) {
1814 siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv); 1813 siw_dbg(id->device, "socket bind error: %d\n", rv);
1815 goto error; 1814 goto error;
1816 } 1815 }
1817 cep = siw_cep_alloc(sdev); 1816 cep = siw_cep_alloc(sdev);
@@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1824 rv = siw_cm_alloc_work(cep, backlog); 1823 rv = siw_cm_alloc_work(cep, backlog);
1825 if (rv) { 1824 if (rv) {
1826 siw_dbg(id->device, 1825 siw_dbg(id->device,
1827 "id 0x%p: alloc_work error %d, backlog %d\n", id, 1826 "alloc_work error %d, backlog %d\n",
1828 rv, backlog); 1827 rv, backlog);
1829 goto error; 1828 goto error;
1830 } 1829 }
1831 rv = s->ops->listen(s, backlog); 1830 rv = s->ops->listen(s, backlog);
1832 if (rv) { 1831 if (rv) {
1833 siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv); 1832 siw_dbg(id->device, "listen error %d\n", rv);
1834 goto error; 1833 goto error;
1835 } 1834 }
1836 cep->cm_id = id; 1835 cep->cm_id = id;
@@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
1914 1913
1915 list_del(p); 1914 list_del(p);
1916 1915
1917 siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id, 1916 siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
1918 cep->state);
1919 1917
1920 siw_cep_set_inuse(cep); 1918 siw_cep_set_inuse(cep);
1921 1919
@@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1952 struct net_device *dev = to_siw_dev(id->device)->netdev; 1950 struct net_device *dev = to_siw_dev(id->device)->netdev;
1953 int rv = 0, listeners = 0; 1951 int rv = 0, listeners = 0;
1954 1952
1955 siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog); 1953 siw_dbg(id->device, "backlog %d\n", backlog);
1956 1954
1957 /* 1955 /*
1958 * For each attached address of the interface, create a 1956 * For each attached address of the interface, create a
@@ -1964,12 +1962,16 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1964 struct sockaddr_in s_laddr, *s_raddr; 1962 struct sockaddr_in s_laddr, *s_raddr;
1965 const struct in_ifaddr *ifa; 1963 const struct in_ifaddr *ifa;
1966 1964
1965 if (!in_dev) {
1966 rv = -ENODEV;
1967 goto out;
1968 }
1967 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); 1969 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
1968 s_raddr = (struct sockaddr_in *)&id->remote_addr; 1970 s_raddr = (struct sockaddr_in *)&id->remote_addr;
1969 1971
1970 siw_dbg(id->device, 1972 siw_dbg(id->device,
1971 "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n", 1973 "laddr %pI4:%d, raddr %pI4:%d\n",
1972 id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port), 1974 &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
1973 &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); 1975 &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
1974 1976
1975 rtnl_lock(); 1977 rtnl_lock();
@@ -1993,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1993 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr), 1995 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
1994 *s_raddr = &to_sockaddr_in6(id->remote_addr); 1996 *s_raddr = &to_sockaddr_in6(id->remote_addr);
1995 1997
1998 if (!in6_dev) {
1999 rv = -ENODEV;
2000 goto out;
2001 }
1996 siw_dbg(id->device, 2002 siw_dbg(id->device,
1997 "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n", 2003 "laddr %pI6:%d, raddr %pI6:%d\n",
1998 id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), 2004 &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
1999 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); 2005 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
2000 2006
2001 read_lock_bh(&in6_dev->lock); 2007 rtnl_lock();
2002 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { 2008 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
2003 struct sockaddr_in6 bind_addr; 2009 if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
2004 2010 continue;
2005 if (ipv6_addr_any(&s_laddr->sin6_addr) || 2011 if (ipv6_addr_any(&s_laddr->sin6_addr) ||
2006 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { 2012 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
2007 bind_addr.sin6_family = AF_INET6; 2013 struct sockaddr_in6 bind_addr = {
2008 bind_addr.sin6_port = s_laddr->sin6_port; 2014 .sin6_family = AF_INET6,
2009 bind_addr.sin6_flowinfo = 0; 2015 .sin6_port = s_laddr->sin6_port,
2010 bind_addr.sin6_addr = ifp->addr; 2016 .sin6_flowinfo = 0,
2011 bind_addr.sin6_scope_id = dev->ifindex; 2017 .sin6_addr = ifp->addr,
2018 .sin6_scope_id = dev->ifindex };
2012 2019
2013 rv = siw_listen_address(id, backlog, 2020 rv = siw_listen_address(id, backlog,
2014 (struct sockaddr *)&bind_addr, 2021 (struct sockaddr *)&bind_addr,
@@ -2017,28 +2024,26 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
2017 listeners++; 2024 listeners++;
2018 } 2025 }
2019 } 2026 }
2020 read_unlock_bh(&in6_dev->lock); 2027 rtnl_unlock();
2021
2022 in6_dev_put(in6_dev); 2028 in6_dev_put(in6_dev);
2023 } else { 2029 } else {
2024 return -EAFNOSUPPORT; 2030 rv = -EAFNOSUPPORT;
2025 } 2031 }
2032out:
2026 if (listeners) 2033 if (listeners)
2027 rv = 0; 2034 rv = 0;
2028 else if (!rv) 2035 else if (!rv)
2029 rv = -EINVAL; 2036 rv = -EINVAL;
2030 2037
2031 siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK"); 2038 siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
2032 2039
2033 return rv; 2040 return rv;
2034} 2041}
2035 2042
2036int siw_destroy_listen(struct iw_cm_id *id) 2043int siw_destroy_listen(struct iw_cm_id *id)
2037{ 2044{
2038 siw_dbg(id->device, "id 0x%p\n", id);
2039
2040 if (!id->provider_data) { 2045 if (!id->provider_data) {
2041 siw_dbg(id->device, "id 0x%p: no cep(s)\n", id); 2046 siw_dbg(id->device, "no cep(s)\n");
2042 return 0; 2047 return 0;
2043 } 2048 }
2044 siw_drop_listeners(id); 2049 siw_drop_listeners(id);
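
Beyond dropping kernel pointers from the log strings, siw_cm.c picks up two robustness fixes: in_dev_get()/in6_dev_get() may return NULL for an interface without address structures, so listening now fails with -ENODEV instead of oopsing, and the IPv6 bind address is built with a designated initializer so every field not named is guaranteed zero. The initializer behavior, demonstrated on a stand-in struct:

#include <stdio.h>

struct sockaddr_in6_like {
	unsigned short sin6_family;
	unsigned short sin6_port;
	unsigned int   sin6_flowinfo;
	unsigned char  sin6_addr[16];
	unsigned int   sin6_scope_id;
};

int main(void)
{
	/* Designated initializer: named fields set, everything else zeroed. */
	struct sockaddr_in6_like bind_addr = {
		.sin6_family   = 10,      /* AF_INET6 */
		.sin6_port     = 0x1234,
		.sin6_scope_id = 2,
	};

	printf("flowinfo=%u (implicitly zero)\n", bind_addr.sin6_flowinfo);
	return 0;
}
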
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index e381ae9b7d62..d8db3bee9da7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
71 wc->wc_flags = IB_WC_WITH_INVALIDATE; 71 wc->wc_flags = IB_WC_WITH_INVALIDATE;
72 } 72 }
73 wc->qp = cqe->base_qp; 73 wc->qp = cqe->base_qp;
74 siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", 74 siw_dbg_cq(cq,
75 "idx %u, type %d, flags %2x, id 0x%pK\n",
75 cq->cq_get % cq->num_cqe, cqe->opcode, 76 cq->cq_get % cq->num_cqe, cqe->opcode,
76 cqe->flags, (void *)cqe->id); 77 cqe->flags, (void *)(uintptr_t)cqe->id);
77 } 78 }
78 WRITE_ONCE(cqe->flags, 0); 79 WRITE_ONCE(cqe->flags, 0);
79 cq->cq_get++; 80 cq->cq_get++;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..87a56039f0ef 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
197 */ 197 */
198 if (addr < mem->va || addr + len > mem->va + mem->len) { 198 if (addr < mem->va || addr + len > mem->va + mem->len) {
199 siw_dbg_pd(pd, "MEM interval len %d\n", len); 199 siw_dbg_pd(pd, "MEM interval len %d\n", len);
200 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n", 200 siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
201 (unsigned long long)addr, 201 (void *)(uintptr_t)addr,
202 (unsigned long long)(addr + len)); 202 (void *)(uintptr_t)(addr + len));
203 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n", 203 siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
204 (unsigned long long)mem->va, 204 (void *)(uintptr_t)mem->va,
205 (unsigned long long)(mem->va + mem->len), 205 (void *)(uintptr_t)(mem->va + mem->len),
206 mem->stag); 206 mem->stag);
207 207
208 return -E_BASE_BOUNDS; 208 return -E_BASE_BOUNDS;
@@ -330,7 +330,7 @@ out:
330 * Optionally, provides remaining len within current element, and 330 * Optionally, provides remaining len within current element, and
331 * current PBL index for later resume at same element. 331 * current PBL index for later resume at same element.
332 */ 332 */
333u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) 333dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
334{ 334{
335 int i = idx ? *idx : 0; 335 int i = idx ? *idx : 0;
336 336
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index f43daf280891..db138c8423da 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -9,7 +9,7 @@
9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); 9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
10void siw_umem_release(struct siw_umem *umem, bool dirty); 10void siw_umem_release(struct siw_umem *umem, bool dirty);
11struct siw_pbl *siw_pbl_alloc(u32 num_buf); 11struct siw_pbl *siw_pbl_alloc(u32 num_buf);
12u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); 12dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); 13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); 14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
15int siw_invalidate_stag(struct ib_pd *pd, u32 stag); 15int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 0990307c5d2c..430314c8abd9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -949,7 +949,7 @@ skip_irq:
949 rv = -EINVAL; 949 rv = -EINVAL;
950 goto out; 950 goto out;
951 } 951 }
952 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 952 wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
953 wqe->sqe.sge[0].lkey = 0; 953 wqe->sqe.sge[0].lkey = 0;
954 wqe->sqe.num_sge = 1; 954 wqe->sqe.num_sge = 1;
955 } 955 }
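
The siw_qp.c one-liner is the same 64/32-bit fix applied throughout the driver: sge->laddr is a u64, and converting between it and a pointer must round-trip through uintptr_t, otherwise 32-bit builds truncate or warn about mismatched sizes. The discipline in a few lines of portable C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x = 0;
	uint64_t laddr = (uintptr_t)&x;        /* pointer -> integer: widen via uintptr_t */
	void *p = (void *)(uintptr_t)laddr;    /* integer -> pointer: narrow the same way */

	printf("round-trip ok: %d\n", p == (void *)&x);   /* prints 1 */
	return 0;
}
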
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index f87657a11657..c0a887240325 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
38 38
39 p = siw_get_upage(umem, dest_addr); 39 p = siw_get_upage(umem, dest_addr);
40 if (unlikely(!p)) { 40 if (unlikely(!p)) {
41 pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", 41 pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
42 __func__, qp_id(rx_qp(srx)), 42 __func__, qp_id(rx_qp(srx)),
43 (void *)dest_addr, (void *)umem->fp_addr); 43 (void *)(uintptr_t)dest_addr,
44 (void *)(uintptr_t)umem->fp_addr);
44 /* siw internal error */ 45 /* siw internal error */
45 srx->skb_copied += copied; 46 srx->skb_copied += copied;
46 srx->skb_new -= copied; 47 srx->skb_new -= copied;
@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
50 pg_off = dest_addr & ~PAGE_MASK; 51 pg_off = dest_addr & ~PAGE_MASK;
51 bytes = min(len, (int)PAGE_SIZE - pg_off); 52 bytes = min(len, (int)PAGE_SIZE - pg_off);
52 53
53 siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); 54 siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
54 55
55 dest = kmap_atomic(p); 56 dest = kmap_atomic(p);
56 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, 57 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
104{ 105{
105 int rv; 106 int rv;
106 107
107 siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); 108 siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
108 109
109 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); 110 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
110 if (unlikely(rv)) { 111 if (unlikely(rv)) {
111 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", 112 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
112 qp_id(rx_qp(srx)), __func__, len, kva, rv); 113 qp_id(rx_qp(srx)), __func__, len, kva, rv);
113 114
114 return rv; 115 return rv;
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
132 133
133 while (len) { 134 while (len) {
134 int bytes; 135 int bytes;
135 u64 buf_addr = 136 dma_addr_t buf_addr =
136 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); 137 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
137 if (!buf_addr) 138 if (!buf_addr)
138 break; 139 break;
@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
485 mem_p = *mem; 486 mem_p = *mem;
486 if (mem_p->mem_obj == NULL) 487 if (mem_p->mem_obj == NULL)
487 rv = siw_rx_kva(srx, 488 rv = siw_rx_kva(srx,
488 (void *)(sge->laddr + frx->sge_off), 489 (void *)(uintptr_t)(sge->laddr + frx->sge_off),
489 sge_bytes); 490 sge_bytes);
490 else if (!mem_p->is_pbl) 491 else if (!mem_p->is_pbl)
491 rv = siw_rx_umem(srx, mem_p->umem, 492 rv = siw_rx_umem(srx, mem_p->umem,
492 sge->laddr + frx->sge_off, sge_bytes); 493 sge->laddr + frx->sge_off, sge_bytes);
@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
598 599
599 if (mem->mem_obj == NULL) 600 if (mem->mem_obj == NULL)
600 rv = siw_rx_kva(srx, 601 rv = siw_rx_kva(srx,
601 (void *)(srx->ddp_to + srx->fpdu_part_rcvd), 602 (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
602 bytes); 603 bytes);
603 else if (!mem->is_pbl) 604 else if (!mem->is_pbl)
604 rv = siw_rx_umem(srx, mem->umem, 605 rv = siw_rx_umem(srx, mem->umem,
605 srx->ddp_to + srx->fpdu_part_rcvd, bytes); 606 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
841 bytes = min(srx->fpdu_part_rem, srx->skb_new); 842 bytes = min(srx->fpdu_part_rem, srx->skb_new);
842 843
843 if (mem_p->mem_obj == NULL) 844 if (mem_p->mem_obj == NULL)
844 rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed), 845 rv = siw_rx_kva(srx,
845 bytes); 846 (void *)(uintptr_t)(sge->laddr + wqe->processed),
847 bytes);
846 else if (!mem_p->is_pbl) 848 else if (!mem_p->is_pbl)
847 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, 849 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
848 bytes); 850 bytes);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 43020d2040fc..438a2917a47c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
26{ 26{
27 struct siw_pbl *pbl = mem->pbl; 27 struct siw_pbl *pbl = mem->pbl;
28 u64 offset = addr - mem->va; 28 u64 offset = addr - mem->va;
29 u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); 29 dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
30 30
31 if (paddr) 31 if (paddr)
32 return virt_to_page(paddr); 32 return virt_to_page(paddr);
@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
37/* 37/*
38 * Copy short payload at provided destination payload address 38 * Copy short payload at provided destination payload address
39 */ 39 */
40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) 40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
41{ 41{
42 struct siw_wqe *wqe = &c_tx->wqe_active; 42 struct siw_wqe *wqe = &c_tx->wqe_active;
43 struct siw_sge *sge = &wqe->sqe.sge[0]; 43 struct siw_sge *sge = &wqe->sqe.sge[0];
@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
50 return 0; 50 return 0;
51 51
52 if (tx_flags(wqe) & SIW_WQE_INLINE) { 52 if (tx_flags(wqe) & SIW_WQE_INLINE) {
53 memcpy((void *)paddr, &wqe->sqe.sge[1], bytes); 53 memcpy(paddr, &wqe->sqe.sge[1], bytes);
54 } else { 54 } else {
55 struct siw_mem *mem = wqe->mem[0]; 55 struct siw_mem *mem = wqe->mem[0];
56 56
57 if (!mem->mem_obj) { 57 if (!mem->mem_obj) {
58 /* Kernel client using kva */ 58 /* Kernel client using kva */
59 memcpy((void *)paddr, (void *)sge->laddr, bytes); 59 memcpy(paddr,
60 (const void *)(uintptr_t)sge->laddr, bytes);
60 } else if (c_tx->in_syscall) { 61 } else if (c_tx->in_syscall) {
61 if (copy_from_user((void *)paddr, 62 if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
62 (const void __user *)sge->laddr,
63 bytes)) 63 bytes))
64 return -EFAULT; 64 return -EFAULT;
65 } else { 65 } else {
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
79 buffer = kmap_atomic(p); 79 buffer = kmap_atomic(p);
80 80
81 if (likely(PAGE_SIZE - off >= bytes)) { 81 if (likely(PAGE_SIZE - off >= bytes)) {
82 memcpy((void *)paddr, buffer + off, bytes); 82 memcpy(paddr, buffer + off, bytes);
83 kunmap_atomic(buffer); 83 kunmap_atomic(buffer);
84 } else { 84 } else {
85 unsigned long part = bytes - (PAGE_SIZE - off); 85 unsigned long part = bytes - (PAGE_SIZE - off);
86 86
87 memcpy((void *)paddr, buffer + off, part); 87 memcpy(paddr, buffer + off, part);
88 kunmap_atomic(buffer); 88 kunmap_atomic(buffer);
89 89
90 if (!mem->is_pbl) 90 if (!mem->is_pbl)
@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
98 return -EFAULT; 98 return -EFAULT;
99 99
100 buffer = kmap_atomic(p); 100 buffer = kmap_atomic(p);
101 memcpy((void *)(paddr + part), buffer, 101 memcpy(paddr + part, buffer,
102 bytes - part); 102 bytes - part);
103 kunmap_atomic(buffer); 103 kunmap_atomic(buffer);
104 } 104 }
@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
166 c_tx->ctrl_len = sizeof(struct iwarp_send); 166 c_tx->ctrl_len = sizeof(struct iwarp_send);
167 167
168 crc = (char *)&c_tx->pkt.send_pkt.crc; 168 crc = (char *)&c_tx->pkt.send_pkt.crc;
169 data = siw_try_1seg(c_tx, (u64)crc); 169 data = siw_try_1seg(c_tx, crc);
170 break; 170 break;
171 171
172 case SIW_OP_SEND_REMOTE_INV: 172 case SIW_OP_SEND_REMOTE_INV:
@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv); 189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
190 190
191 crc = (char *)&c_tx->pkt.send_pkt.crc; 191 crc = (char *)&c_tx->pkt.send_pkt.crc;
192 data = siw_try_1seg(c_tx, (u64)crc); 192 data = siw_try_1seg(c_tx, crc);
193 break; 193 break;
194 194
195 case SIW_OP_WRITE: 195 case SIW_OP_WRITE:
@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); 201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
202 202
203 crc = (char *)&c_tx->pkt.write_pkt.crc; 203 crc = (char *)&c_tx->pkt.write_pkt.crc;
204 data = siw_try_1seg(c_tx, (u64)crc); 204 data = siw_try_1seg(c_tx, crc);
205 break; 205 break;
206 206
207 case SIW_OP_READ_RESPONSE: 207 case SIW_OP_READ_RESPONSE:
@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); 216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
217 217
218 crc = (char *)&c_tx->pkt.write_pkt.crc; 218 crc = (char *)&c_tx->pkt.write_pkt.crc;
219 data = siw_try_1seg(c_tx, (u64)crc); 219 data = siw_try_1seg(c_tx, crc);
220 break; 220 break;
221 221
222 default: 222 default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
398 398
399#define MAX_TRAILER (MPA_CRC_SIZE + 4) 399#define MAX_TRAILER (MPA_CRC_SIZE + 4)
400 400
401static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps) 401static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
402{ 402{
403 if (hdr_len) { 403 while (kmap_mask) {
404 ++pages; 404 if (kmap_mask & BIT(0))
405 --num_maps; 405 kunmap(*pp);
406 } 406 pp++;
407 while (num_maps-- > 0) { 407 kmap_mask >>= 1;
408 kunmap(*pages);
409 pages++;
410 } 408 }
411} 409}
412 410
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
437 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, 435 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
438 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, 436 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
439 pbl_idx = c_tx->pbl_idx; 437 pbl_idx = c_tx->pbl_idx;
438 unsigned long kmap_mask = 0L;
440 439
441 if (c_tx->state == SIW_SEND_HDR) { 440 if (c_tx->state == SIW_SEND_HDR) {
442 if (c_tx->use_sendpage) { 441 if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
463 462
464 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { 463 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
465 mem = wqe->mem[sge_idx]; 464 mem = wqe->mem[sge_idx];
466 if (!mem->mem_obj) 465 is_kva = mem->mem_obj == NULL ? 1 : 0;
467 is_kva = 1;
468 } else { 466 } else {
469 is_kva = 1; 467 is_kva = 1;
470 } 468 }
@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
473 * tx from kernel virtual address: either inline data 471 * tx from kernel virtual address: either inline data
474 * or memory region with assigned kernel buffer 472 * or memory region with assigned kernel buffer
475 */ 473 */
476 iov[seg].iov_base = (void *)(sge->laddr + sge_off); 474 iov[seg].iov_base =
475 (void *)(uintptr_t)(sge->laddr + sge_off);
477 iov[seg].iov_len = sge_len; 476 iov[seg].iov_len = sge_len;
478 477
479 if (do_crc) 478 if (do_crc)
@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
500 p = siw_get_upage(mem->umem, 499 p = siw_get_upage(mem->umem,
501 sge->laddr + sge_off); 500 sge->laddr + sge_off);
502 if (unlikely(!p)) { 501 if (unlikely(!p)) {
503 if (hdr_len) 502 siw_unmap_pages(page_array, kmap_mask);
504 seg--;
505 if (!c_tx->use_sendpage && seg) {
506 siw_unmap_pages(page_array,
507 hdr_len, seg);
508 }
509 wqe->processed -= c_tx->bytes_unsent; 503 wqe->processed -= c_tx->bytes_unsent;
510 rv = -EFAULT; 504 rv = -EFAULT;
511 goto done_crc; 505 goto done_crc;
@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
515 if (!c_tx->use_sendpage) { 509 if (!c_tx->use_sendpage) {
516 iov[seg].iov_base = kmap(p) + fp_off; 510 iov[seg].iov_base = kmap(p) + fp_off;
517 iov[seg].iov_len = plen; 511 iov[seg].iov_len = plen;
512
513 /* Remember for later kunmap() */
514 kmap_mask |= BIT(seg);
515
518 if (do_crc) 516 if (do_crc)
519 crypto_shash_update( 517 crypto_shash_update(
520 c_tx->mpa_crc_hd, 518 c_tx->mpa_crc_hd,
@@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
526 page_address(p) + fp_off, 524 page_address(p) + fp_off,
527 plen); 525 plen);
528 } else { 526 } else {
529 u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); 527 u64 va = sge->laddr + sge_off;
530 528
531 page_array[seg] = virt_to_page(pa); 529 page_array[seg] = virt_to_page(va & PAGE_MASK);
532 if (do_crc) 530 if (do_crc)
533 crypto_shash_update( 531 crypto_shash_update(
534 c_tx->mpa_crc_hd, 532 c_tx->mpa_crc_hd,
535 (void *)(sge->laddr + sge_off), 533 (void *)(uintptr_t)va,
536 plen); 534 plen);
537 } 535 }
538 536
@@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
543 541
544 if (++seg > (int)MAX_ARRAY) { 542 if (++seg > (int)MAX_ARRAY) {
545 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n"); 543 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
546 if (!is_kva && !c_tx->use_sendpage) { 544 siw_unmap_pages(page_array, kmap_mask);
547 siw_unmap_pages(page_array, hdr_len,
548 seg - 1);
549 }
550 wqe->processed -= c_tx->bytes_unsent; 545 wqe->processed -= c_tx->bytes_unsent;
551 rv = -EMSGSIZE; 546 rv = -EMSGSIZE;
552 goto done_crc; 547 goto done_crc;
@@ -597,8 +592,7 @@ sge_done:
597 } else { 592 } else {
598 rv = kernel_sendmsg(s, &msg, iov, seg + 1, 593 rv = kernel_sendmsg(s, &msg, iov, seg + 1,
599 hdr_len + data_len + trl_len); 594 hdr_len + data_len + trl_len);
600 if (!is_kva) 595 siw_unmap_pages(page_array, kmap_mask);
601 siw_unmap_pages(page_array, hdr_len, seg);
602 } 596 }
603 if (rv < (int)hdr_len) { 597 if (rv < (int)hdr_len) {
604 /* Not even complete hdr pushed or negative rv */ 598 /* Not even complete hdr pushed or negative rv */
@@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
829 rv = -EINVAL; 823 rv = -EINVAL;
830 goto tx_error; 824 goto tx_error;
831 } 825 }
832 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 826 wqe->sqe.sge[0].laddr =
827 (u64)(uintptr_t)&wqe->sqe.sge[1];
833 } 828 }
834 } 829 }
835 wqe->wr_status = SIW_WR_INPROGRESS; 830 wqe->wr_status = SIW_WR_INPROGRESS;
@@ -924,7 +919,7 @@ tx_error:
924 919
925static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) 920static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
926{ 921{
927 struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr; 922 struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
928 struct siw_device *sdev = to_siw_dev(pd->device); 923 struct siw_device *sdev = to_siw_dev(pd->device);
929 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); 924 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
930 int rv = 0; 925 int rv = 0;
@@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
954 mem->stag = sqe->rkey; 949 mem->stag = sqe->rkey;
955 mem->perms = sqe->access; 950 mem->perms = sqe->access;
956 951
957 siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n", 952 siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
958 mem->va, base_mr->iova);
959 mem->va = base_mr->iova; 953 mem->va = base_mr->iova;
960 mem->stag_valid = 1; 954 mem->stag_valid = 1;
961out: 955out:
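The siw_qp_tx.c hunks above replace the fragile hdr_len/seg arithmetic with a kmap_mask bitmask: each iov slot whose address came from kmap() gets its bit set at map time, and every error and completion path can then make a single siw_unmap_pages(page_array, kmap_mask) call to undo exactly those mappings. A minimal userspace sketch of that bookkeeping (map_page()/unmap_page() are hypothetical stand-ins for kmap()/kunmap()):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Stand-ins for kmap()/kunmap(); in the kernel these map highmem pages. */
static void map_page(int seg)   { printf("kmap   seg %d\n", seg); }
static void unmap_page(int seg) { printf("kunmap seg %d\n", seg); }

/* Like the reworked siw_unmap_pages(): undo exactly the marked segments. */
static void unmap_marked(unsigned long kmap_mask)
{
	for (int seg = 0; kmap_mask; seg++, kmap_mask >>= 1)
		if (kmap_mask & 1)
			unmap_page(seg);
}

int main(void)
{
	unsigned long kmap_mask = 0;
	int seg;

	for (seg = 0; seg < 5; seg++) {
		if (seg & 1)		/* pretend only even segments needed mapping */
			continue;
		map_page(seg);
		kmap_mask |= BIT(seg);	/* remember for later cleanup */
	}
	unmap_marked(kmap_mask);	/* one call works on every exit path */
	return 0;
}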
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index e7f3a2379d9d..da52c90e06d4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
424 */ 424 */
425 qp->srq = to_siw_srq(attrs->srq); 425 qp->srq = to_siw_srq(attrs->srq);
426 qp->attrs.rq_size = 0; 426 qp->attrs.rq_size = 0;
427 siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n", 427 siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
428 qp->qp_num, qp->srq);
429 } else if (num_rqe) { 428 } else if (num_rqe) {
430 if (qp->kernel_verbs) 429 if (qp->kernel_verbs)
431 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe)); 430 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
610 base_ucontext); 609 base_ucontext);
611 struct siw_qp_attrs qp_attrs; 610 struct siw_qp_attrs qp_attrs;
612 611
613 siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep); 612 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
614 613
615 /* 614 /*
616 * Mark QP as in process of destruction to prevent from 615 * Mark QP as in process of destruction to prevent from
@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
662 void *kbuf = &sqe->sge[1]; 661 void *kbuf = &sqe->sge[1];
663 int num_sge = core_wr->num_sge, bytes = 0; 662 int num_sge = core_wr->num_sge, bytes = 0;
664 663
665 sqe->sge[0].laddr = (u64)kbuf; 664 sqe->sge[0].laddr = (uintptr_t)kbuf;
666 sqe->sge[0].lkey = 0; 665 sqe->sge[0].lkey = 0;
667 666
668 while (num_sge--) { 667 while (num_sge--) {
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
825 break; 824 break;
826 825
827 case IB_WR_REG_MR: 826 case IB_WR_REG_MR:
828 sqe->base_mr = (uint64_t)reg_wr(wr)->mr; 827 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
829 sqe->rkey = reg_wr(wr)->key; 828 sqe->rkey = reg_wr(wr)->key;
830 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; 829 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
831 sqe->opcode = SIW_OP_REG_MR; 830 sqe->opcode = SIW_OP_REG_MR;
@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
842 rv = -EINVAL; 841 rv = -EINVAL;
843 break; 842 break;
844 } 843 }
845 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", 844 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
846 sqe->opcode, sqe->flags, (void *)sqe->id); 845 sqe->opcode, sqe->flags,
846 (void *)(uintptr_t)sqe->id);
847 847
848 if (unlikely(rv < 0)) 848 if (unlikely(rv < 0))
849 break; 849 break;
@@ -1205,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); 1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
1206 int rv; 1206 int rv;
1207 1207
1208 siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n", 1208 siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
1209 (unsigned long long)start, (unsigned long long)rnic_va, 1209 (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
1210 (unsigned long long)len); 1210 (unsigned long long)len);
1211 1211
1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1363,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1363 struct siw_mem *mem = mr->mem; 1363 struct siw_mem *mem = mr->mem;
1364 struct siw_pbl *pbl = mem->pbl; 1364 struct siw_pbl *pbl = mem->pbl;
1365 struct siw_pble *pble; 1365 struct siw_pble *pble;
1366 u64 pbl_size; 1366 unsigned long pbl_size;
1367 int i, rv; 1367 int i, rv;
1368 1368
1369 if (!pbl) { 1369 if (!pbl) {
@@ -1402,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1402 pbl_size += sg_dma_len(slp); 1402 pbl_size += sg_dma_len(slp);
1403 } 1403 }
1404 siw_dbg_mem(mem, 1404 siw_dbg_mem(mem,
1405 "sge[%d], size %llu, addr 0x%016llx, total %llu\n", 1405 "sge[%d], size %u, addr 0x%p, total %lu\n",
1406 i, pble->size, pble->addr, pbl_size); 1406 i, pble->size, (void *)(uintptr_t)pble->addr,
1407 pbl_size);
1407 } 1408 }
1408 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); 1409 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
1409 if (rv > 0) { 1410 if (rv > 0) {
1410 mem->len = base_mr->length; 1411 mem->len = base_mr->length;
1411 mem->va = base_mr->iova; 1412 mem->va = base_mr->iova;
1412 siw_dbg_mem(mem, 1413 siw_dbg_mem(mem,
1413 "%llu bytes, start 0x%016llx, %u SLE to %u entries\n", 1414 "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
1414 mem->len, mem->va, num_sle, pbl->num_buf); 1415 mem->len, (void *)(uintptr_t)mem->va, num_sle,
1416 pbl->num_buf);
1415 } 1417 }
1416 return rv; 1418 return rv;
1417} 1419}
@@ -1529,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
1529 } 1531 }
1530 spin_lock_init(&srq->lock); 1532 spin_lock_init(&srq->lock);
1531 1533
1532 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq); 1534 siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
1533 1535
1534 return 0; 1536 return 0;
1535 1537
@@ -1650,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1650 1652
1651 if (unlikely(!srq->kernel_verbs)) { 1653 if (unlikely(!srq->kernel_verbs)) {
1652 siw_dbg_pd(base_srq->pd, 1654 siw_dbg_pd(base_srq->pd,
1653 "[SRQ 0x%p]: no kernel post_recv for mapped srq\n", 1655 "[SRQ]: no kernel post_recv for mapped srq\n");
1654 srq);
1655 rv = -EINVAL; 1656 rv = -EINVAL;
1656 goto out; 1657 goto out;
1657 } 1658 }
@@ -1673,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1673 } 1674 }
1674 if (unlikely(wr->num_sge > srq->max_sge)) { 1675 if (unlikely(wr->num_sge > srq->max_sge)) {
1675 siw_dbg_pd(base_srq->pd, 1676 siw_dbg_pd(base_srq->pd,
1676 "[SRQ 0x%p]: too many sge's: %d\n", srq, 1677 "[SRQ]: too many sge's: %d\n", wr->num_sge);
1677 wr->num_sge);
1678 rv = -EINVAL; 1678 rv = -EINVAL;
1679 break; 1679 break;
1680 } 1680 }
@@ -1693,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1693 spin_unlock_irqrestore(&srq->lock, flags); 1693 spin_unlock_irqrestore(&srq->lock, flags);
1694out: 1694out:
1695 if (unlikely(rv < 0)) { 1695 if (unlikely(rv < 0)) {
1696 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv); 1696 siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
1697 *bad_wr = wr; 1697 *bad_wr = wr;
1698 } 1698 }
1699 return rv; 1699 return rv;
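Several of the siw_verbs.c hunks exist only to route u64-to-pointer conversions through uintptr_t. Casting a 64-bit integer straight to a pointer triggers -Wint-to-pointer-cast on 32-bit builds, where pointers are 32 bits wide; uintptr_t is defined to round-trip pointer values, so (void *)(uintptr_t)val is the portable spelling. A tiny illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x = 42;
	uint64_t laddr = (uintptr_t)&x;	/* store a pointer in a u64 field */

	/*
	 * (int *)laddr would warn on 32-bit targets; narrowing via
	 * uintptr_t is well defined for values that began life as pointers.
	 */
	int *p = (int *)(uintptr_t)laddr;

	printf("%d\n", *p);
	return 0;
}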
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 88ae7c2ac3c8..e486a8a74c40 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
237 237
238static void hv_kbd_on_channel_callback(void *context) 238static void hv_kbd_on_channel_callback(void *context)
239{ 239{
240 struct vmpacket_descriptor *desc;
240 struct hv_device *hv_dev = context; 241 struct hv_device *hv_dev = context;
241 void *buffer;
242 int bufferlen = 0x100; /* Start with sensible size */
243 u32 bytes_recvd; 242 u32 bytes_recvd;
244 u64 req_id; 243 u64 req_id;
245 int error;
246 244
247 buffer = kmalloc(bufferlen, GFP_ATOMIC); 245 foreach_vmbus_pkt(desc, hv_dev->channel) {
248 if (!buffer) 246 bytes_recvd = desc->len8 * 8;
249 return; 247 req_id = desc->trans_id;
250
251 while (1) {
252 error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
253 &bytes_recvd, &req_id);
254 switch (error) {
255 case 0:
256 if (bytes_recvd == 0) {
257 kfree(buffer);
258 return;
259 }
260
261 hv_kbd_handle_received_packet(hv_dev, buffer,
262 bytes_recvd, req_id);
263 break;
264 248
265 case -ENOBUFS: 249 hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
266 kfree(buffer); 250 req_id);
267 /* Handle large packet */
268 bufferlen = bytes_recvd;
269 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
270 if (!buffer)
271 return;
272 break;
273 }
274 } 251 }
275} 252}
276 253
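The hyperv-keyboard rewrite drops the allocate/-ENOBUFS/reallocate dance around vmbus_recvpacket_raw() and instead walks packet descriptors in place in the ring buffer; len8 counts 8-byte quadwords, hence the desc->len8 * 8 byte count. A rough userspace analogue of iterating length-prefixed records this way (struct pkt_desc and FOREACH_PKT are invented for illustration, loosely mirroring struct vmpacket_descriptor and foreach_vmbus_pkt):

#include <stdint.h>
#include <stdio.h>

/* Length bookkeeping as in the VMBus descriptor: len8 is in 8-byte units. */
struct pkt_desc {
	uint16_t len8;		/* total packet length in quadwords */
	uint16_t offset8;	/* payload offset in quadwords (unused here) */
	uint32_t trans_id;
};

#define FOREACH_PKT(d, buf, used)					\
	for ((d) = (struct pkt_desc *)(buf);				\
	     (uint8_t *)(d) < (uint8_t *)(buf) + (used);		\
	     (d) = (struct pkt_desc *)((uint8_t *)(d) + (d)->len8 * 8))

int main(void)
{
	uint8_t ring[64] = {0};
	struct pkt_desc *p0 = (struct pkt_desc *)&ring[0];
	struct pkt_desc *p1 = (struct pkt_desc *)&ring[16];
	struct pkt_desc *d;

	p0->len8 = 2; p0->trans_id = 1;	/* two 16-byte packets */
	p1->len8 = 2; p1->trans_id = 2;

	FOREACH_PKT(d, ring, 32)
		printf("pkt id %u, %d bytes\n", d->trans_id, d->len8 * 8);
	return 0;
}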
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d991d40f797f..f68a62c3c32b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -965,11 +965,14 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
965{ 965{
966 bool coherent = dev_is_dma_coherent(dev); 966 bool coherent = dev_is_dma_coherent(dev);
967 size_t alloc_size = PAGE_ALIGN(size); 967 size_t alloc_size = PAGE_ALIGN(size);
968 int node = dev_to_node(dev);
968 struct page *page = NULL; 969 struct page *page = NULL;
969 void *cpu_addr; 970 void *cpu_addr;
970 971
971 page = dma_alloc_contiguous(dev, alloc_size, gfp); 972 page = dma_alloc_contiguous(dev, alloc_size, gfp);
972 if (!page) 973 if (!page)
974 page = alloc_pages_node(node, gfp, get_order(alloc_size));
975 if (!page)
973 return NULL; 976 return NULL;
974 977
975 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 978 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6b5acc92ca2..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1599 unsigned long freed; 1599 unsigned long freed;
1600 1600
1601 c = container_of(shrink, struct dm_bufio_client, shrinker); 1601 c = container_of(shrink, struct dm_bufio_client, shrinker);
1602 if (!dm_bufio_trylock(c)) 1602 if (sc->gfp_mask & __GFP_FS)
1603 dm_bufio_lock(c);
1604 else if (!dm_bufio_trylock(c))
1603 return SHRINK_STOP; 1605 return SHRINK_STOP;
1604 1606
1605 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); 1607 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
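The dm-bufio change splits the shrinker into two cases by allocation context: callers whose gfp_mask includes __GFP_FS may sleep, so the shrinker now blocks on the client lock and is guaranteed to make progress, while GFP_NOFS-style callers keep the old trylock-or-SHRINK_STOP behaviour. A hedged kernel-style sketch of the pattern (my_cache and my_cache_evict are hypothetical):

static unsigned long my_shrink_scan(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct my_cache *c = container_of(shrink, struct my_cache, shrinker);
	unsigned long freed;

	if (sc->gfp_mask & __GFP_FS)
		mutex_lock(&c->lock);	/* caller tolerates FS recursion: safe to sleep */
	else if (!mutex_trylock(&c->lock))
		return SHRINK_STOP;	/* must not block here: bail, retry later */

	freed = my_cache_evict(c, sc->nr_to_scan);
	mutex_unlock(&c->lock);
	return freed;
}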
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 845f376a72d9..8288887b7f94 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -25,6 +25,7 @@ struct dust_device {
25 unsigned long long badblock_count; 25 unsigned long long badblock_count;
26 spinlock_t dust_lock; 26 spinlock_t dust_lock;
27 unsigned int blksz; 27 unsigned int blksz;
28 int sect_per_block_shift;
28 unsigned int sect_per_block; 29 unsigned int sect_per_block;
29 sector_t start; 30 sector_t start;
30 bool fail_read_on_bb:1; 31 bool fail_read_on_bb:1;
@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
79 unsigned long flags; 80 unsigned long flags;
80 81
81 spin_lock_irqsave(&dd->dust_lock, flags); 82 spin_lock_irqsave(&dd->dust_lock, flags);
82 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 83 bblock = dust_rb_search(&dd->badblocklist, block);
83 84
84 if (bblock == NULL) { 85 if (bblock == NULL) {
85 if (!dd->quiet_mode) { 86 if (!dd->quiet_mode) {
@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
113 } 114 }
114 115
115 spin_lock_irqsave(&dd->dust_lock, flags); 116 spin_lock_irqsave(&dd->dust_lock, flags);
116 bblock->bb = block * dd->sect_per_block; 117 bblock->bb = block;
117 if (!dust_rb_insert(&dd->badblocklist, bblock)) { 118 if (!dust_rb_insert(&dd->badblocklist, bblock)) {
118 if (!dd->quiet_mode) { 119 if (!dd->quiet_mode) {
119 DMERR("%s: block %llu already in badblocklist", 120 DMERR("%s: block %llu already in badblocklist",
@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
138 unsigned long flags; 139 unsigned long flags;
139 140
140 spin_lock_irqsave(&dd->dust_lock, flags); 141 spin_lock_irqsave(&dd->dust_lock, flags);
141 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 142 bblock = dust_rb_search(&dd->badblocklist, block);
142 if (bblock != NULL) 143 if (bblock != NULL)
143 DMINFO("%s: block %llu found in badblocklist", __func__, block); 144 DMINFO("%s: block %llu found in badblocklist", __func__, block);
144 else 145 else
@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
165 int ret = DM_MAPIO_REMAPPED; 166 int ret = DM_MAPIO_REMAPPED;
166 167
167 if (fail_read_on_bb) { 168 if (fail_read_on_bb) {
169 thisblock >>= dd->sect_per_block_shift;
168 spin_lock_irqsave(&dd->dust_lock, flags); 170 spin_lock_irqsave(&dd->dust_lock, flags);
169 ret = __dust_map_read(dd, thisblock); 171 ret = __dust_map_read(dd, thisblock);
170 spin_unlock_irqrestore(&dd->dust_lock, flags); 172 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
195 unsigned long flags; 197 unsigned long flags;
196 198
197 if (fail_read_on_bb) { 199 if (fail_read_on_bb) {
200 thisblock >>= dd->sect_per_block_shift;
198 spin_lock_irqsave(&dd->dust_lock, flags); 201 spin_lock_irqsave(&dd->dust_lock, flags);
199 __dust_map_write(dd, thisblock); 202 __dust_map_write(dd, thisblock);
200 spin_unlock_irqrestore(&dd->dust_lock, flags); 203 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
331 dd->blksz = blksz; 334 dd->blksz = blksz;
332 dd->start = tmp; 335 dd->start = tmp;
333 336
337 dd->sect_per_block_shift = __ffs(sect_per_block);
338
334 /* 339 /*
335 * Whether to fail a read on a "bad" block. 340 * Whether to fail a read on a "bad" block.
336 * Defaults to false; enabled later by message. 341 * Defaults to false; enabled later by message.
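dm-dust now keys the badblock rbtree on block numbers instead of sector numbers, converting incoming sectors with a precomputed shift; since sect_per_block is a power of two, __ffs() yields its log2 and a right shift replaces the division on every I/O. Userspace equivalent of the conversion:

#include <stdio.h>

int main(void)
{
	unsigned int sect_per_block = 8;	/* 4096-byte blocks / 512-byte sectors */
	/* __ffs() in the kernel; counting trailing zeros gives log2 for powers of two */
	int shift = __builtin_ctz(sect_per_block);

	unsigned long long sector = 123456;
	unsigned long long block = sector >> shift;	/* same as sector / sect_per_block */

	printf("sector %llu -> block %llu (shift %d)\n", sector, block, shift);
	return 0;
}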
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b1b0de402dfc..9118ab85cb3a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1943,7 +1943,22 @@ offload_to_thread:
1943 queue_work(ic->wait_wq, &dio->work); 1943 queue_work(ic->wait_wq, &dio->work);
1944 return; 1944 return;
1945 } 1945 }
1946 if (journal_read_pos != NOT_FOUND)
1947 dio->range.n_sectors = ic->sectors_per_block;
1946 wait_and_add_new_range(ic, &dio->range); 1948 wait_and_add_new_range(ic, &dio->range);
1949 /*
1950 * wait_and_add_new_range drops the spinlock, so the journal
1951 * may have been changed arbitrarily. We need to recheck.
1952 * To simplify the code, we restrict I/O size to just one block.
1953 */
1954 if (journal_read_pos != NOT_FOUND) {
1955 sector_t next_sector;
1956 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1957 if (unlikely(new_pos != journal_read_pos)) {
1958 remove_range_unlocked(ic, &dio->range);
1959 goto retry;
1960 }
1961 }
1947 } 1962 }
1948 spin_unlock_irq(&ic->endio_wait.lock); 1963 spin_unlock_irq(&ic->endio_wait.lock);
1949 1964
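The dm-integrity fix addresses a classic time-of-check problem: wait_and_add_new_range() drops endio_wait.lock while sleeping, so the journal position looked up beforehand may be stale by the time the range is claimed; the patch re-runs find_journal_node() and retries on mismatch. The same recheck-after-wait pattern, sketched with pthreads (lookup_pos() and range_is_busy() are hypothetical stand-ins for the journal lookup and the range conflict test):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int state, busy;	/* shared state guarded by 'lock' */

static int lookup_pos(void)     { return state; }
static int range_is_busy(void)  { return busy; }

static int claim_range(void)
{
	int pos;

	pthread_mutex_lock(&lock);
retry:
	pos = lookup_pos();
	while (range_is_busy())
		pthread_cond_wait(&cond, &lock);	/* drops and retakes 'lock' */
	/*
	 * The wait released the lock, so anything sampled before it may be
	 * stale; recheck and start over if the world changed meanwhile.
	 */
	if (lookup_pos() != pos)
		goto retry;
	pthread_mutex_unlock(&lock);
	return pos;
}

int main(void)
{
	printf("claimed pos %d\n", claim_range());
	return 0;
}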
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index df2011de7be2..1bbe4a34ef4c 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job)
566 * no point in continuing. 566 * no point in continuing.
567 */ 567 */
568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && 568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
569 job->master_job->write_err) 569 job->master_job->write_err) {
570 job->write_err = job->master_job->write_err;
570 return -EIO; 571 return -EIO;
572 }
571 573
572 io_job_start(job->kc->throttle); 574 io_job_start(job->kc->throttle);
573 575
@@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
619 else 621 else
620 job->read_err = 1; 622 job->read_err = 1;
621 push(&kc->complete_jobs, job); 623 push(&kc->complete_jobs, job);
624 wake(kc);
622 break; 625 break;
623 } 626 }
624 627
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8a60a4a070ac..1f933dd197cd 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3194 */ 3194 */
3195 r = rs_prepare_reshape(rs); 3195 r = rs_prepare_reshape(rs);
3196 if (r) 3196 if (r)
3197 return r; 3197 goto bad;
3198 3198
3199 /* Reshaping ain't recovery, so disable recovery */ 3199 /* Reshaping ain't recovery, so disable recovery */
3200 rs_setup_recovery(rs, MaxSector); 3200 rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7b6c3ee9e755..8820931ec7d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t)
1342} 1342}
1343EXPORT_SYMBOL(dm_table_event); 1343EXPORT_SYMBOL(dm_table_event);
1344 1344
1345sector_t dm_table_get_size(struct dm_table *t) 1345inline sector_t dm_table_get_size(struct dm_table *t)
1346{ 1346{
1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1348} 1348}
@@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1367 unsigned int l, n = 0, k = 0; 1367 unsigned int l, n = 0, k = 0;
1368 sector_t *node; 1368 sector_t *node;
1369 1369
1370 if (unlikely(sector >= dm_table_get_size(t)))
1371 return &t->targets[t->num_targets];
1372
1370 for (l = 0; l < t->depth; l++) { 1373 for (l = 0; l < t->depth; l++) {
1371 n = get_child(n, k); 1374 n = get_child(n, k);
1372 node = get_node(t, l, n); 1375 node = get_node(t, l, n);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8545dcee9fd0..595a73110e17 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -34,7 +35,7 @@
34 * (1) Super block (1 block) 35 * (1) Super block (1 block)
35 * (2) Chunk mapping table (nr_map_blocks) 36 * (2) Chunk mapping table (nr_map_blocks)
36 * (3) Bitmap blocks (nr_bitmap_blocks) 37 * (3) Bitmap blocks (nr_bitmap_blocks)
37 * All metadata blocks are stored in conventional zones, starting from the 38 * All metadata blocks are stored in conventional zones, starting from
38 * the first conventional zone found on disk. 39 * the first conventional zone found on disk.
39 */ 40 */
40struct dmz_super { 41struct dmz_super {
@@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd)
233 * Lock/unlock metadata access. This is a "read" lock on a semaphore 234 * Lock/unlock metadata access. This is a "read" lock on a semaphore
234 * that prevents metadata flush from running while metadata are being 235 * that prevents metadata flush from running while metadata are being
235 * modified. The actual metadata write mutual exclusion is achieved with 236 * modified. The actual metadata write mutual exclusion is achieved with
236 * the map lock and zone styate management (active and reclaim state are 237 * the map lock and zone state management (active and reclaim state are
237 * mutually exclusive). 238 * mutually exclusive).
238 */ 239 */
239void dmz_lock_metadata(struct dmz_metadata *zmd) 240void dmz_lock_metadata(struct dmz_metadata *zmd)
@@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
402 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; 403 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
403 struct bio *bio; 404 struct bio *bio;
404 405
406 if (dmz_bdev_is_dying(zmd->dev))
407 return ERR_PTR(-EIO);
408
405 /* Get a new block and a BIO to read it */ 409 /* Get a new block and a BIO to read it */
406 mblk = dmz_alloc_mblock(zmd, mblk_no); 410 mblk = dmz_alloc_mblock(zmd, mblk_no);
407 if (!mblk) 411 if (!mblk)
408 return NULL; 412 return ERR_PTR(-ENOMEM);
409 413
410 bio = bio_alloc(GFP_NOIO, 1); 414 bio = bio_alloc(GFP_NOIO, 1);
411 if (!bio) { 415 if (!bio) {
412 dmz_free_mblock(zmd, mblk); 416 dmz_free_mblock(zmd, mblk);
413 return NULL; 417 return ERR_PTR(-ENOMEM);
414 } 418 }
415 419
416 spin_lock(&zmd->mblk_lock); 420 spin_lock(&zmd->mblk_lock);
@@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
541 if (!mblk) { 545 if (!mblk) {
542 /* Cache miss: read the block from disk */ 546 /* Cache miss: read the block from disk */
543 mblk = dmz_get_mblock_slow(zmd, mblk_no); 547 mblk = dmz_get_mblock_slow(zmd, mblk_no);
544 if (!mblk) 548 if (IS_ERR(mblk))
545 return ERR_PTR(-ENOMEM); 549 return mblk;
546 } 550 }
547 551
548 /* Wait for on-going read I/O and check for error */ 552 /* Wait for on-going read I/O and check for error */
@@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
570/* 574/*
571 * Issue a metadata block write BIO. 575 * Issue a metadata block write BIO.
572 */ 576 */
573static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, 577static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
574 unsigned int set) 578 unsigned int set)
575{ 579{
576 sector_t block = zmd->sb[set].block + mblk->no; 580 sector_t block = zmd->sb[set].block + mblk->no;
577 struct bio *bio; 581 struct bio *bio;
578 582
583 if (dmz_bdev_is_dying(zmd->dev))
584 return -EIO;
585
579 bio = bio_alloc(GFP_NOIO, 1); 586 bio = bio_alloc(GFP_NOIO, 1);
580 if (!bio) { 587 if (!bio) {
581 set_bit(DMZ_META_ERROR, &mblk->state); 588 set_bit(DMZ_META_ERROR, &mblk->state);
582 return; 589 return -ENOMEM;
583 } 590 }
584 591
585 set_bit(DMZ_META_WRITING, &mblk->state); 592 set_bit(DMZ_META_WRITING, &mblk->state);
@@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
591 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); 598 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
592 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); 599 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
593 submit_bio(bio); 600 submit_bio(bio);
601
602 return 0;
594} 603}
595 604
596/* 605/*
@@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
602 struct bio *bio; 611 struct bio *bio;
603 int ret; 612 int ret;
604 613
614 if (dmz_bdev_is_dying(zmd->dev))
615 return -EIO;
616
605 bio = bio_alloc(GFP_NOIO, 1); 617 bio = bio_alloc(GFP_NOIO, 1);
606 if (!bio) 618 if (!bio)
607 return -ENOMEM; 619 return -ENOMEM;
@@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
659{ 671{
660 struct dmz_mblock *mblk; 672 struct dmz_mblock *mblk;
661 struct blk_plug plug; 673 struct blk_plug plug;
662 int ret = 0; 674 int ret = 0, nr_mblks_submitted = 0;
663 675
664 /* Issue writes */ 676 /* Issue writes */
665 blk_start_plug(&plug); 677 blk_start_plug(&plug);
666 list_for_each_entry(mblk, write_list, link) 678 list_for_each_entry(mblk, write_list, link) {
667 dmz_write_mblock(zmd, mblk, set); 679 ret = dmz_write_mblock(zmd, mblk, set);
680 if (ret)
681 break;
682 nr_mblks_submitted++;
683 }
668 blk_finish_plug(&plug); 684 blk_finish_plug(&plug);
669 685
670 /* Wait for completion */ 686 /* Wait for completion */
671 list_for_each_entry(mblk, write_list, link) { 687 list_for_each_entry(mblk, write_list, link) {
688 if (!nr_mblks_submitted)
689 break;
672 wait_on_bit_io(&mblk->state, DMZ_META_WRITING, 690 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
673 TASK_UNINTERRUPTIBLE); 691 TASK_UNINTERRUPTIBLE);
674 if (test_bit(DMZ_META_ERROR, &mblk->state)) { 692 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
675 clear_bit(DMZ_META_ERROR, &mblk->state); 693 clear_bit(DMZ_META_ERROR, &mblk->state);
676 ret = -EIO; 694 ret = -EIO;
677 } 695 }
696 nr_mblks_submitted--;
678 } 697 }
679 698
680 /* Flush drive cache (this will also sync data) */ 699 /* Flush drive cache (this will also sync data) */
@@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
736 */ 755 */
737 dmz_lock_flush(zmd); 756 dmz_lock_flush(zmd);
738 757
758 if (dmz_bdev_is_dying(zmd->dev)) {
759 ret = -EIO;
760 goto out;
761 }
762
739 /* Get dirty blocks */ 763 /* Get dirty blocks */
740 spin_lock(&zmd->mblk_lock); 764 spin_lock(&zmd->mblk_lock);
741 list_splice_init(&zmd->mblk_dirty_list, &write_list); 765 list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1542 struct dm_zone *zone; 1566 struct dm_zone *zone;
1543 1567
1544 if (list_empty(&zmd->map_rnd_list)) 1568 if (list_empty(&zmd->map_rnd_list))
1545 return NULL; 1569 return ERR_PTR(-EBUSY);
1546 1570
1547 list_for_each_entry(zone, &zmd->map_rnd_list, link) { 1571 list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1548 if (dmz_is_buf(zone)) 1572 if (dmz_is_buf(zone))
@@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1553 return dzone; 1577 return dzone;
1554 } 1578 }
1555 1579
1556 return NULL; 1580 return ERR_PTR(-EBUSY);
1557} 1581}
1558 1582
1559/* 1583/*
@@ -1564,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1564 struct dm_zone *zone; 1588 struct dm_zone *zone;
1565 1589
1566 if (list_empty(&zmd->map_seq_list)) 1590 if (list_empty(&zmd->map_seq_list))
1567 return NULL; 1591 return ERR_PTR(-EBUSY);
1568 1592
1569 list_for_each_entry(zone, &zmd->map_seq_list, link) { 1593 list_for_each_entry(zone, &zmd->map_seq_list, link) {
1570 if (!zone->bzone) 1594 if (!zone->bzone)
@@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1573 return zone; 1597 return zone;
1574 } 1598 }
1575 1599
1576 return NULL; 1600 return ERR_PTR(-EBUSY);
1577} 1601}
1578 1602
1579/* 1603/*
@@ -1628,9 +1652,13 @@ again:
1628 if (op != REQ_OP_WRITE) 1652 if (op != REQ_OP_WRITE)
1629 goto out; 1653 goto out;
1630 1654
1631 /* Alloate a random zone */ 1655 /* Allocate a random zone */
1632 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1656 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1633 if (!dzone) { 1657 if (!dzone) {
1658 if (dmz_bdev_is_dying(zmd->dev)) {
1659 dzone = ERR_PTR(-EIO);
1660 goto out;
1661 }
1634 dmz_wait_for_free_zones(zmd); 1662 dmz_wait_for_free_zones(zmd);
1635 goto again; 1663 goto again;
1636 } 1664 }
@@ -1725,9 +1753,13 @@ again:
1725 if (bzone) 1753 if (bzone)
1726 goto out; 1754 goto out;
1727 1755
1728 /* Alloate a random zone */ 1756 /* Allocate a random zone */
1729 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1757 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1730 if (!bzone) { 1758 if (!bzone) {
1759 if (dmz_bdev_is_dying(zmd->dev)) {
1760 bzone = ERR_PTR(-EIO);
1761 goto out;
1762 }
1731 dmz_wait_for_free_zones(zmd); 1763 dmz_wait_for_free_zones(zmd);
1732 goto again; 1764 goto again;
1733 } 1765 }
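Much of the dm-zoned-metadata rework converts NULL returns into ERR_PTR() values so callers can tell -EIO (backing device dying) apart from -ENOMEM or -EBUSY (no reclaimable zone). The kernel encodes small negative errnos in the top 4095 values of the pointer range; a self-contained userspace re-implementation of the idiom:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Userspace copies of the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int the_zone = 42;		/* hypothetical resource */

static void *get_zone(int dying)
{
	if (dying)
		return ERR_PTR(-EIO);	/* "device failing", not just "no memory" */
	return &the_zone;
}

int main(void)
{
	void *z = get_zone(1);

	if (IS_ERR(z))
		printf("error %ld\n", PTR_ERR(z));	/* prints -5 == -EIO */
	else
		printf("got zone\n");
	return 0;
}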
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95eb075..d240d7ca8a8a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -37,7 +38,7 @@ enum {
37/* 38/*
38 * Number of seconds of target BIO inactivity to consider the target idle. 39 * Number of seconds of target BIO inactivity to consider the target idle.
39 */ 40 */
40#define DMZ_IDLE_PERIOD (10UL * HZ) 41#define DMZ_IDLE_PERIOD (10UL * HZ)
41 42
42/* 43/*
43 * Percentage of unmapped (free) random zones below which reclaim starts 44 * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
134 set_bit(DM_KCOPYD_WRITE_SEQ, &flags); 135 set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
135 136
136 while (block < end_block) { 137 while (block < end_block) {
138 if (dev->flags & DMZ_BDEV_DYING)
139 return -EIO;
140
137 /* Get a valid region from the source zone */ 141 /* Get a valid region from the source zone */
138 ret = dmz_first_valid_block(zmd, src_zone, &block); 142 ret = dmz_first_valid_block(zmd, src_zone, &block);
139 if (ret <= 0) 143 if (ret <= 0)
@@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
215 219
216 dmz_unlock_flush(zmd); 220 dmz_unlock_flush(zmd);
217 221
218 return 0; 222 return ret;
219} 223}
220 224
221/* 225/*
@@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
259 263
260 dmz_unlock_flush(zmd); 264 dmz_unlock_flush(zmd);
261 265
262 return 0; 266 return ret;
263} 267}
264 268
265/* 269/*
@@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
312 316
313 dmz_unlock_flush(zmd); 317 dmz_unlock_flush(zmd);
314 318
315 return 0; 319 return ret;
316} 320}
317 321
318/* 322/*
@@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
334/* 338/*
335 * Find a candidate zone for reclaim and process it. 339 * Find a candidate zone for reclaim and process it.
336 */ 340 */
337static void dmz_reclaim(struct dmz_reclaim *zrc) 341static int dmz_do_reclaim(struct dmz_reclaim *zrc)
338{ 342{
339 struct dmz_metadata *zmd = zrc->metadata; 343 struct dmz_metadata *zmd = zrc->metadata;
340 struct dm_zone *dzone; 344 struct dm_zone *dzone;
@@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
344 348
345 /* Get a data zone */ 349 /* Get a data zone */
346 dzone = dmz_get_zone_for_reclaim(zmd); 350 dzone = dmz_get_zone_for_reclaim(zmd);
347 if (!dzone) 351 if (IS_ERR(dzone))
348 return; 352 return PTR_ERR(dzone);
349 353
350 start = jiffies; 354 start = jiffies;
351 355
@@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
391out: 395out:
392 if (ret) { 396 if (ret) {
393 dmz_unlock_zone_reclaim(dzone); 397 dmz_unlock_zone_reclaim(dzone);
394 return; 398 return ret;
395 } 399 }
396 400
397 (void) dmz_flush_metadata(zrc->metadata); 401 ret = dmz_flush_metadata(zrc->metadata);
402 if (ret) {
403 dmz_dev_debug(zrc->dev,
404 "Metadata flush for zone %u failed, err %d\n",
405 dmz_id(zmd, rzone), ret);
406 return ret;
407 }
398 408
399 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", 409 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
400 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); 410 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
411 return 0;
401} 412}
402 413
403/* 414/*
@@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
427 return false; 438 return false;
428 439
429 /* 440 /*
430 * If the percentage of unmappped random zones is low, 441 * If the percentage of unmapped random zones is low,
431 * reclaim even if the target is busy. 442 * reclaim even if the target is busy.
432 */ 443 */
433 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND; 444 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
@@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work)
442 struct dmz_metadata *zmd = zrc->metadata; 453 struct dmz_metadata *zmd = zrc->metadata;
443 unsigned int nr_rnd, nr_unmap_rnd; 454 unsigned int nr_rnd, nr_unmap_rnd;
444 unsigned int p_unmap_rnd; 455 unsigned int p_unmap_rnd;
456 int ret;
457
458 if (dmz_bdev_is_dying(zrc->dev))
459 return;
445 460
446 if (!dmz_should_reclaim(zrc)) { 461 if (!dmz_should_reclaim(zrc)) {
447 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); 462 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work)
471 (dmz_target_idle(zrc) ? "Idle" : "Busy"), 486 (dmz_target_idle(zrc) ? "Idle" : "Busy"),
472 p_unmap_rnd, nr_unmap_rnd, nr_rnd); 487 p_unmap_rnd, nr_unmap_rnd, nr_rnd);
473 488
474 dmz_reclaim(zrc); 489 ret = dmz_do_reclaim(zrc);
490 if (ret) {
491 dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
492 if (ret == -EIO)
493 /*
494 * LLD might be performing some error handling sequence
495 * at the underlying device. To not interfere, do not
496 * attempt to schedule the next reclaim run immediately.
497 */
498 return;
499 }
475 500
476 dmz_schedule_reclaim(zrc); 501 dmz_schedule_reclaim(zrc);
477} 502}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740..31478fef6032 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
133 134
134 refcount_inc(&bioctx->ref); 135 refcount_inc(&bioctx->ref);
135 generic_make_request(clone); 136 generic_make_request(clone);
137 if (clone->bi_status == BLK_STS_IOERR)
138 return -EIO;
136 139
137 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) 140 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
138 zone->wp_block += nr_blocks; 141 zone->wp_block += nr_blocks;
@@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
277 280
278 /* Get the buffer zone. One will be allocated if needed */ 281 /* Get the buffer zone. One will be allocated if needed */
279 bzone = dmz_get_chunk_buffer(zmd, zone); 282 bzone = dmz_get_chunk_buffer(zmd, zone);
280 if (!bzone) 283 if (IS_ERR(bzone))
281 return -ENOSPC; 284 return PTR_ERR(bzone);
282 285
283 if (dmz_is_readonly(bzone)) 286 if (dmz_is_readonly(bzone))
284 return -EROFS; 287 return -EROFS;
@@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
389 392
390 dmz_lock_metadata(zmd); 393 dmz_lock_metadata(zmd);
391 394
395 if (dmz->dev->flags & DMZ_BDEV_DYING) {
396 ret = -EIO;
397 goto out;
398 }
399
392 /* 400 /*
393 * Get the data zone mapping the chunk. There may be no 401 * Get the data zone mapping the chunk. There may be no
394 * mapping for read and discard. If a mapping is obtained, 402 * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work)
493 501
494 /* Flush dirty metadata blocks */ 502 /* Flush dirty metadata blocks */
495 ret = dmz_flush_metadata(dmz->metadata); 503 ret = dmz_flush_metadata(dmz->metadata);
504 if (ret)
505 dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
496 506
497 /* Process queued flush requests */ 507 /* Process queued flush requests */
498 while (1) { 508 while (1) {
@@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work)
513 * Get a chunk work and start it to process a new BIO. 523 * Get a chunk work and start it to process a new BIO.
514 * If the BIO chunk has no work yet, create one. 524 * If the BIO chunk has no work yet, create one.
515 */ 525 */
516static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) 526static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
517{ 527{
518 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); 528 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
519 struct dm_chunk_work *cw; 529 struct dm_chunk_work *cw;
530 int ret = 0;
520 531
521 mutex_lock(&dmz->chunk_lock); 532 mutex_lock(&dmz->chunk_lock);
522 533
523 /* Get the BIO chunk work. If one is not active yet, create one */ 534 /* Get the BIO chunk work. If one is not active yet, create one */
524 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); 535 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
525 if (!cw) { 536 if (!cw) {
526 int ret;
527 537
528 /* Create a new chunk work */ 538 /* Create a new chunk work */
529 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); 539 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
530 if (!cw) 540 if (unlikely(!cw)) {
541 ret = -ENOMEM;
531 goto out; 542 goto out;
543 }
532 544
533 INIT_WORK(&cw->work, dmz_chunk_work); 545 INIT_WORK(&cw->work, dmz_chunk_work);
534 refcount_set(&cw->refcount, 0); 546 refcount_set(&cw->refcount, 0);
@@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
539 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); 551 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
540 if (unlikely(ret)) { 552 if (unlikely(ret)) {
541 kfree(cw); 553 kfree(cw);
542 cw = NULL;
543 goto out; 554 goto out;
544 } 555 }
545 } 556 }
@@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
547 bio_list_add(&cw->bio_list, bio); 558 bio_list_add(&cw->bio_list, bio);
548 dmz_get_chunk_work(cw); 559 dmz_get_chunk_work(cw);
549 560
561 dmz_reclaim_bio_acc(dmz->reclaim);
550 if (queue_work(dmz->chunk_wq, &cw->work)) 562 if (queue_work(dmz->chunk_wq, &cw->work))
551 dmz_get_chunk_work(cw); 563 dmz_get_chunk_work(cw);
552out: 564out:
553 mutex_unlock(&dmz->chunk_lock); 565 mutex_unlock(&dmz->chunk_lock);
566 return ret;
567}
568
569/*
570 * Check the backing device availability. If it's on the way out,
571 * start failing I/O. Reclaim and metadata components also call this
572 * function to cleanly abort operation in the event of such failure.
573 */
574bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
575{
576 struct gendisk *disk;
577
578 if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
579 disk = dmz_dev->bdev->bd_disk;
580 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
581 dmz_dev_warn(dmz_dev, "Backing device queue dying");
582 dmz_dev->flags |= DMZ_BDEV_DYING;
583 } else if (disk->fops->check_events) {
584 if (disk->fops->check_events(disk, 0) &
585 DISK_EVENT_MEDIA_CHANGE) {
586 dmz_dev_warn(dmz_dev, "Backing device offline");
587 dmz_dev->flags |= DMZ_BDEV_DYING;
588 }
589 }
590 }
591
592 return dmz_dev->flags & DMZ_BDEV_DYING;
554} 593}
555 594
556/* 595/*
@@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
564 sector_t sector = bio->bi_iter.bi_sector; 603 sector_t sector = bio->bi_iter.bi_sector;
565 unsigned int nr_sectors = bio_sectors(bio); 604 unsigned int nr_sectors = bio_sectors(bio);
566 sector_t chunk_sector; 605 sector_t chunk_sector;
606 int ret;
607
608 if (dmz_bdev_is_dying(dmz->dev))
609 return DM_MAPIO_KILL;
567 610
568 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", 611 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
569 bio_op(bio), (unsigned long long)sector, nr_sectors, 612 bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
601 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); 644 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
602 645
603 /* Now ready to handle this BIO */ 646 /* Now ready to handle this BIO */
604 dmz_reclaim_bio_acc(dmz->reclaim); 647 ret = dmz_queue_chunk_work(dmz, bio);
605 dmz_queue_chunk_work(dmz, bio); 648 if (ret) {
649 dmz_dev_debug(dmz->dev,
650 "BIO op %d, can't process chunk %llu, err %i\n",
651 bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
652 ret);
653 return DM_MAPIO_REQUEUE;
654 }
606 655
607 return DM_MAPIO_SUBMITTED; 656 return DM_MAPIO_SUBMITTED;
608} 657}
@@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
855{ 904{
856 struct dmz_target *dmz = ti->private; 905 struct dmz_target *dmz = ti->private;
857 906
907 if (dmz_bdev_is_dying(dmz->dev))
908 return -ENODEV;
909
858 *bdev = dmz->dev->bdev; 910 *bdev = dmz->dev->bdev;
859 911
860 return 0; 912 return 0;
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49c9a08..d8e70b0ade35 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -56,6 +57,8 @@ struct dmz_dev {
56 57
57 unsigned int nr_zones; 58 unsigned int nr_zones;
58 59
60 unsigned int flags;
61
59 sector_t zone_nr_sectors; 62 sector_t zone_nr_sectors;
60 unsigned int zone_nr_sectors_shift; 63 unsigned int zone_nr_sectors_shift;
61 64
@@ -67,6 +70,9 @@ struct dmz_dev {
67 (dev)->zone_nr_sectors_shift) 70 (dev)->zone_nr_sectors_shift)
68#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) 71#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
69 72
73/* Device flags. */
74#define DMZ_BDEV_DYING (1 << 0)
75
70/* 76/*
71 * Zone descriptor. 77 * Zone descriptor.
72 */ 78 */
@@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
245void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); 251void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
246void dmz_schedule_reclaim(struct dmz_reclaim *zrc); 252void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
247 253
254/*
255 * Functions defined in dm-zoned-target.c
256 */
257bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
258
248#endif /* DM_ZONED_H */ 259#endif /* DM_ZONED_H */
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b319757b1e..8aae0624a297 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
628 628
629 new_parent = shadow_current(s); 629 new_parent = shadow_current(s);
630 630
631 pn = dm_block_data(new_parent);
632 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
633 sizeof(__le64) : s->info->value_type.size;
634
635 /* create & init the left block */
631 r = new_block(s->info, &left); 636 r = new_block(s->info, &left);
632 if (r < 0) 637 if (r < 0)
633 return r; 638 return r;
634 639
640 ln = dm_block_data(left);
641 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
642
643 ln->header.flags = pn->header.flags;
644 ln->header.nr_entries = cpu_to_le32(nr_left);
645 ln->header.max_entries = pn->header.max_entries;
646 ln->header.value_size = pn->header.value_size;
647 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
648 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
649
650 /* create & init the right block */
635 r = new_block(s->info, &right); 651 r = new_block(s->info, &right);
636 if (r < 0) { 652 if (r < 0) {
637 unlock_block(s->info, left); 653 unlock_block(s->info, left);
638 return r; 654 return r;
639 } 655 }
640 656
641 pn = dm_block_data(new_parent);
642 ln = dm_block_data(left);
643 rn = dm_block_data(right); 657 rn = dm_block_data(right);
644
645 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
646 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; 658 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
647 659
648 ln->header.flags = pn->header.flags;
649 ln->header.nr_entries = cpu_to_le32(nr_left);
650 ln->header.max_entries = pn->header.max_entries;
651 ln->header.value_size = pn->header.value_size;
652
653 rn->header.flags = pn->header.flags; 660 rn->header.flags = pn->header.flags;
654 rn->header.nr_entries = cpu_to_le32(nr_right); 661 rn->header.nr_entries = cpu_to_le32(nr_right);
655 rn->header.max_entries = pn->header.max_entries; 662 rn->header.max_entries = pn->header.max_entries;
656 rn->header.value_size = pn->header.value_size; 663 rn->header.value_size = pn->header.value_size;
657
658 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
659 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); 664 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
660
661 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
662 sizeof(__le64) : s->info->value_type.size;
663 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
664 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), 665 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
665 nr_right * size); 666 nr_right * size);
666 667
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec449243966..25328582cc48 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
249 } 249 }
250 250
251 if (smm->recursion_count == 1) 251 if (smm->recursion_count == 1)
252 apply_bops(smm); 252 r = apply_bops(smm);
253 253
254 smm->recursion_count--; 254 smm->recursion_count--;
255 255
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 601cefb5c9d8..050478cabc95 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client)
729 return 0; 729 return 0;
730} 730}
731 731
732static int rk8xx_suspend(struct device *dev) 732static int __maybe_unused rk8xx_suspend(struct device *dev)
733{ 733{
734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
735 int ret = 0; 735 int ret = 0;
@@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev)
749 return ret; 749 return ret;
750} 750}
751 751
752static int rk8xx_resume(struct device *dev) 752static int __maybe_unused rk8xx_resume(struct device *dev)
753{ 753{
754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
755 int ret = 0; 755 int ret = 0;
@@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev)
768 768
769 return ret; 769 return ret;
770} 770}
771SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); 771static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
772 772
773static struct i2c_driver rk808_i2c_driver = { 773static struct i2c_driver rk808_i2c_driver = {
774 .driver = { 774 .driver = {
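The rk808 fix silences two build problems at once: SIMPLE_DEV_PM_OPS only references its callbacks when CONFIG_PM_SLEEP is enabled, so without __maybe_unused a !PM build flags them as defined-but-unused, and the generated dev_pm_ops table itself needed static linkage. The usual shape, sketched with a hypothetical mydev driver (kernel code, not standalone):

static int __maybe_unused mydev_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int __maybe_unused mydev_resume(struct device *dev)
{
	/* restore state */
	return 0;
}

/*
 * Expands to an ops table that omits the callbacks when CONFIG_PM_SLEEP
 * is off, so without the annotation above they would trip
 * -Wunused-function on such configs.
 */
static SIMPLE_DEV_PM_OPS(mydev_pm_ops, mydev_suspend, mydev_resume);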
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 1606658b9b7e..24245ccdba72 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -22,7 +22,7 @@ struct lkdtm_list {
22 * recurse past the end of THREAD_SIZE by default. 22 * recurse past the end of THREAD_SIZE by default.
23 */ 23 */
24#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) 24#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
25#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) 25#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
26#else 26#else
27#define REC_STACK_SIZE (THREAD_SIZE / 8) 27#define REC_STACK_SIZE (THREAD_SIZE / 8)
28#endif 28#endif
@@ -91,7 +91,7 @@ void lkdtm_LOOP(void)
91 91
92void lkdtm_EXHAUST_STACK(void) 92void lkdtm_EXHAUST_STACK(void)
93{ 93{
94 pr_info("Calling function with %d frame size to depth %d ...\n", 94 pr_info("Calling function with %lu frame size to depth %d ...\n",
95 REC_STACK_SIZE, recur_count); 95 REC_STACK_SIZE, recur_count);
96 recursive_loop(recur_count); 96 recursive_loop(recur_count);
97 pr_info("FAIL: survived without exhausting stack?!\n"); 97 pr_info("FAIL: survived without exhausting stack?!\n");
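CONFIG_FRAME_WARN expands to a bare decimal literal of type int, which no longer matched the %lu format above; _AC(x, UL) token-pastes the UL suffix in C while leaving the bare value usable from assembly includes. A minimal demonstration of the same two-level paste trick:

#include <stdio.h>

/* As in include/uapi/linux/const.h: paste a type suffix onto a constant. */
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* extra level so X expands first */

#define FRAME_WARN	1024		/* stand-in for CONFIG_FRAME_WARN */
#define REC_STACK_SIZE	(_AC(FRAME_WARN, UL) / 2)

int main(void)
{
	/* %lu now matches: REC_STACK_SIZE is unsigned long, not int */
	printf("frame size %lu\n", REC_STACK_SIZE);
	return 0;
}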
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 6c0173772162..77f7dff7098d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,8 @@
81 81
82#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ 82#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
83 83
84#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
85
84#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ 86#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
85#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ 87#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
86 88
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 57cb68f5cc64..541538eff8b1 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,6 +98,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
98 98
99 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, 99 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
100 100
101 {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
102
101 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, 103 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
102 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, 104 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
103 105
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 8840299420e0..5e6be1527571 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -691,7 +691,6 @@ static int vmballoon_alloc_page_list(struct vmballoon *b,
691 } 691 }
692 692
693 if (page) { 693 if (page) {
694 vmballoon_mark_page_offline(page, ctl->page_size);
695 /* Success. Add the page to the list and continue. */ 694 /* Success. Add the page to the list and continue. */
696 list_add(&page->lru, &ctl->pages); 695 list_add(&page->lru, &ctl->pages);
697 continue; 696 continue;
@@ -930,7 +929,6 @@ static void vmballoon_release_page_list(struct list_head *page_list,
930 929
931 list_for_each_entry_safe(page, tmp, page_list, lru) { 930 list_for_each_entry_safe(page, tmp, page_list, lru) {
932 list_del(&page->lru); 931 list_del(&page->lru);
933 vmballoon_mark_page_online(page, page_size);
934 __free_pages(page, vmballoon_page_order(page_size)); 932 __free_pages(page, vmballoon_page_order(page_size));
935 } 933 }
936 934
@@ -1005,6 +1003,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
1005 enum vmballoon_page_size_type page_size) 1003 enum vmballoon_page_size_type page_size)
1006{ 1004{
1007 unsigned long flags; 1005 unsigned long flags;
1006 struct page *page;
1008 1007
1009 if (page_size == VMW_BALLOON_4K_PAGE) { 1008 if (page_size == VMW_BALLOON_4K_PAGE) {
1010 balloon_page_list_enqueue(&b->b_dev_info, pages); 1009 balloon_page_list_enqueue(&b->b_dev_info, pages);
@@ -1014,6 +1013,11 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
1014 * for the balloon compaction mechanism. 1013 * for the balloon compaction mechanism.
1015 */ 1014 */
1016 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1015 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1016
1017 list_for_each_entry(page, pages, lru) {
1018 vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1019 }
1020
1017 list_splice_init(pages, &b->huge_pages); 1021 list_splice_init(pages, &b->huge_pages);
1018 __count_vm_events(BALLOON_INFLATE, *n_pages * 1022 __count_vm_events(BALLOON_INFLATE, *n_pages *
1019 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)); 1023 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
@@ -1056,6 +1060,8 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
1056 /* 2MB pages */ 1060 /* 2MB pages */
1057 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1061 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1058 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { 1062 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1063 vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1064
1059 list_move(&page->lru, pages); 1065 list_move(&page->lru, pages);
1060 if (++i == n_req_pages) 1066 if (++i == n_req_pages)
1061 break; 1067 break;
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index bad89b6e0802..345addd9306d 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
310 310
311 entry = container_of(resource, struct dbell_entry, resource); 311 entry = container_of(resource, struct dbell_entry, resource);
312 if (entry->run_delayed) { 312 if (entry->run_delayed) {
313 schedule_work(&entry->work); 313 if (!schedule_work(&entry->work))
314 vmci_resource_put(resource);
314 } else { 315 } else {
315 entry->notify_cb(entry->client_data); 316 entry->notify_cb(entry->client_data);
316 vmci_resource_put(resource); 317 vmci_resource_put(resource);
@@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx)
361 atomic_read(&dbell->active) == 1) { 362 atomic_read(&dbell->active) == 1) {
362 if (dbell->run_delayed) { 363 if (dbell->run_delayed) {
363 vmci_resource_get(&dbell->resource); 364 vmci_resource_get(&dbell->resource);
364 schedule_work(&dbell->work); 365 if (!schedule_work(&dbell->work))
366 vmci_resource_put(&dbell->resource);
365 } else { 367 } else {
366 dbell->notify_cb(dbell->client_data); 368 dbell->notify_cb(dbell->client_data);
367 } 369 }
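schedule_work() returns false when the work item is already queued; in that case the call did not buy an extra execution of the handler, so the reference taken on its behalf must be dropped on the spot or the doorbell resource leaks a count. The general pattern with a kref, sketched (my_obj, do_stuff and my_obj_release are hypothetical):

static void my_work_fn(struct work_struct *work)
{
	struct my_obj *obj = container_of(work, struct my_obj, work);

	do_stuff(obj);
	kref_put(&obj->ref, my_obj_release);	/* drop the queuer's reference */
}

static void my_obj_kick(struct my_obj *obj)
{
	kref_get(&obj->ref);		/* the reference travels with the work item */
	if (!schedule_work(&obj->work))
		/*
		 * Already pending: only one execution will happen and it
		 * already owns a reference, so give this one back.
		 */
		kref_put(&obj->ref, my_obj_release);
}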
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d681e8aaca83..fe914ff5f5d6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host)
1292 goto err; 1292 goto err;
1293 } 1293 }
1294 1294
1295 /*
1296 * Some SD cards claims an out of spec VDD voltage range. Let's treat
1297 * these bits as being in-valid and especially also bit7.
1298 */
1299 ocr &= ~0x7FFF;
1300
1295 rocr = mmc_select_voltage(host, ocr); 1301 rocr = mmc_select_voltage(host, ocr);
1296 1302
1297 /* 1303 /*
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 163d1cf4367e..44139fceac24 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; 369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
370 host->mmc_host_ops.hs400_enhanced_strobe = 370 host->mmc_host_ops.hs400_enhanced_strobe =
371 sdhci_cdns_hs400_enhanced_strobe; 371 sdhci_cdns_hs400_enhanced_strobe;
372 sdhci_enable_v4_mode(host);
372 373
373 sdhci_get_of_property(pdev); 374 sdhci_get_of_property(pdev);
374 375
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d4e7e8b7be77..e7d1920729fb 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50); 357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
358 pm_runtime_use_autosuspend(&pdev->dev); 358 pm_runtime_use_autosuspend(&pdev->dev);
359 359
360 /* HS200 is broken at this moment */
361 host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
362
360 ret = sdhci_add_host(host); 363 ret = sdhci_add_host(host);
361 if (ret) 364 if (ret)
362 goto pm_runtime_disable; 365 goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 83a4767ca680..d07b9793380f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
 	struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
 	u32 div, val, mask;
 
-	div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
 
-	clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
-	sdhci_enable_clk(host, clk);
+	div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
+	div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+	sdhci_enable_clk(host, div);
 
 	/* enable auto gate sdhc_enable_auto_gate */
 	val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
@@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host)
 	return 1 << 31;
 }
 
+static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
+{
+	return 0;
+}
+
 static struct sdhci_ops sdhci_sprd_ops = {
 	.read_l = sdhci_sprd_readl,
 	.write_l = sdhci_sprd_writel,
@@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
 	.set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
 	.hw_reset = sdhci_sprd_hw_reset,
 	.get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
+	.get_ro = sdhci_sprd_get_ro,
 };
 
 static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
 }
 
 static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
-	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+		  SDHCI_QUIRK_MISSING_CAPS,
 	.quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
-		   SDHCI_QUIRK2_USE_32BIT_BLK_CNT,
+		   SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
+		   SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 	.ops = &sdhci_sprd_ops,
 };
 
@@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
 
 	sdhci_enable_v4_mode(host);
 
+	/*
+	 * Supply the existing CAPS, but clear the UHS-I modes. This
+	 * will allow these modes to be specified only by device
+	 * tree properties through mmc_of_parse().
+	 */
+	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+	host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+	host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+			 SDHCI_SUPPORT_DDR50);
+
 	ret = sdhci_setup_host(host);
 	if (ret)
 		goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f4d4761cf20a..02d8f524bb9e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
 	}
 }
 
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
+{
+	/*
+	 * Write-enable shall be assumed if GPIO is missing in a board's
+	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
+	 * Tegra.
+	 */
+	return mmc_gpio_get_ro(host->mmc);
+}
+
 static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
 };
 
 static const struct sdhci_ops tegra_sdhci_ops = {
+	.get_ro = tegra_sdhci_get_ro,
 	.read_w = tegra_sdhci_readw,
 	.write_l = tegra_sdhci_writel,
 	.set_clock = tegra_sdhci_set_clock,
@@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
 };
 
 static const struct sdhci_ops tegra114_sdhci_ops = {
+	.get_ro = tegra_sdhci_get_ro,
 	.read_w = tegra_sdhci_readw,
 	.write_w = tegra_sdhci_writew,
 	.write_l = tegra_sdhci_writel,
@@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
 };
 
 static const struct sdhci_ops tegra210_sdhci_ops = {
+	.get_ro = tegra_sdhci_get_ro,
 	.read_w = tegra_sdhci_readw,
 	.write_w = tegra210_sdhci_writew,
 	.write_l = tegra_sdhci_writel,
@@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
 };
 
 static const struct sdhci_ops tegra186_sdhci_ops = {
+	.get_ro = tegra_sdhci_get_ro,
 	.read_w = tegra_sdhci_readw,
 	.write_l = tegra_sdhci_writel,
 	.set_clock = tegra_sdhci_set_clock,
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index b4e3caf7d799..a4d8968d133d 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -1,5 +1,6 @@
 menuconfig MTD_HYPERBUS
 	tristate "HyperBus support"
+	depends on HAS_IOMEM
 	select MTD_CFI
 	select MTD_MAP_BANK_WIDTH_2
 	select MTD_CFI_AMDSTD
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 895510d40ce4..47602af4ee34 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
 	default:
 		printk(KERN_WARNING "SA1100 flash: unknown base address "
 			"0x%08lx, assuming CS0\n", phys);
+		/* Fall through */
 
 	case SA1100_CS0_PHYS:
 		subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02fd7822c14a..931d9d935686 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1126,6 +1126,8 @@ static void bond_compute_features(struct bonding *bond)
 done:
 	bond_dev->vlan_features = vlan_features;
 	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				    NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_STAG_TX |
 				    NETIF_F_GSO_UDP_L4;
 	bond_dev->mpls_features = mpls_features;
 	bond_dev->gso_max_segs = gso_max_segs;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3811fdbda13e..28c963a21dac 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
 			       unsigned long *supported,
 			       struct phylink_link_state *state)
 {
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
 	if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
 	    state->interface != PHY_INTERFACE_MODE_INTERNAL &&
 	    state->interface != PHY_INTERFACE_MODE_MOCA) {
 		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-		dev_err(ds->dev,
-			"Unsupported interface: %d\n", state->interface);
+		if (port != core_readl(priv, CORE_IMP0_PRT_ID))
+			dev_err(ds->dev,
+				"Unsupported interface: %d for port %d\n",
+				state->interface, port);
 		return;
 	}
 
@@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
 	u32 id_mode_dis = 0, port_mode;
 	u32 reg, offset;
 
+	if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+		return;
+
 	if (priv->type == BCM7445_DEVICE_ID)
 		offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
 	else
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 5a9e27b337a8..098b01e4ed1a 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -81,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
 	{ .compatible = "microchip,ksz9897" },
 	{ .compatible = "microchip,ksz9893" },
 	{ .compatible = "microchip,ksz9563" },
+	{ .compatible = "microchip,ksz8563" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index ee7096d8af07..72ec250b9540 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -128,6 +128,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
 
 #define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \
 	{ \
+		.name = #width, \
 		.val_bits = (width), \
 		.reg_stride = (width) / 8, \
 		.reg_bits = (regbits) + (regalign), \
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index d073baffc20b..df976b259e43 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1223,12 +1223,8 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
 {
 	struct sja1105_private *priv = ds->priv;
 	struct device *dev = ds->dev;
-	u16 rx_vid, tx_vid;
 	int i;
 
-	rx_vid = dsa_8021q_rx_vid(ds, port);
-	tx_vid = dsa_8021q_tx_vid(ds, port);
-
 	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
 		struct sja1105_l2_lookup_entry l2_lookup = {0};
 		u8 macaddr[ETH_ALEN];
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index b41f23679a08..7ce9c69e9c44 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
 
 	ret = xgbe_platform_init();
 	if (ret)
-		return ret;
+		goto err_platform_init;
 
 	ret = xgbe_pci_init();
 	if (ret)
-		return ret;
+		goto err_pci_init;
 
 	return 0;
+
+err_pci_init:
+	xgbe_platform_exit();
+err_platform_init:
+	unregister_netdevice_notifier(&xgbe_netdev_notifier);
+	return ret;
 }
 
 static void __exit xgbe_mod_exit(void)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 440690b18734..aee827f07c16 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
 		if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
 			break;
 	}
-	if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+	if (rule && rule->type == aq_rx_filter_vlan &&
+	    be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
 		struct ethtool_rxnfc cmd;
 
 		cmd.fs.location = rule->aq_fsp.location;
@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
 		return err;
 
 	if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
-		if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
+		if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
 			err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
 				!(aq_nic->packet_filter & IFF_PROMISC));
 			aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 100722ad5c2d..b4a0fb281e69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev)
 	if (err < 0)
 		goto err_exit;
 
+	err = aq_filters_vlans_update(aq_nic);
+	if (err < 0)
+		goto err_exit;
+
 	err = aq_nic_start(aq_nic);
 	if (err < 0)
 		goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e1392766e21e..8f66e7817811 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self)
 					    self->aq_nic_cfg.link_irq_vec);
 		err = request_threaded_irq(irqvec, NULL,
 					   aq_linkstate_threaded_isr,
-					   IRQF_SHARED,
+					   IRQF_SHARED | IRQF_ONESHOT,
 					   self->ndev->name, self);
 		if (err < 0)
 			goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 715685aa48c3..28892b8acd0e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
+err_exit:
 	if (!was_tx_cleaned)
 		work_done = budget;
 
@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
 						  1U << self->aq_ring_param.vec_idx);
 		}
 	}
-err_exit:
+
 	return work_done;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e47ea92e2ae3..d10b421ed1f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	/* if VF indicate to PF this function is going down (PF will delete sp
 	 * elements and clear initializations
 	 */
-	if (IS_VF(bp))
+	if (IS_VF(bp)) {
+		bnx2x_clear_vlan_info(bp);
 		bnx2x_vfpf_close_vf(bp);
-	else if (unload_mode != UNLOAD_RECOVERY)
+	} else if (unload_mode != UNLOAD_RECOVERY) {
 		/* if this is a normal/close unload need to clean up chip*/
 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
-	else {
+	} else {
 		/* Send the UNLOAD_REQUEST to the MCP */
 		bnx2x_send_unload_req(bp, unload_mode);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c2f6e44e9a3f..8b08cb18e363 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
 /**
  * bnx2x_sp_event - handle ramrods completion.
  *
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2cc14db8f0ec..192ff8d5da32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
 	return rc;
 }
 
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+
+	/* Mark that hw forgot all entries */
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		vlan->hw = false;
+
+	bp->vlan_cnt = 0;
+}
+
 static int bnx2x_del_all_vlans(struct bnx2x *bp)
 {
 	struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
 	unsigned long ramrod_flags = 0, vlan_flags = 0;
-	struct bnx2x_vlan_entry *vlan;
 	int rc;
 
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
 	if (rc)
 		return rc;
 
-	/* Mark that hw forgot all entries */
-	list_for_each_entry(vlan, &bp->vlan_reg, link)
-		vlan->hw = false;
-	bp->vlan_cnt = 0;
+	bnx2x_clear_vlan_info(bp);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7070349915bc..8dce4069472b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2021,9 +2021,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
 	if (bnapi->events & BNXT_RX_EVENT) {
 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 
-		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
 		if (bnapi->events & BNXT_AGG_EVENT)
 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
 	}
 	bnapi->events = 0;
 }
@@ -5064,6 +5064,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
 
 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 {
+	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
 	int i, rc = 0;
 	u32 type;
 
@@ -5139,7 +5140,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 		if (rc)
 			goto err_out;
 		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
-		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+		/* If we have agg rings, post agg buffers first. */
+		if (!agg_rings)
+			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
 			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -5158,7 +5161,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 		}
 	}
 
-	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+	if (agg_rings) {
 		type = HWRM_RING_ALLOC_AGG;
 		for (i = 0; i < bp->rx_nr_rings; i++) {
 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -5174,6 +5177,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
 				    ring->fw_ring_id);
 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
 		}
 	}
@@ -7016,19 +7020,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
 		bnxt_hwrm_vnic_set_rss(bp, i, false);
 }
 
-static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
-				    bool irq_re_init)
+static void bnxt_clear_vnic(struct bnxt *bp)
 {
-	if (bp->vnic_info) {
-		bnxt_hwrm_clear_vnic_filter(bp);
+	if (!bp->vnic_info)
+		return;
+
+	bnxt_hwrm_clear_vnic_filter(bp);
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
 		/* clear all RSS setting before free vnic ctx */
 		bnxt_hwrm_clear_vnic_rss(bp);
 		bnxt_hwrm_vnic_ctx_free(bp);
-		/* before free the vnic, undo the vnic tpa settings */
-		if (bp->flags & BNXT_FLAG_TPA)
-			bnxt_set_tpa(bp, false);
-		bnxt_hwrm_vnic_free(bp);
 	}
+	/* before free the vnic, undo the vnic tpa settings */
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_set_tpa(bp, false);
+	bnxt_hwrm_vnic_free(bp);
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		bnxt_hwrm_vnic_ctx_free(bp);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+				    bool irq_re_init)
+{
+	bnxt_clear_vnic(bp);
 	bnxt_hwrm_ring_free(bp, close_path);
 	bnxt_hwrm_ring_grp_free(bp);
 	if (irq_re_init) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 549c90d3e465..c05d663212b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 	if (idx)
 		req->dimensions = cpu_to_le16(1);
 
-	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
 		memcpy(data_addr, buf, bytesize);
-
-	rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+		rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+	} else {
+		rc = hwrm_send_message_silent(bp, msg, msg_len,
+					      HWRM_CMD_TIMEOUT);
+	}
 	if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
 		memcpy(buf, data_addr, bytesize);
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index c7ee63d69679..8445a0cce849 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
 	mutex_lock(&bp->hwrm_cmd_lock);
 	hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
 				      INSTALL_PACKAGE_TIMEOUT);
-	if (hwrm_err)
-		goto flash_pkg_exit;
-
-	if (resp->error_code) {
+	if (hwrm_err) {
 		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
 
-		if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+		if (resp->error_code && error_code ==
+		    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
 			install.flags |= cpu_to_le16(
 				NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
 			hwrm_err = _hwrm_send_message(bp, &install,
 						      sizeof(install),
 						      INSTALL_PACKAGE_TIMEOUT);
-			if (hwrm_err)
-				goto flash_pkg_exit;
 		}
+		if (hwrm_err)
+			goto flash_pkg_exit;
 	}
 
 	if (resp->result) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 6fe4a7174271..dd621f6bd127 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
 static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				 u16 src_fid)
 {
-	flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+	flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
 }
 
 static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
@@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
 		goto free_node;
 
 	bnxt_tc_set_src_fid(bp, flow, src_fid);
-
-	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
-		bnxt_tc_set_flow_dir(bp, flow, src_fid);
+	bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
 
 	if (!bnxt_tc_can_offload(bp, flow)) {
 		rc = -EOPNOTSUPP;
@@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
 	 * 2. 15th bit of flow_handle must specify the flow
 	 *    direction (TX/RX).
 	 */
-	if (flow_node->flow.dir == BNXT_DIR_RX)
+	if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
 		handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
 			 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
 	else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index ffec57d1a5ec..4f05305052f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -23,6 +23,9 @@ struct bnxt_tc_l2_key {
 	__be16 inner_vlan_tci;
 	__be16 ether_type;
 	u8 num_vlans;
+	u8 dir;
+#define BNXT_DIR_RX	1
+#define BNXT_DIR_TX	0
 };
 
 struct bnxt_tc_l3_key {
@@ -98,9 +101,6 @@ struct bnxt_tc_flow {
 
 	/* flow applicable to pkts ingressing on this fid */
 	u16 src_fid;
-	u8 dir;
-#define BNXT_DIR_RX	1
-#define BNXT_DIR_TX	0
 	struct bnxt_tc_l2_key l2_key;
 	struct bnxt_tc_l2_key l2_mask;
 	struct bnxt_tc_l3_key l3_key;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index d3a0b614dbfa..b22196880d6d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
 	.set_coalesce = bcmgenet_set_coalesce,
 	.get_link_ksettings = bcmgenet_get_link_ksettings,
 	.set_link_ksettings = bcmgenet_set_link_ksettings,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 /* Power down the unimac, based on mode. */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 5ca17e62dc3e..35b59b5edf0f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = {
 	{ .compatible = "cdns,emac", .data = &emac_config },
 	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
-	{ .compatible = "sifive,fu540-macb", .data = &fu540_c000_config },
+	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 73632b843749..b821c9e1604c 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -10,7 +10,7 @@
 
 #include "cavium_ptp.h"
 
-#define DRV_NAME "Cavium PTP Driver"
+#define DRV_NAME "cavium_ptp"
 
 #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
 #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 032224178b64..6dd65f9b347c 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -237,8 +237,10 @@ int octeon_setup_iq(struct octeon_device *oct,
 	}
 
 	oct->num_iqs++;
-	if (oct->fn_list.enable_io_queues(oct))
+	if (oct->fn_list.enable_io_queues(oct)) {
+		octeon_delete_instr_queue(oct, iq_no);
 		return 1;
+	}
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 02959035ed3f..d692251ee252 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
 		return -ENOMEM;
 
 	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
-	if (err)
+	if (err) {
+		kvfree(t);
 		return err;
+	}
 
 	bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
 	kvfree(t);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index 133acca0bf31..092da2d90026 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -167,7 +167,7 @@ struct nps_enet_priv {
 };
 
 /**
- * nps_reg_set - Sets ENET register with provided value.
+ * nps_enet_reg_set - Sets ENET register with provided value.
  * @priv: Pointer to EZchip ENET private data structure.
  * @reg: Register offset from base address.
  * @value: Value to set in register.
@@ -179,7 +179,7 @@ static inline void nps_enet_reg_set(struct nps_enet_priv *priv,
 }
 
 /**
- * nps_reg_get - Gets value of specified ENET register.
+ * nps_enet_reg_get - Gets value of specified ENET register.
  * @priv: Pointer to EZchip ENET private data structure.
  * @reg: Register offset from base address.
  *
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 2fd2586e42bf..bc594892507a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
 	n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
 	if (n != 1) {
 		err = -EPERM;
-		goto err_irq;
+		goto err_irq_vectors;
 	}
 
 	ptp_qoriq->irq = pci_irq_vector(pdev, 0);
@@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
 err_no_clock:
 	free_irq(ptp_qoriq->irq, ptp_qoriq);
 err_irq:
+	pci_free_irq_vectors(pdev);
+err_irq_vectors:
 	iounmap(base);
 err_ioremap:
 	kfree(ptp_qoriq);
@@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
 
 	enetc_phc_index = -1;
 	ptp_qoriq_free(ptp_qoriq);
+	pci_free_irq_vectors(pdev);
 	kfree(ptp_qoriq);
 
 	pci_release_mem_regions(pdev);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 497298752381..aca95f64bde8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 				  u64_stats_fetch_begin(&priv->tx[ring].statss);
 				s->tx_packets += priv->tx[ring].pkt_done;
 				s->tx_bytes += priv->tx[ring].bytes_done;
-			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
 						       start));
 		}
 	}
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index d654c234aaf7..c5be4ebd8437 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
 	unsigned char *mac_addr_p;
-	unsigned int *mcastFilterSize_p;
+	__be32 *mcastFilterSize_p;
 	long ret;
 	unsigned long ret_attr;
 
@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 		return -EINVAL;
 	}
 
-	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
-			VETH_MCAST_FILTER_SIZE, NULL);
+	mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+							VETH_MCAST_FILTER_SIZE,
+							NULL);
 	if (!mcastFilterSize_p) {
 		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
 			"attribute\n");
@@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
-	adapter->mcastFilterSize = *mcastFilterSize_p;
+	adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
 	adapter->pool_config = 0;
 
 	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3da680073265..fa4bb940665c 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
 					       (u64)tx_buff->indir_dma,
 					       (u64)num_entries);
+		dma_unmap_single(dev, tx_buff->indir_dma,
+				 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
 	} else {
 		tx_buff->num_entries = num_entries;
 		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -1981,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work)
 
 	rwi = get_next_rwi(adapter);
 	while (rwi) {
+		if (adapter->state == VNIC_REMOVING ||
+		    adapter->state == VNIC_REMOVED)
+			goto out;
+
 		if (adapter->force_reset_recovery) {
 			adapter->force_reset_recovery = false;
 			rc = do_hard_reset(adapter, rwi, reset_state);
@@ -2005,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work)
 		netdev_dbg(adapter->netdev, "Reset failed\n");
 		free_all_rwi(adapter);
 	}
-
+out:
 	adapter->resetting = false;
 	if (we_lock_rtnl)
 		rtnl_unlock();
@@ -2788,7 +2794,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
 	union sub_crq *next;
 	int index;
 	int i, j;
-	u8 *first;
 
 restart_loop:
 	while (pending_scrq(adapter, scrq)) {
@@ -2818,14 +2823,6 @@ restart_loop:
 
 			txbuff->data_dma[j] = 0;
 		}
-		/* if sub_crq was sent indirectly */
-		first = &txbuff->indir_arr[0].generic.first;
-		if (*first == IBMVNIC_CRQ_CMD) {
-			dma_unmap_single(dev, txbuff->indir_dma,
-					 sizeof(txbuff->indir_arr),
-					 DMA_TO_DEVICE);
-			*first = 0;
-		}
 
 		if (txbuff->last_frag) {
 			dev_kfree_skb_any(txbuff->skb);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cbaf712d6529..7882148abb43 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7897,11 +7897,8 @@ static void ixgbe_service_task(struct work_struct *work)
 		return;
 	}
 	if (ixgbe_check_fw_error(adapter)) {
-		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-			rtnl_lock();
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			unregister_netdev(adapter->netdev);
-			rtnl_unlock();
-		}
 		ixgbe_service_event_complete(adapter);
 		return;
 	}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index a01c75ede871..e0363870f3a5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4931,6 +4931,13 @@ static const struct dmi_system_id msi_blacklist[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "P6T"),
 		},
 	},
+	{
+		.ident = "ASUS P6X",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+			DMI_MATCH(DMI_BOARD_NAME, "P6X"),
+		},
+	},
 	{}
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6c01314e87b0..db3552f2d087 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
 	if (err) {
 		en_err(priv, "Failed to allocate RSS indirection QP\n");
-		goto rss_err;
+		goto qp_alloc_err;
 	}
 
 	rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1241,6 +1241,7 @@ indir_err:
 		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
 	kfree(rss_map->indir_qp);
 	rss_map->indir_qp = NULL;
 rss_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ce1be2a84231..65bec19a438f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -184,8 +184,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
-	struct mlx5_wqe_eth_seg eth;
-	struct mlx5_wqe_data_seg data[0];
+	union {
+		struct {
+			struct mlx5_wqe_eth_seg eth;
+			struct mlx5_wqe_data_seg data[0];
+		};
+		u8 tls_progress_params_ctx[0];
+	};
 };
 
 struct mlx5e_rx_wqe_ll {
@@ -1100,6 +1105,8 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
 u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
 			      struct ethtool_ts_info *info);
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+			       struct ethtool_flash *flash);
 void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
 				  struct ethtool_pauseparam *pauseparam);
 int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index f3d98748b211..c7f86453c638 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -76,26 +76,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
 	u8 state;
 	int err;
 
-	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-		return 0;
-
 	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
 	if (err) {
 		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
 			   sq->sqn, err);
-		return err;
+		goto out;
 	}
 
-	if (state != MLX5_SQC_STATE_ERR) {
-		netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
-		return -EINVAL;
-	}
+	if (state != MLX5_SQC_STATE_ERR)
+		goto out;
 
 	mlx5e_tx_disable_queue(sq->txq);
 
 	err = mlx5e_wait_for_sq_flush(sq);
 	if (err)
-		return err;
+		goto out;
 
 	/* At this point, no new packets will arrive from the stack as TXQ is
 	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@ -104,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
 
 	err = mlx5e_sq_to_ready(sq, state);
 	if (err)
-		return err;
+		goto out;
 
 	mlx5e_reset_txqsq_cc_pc(sq);
 	sq->stats->recover++;
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	mlx5e_activate_txqsq(sq);
 
 	return 0;
+out:
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+	return err;
 }
 
 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index aaffa6f68dc0..7f78c004d12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -143,7 +143,10 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
 {
 	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
 	/* TX queue is created active. */
+
+	spin_lock(&c->xskicosq_lock);
 	mlx5e_trigger_irq(&c->xskicosq);
+	spin_unlock(&c->xskicosq_lock);
 }
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 407da83474ef..b7298f9ee3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,12 +11,14 @@
 #include "accel/tls.h"
 
 #define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
-	(sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
+	(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
+	 MLX5_ST_SZ_BYTES(tls_static_params))
 #define MLX5E_KTLS_STATIC_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
 
 #define MLX5E_KTLS_PROGRESS_WQE_SZ \
-	(sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
+	(offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
+	 MLX5_ST_SZ_BYTES(tls_progress_params))
 #define MLX5E_KTLS_PROGRESS_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
 #define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 3766545ce259..7833ddef0427 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
 	cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
 				   STATIC_PARAMS_DS_CNT);
 	cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
-	cseg->imm = cpu_to_be32(priv_tx->tisn);
+	cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
 
 	ucseg->flags = MLX5_UMR_INLINE;
 	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
@@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
 static void
 fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
-	MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn);
+	MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
 	MLX5_SET(tls_progress_params, ctx, record_tracker_state,
 		 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
 	MLX5_SET(tls_progress_params, ctx, auth_state,
@@ -104,18 +104,20 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
 		      PROGRESS_PARAMS_DS_CNT);
 	cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
 
-	fill_progress_params_ctx(wqe->data, priv_tx);
+	fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
 		       u16 pi, u8 num_wqebbs,
-		       skb_frag_t *resync_dump_frag)
+		       skb_frag_t *resync_dump_frag,
+		       u32 num_bytes)
 {
 	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
 	wi->skb = NULL;
 	wi->num_wqebbs = num_wqebbs;
 	wi->resync_dump_frag = resync_dump_frag;
+	wi->num_bytes = num_bytes;
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
 
 	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
 	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
 	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
 }
 
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
 
 	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
 	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
 	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
 }
 
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
 	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
 }
 
+struct mlx5e_dump_wqe {
+	struct mlx5_wqe_ctrl_seg ctrl;
+	struct mlx5_wqe_data_seg data;
+};
+
 static int
 tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		    skb_frag_t *frag, u32 tisn, bool first)
 {
 	struct mlx5_wqe_ctrl_seg *cseg;
-	struct mlx5_wqe_eth_seg *eseg;
 	struct mlx5_wqe_data_seg *dseg;
-	struct mlx5e_tx_wqe *wqe;
+	struct mlx5e_dump_wqe *wqe;
 	dma_addr_t dma_addr = 0;
-	u16 ds_cnt, ds_cnt_inl;
 	u8 num_wqebbs;
-	u16 pi, ihs;
+	u16 ds_cnt;
 	int fsz;
-
-	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-	ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
-	ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
-	ds_cnt += ds_cnt_inl;
-	ds_cnt += 1; /* one frag */
+	u16 pi;
 
 	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 
+	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
 	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 
 	cseg = &wqe->ctrl;
-	eseg = &wqe->eth;
-	dseg = wqe->data;
+	dseg = &wqe->data;
 
 	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
 	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-	cseg->imm = cpu_to_be32(tisn);
+	cseg->tisn = cpu_to_be32(tisn << 8);
 	cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
 
-	eseg->inline_hdr.sz = cpu_to_be16(ihs);
-	memcpy(eseg->inline_hdr.start, skb->data, ihs);
-	dseg += ds_cnt_inl;
-
 	fsz = skb_frag_size(frag);
 	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
 				    DMA_TO_DEVICE);
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
296 dseg->byte_count = cpu_to_be32(fsz); 292 dseg->byte_count = cpu_to_be32(fsz);
297 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); 293 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
298 294
299 tx_fill_wi(sq, pi, num_wqebbs, frag); 295 tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
300 sq->pc += num_wqebbs; 296 sq->pc += num_wqebbs;
301 297
302 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, 298 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
323 struct mlx5_wq_cyc *wq = &sq->wq; 319 struct mlx5_wq_cyc *wq = &sq->wq;
324 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 320 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
325 321
326 tx_fill_wi(sq, pi, 1, NULL); 322 tx_fill_wi(sq, pi, 1, NULL, 0);
327 323
328 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); 324 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
329} 325}
@@ -434,7 +430,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
434 priv_tx->expected_seq = seq + datalen; 430 priv_tx->expected_seq = seq + datalen;
435 431
436 cseg = &(*wqe)->ctrl; 432 cseg = &(*wqe)->ctrl;
437 cseg->imm = cpu_to_be32(priv_tx->tisn); 433 cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
438 434
439 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 435 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
440 stats->tls_encrypted_bytes += datalen; 436 stats->tls_encrypted_bytes += datalen;
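
The reworked tx_post_resync_dump() drops the inlined Ethernet headers entirely and sizes the DUMP WQE from the new two-segment struct alone; the removed ds_cnt_inl term only existed to account for inlined headers, which a DUMP WQE no longer carries. A minimal standalone sketch of that sizing arithmetic, with the mlx5 segment sizes written out as assumptions rather than taken from the headers:

    /* Sketch only: segment sizes assumed (MLX5_SEND_WQE_DS == 16 and
     * MLX5_SEND_WQEBB_NUM_DS == 4 in current mlx5 headers). */
    #include <stdio.h>

    #define SEND_WQE_DS        16 /* bytes per data segment */
    #define SEND_WQEBB_NUM_DS  4  /* 64-byte basic block / 16-byte segment */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct dump_wqe { /* mirrors struct mlx5e_dump_wqe: ctrl + one data seg */
            char ctrl[16];
            char data[16];
    };

    int main(void)
    {
            unsigned int ds_cnt = sizeof(struct dump_wqe) / SEND_WQE_DS;       /* 2 */
            unsigned int num_wqebbs = DIV_ROUND_UP(ds_cnt, SEND_WQEBB_NUM_DS); /* 1 */

            printf("ds_cnt=%u num_wqebbs=%u\n", ds_cnt, num_wqebbs);
            return 0;
    }
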
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 8657e0f26995..2c75b2752f58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
437 return &arfs_t->rules_hash[bucket_idx]; 437 return &arfs_t->rules_hash[bucket_idx];
438} 438}
439 439
440static u8 arfs_get_ip_proto(const struct sk_buff *skb)
441{
442 return (skb->protocol == htons(ETH_P_IP)) ?
443 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
444}
445
446static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, 440static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
447 u8 ip_proto, __be16 etype) 441 u8 ip_proto, __be16 etype)
448{ 442{
@@ -602,31 +596,9 @@ out:
602 arfs_may_expire_flow(priv); 596 arfs_may_expire_flow(priv);
603} 597}
604 598
605/* return L4 destination port from ip4/6 packets */
606static __be16 arfs_get_dst_port(const struct sk_buff *skb)
607{
608 char *transport_header;
609
610 transport_header = skb_transport_header(skb);
611 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
612 return ((struct tcphdr *)transport_header)->dest;
613 return ((struct udphdr *)transport_header)->dest;
614}
615
616/* return L4 source port from ip4/6 packets */
617static __be16 arfs_get_src_port(const struct sk_buff *skb)
618{
619 char *transport_header;
620
621 transport_header = skb_transport_header(skb);
622 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
623 return ((struct tcphdr *)transport_header)->source;
624 return ((struct udphdr *)transport_header)->source;
625}
626
627static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, 599static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
628 struct arfs_table *arfs_t, 600 struct arfs_table *arfs_t,
629 const struct sk_buff *skb, 601 const struct flow_keys *fk,
630 u16 rxq, u32 flow_id) 602 u16 rxq, u32 flow_id)
631{ 603{
632 struct arfs_rule *rule; 604 struct arfs_rule *rule;
@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
641 INIT_WORK(&rule->arfs_work, arfs_handle_work); 613 INIT_WORK(&rule->arfs_work, arfs_handle_work);
642 614
643 tuple = &rule->tuple; 615 tuple = &rule->tuple;
644 tuple->etype = skb->protocol; 616 tuple->etype = fk->basic.n_proto;
617 tuple->ip_proto = fk->basic.ip_proto;
645 if (tuple->etype == htons(ETH_P_IP)) { 618 if (tuple->etype == htons(ETH_P_IP)) {
646 tuple->src_ipv4 = ip_hdr(skb)->saddr; 619 tuple->src_ipv4 = fk->addrs.v4addrs.src;
647 tuple->dst_ipv4 = ip_hdr(skb)->daddr; 620 tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
648 } else { 621 } else {
649 memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 622 memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
650 sizeof(struct in6_addr)); 623 sizeof(struct in6_addr));
651 memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 624 memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr)); 625 sizeof(struct in6_addr));
653 } 626 }
654 tuple->ip_proto = arfs_get_ip_proto(skb); 627 tuple->src_port = fk->ports.src;
655 tuple->src_port = arfs_get_src_port(skb); 628 tuple->dst_port = fk->ports.dst;
656 tuple->dst_port = arfs_get_dst_port(skb);
657 629
658 rule->flow_id = flow_id; 630 rule->flow_id = flow_id;
659 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; 631 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
664 return rule; 636 return rule;
665} 637}
666 638
667static bool arfs_cmp_ips(struct arfs_tuple *tuple, 639static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
668 const struct sk_buff *skb)
669{ 640{
670 if (tuple->etype == htons(ETH_P_IP) && 641 if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
671 tuple->src_ipv4 == ip_hdr(skb)->saddr && 642 return false;
672 tuple->dst_ipv4 == ip_hdr(skb)->daddr) 643 if (tuple->etype != fk->basic.n_proto)
673 return true; 644 return false;
674 if (tuple->etype == htons(ETH_P_IPV6) && 645 if (tuple->etype == htons(ETH_P_IP))
675 (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 646 return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
676 sizeof(struct in6_addr))) && 647 tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
677 (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 648 if (tuple->etype == htons(ETH_P_IPV6))
678 sizeof(struct in6_addr)))) 649 return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
679 return true; 650 sizeof(struct in6_addr)) &&
651 !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr));
680 return false; 653 return false;
681} 654}
682 655
683static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, 656static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
684 const struct sk_buff *skb) 657 const struct flow_keys *fk)
685{ 658{
686 struct arfs_rule *arfs_rule; 659 struct arfs_rule *arfs_rule;
687 struct hlist_head *head; 660 struct hlist_head *head;
688 __be16 src_port = arfs_get_src_port(skb);
689 __be16 dst_port = arfs_get_dst_port(skb);
690 661
691 head = arfs_hash_bucket(arfs_t, src_port, dst_port); 662 head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
692 hlist_for_each_entry(arfs_rule, head, hlist) { 663 hlist_for_each_entry(arfs_rule, head, hlist) {
693 if (arfs_rule->tuple.src_port == src_port && 664 if (arfs_cmp(&arfs_rule->tuple, fk))
694 arfs_rule->tuple.dst_port == dst_port &&
695 arfs_cmp_ips(&arfs_rule->tuple, skb)) {
696 return arfs_rule; 665 return arfs_rule;
697 }
698 } 666 }
699 667
700 return NULL; 668 return NULL;
@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
707 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; 675 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
708 struct arfs_table *arfs_t; 676 struct arfs_table *arfs_t;
709 struct arfs_rule *arfs_rule; 677 struct arfs_rule *arfs_rule;
678 struct flow_keys fk;
679
680 if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
681 return -EPROTONOSUPPORT;
710 682
711 if (skb->protocol != htons(ETH_P_IP) && 683 if (fk.basic.n_proto != htons(ETH_P_IP) &&
712 skb->protocol != htons(ETH_P_IPV6)) 684 fk.basic.n_proto != htons(ETH_P_IPV6))
713 return -EPROTONOSUPPORT; 685 return -EPROTONOSUPPORT;
714 686
715 if (skb->encapsulation) 687 if (skb->encapsulation)
716 return -EPROTONOSUPPORT; 688 return -EPROTONOSUPPORT;
717 689
718 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); 690 arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
719 if (!arfs_t) 691 if (!arfs_t)
720 return -EPROTONOSUPPORT; 692 return -EPROTONOSUPPORT;
721 693
722 spin_lock_bh(&arfs->arfs_lock); 694 spin_lock_bh(&arfs->arfs_lock);
723 arfs_rule = arfs_find_rule(arfs_t, skb); 695 arfs_rule = arfs_find_rule(arfs_t, &fk);
724 if (arfs_rule) { 696 if (arfs_rule) {
725 if (arfs_rule->rxq == rxq_index) { 697 if (arfs_rule->rxq == rxq_index) {
726 spin_unlock_bh(&arfs->arfs_lock); 698 spin_unlock_bh(&arfs->arfs_lock);
@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
728 } 700 }
729 arfs_rule->rxq = rxq_index; 701 arfs_rule->rxq = rxq_index;
730 } else { 702 } else {
731 arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, 703 arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
732 rxq_index, flow_id);
733 if (!arfs_rule) { 704 if (!arfs_rule) {
734 spin_unlock_bh(&arfs->arfs_lock); 705 spin_unlock_bh(&arfs->arfs_lock);
735 return -ENOMEM; 706 return -ENOMEM;
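
The aRFS rewrite stops walking IP/TCP headers by hand and lets the flow dissector produce the tuple, which also covers the corner cases the open-coded helpers missed. A minimal sketch of the extraction step; the tuple struct below is illustrative, and only the flow-dissector calls are the kernel API:

    #include <linux/errno.h>
    #include <linux/if_ether.h>
    #include <linux/skbuff.h>
    #include <net/flow_dissector.h>

    struct five_tuple { /* illustrative, not a mlx5 structure */
            __be16 etype;
            u8 ip_proto;
            __be32 saddr, daddr;
            __be16 sport, dport;
    };

    static int extract_tuple(const struct sk_buff *skb, struct five_tuple *t)
    {
            struct flow_keys fk;

            if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
                    return -EPROTONOSUPPORT;

            t->etype = fk.basic.n_proto;
            t->ip_proto = fk.basic.ip_proto;
            if (fk.basic.n_proto == htons(ETH_P_IP)) {
                    t->saddr = fk.addrs.v4addrs.src;
                    t->daddr = fk.addrs.v4addrs.dst;
            }
            t->sport = fk.ports.src;
            t->dport = fk.ports.dst;
            return 0;
    }
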
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 03bed714bac3..20e628c907e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1081,6 +1081,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1081 link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : 1081 link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
1082 mlx5e_port_speed2linkmodes(mdev, speed, !ext); 1082 mlx5e_port_speed2linkmodes(mdev, speed, !ext);
1083 1083
1084 if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
1085 autoneg != AUTONEG_ENABLE) {
1086 netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
1087 __func__);
1088 err = -EINVAL;
1089 goto out;
1090 }
1091
1084 link_modes = link_modes & eproto.cap; 1092 link_modes = link_modes & eproto.cap;
1085 if (!link_modes) { 1093 if (!link_modes) {
1086 netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", 1094 netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
@@ -1338,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
1338 struct mlx5_core_dev *mdev = priv->mdev; 1346 struct mlx5_core_dev *mdev = priv->mdev;
1339 int err; 1347 int err;
1340 1348
1349 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
1350 return -EOPNOTSUPP;
1351
1341 if (pauseparam->autoneg) 1352 if (pauseparam->autoneg)
1342 return -EINVAL; 1353 return -EINVAL;
1343 1354
@@ -1679,6 +1690,40 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
1679 return 0; 1690 return 0;
1680} 1691}
1681 1692
1693int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1694 struct ethtool_flash *flash)
1695{
1696 struct mlx5_core_dev *mdev = priv->mdev;
1697 struct net_device *dev = priv->netdev;
1698 const struct firmware *fw;
1699 int err;
1700
1701 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
1702 return -EOPNOTSUPP;
1703
1704 err = request_firmware_direct(&fw, flash->data, &dev->dev);
1705 if (err)
1706 return err;
1707
1708 dev_hold(dev);
1709 rtnl_unlock();
1710
1711 err = mlx5_firmware_flash(mdev, fw, NULL);
1712 release_firmware(fw);
1713
1714 rtnl_lock();
1715 dev_put(dev);
1716 return err;
1717}
1718
1719static int mlx5e_flash_device(struct net_device *dev,
1720 struct ethtool_flash *flash)
1721{
1722 struct mlx5e_priv *priv = netdev_priv(dev);
1723
1724 return mlx5e_ethtool_flash_device(priv, flash);
1725}
1726
1682static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, 1727static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
1683 bool is_rx_cq) 1728 bool is_rx_cq)
1684{ 1729{
@@ -1961,6 +2006,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1961 .set_wol = mlx5e_set_wol, 2006 .set_wol = mlx5e_set_wol,
1962 .get_module_info = mlx5e_get_module_info, 2007 .get_module_info = mlx5e_get_module_info,
1963 .get_module_eeprom = mlx5e_get_module_eeprom, 2008 .get_module_eeprom = mlx5e_get_module_eeprom,
2009 .flash_device = mlx5e_flash_device,
1964 .get_priv_flags = mlx5e_get_priv_flags, 2010 .get_priv_flags = mlx5e_get_priv_flags,
1965 .set_priv_flags = mlx5e_set_priv_flags, 2011 .set_priv_flags = mlx5e_set_priv_flags,
1966 .self_test = mlx5e_self_test, 2012 .self_test = mlx5e_self_test,
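
The ethtool core calls .flash_device with RTNL held and expects it held on return, but a firmware burn can take a long time, so mlx5e_ethtool_flash_device() releases RTNL around the sleeping work and pins the netdev with dev_hold() so it cannot go away meanwhile. A generic sketch of that pattern, where do_slow_work() is a hypothetical stand-in; the path is typically exercised from userspace with `ethtool -f <iface> <image>`:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static int slow_op_without_rtnl(struct net_device *dev)
    {
            int err;

            dev_hold(dev); /* keep the netdev alive while unlocked */
            rtnl_unlock(); /* the caller (ethtool core) took RTNL */

            err = do_slow_work(dev); /* hypothetical sleeping operation */

            rtnl_lock();   /* restore the lock state the caller expects */
            dev_put(dev);
            return err;
    }
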
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6c712c5be4d8..9d5f6e56188f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1321,7 +1321,6 @@ err_free_txqsq:
1321void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) 1321void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1322{ 1322{
1323 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); 1323 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1324 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
1325 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); 1324 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1326 netdev_tx_reset_queue(sq->txq); 1325 netdev_tx_reset_queue(sq->txq);
1327 netif_tx_start_queue(sq->txq); 1326 netif_tx_start_queue(sq->txq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7ecfc53cf5f6..00b2d4a86159 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1480 struct mlx5_flow_spec *spec, 1480 struct mlx5_flow_spec *spec,
1481 struct flow_cls_offload *f, 1481 struct flow_cls_offload *f,
1482 struct net_device *filter_dev, 1482 struct net_device *filter_dev,
1483 u8 *match_level, u8 *tunnel_match_level) 1483 u8 *inner_match_level, u8 *outer_match_level)
1484{ 1484{
1485 struct netlink_ext_ack *extack = f->common.extack; 1485 struct netlink_ext_ack *extack = f->common.extack;
1486 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1486 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1495 struct flow_dissector *dissector = rule->match.dissector; 1495 struct flow_dissector *dissector = rule->match.dissector;
1496 u16 addr_type = 0; 1496 u16 addr_type = 0;
1497 u8 ip_proto = 0; 1497 u8 ip_proto = 0;
1498 u8 *match_level;
1498 1499
1499 *match_level = MLX5_MATCH_NONE; 1500 match_level = outer_match_level;
1500 1501
1501 if (dissector->used_keys & 1502 if (dissector->used_keys &
1502 ~(BIT(FLOW_DISSECTOR_KEY_META) | 1503 ~(BIT(FLOW_DISSECTOR_KEY_META) |
@@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
1524 } 1525 }
1525 1526
1526 if (mlx5e_get_tc_tun(filter_dev)) { 1527 if (mlx5e_get_tc_tun(filter_dev)) {
1527 if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) 1528 if (parse_tunnel_attr(priv, spec, f, filter_dev,
1529 outer_match_level))
1528 return -EOPNOTSUPP; 1530 return -EOPNOTSUPP;
1529 1531
1530 /* In decap flow, header pointers should point to the inner 1532 /* At this point, header pointers should point to the inner
1531 * headers, outer header were already set by parse_tunnel_attr 1533 * headers, outer header were already set by parse_tunnel_attr
1532 */ 1534 */
1535 match_level = inner_match_level;
1533 headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, 1536 headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
1534 spec); 1537 spec);
1535 headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, 1538 headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
@@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
1831 struct flow_cls_offload *f, 1834 struct flow_cls_offload *f,
1832 struct net_device *filter_dev) 1835 struct net_device *filter_dev)
1833{ 1836{
1837 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
1834 struct netlink_ext_ack *extack = f->common.extack; 1838 struct netlink_ext_ack *extack = f->common.extack;
1835 struct mlx5_core_dev *dev = priv->mdev; 1839 struct mlx5_core_dev *dev = priv->mdev;
1836 struct mlx5_eswitch *esw = dev->priv.eswitch; 1840 struct mlx5_eswitch *esw = dev->priv.eswitch;
1837 struct mlx5e_rep_priv *rpriv = priv->ppriv; 1841 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1838 u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
1839 struct mlx5_eswitch_rep *rep; 1842 struct mlx5_eswitch_rep *rep;
1840 int err; 1843 int err;
1841 1844
1842 err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); 1845 inner_match_level = MLX5_MATCH_NONE;
1846 outer_match_level = MLX5_MATCH_NONE;
1847
1848 err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
1849 &outer_match_level);
1850 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
1851 outer_match_level : inner_match_level;
1843 1852
1844 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { 1853 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1845 rep = rpriv->rep; 1854 rep = rpriv->rep;
1846 if (rep->vport != MLX5_VPORT_UPLINK && 1855 if (rep->vport != MLX5_VPORT_UPLINK &&
1847 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && 1856 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
1848 esw->offloads.inline_mode < match_level)) { 1857 esw->offloads.inline_mode < non_tunnel_match_level)) {
1849 NL_SET_ERR_MSG_MOD(extack, 1858 NL_SET_ERR_MSG_MOD(extack,
1850 "Flow is not offloaded due to min inline setting"); 1859 "Flow is not offloaded due to min inline setting");
1851 netdev_warn(priv->netdev, 1860 netdev_warn(priv->netdev,
1852 "Flow is not offloaded due to min inline setting, required %d actual %d\n", 1861 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
1853 match_level, esw->offloads.inline_mode); 1862 non_tunnel_match_level, esw->offloads.inline_mode);
1854 return -EOPNOTSUPP; 1863 return -EOPNOTSUPP;
1855 } 1864 }
1856 } 1865 }
1857 1866
1858 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { 1867 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
1859 flow->esw_attr->match_level = match_level; 1868 flow->esw_attr->inner_match_level = inner_match_level;
1860 flow->esw_attr->tunnel_match_level = tunnel_match_level; 1869 flow->esw_attr->outer_match_level = outer_match_level;
1861 } else { 1870 } else {
1862 flow->nic_attr->match_level = match_level; 1871 flow->nic_attr->match_level = non_tunnel_match_level;
1863 } 1872 }
1864 1873
1865 return err; 1874 return err;
@@ -3158,7 +3167,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
3158 3167
3159 esw_attr->parse_attr = parse_attr; 3168 esw_attr->parse_attr = parse_attr;
3160 esw_attr->chain = f->common.chain_index; 3169 esw_attr->chain = f->common.chain_index;
3161 esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; 3170 esw_attr->prio = f->common.prio;
3162 3171
3163 esw_attr->in_rep = in_rep; 3172 esw_attr->in_rep = in_rep;
3164 esw_attr->in_mdev = in_mdev; 3173 esw_attr->in_mdev = in_mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a38e8a3c7c9a..04685dbb280c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr {
377 struct mlx5_termtbl_handle *termtbl; 377 struct mlx5_termtbl_handle *termtbl;
378 } dests[MLX5_MAX_FLOW_FWD_VPORTS]; 378 } dests[MLX5_MAX_FLOW_FWD_VPORTS];
379 u32 mod_hdr_id; 379 u32 mod_hdr_id;
380 u8 match_level; 380 u8 inner_match_level;
381 u8 tunnel_match_level; 381 u8 outer_match_level;
382 struct mlx5_fc *counter; 382 struct mlx5_fc *counter;
383 u32 chain; 383 u32 chain;
384 u16 prio; 384 u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 089ae4d48a82..0323fd078271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
207 207
208 mlx5_eswitch_set_rule_source_port(esw, spec, attr); 208 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
209 209
210 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { 210 if (attr->outer_match_level != MLX5_MATCH_NONE)
211 if (attr->tunnel_match_level != MLX5_MATCH_NONE)
212 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
213 if (attr->match_level != MLX5_MATCH_NONE)
214 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
215 } else if (attr->match_level != MLX5_MATCH_NONE) {
216 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 211 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
217 } 212 if (attr->inner_match_level != MLX5_MATCH_NONE)
213 spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
218 214
219 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 215 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
220 flow_act.modify_id = attr->mod_hdr_id; 216 flow_act.modify_id = attr->mod_hdr_id;
@@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
290 mlx5_eswitch_set_rule_source_port(esw, spec, attr); 286 mlx5_eswitch_set_rule_source_port(esw, spec, attr);
291 287
292 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; 288 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
293 if (attr->match_level != MLX5_MATCH_NONE) 289 if (attr->outer_match_level != MLX5_MATCH_NONE)
294 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; 290 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
295 291
296 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); 292 rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9314777d99e3..d685122d9ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
590 data_size = crdump_size - offset; 590 data_size = crdump_size - offset;
591 else 591 else
592 data_size = MLX5_CR_DUMP_CHUNK_SIZE; 592 data_size = MLX5_CR_DUMP_CHUNK_SIZE;
593 err = devlink_fmsg_binary_put(fmsg, cr_data, data_size); 593 err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
594 data_size);
594 if (err) 595 if (err)
595 goto free_data; 596 goto free_data;
596 } 597 }
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
700 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 701 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
701 goto out; 702 goto out;
702 703
704 fatal_error = check_fatal_sensors(dev);
705
706 if (fatal_error && !health->fatal_error) {
707 mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
708 dev->priv.health.fatal_error = fatal_error;
709 print_health_info(dev);
710 mlx5_trigger_health_work(dev);
711 goto out;
712 }
713
703 count = ioread32be(health->health_counter); 714 count = ioread32be(health->health_counter);
704 if (count == health->prev) 715 if (count == health->prev)
705 ++health->miss_counter; 716 ++health->miss_counter;
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
718 if (health->synd && health->synd != prev_synd) 729 if (health->synd && health->synd != prev_synd)
719 queue_work(health->wq, &health->report_work); 730 queue_work(health->wq, &health->report_work);
720 731
721 fatal_error = check_fatal_sensors(dev);
722
723 if (fatal_error && !health->fatal_error) {
724 mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
725 dev->priv.health.fatal_error = fatal_error;
726 print_health_info(dev);
727 mlx5_trigger_health_work(dev);
728 }
729
730out: 732out:
731 mod_timer(&health->timer, get_next_poll_jiffies()); 733 mod_timer(&health->timer, get_next_poll_jiffies());
732} 734}
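
Two fixes share this file. The dump fix is a chunked-copy off-by-offset: each iteration emitted the start of cr_data instead of advancing into it, so every chunk after the first repeated the first one. The reorder moves the fatal-sensor check ahead of the health-counter read, presumably so a dead device is reported as a fatal event rather than as counter misses. A minimal sketch of the corrected chunking shape, with the devlink specifics replaced by a placeholder callback:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Sketch: emit 'size' bytes in 'chunk'-sized pieces; emit() is a placeholder. */
    static int emit_in_chunks(const char *buf, u32 size, u32 chunk,
                              int (*emit)(const char *p, u32 len))
    {
            u32 offset, data_size;
            int err;

            for (offset = 0; offset < size; offset += data_size) {
                    data_size = min(chunk, size - offset);
                    err = emit(buf + offset, data_size); /* was: emit(buf, ...) */
                    if (err)
                            return err;
            }
            return 0;
    }
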
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index ebd81f6b556e..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -122,6 +122,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
122 return mlx5e_ethtool_get_ts_info(priv, info); 122 return mlx5e_ethtool_get_ts_info(priv, info);
123} 123}
124 124
125static int mlx5i_flash_device(struct net_device *netdev,
126 struct ethtool_flash *flash)
127{
128 struct mlx5e_priv *priv = mlx5i_epriv(netdev);
129
130 return mlx5e_ethtool_flash_device(priv, flash);
131}
132
125enum mlx5_ptys_width { 133enum mlx5_ptys_width {
126 MLX5_PTYS_WIDTH_1X = 1 << 0, 134 MLX5_PTYS_WIDTH_1X = 1 << 0,
127 MLX5_PTYS_WIDTH_2X = 1 << 1, 135 MLX5_PTYS_WIDTH_2X = 1 << 1,
@@ -233,6 +241,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
233 .get_ethtool_stats = mlx5i_get_ethtool_stats, 241 .get_ethtool_stats = mlx5i_get_ethtool_stats,
234 .get_ringparam = mlx5i_get_ringparam, 242 .get_ringparam = mlx5i_get_ringparam,
235 .set_ringparam = mlx5i_set_ringparam, 243 .set_ringparam = mlx5i_set_ringparam,
244 .flash_device = mlx5i_flash_device,
236 .get_channels = mlx5i_get_channels, 245 .get_channels = mlx5i_get_channels,
237 .set_channels = mlx5i_set_channels, 246 .set_channels = mlx5i_set_channels,
238 .get_coalesce = mlx5i_get_coalesce, 247 .get_coalesce = mlx5i_get_coalesce,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea9ee88491e5..ea1d4d26ece0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
27 case 128: 27 case 128:
28 general_obj_key_size = 28 general_obj_key_size =
29 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128; 29 MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
30 key_p += sz_bytes;
30 break; 31 break;
31 case 256: 32 case 256:
32 general_obj_key_size = 33 general_obj_key_size =
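
The added line shifts the destination pointer before the (unchanged, not shown) copy, so a 128-bit key is written into the upper half of the fixed-size key field with zero padding below it; the placement requirement is inferred from the fix itself, not from a published spec. A hedged sketch of the resulting layout:

    #include <linux/string.h>

    #define KEY_FIELD_BYTES 32 /* assumed 256-bit general-object key field */

    static void set_key(unsigned char field[KEY_FIELD_BYTES],
                        const void *key, unsigned int sz_bytes)
    {
            unsigned char *key_p = field;

            memset(field, 0, KEY_FIELD_BYTES);
            if (sz_bytes == 16)        /* 128-bit key: occupy the upper half */
                    key_p += sz_bytes; /* the one-line fix from this hunk */
            memcpy(key_p, key, sz_bytes);
    }
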
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index e8ac90564dbe..84a87d059333 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -471,7 +471,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
471void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, 471void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
472 unsigned int priority) 472 unsigned int priority)
473{ 473{
474 rulei->priority = priority >> 16; 474 rulei->priority = priority;
475} 475}
476 476
477void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei, 477void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 63b07edd9d81..38bb1cfe4e8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -29,7 +29,7 @@
29 29
30struct mlxsw_sp_ptp_state { 30struct mlxsw_sp_ptp_state {
31 struct mlxsw_sp *mlxsw_sp; 31 struct mlxsw_sp *mlxsw_sp;
32 struct rhashtable unmatched_ht; 32 struct rhltable unmatched_ht;
33 spinlock_t unmatched_lock; /* protects the HT */ 33 spinlock_t unmatched_lock; /* protects the HT */
34 struct delayed_work ht_gc_dw; 34 struct delayed_work ht_gc_dw;
35 u32 gc_cycle; 35 u32 gc_cycle;
@@ -45,7 +45,7 @@ struct mlxsw_sp1_ptp_key {
45 45
46struct mlxsw_sp1_ptp_unmatched { 46struct mlxsw_sp1_ptp_unmatched {
47 struct mlxsw_sp1_ptp_key key; 47 struct mlxsw_sp1_ptp_key key;
48 struct rhash_head ht_node; 48 struct rhlist_head ht_node;
49 struct rcu_head rcu; 49 struct rcu_head rcu;
50 struct sk_buff *skb; 50 struct sk_buff *skb;
51 u64 timestamp; 51 u64 timestamp;
@@ -359,7 +359,7 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
359/* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on 359/* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on
360 * error. 360 * error.
361 */ 361 */
362static struct mlxsw_sp1_ptp_unmatched * 362static int
363mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp, 363mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
364 struct mlxsw_sp1_ptp_key key, 364 struct mlxsw_sp1_ptp_key key,
365 struct sk_buff *skb, 365 struct sk_buff *skb,
@@ -368,41 +368,51 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
368 int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL; 368 int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
369 struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state; 369 struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
370 struct mlxsw_sp1_ptp_unmatched *unmatched; 370 struct mlxsw_sp1_ptp_unmatched *unmatched;
371 struct mlxsw_sp1_ptp_unmatched *conflict; 371 int err;
372 372
373 unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC); 373 unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
374 if (!unmatched) 374 if (!unmatched)
375 return ERR_PTR(-ENOMEM); 375 return -ENOMEM;
376 376
377 unmatched->key = key; 377 unmatched->key = key;
378 unmatched->skb = skb; 378 unmatched->skb = skb;
379 unmatched->timestamp = timestamp; 379 unmatched->timestamp = timestamp;
380 unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles; 380 unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
381 381
382 conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht, 382 err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
383 &unmatched->ht_node, 383 mlxsw_sp1_ptp_unmatched_ht_params);
384 mlxsw_sp1_ptp_unmatched_ht_params); 384 if (err)
385 if (conflict)
386 kfree(unmatched); 385 kfree(unmatched);
387 386
388 return conflict; 387 return err;
389} 388}
390 389
391static struct mlxsw_sp1_ptp_unmatched * 390static struct mlxsw_sp1_ptp_unmatched *
392mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp, 391mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
393 struct mlxsw_sp1_ptp_key key) 392 struct mlxsw_sp1_ptp_key key, int *p_length)
394{ 393{
395 return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key, 394 struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
396 mlxsw_sp1_ptp_unmatched_ht_params); 395 struct rhlist_head *tmp, *list;
396 int length = 0;
397
398 list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
399 mlxsw_sp1_ptp_unmatched_ht_params);
400 rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
401 last = unmatched;
402 length++;
403 }
404
405 *p_length = length;
406 return last;
397} 407}
398 408
399static int 409static int
400mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp, 410mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
401 struct mlxsw_sp1_ptp_unmatched *unmatched) 411 struct mlxsw_sp1_ptp_unmatched *unmatched)
402{ 412{
403 return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht, 413 return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
404 &unmatched->ht_node, 414 &unmatched->ht_node,
405 mlxsw_sp1_ptp_unmatched_ht_params); 415 mlxsw_sp1_ptp_unmatched_ht_params);
406} 416}
407 417
408/* This function is called in the following scenarios: 418/* This function is called in the following scenarios:
@@ -489,75 +499,38 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
489 struct mlxsw_sp1_ptp_key key, 499 struct mlxsw_sp1_ptp_key key,
490 struct sk_buff *skb, u64 timestamp) 500 struct sk_buff *skb, u64 timestamp)
491{ 501{
492 struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict; 502 struct mlxsw_sp1_ptp_unmatched *unmatched;
503 int length;
493 int err; 504 int err;
494 505
495 rcu_read_lock(); 506 rcu_read_lock();
496 507
497 unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key);
498
499 spin_lock(&mlxsw_sp->ptp_state->unmatched_lock); 508 spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
500 509
501 if (unmatched) { 510 unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
502 /* There was an unmatched entry when we looked, but it may have
503 * been removed before we took the lock.
504 */
505 err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
506 if (err)
507 unmatched = NULL;
508 }
509
510 if (!unmatched) {
511 /* We have no unmatched entry, but one may have been added after
512 * we looked, but before we took the lock.
513 */
514 unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
515 skb, timestamp);
516 if (IS_ERR(unmatched)) {
517 if (skb)
518 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
519 key.local_port,
520 key.ingress, NULL);
521 unmatched = NULL;
522 } else if (unmatched) {
523 /* Save just told us, under lock, that the entry is
524 * there, so this has to work.
525 */
526 err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp,
527 unmatched);
528 WARN_ON_ONCE(err);
529 }
530 }
531
532 /* If unmatched is non-NULL here, it comes either from the lookup, or
533 * from the save attempt above. In either case the entry was removed
534 * from the hash table. If unmatched is NULL, a new unmatched entry was
535 * added to the hash table, and there was no conflict.
536 */
537
538 if (skb && unmatched && unmatched->timestamp) { 511 if (skb && unmatched && unmatched->timestamp) {
539 unmatched->skb = skb; 512 unmatched->skb = skb;
540 } else if (timestamp && unmatched && unmatched->skb) { 513 } else if (timestamp && unmatched && unmatched->skb) {
541 unmatched->timestamp = timestamp; 514 unmatched->timestamp = timestamp;
542 } else if (unmatched) { 515 } else {
543 /* unmatched holds an older entry of the same type: either an 516 /* Either there is no entry to match, or one that is there is
544 * skb if we are handling skb, or a timestamp if we are handling 517 * incompatible.
545 * timestamp. We can't match that up, so save what we have.
546 */ 518 */
547 conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key, 519 if (length < 100)
548 skb, timestamp); 520 err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
549 if (IS_ERR(conflict)) { 521 skb, timestamp);
550 if (skb) 522 else
551 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb, 523 err = -E2BIG;
552 key.local_port, 524 if (err && skb)
553 key.ingress, NULL); 525 mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
554 } else { 526 key.local_port,
555 /* Above, we removed an object with this key from the 527 key.ingress, NULL);
556 * hash table, under lock, so conflict can not be a 528 unmatched = NULL;
557 * valid pointer. 529 }
558 */ 530
559 WARN_ON_ONCE(conflict); 531 if (unmatched) {
560 } 532 err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
533 WARN_ON_ONCE(err);
561 } 534 }
562 535
563 spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock); 536 spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
@@ -669,9 +642,8 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
669 local_bh_disable(); 642 local_bh_disable();
670 643
671 spin_lock(&ptp_state->unmatched_lock); 644 spin_lock(&ptp_state->unmatched_lock);
672 err = rhashtable_remove_fast(&ptp_state->unmatched_ht, 645 err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
673 &unmatched->ht_node, 646 mlxsw_sp1_ptp_unmatched_ht_params);
674 mlxsw_sp1_ptp_unmatched_ht_params);
675 spin_unlock(&ptp_state->unmatched_lock); 647 spin_unlock(&ptp_state->unmatched_lock);
676 648
677 if (err) 649 if (err)
@@ -702,7 +674,7 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
702 ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw); 674 ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
703 gc_cycle = ptp_state->gc_cycle++; 675 gc_cycle = ptp_state->gc_cycle++;
704 676
705 rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter); 677 rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
706 rhashtable_walk_start(&iter); 678 rhashtable_walk_start(&iter);
707 while ((obj = rhashtable_walk_next(&iter))) { 679 while ((obj = rhashtable_walk_next(&iter))) {
708 if (IS_ERR(obj)) 680 if (IS_ERR(obj))
@@ -855,8 +827,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
855 827
856 spin_lock_init(&ptp_state->unmatched_lock); 828 spin_lock_init(&ptp_state->unmatched_lock);
857 829
858 err = rhashtable_init(&ptp_state->unmatched_ht, 830 err = rhltable_init(&ptp_state->unmatched_ht,
859 &mlxsw_sp1_ptp_unmatched_ht_params); 831 &mlxsw_sp1_ptp_unmatched_ht_params);
860 if (err) 832 if (err)
861 goto err_hashtable_init; 833 goto err_hashtable_init;
862 834
@@ -891,7 +863,7 @@ err_fifo_clr:
891err_mtptpt1_set: 863err_mtptpt1_set:
892 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); 864 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
893err_mtptpt_set: 865err_mtptpt_set:
894 rhashtable_destroy(&ptp_state->unmatched_ht); 866 rhltable_destroy(&ptp_state->unmatched_ht);
895err_hashtable_init: 867err_hashtable_init:
896 kfree(ptp_state); 868 kfree(ptp_state);
897 return ERR_PTR(err); 869 return ERR_PTR(err);
@@ -906,8 +878,8 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
906 mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false); 878 mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
907 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0); 879 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
908 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0); 880 mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
909 rhashtable_free_and_destroy(&ptp_state->unmatched_ht, 881 rhltable_free_and_destroy(&ptp_state->unmatched_ht,
910 &mlxsw_sp1_ptp_unmatched_free_fn, NULL); 882 &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
911 kfree(ptp_state); 883 kfree(ptp_state);
912} 884}
913 885
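
rhashtable rejects duplicate keys, which is why the old PTP code needed the conflict dance; rhltable chains all entries sharing a key on a list, so several unmatched events can coexist and the lookup can count them. A minimal sketch of the rhltable calls this conversion leans on; note that in the driver, lookup and remove are serialized by its own spinlock, which is what makes using the looked-up pointer after the walk safe:

    #include <linux/rhashtable.h>
    #include <linux/slab.h>

    struct item {
            u32 key;
            struct rhlist_head node; /* rhlist_head, not rhash_head */
    };

    static const struct rhashtable_params params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct item, key),
            .head_offset = offsetof(struct item, node),
    };

    static int add_then_take_last(struct rhltable *ht, u32 key)
    {
            struct item *it, *last = NULL;
            struct rhlist_head *tmp, *list;
            int err;

            it = kzalloc(sizeof(*it), GFP_ATOMIC);
            if (!it)
                    return -ENOMEM;
            it->key = key;

            err = rhltable_insert(ht, &it->node, params); /* duplicates are fine */
            if (err) {
                    kfree(it);
                    return err;
            }

            rcu_read_lock();
            list = rhltable_lookup(ht, &key, params);
            rhl_for_each_entry_rcu(it, tmp, list, node)
                    last = it; /* visit every entry that shares the key */
            rcu_read_unlock();

            /* safe only because callers serialize, as the driver's lock does */
            return last ? rhltable_remove(ht, &last->node, params) : 0;
    }
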
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 39aca1ab4687..86fc6e6b46dd 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
317 break; 317 break;
318 case OCELOT_ACL_ACTION_TRAP: 318 case OCELOT_ACL_ACTION_TRAP:
319 VCAP_ACT_SET(PORT_MASK, 0x0); 319 VCAP_ACT_SET(PORT_MASK, 0x0);
320 VCAP_ACT_SET(MASK_MODE, 0x0); 320 VCAP_ACT_SET(MASK_MODE, 0x1);
321 VCAP_ACT_SET(POLICE_ENA, 0x0); 321 VCAP_ACT_SET(POLICE_ENA, 0x0);
322 VCAP_ACT_SET(POLICE_IDX, 0x0); 322 VCAP_ACT_SET(POLICE_IDX, 0x0);
323 VCAP_ACT_SET(CPU_QU_NUM, 0x0); 323 VCAP_ACT_SET(CPU_QU_NUM, 0x0);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 59487d446a09..b894bc0c9c16 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -13,12 +13,6 @@ struct ocelot_port_block {
13 struct ocelot_port *port; 13 struct ocelot_port *port;
14}; 14};
15 15
16static u16 get_prio(u32 prio)
17{
18 /* prio starts from 0x1000 while the ids starts from 0 */
19 return prio >> 16;
20}
21
22static int ocelot_flower_parse_action(struct flow_cls_offload *f, 16static int ocelot_flower_parse_action(struct flow_cls_offload *f,
23 struct ocelot_ace_rule *rule) 17 struct ocelot_ace_rule *rule)
24{ 18{
@@ -168,7 +162,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
168 } 162 }
169 163
170finished_key_parsing: 164finished_key_parsing:
171 ocelot_rule->prio = get_prio(f->common.prio); 165 ocelot_rule->prio = f->common.prio;
172 ocelot_rule->id = f->cookie; 166 ocelot_rule->id = f->cookie;
173 return ocelot_flower_parse_action(f, ocelot_rule); 167 return ocelot_flower_parse_action(f, ocelot_rule);
174} 168}
@@ -218,7 +212,7 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
218 struct ocelot_ace_rule rule; 212 struct ocelot_ace_rule rule;
219 int ret; 213 int ret;
220 214
221 rule.prio = get_prio(f->common.prio); 215 rule.prio = f->common.prio;
222 rule.port = port_block->port; 216 rule.port = port_block->port;
223 rule.id = f->cookie; 217 rule.id = f->cookie;
224 218
@@ -236,7 +230,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
236 struct ocelot_ace_rule rule; 230 struct ocelot_ace_rule rule;
237 int ret; 231 int ret;
238 232
239 rule.prio = get_prio(f->common.prio); 233 rule.prio = f->common.prio;
240 rule.port = port_block->port; 234 rule.port = port_block->port;
241 rule.id = f->cookie; 235 rule.id = f->cookie;
242 ret = ocelot_ace_rule_stats_update(&rule); 236 ret = ocelot_ace_rule_stats_update(&rule);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index d8b7fba96d58..337b0cbfd153 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3919 * setup (if available). */ 3919 * setup (if available). */
3920 status = myri10ge_request_irq(mgp); 3920 status = myri10ge_request_irq(mgp);
3921 if (status != 0) 3921 if (status != 0)
3922 goto abort_with_firmware; 3922 goto abort_with_slices;
3923 myri10ge_free_irq(mgp); 3923 myri10ge_free_irq(mgp);
3924 3924
3925 /* Save configuration space to be restored if the 3925 /* Save configuration space to be restored if the
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 4054b70d7719..5afcb3c4c2ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1163 bool clr_gpr, lmem_step step) 1163 bool clr_gpr, lmem_step step)
1164{ 1164{
1165 s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; 1165 s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
1166 bool first = true, last; 1166 bool first = true, narrow_ld, last;
1167 bool needs_inc = false; 1167 bool needs_inc = false;
1168 swreg stack_off_reg; 1168 swreg stack_off_reg;
1169 u8 prev_gpr = 255; 1169 u8 prev_gpr = 255;
@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1209 1209
1210 needs_inc = true; 1210 needs_inc = true;
1211 } 1211 }
1212
1213 narrow_ld = clr_gpr && size < 8;
1214
1212 if (lm3) { 1215 if (lm3) {
1216 unsigned int nop_cnt;
1217
1213 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); 1218 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
1214 /* For size < 4 one slot will be filled by zeroing of upper. */ 1219 /* For size < 4 one slot will be filled by zeroing of upper,
1215 wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); 1220 * but be careful, that zeroing could be eliminated by zext
1221 * optimization.
1222 */
1223 nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
1224 wrp_nops(nfp_prog, nop_cnt);
1216 } 1225 }
1217 1226
1218 if (clr_gpr && size < 8) 1227 if (narrow_ld)
1219 wrp_zext(nfp_prog, meta, gpr); 1228 wrp_zext(nfp_prog, meta, gpr);
1220 1229
1221 while (size) { 1230 while (size) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index e209f150c5f2..457bdc60f3ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1409,13 +1409,21 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
1409 struct nfp_flower_priv *priv = app->priv; 1409 struct nfp_flower_priv *priv = app->priv;
1410 struct flow_block_cb *block_cb; 1410 struct flow_block_cb *block_cb;
1411 1411
1412 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && 1412 if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1413 !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && 1413 !nfp_flower_internal_port_can_offload(app, netdev)) ||
1414 nfp_flower_internal_port_can_offload(app, netdev))) 1414 (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
1415 nfp_flower_internal_port_can_offload(app, netdev)))
1415 return -EOPNOTSUPP; 1416 return -EOPNOTSUPP;
1416 1417
1417 switch (f->command) { 1418 switch (f->command) {
1418 case FLOW_BLOCK_BIND: 1419 case FLOW_BLOCK_BIND:
1420 cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
1421 if (cb_priv &&
1422 flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
1423 cb_priv,
1424 &nfp_block_cb_list))
1425 return -EBUSY;
1426
1419 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); 1427 cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
1420 if (!cb_priv) 1428 if (!cb_priv)
1421 return -ENOMEM; 1429 return -ENOMEM;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 86e968cd5ffd..124a43dc136a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -93,7 +93,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
93 return -EOPNOTSUPP; 93 return -EOPNOTSUPP;
94 } 94 }
95 95
96 if (flow->common.prio != (1 << 16)) { 96 if (flow->common.prio != 1) {
97 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority"); 97 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
98 return -EOPNOTSUPP; 98 return -EOPNOTSUPP;
99 } 99 }
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a7a80f4b722a..f0ee982eb1b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -328,13 +328,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
328 328
329 flow.daddr = *(__be32 *)n->primary_key; 329 flow.daddr = *(__be32 *)n->primary_key;
330 330
331 /* Only concerned with route changes for representors. */
332 if (!nfp_netdev_is_nfp_repr(n->dev))
333 return NOTIFY_DONE;
334
335 app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); 331 app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
336 app = app_priv->app; 332 app = app_priv->app;
337 333
334 if (!nfp_netdev_is_nfp_repr(n->dev) &&
335 !nfp_flower_internal_port_can_offload(app, n->dev))
336 return NOTIFY_DONE;
337
338 /* Only concerned with changes to routes already added to NFP. */ 338 /* Only concerned with changes to routes already added to NFP. */
339 if (!nfp_tun_has_route(app, flow.daddr)) 339 if (!nfp_tun_has_route(app, flow.daddr))
340 return NOTIFY_DONE; 340 return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 829dd60ab937..1efff7f68ef6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1325 &drv_version); 1325 &drv_version);
1326 if (rc) { 1326 if (rc) {
1327 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1327 DP_NOTICE(cdev, "Failed sending drv version command\n");
1328 return rc; 1328 goto err4;
1329 } 1329 }
1330 } 1330 }
1331 1331
@@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
1333 1333
1334 return 0; 1334 return 0;
1335 1335
1336err4:
1337 qed_ll2_dealloc_if(cdev);
1336err3: 1338err3:
1337 qed_hw_stop(cdev); 1339 qed_hw_stop(cdev);
1338err2: 1340err2:
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index e1dd6ea60d67..bae0074ab9aa 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
5921 skb = napi_alloc_skb(&tp->napi, pkt_size); 5921 skb = napi_alloc_skb(&tp->napi, pkt_size);
5922 if (skb) 5922 if (skb)
5923 skb_copy_to_linear_data(skb, data, pkt_size); 5923 skb_copy_to_linear_data(skb, data, pkt_size);
5924 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
5924 5925
5925 return skb; 5926 return skb;
5926} 5927}
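
rtl8169 recycles its RX buffers after copying the packet out, and the DMA mapping stays live across that reuse; the rule is sync-for-CPU before the CPU reads, sync-for-device before the hardware owns the buffer again, and the added line supplies the second half. A minimal sketch of the pattern, not the driver's exact path:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static struct sk_buff *copy_and_recycle(struct device *d,
                                            struct napi_struct *napi,
                                            void *data, dma_addr_t addr,
                                            unsigned int pkt_size)
    {
            struct sk_buff *skb;

            dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
            skb = napi_alloc_skb(napi, pkt_size);
            if (skb)
                    skb_copy_to_linear_data(skb, data, pkt_size);
            /* hand the buffer back to the device before it is reused */
            dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
            return skb;
    }
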
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ef8f08931fe8..6cacd5e893ac 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,7 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* Renesas Ethernet AVB device driver 2/* Renesas Ethernet AVB device driver
3 * 3 *
4 * Copyright (C) 2014-2015 Renesas Electronics Corporation 4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
5 * Copyright (C) 2015 Renesas Solutions Corp. 5 * Copyright (C) 2015 Renesas Solutions Corp.
6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> 6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
7 * 7 *
@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
513 kfree(ts_skb); 513 kfree(ts_skb);
514 if (tag == tfa_tag) { 514 if (tag == tfa_tag) {
515 skb_tstamp_tx(skb, &shhwtstamps); 515 skb_tstamp_tx(skb, &shhwtstamps);
516 dev_consume_skb_any(skb);
516 break; 517 break;
518 } else {
519 dev_kfree_skb_any(skb);
517 } 520 }
518 } 521 }
519 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); 522 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1564 } 1567 }
1565 goto unmap; 1568 goto unmap;
1566 } 1569 }
1567 ts_skb->skb = skb; 1570 ts_skb->skb = skb_get(skb);
1568 ts_skb->tag = priv->ts_skb_tag++; 1571 ts_skb->tag = priv->ts_skb_tag++;
1569 priv->ts_skb_tag &= 0x3ff; 1572 priv->ts_skb_tag &= 0x3ff;
1570 list_add_tail(&ts_skb->list, &priv->ts_skb_list); 1573 list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev)
1693 /* Clear the timestamp list */ 1696 /* Clear the timestamp list */
1694 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { 1697 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
1695 list_del(&ts_skb->list); 1698 list_del(&ts_skb->list);
1699 kfree_skb(ts_skb->skb);
1696 kfree(ts_skb); 1700 kfree(ts_skb);
1697 } 1701 }
1698 1702
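
ravb parks TX skbs on its own timestamp list while the normal completion path may free them, so the list could end up holding dangling pointers; skb_get() gives the list its own reference, and each exit path (timestamp match, mismatch, device close) now drops exactly one. A generic sketch of the refcounting pattern:

    #include <linux/list.h>
    #include <linux/skbuff.h>
    #include <linux/slab.h>

    struct ts_entry {
            struct list_head list;
            struct sk_buff *skb;
    };

    /* Queue: take our own reference; the TX path keeps its own. */
    static int ts_queue(struct list_head *head, struct sk_buff *skb, gfp_t gfp)
    {
            struct ts_entry *e = kmalloc(sizeof(*e), gfp);

            if (!e)
                    return -ENOMEM;
            e->skb = skb_get(skb); /* the fix: the list owns a reference */
            list_add_tail(&e->list, head);
            return 0;
    }

    /* Teardown: every queued entry still owns one reference. */
    static void ts_flush(struct list_head *head)
    {
            struct ts_entry *e, *tmp;

            list_for_each_entry_safe(e, tmp, head, list) {
                    list_del(&e->list);
                    kfree_skb(e->skb); /* drop it on the non-delivery path */
                    kfree(e);
            }
    }
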
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 7a5e6c5abb57..276c7cae7cee 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
794 printk(KERN_ERR "Sgiseeq: Cannot register net device, " 794 printk(KERN_ERR "Sgiseeq: Cannot register net device, "
795 "aborting.\n"); 795 "aborting.\n");
796 err = -ENODEV; 796 err = -ENODEV;
797 goto err_out_free_page; 797 goto err_out_free_attrs;
798 } 798 }
799 799
800 printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); 800 printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
801 801
802 return 0; 802 return 0;
803 803
804err_out_free_page: 804err_out_free_attrs:
805 free_page((unsigned long) sp->srings); 805 dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
806 sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
806err_out_free_dev: 807err_out_free_dev:
807 free_netdev(dev); 808 free_netdev(dev);
808 809
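
sp->srings comes from dma_alloc_attrs() with DMA_ATTR_NON_CONSISTENT, so releasing it with free_page() was a mismatched allocator pair; the error path now mirrors the allocation exactly. A minimal sketch of keeping the pair symmetric, with a placeholder descriptor type:

    #include <linux/dma-mapping.h>

    struct rings { char desc[4096]; }; /* placeholder descriptor block */

    static struct rings *rings_alloc(struct device *dev, dma_addr_t *dma)
    {
            return dma_alloc_attrs(dev, sizeof(struct rings), dma, GFP_KERNEL,
                                   DMA_ATTR_NON_CONSISTENT);
    }

    static void rings_free(struct device *dev, struct rings *r, dma_addr_t dma)
    {
            /* must mirror the allocator: same size, handle and attrs */
            dma_free_attrs(dev, sizeof(struct rings), r, dma,
                           DMA_ATTR_NON_CONSISTENT);
    }
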
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 4644b2aeeba1..e2e469c37a4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
1194 int ret; 1194 int ret;
1195 struct device *dev = &bsp_priv->pdev->dev; 1195 struct device *dev = &bsp_priv->pdev->dev;
1196 1196
1197 if (!ldo) { 1197 if (!ldo)
1198 dev_err(dev, "no regulator found\n"); 1198 return 0;
1199 return -1;
1200 }
1201 1199
1202 if (enable) { 1200 if (enable) {
1203 ret = regulator_enable(ldo); 1201 ret = regulator_enable(ldo);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 37c0bc699cd9..6c305b6ecad0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -94,7 +94,7 @@ static int tc_fill_entry(struct stmmac_priv *priv,
94 struct stmmac_tc_entry *entry, *frag = NULL; 94 struct stmmac_tc_entry *entry, *frag = NULL;
95 struct tc_u32_sel *sel = cls->knode.sel; 95 struct tc_u32_sel *sel = cls->knode.sel;
96 u32 off, data, mask, real_off, rem; 96 u32 off, data, mask, real_off, rem;
97 u32 prio = cls->common.prio; 97 u32 prio = cls->common.prio << 16;
98 int ret; 98 int ret;
99 99
100 /* Only 1 match per entry */ 100 /* Only 1 match per entry */
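
This one-liner is the flip side of the prio changes in mlx5e, mlxsw, ocelot and nfp above: the flow offload core now hands drivers the TC major priority as a small integer rather than the raw handle-style value, so drivers that consumed the raw form either delete their `>> 16` or, like stmmac here, shift back up for hardware programmed with the old encoding. A tiny sketch of the equivalence:

    #include <linux/pkt_sched.h> /* TC_H_MAJ() */
    #include <linux/types.h>

    /* Old convention: drivers got the raw value and extracted the major part. */
    static u32 prio_from_raw(u32 raw)
    {
            return TC_H_MAJ(raw) >> 16; /* e.g. 0x10000 -> 1 */
    }

    /* New convention: common.prio already is the small integer; stmmac
     * re-creates the raw form because that is what it programs into HW. */
    static u32 prio_to_raw(u32 prio)
    {
            return prio << 16;
    }
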
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 32a89744972d..a46b8b2e44e1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
2775 if (!cpsw) 2775 if (!cpsw)
2776 return -ENOMEM; 2776 return -ENOMEM;
2777 2777
2778 platform_set_drvdata(pdev, cpsw);
2778 cpsw->dev = dev; 2779 cpsw->dev = dev;
2779 2780
2780 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW); 2781 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev)
2879 goto clean_cpts; 2880 goto clean_cpts;
2880 } 2881 }
2881 2882
2882 platform_set_drvdata(pdev, cpsw);
2883 priv = netdev_priv(ndev); 2883 priv = netdev_priv(ndev);
2884 priv->cpsw = cpsw; 2884 priv->cpsw = cpsw;
2885 priv->ndev = ndev; 2885 priv->ndev = ndev;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 8479a440527b..12466a72cefc 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
1504 pci_unmap_single(lp->pci_dev, 1504 pci_unmap_single(lp->pci_dev,
1505 lp->rx_skbs[cur_bd].skb_dma, 1505 lp->rx_skbs[cur_bd].skb_dma,
1506 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1506 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1507 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN) 1507 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
1508 memmove(skb->data, skb->data - NET_IP_ALIGN, 1508 memmove(skb->data, skb->data - NET_IP_ALIGN,
1509 pkt_len); 1509 pkt_len);
1510 data = skb_put(skb, pkt_len); 1510 data = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 78a7de3fb622..c62f474b6d08 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
371static void tsi108_stat_carry(struct net_device *dev) 371static void tsi108_stat_carry(struct net_device *dev)
372{ 372{
373 struct tsi108_prv_data *data = netdev_priv(dev); 373 struct tsi108_prv_data *data = netdev_priv(dev);
374 unsigned long flags;
374 u32 carry1, carry2; 375 u32 carry1, carry2;
375 376
376 spin_lock_irq(&data->misclock); 377 spin_lock_irqsave(&data->misclock, flags);
377 378
378 carry1 = TSI_READ(TSI108_STAT_CARRY1); 379 carry1 = TSI_READ(TSI108_STAT_CARRY1);
379 carry2 = TSI_READ(TSI108_STAT_CARRY2); 380 carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
441 TSI108_STAT_TXPAUSEDROP_CARRY, 442 TSI108_STAT_TXPAUSEDROP_CARRY,
442 &data->tx_pause_drop); 443 &data->tx_pause_drop);
443 444
444 spin_unlock_irq(&data->misclock); 445 spin_unlock_irqrestore(&data->misclock, flags);
445} 446}
446 447
447/* Read a stat counter atomically with respect to carries. 448/* Read a stat counter atomically with respect to carries.
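
tsi108_stat_carry() can be reached from the statistics interrupt as well as from process context; spin_unlock_irq() unconditionally re-enables interrupts, which is wrong for any caller that already had them disabled. The irqsave/irqrestore pair records and restores the previous state instead. A minimal sketch:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(lock);
    static u32 counter;

    /* Safe from any context: restores whatever IRQ state the caller had. */
    static void bump_counter(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&lock, flags);
            counter++;
            spin_unlock_irqrestore(&lock, flags);
            /* spin_unlock_irq() here would force IRQs back on, even in an ISR */
    }
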
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3544e1991579..e8fce6d715ef 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
 				struct rtnl_link_stats64 *t)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+	struct netvsc_device *nvdev;
 	struct netvsc_vf_pcpu_stats vf_tot;
 	int i;
 
+	rcu_read_lock();
+
+	nvdev = rcu_dereference(ndev_ctx->nvdev);
 	if (!nvdev)
-		return;
+		goto out;
 
 	netdev_stats_to_stats64(t, &net->stats);
 
@@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
 		t->rx_packets += packets;
 		t->multicast += multicast;
 	}
+out:
+	rcu_read_unlock();
 }
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
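Note: rcu_dereference_rtnl() is only legal under RTNL or inside an RCU read-side section, and the stats callback can be invoked with neither held; hence the explicit rcu_read_lock()/rcu_read_unlock() pair above, with a single exit label so the unlock is never skipped. The general shape, sketched with hypothetical types:

    #include <linux/rcupdate.h>

    struct state { int value; };

    struct holder {
        struct state __rcu *cur;
    };

    static int read_value(struct holder *h)
    {
        struct state *s;
        int v = 0;

        rcu_read_lock();
        s = rcu_dereference(h->cur);    /* only valid inside the section */
        if (s)
            v = s->value;               /* s cannot be freed until unlock */
        rcu_read_unlock();
        return v;
    }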
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b41696e16bdc..c20e7ef18bc9 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 		err = hwsim_subscribe_all_others(phy);
 		if (err < 0) {
 			mutex_unlock(&hwsim_phys_lock);
-			goto err_reg;
+			goto err_subscribe;
 		}
 	}
 	list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 
 	return idx;
 
+err_subscribe:
+	ieee802154_unregister_hw(phy->hw);
 err_reg:
 	kfree(pib);
 err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
 	return 0;
 
 platform_drv:
-	genl_unregister_family(&hwsim_genl_family);
-platform_dev:
 	platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+	genl_unregister_family(&hwsim_genl_family);
 	return rc;
 }
 
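Note: both hwsim fixes above restore the invariant that error labels undo work in exactly the reverse order it was done: hwsim_add_one() gains an err_subscribe label to drop the hardware registration it now owes, and hwsim_init_module() had its two cleanup labels running in registration order rather than reverse. The canonical layout, with hypothetical setup helpers:

    static int step_a(void) { return 0; }   /* hypothetical: allocate */
    static int step_b(void) { return 0; }   /* hypothetical: register */
    static int step_c(void) { return 0; }   /* hypothetical: start */
    static void undo_b(void) { }
    static void undo_a(void) { }

    static int my_setup(void)
    {
        int err;

        err = step_a();
        if (err)
            return err;

        err = step_b();
        if (err)
            goto err_undo_a;

        err = step_c();
        if (err)
            goto err_undo_b;

        return 0;

    err_undo_b:     /* unwind strictly in reverse order of setup */
        undo_b();
    err_undo_a:
        undo_a();
        return err;
    }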
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index c5c417a3c0ce..bcc40a236624 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -73,46 +73,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
 	debugfs_remove_recursive(nsim_dev_port->ddir);
 }
 
+static struct net *nsim_devlink_net(struct devlink *devlink)
+{
+	return &init_net;
+}
+
 static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV4_FIB, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
 }
 
 static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV4_FIB_RULES, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
 }
 
 static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV6_FIB, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
 }
 
 static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV6_FIB_RULES, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
 }
 
 static int nsim_dev_resources_register(struct devlink *devlink)
 {
-	struct nsim_dev *nsim_dev = devlink_priv(devlink);
 	struct devlink_resource_size_params params = {
 		.size_max = (u64)-1,
 		.size_granularity = 1,
 		.unit = DEVLINK_RESOURCE_UNIT_ENTRY
 	};
+	struct net *net = nsim_devlink_net(devlink);
 	int err;
 	u64 n;
 
@@ -126,8 +127,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		goto out;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV4_FIB, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
 	err = devlink_resource_register(devlink, "fib", n,
 					NSIM_RESOURCE_IPV4_FIB,
 					NSIM_RESOURCE_IPV4, &params);
@@ -136,8 +136,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		return err;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV4_FIB_RULES, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
 	err = devlink_resource_register(devlink, "fib-rules", n,
 					NSIM_RESOURCE_IPV4_FIB_RULES,
 					NSIM_RESOURCE_IPV4, &params);
@@ -156,8 +155,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		goto out;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV6_FIB, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
 	err = devlink_resource_register(devlink, "fib", n,
 					NSIM_RESOURCE_IPV6_FIB,
 					NSIM_RESOURCE_IPV6, &params);
@@ -166,8 +164,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		return err;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV6_FIB_RULES, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
 	err = devlink_resource_register(devlink, "fib-rules", n,
 					NSIM_RESOURCE_IPV6_FIB_RULES,
 					NSIM_RESOURCE_IPV6, &params);
@@ -179,19 +176,19 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV4_FIB,
 					  nsim_dev_ipv4_fib_resource_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV4_FIB_RULES,
 					  nsim_dev_ipv4_fib_rules_res_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV6_FIB,
 					  nsim_dev_ipv6_fib_resource_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV6_FIB_RULES,
 					  nsim_dev_ipv6_fib_rules_res_occ_get,
-					  nsim_dev);
+					  net);
 out:
 	return err;
 }
@@ -199,11 +196,11 @@ out:
 static int nsim_dev_reload(struct devlink *devlink,
 			   struct netlink_ext_ack *extack)
 {
-	struct nsim_dev *nsim_dev = devlink_priv(devlink);
 	enum nsim_resource_id res_ids[] = {
 		NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
 		NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
 	};
+	struct net *net = nsim_devlink_net(devlink);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
@@ -212,8 +209,7 @@ static int nsim_dev_reload(struct devlink *devlink,
 
 		err = devlink_resource_size_get(devlink, res_ids[i], &val);
 		if (!err) {
-			err = nsim_fib_set_max(nsim_dev->fib_data,
-					       res_ids[i], val, extack);
+			err = nsim_fib_set_max(net, res_ids[i], val, extack);
 			if (err)
 				return err;
 		}
@@ -285,15 +281,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
 	mutex_init(&nsim_dev->port_list_lock);
 	nsim_dev->fw_update_status = true;
 
-	nsim_dev->fib_data = nsim_fib_create();
-	if (IS_ERR(nsim_dev->fib_data)) {
-		err = PTR_ERR(nsim_dev->fib_data);
-		goto err_devlink_free;
-	}
-
 	err = nsim_dev_resources_register(devlink);
 	if (err)
-		goto err_fib_destroy;
+		goto err_devlink_free;
 
 	err = devlink_register(devlink, &nsim_bus_dev->dev);
 	if (err)
@@ -315,8 +305,6 @@ err_dl_unregister:
 	devlink_unregister(devlink);
 err_resources_unregister:
 	devlink_resources_unregister(devlink, NULL);
-err_fib_destroy:
-	nsim_fib_destroy(nsim_dev->fib_data);
err_devlink_free:
 	devlink_free(devlink);
 	return ERR_PTR(err);
@@ -330,7 +318,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
 	nsim_dev_debugfs_exit(nsim_dev);
 	devlink_unregister(devlink);
 	devlink_resources_unregister(devlink, NULL);
-	nsim_fib_destroy(nsim_dev->fib_data);
 	mutex_destroy(&nsim_dev->port_list_lock);
 	devlink_free(devlink);
 }
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 8c57ba747772..f61d094746c0 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,6 +18,7 @@
 #include <net/ip_fib.h>
 #include <net/ip6_fib.h>
 #include <net/fib_rules.h>
+#include <net/netns/generic.h>
 
 #include "netdevsim.h"
 
@@ -32,14 +33,15 @@ struct nsim_per_fib_data {
 };
 
 struct nsim_fib_data {
-	struct notifier_block fib_nb;
 	struct nsim_per_fib_data ipv4;
 	struct nsim_per_fib_data ipv6;
 };
 
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, bool max)
+static unsigned int nsim_fib_net_id;
+
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
 {
+	struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
 	struct nsim_fib_entry *entry;
 
 	switch (res_id) {
@@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
 	return max ? entry->max : entry->num;
 }
 
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, u64 val,
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
 		     struct netlink_ext_ack *extack)
 {
+	struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
 	struct nsim_fib_entry *entry;
 	int err = 0;
 
@@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
 	return err;
 }
 
-static int nsim_fib_rule_event(struct nsim_fib_data *data,
-			       struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
 {
+	struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
 	struct netlink_ext_ack *extack = info->extack;
 	int err = 0;
 
@@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
 	return err;
 }
 
-static int nsim_fib_event(struct nsim_fib_data *data,
-			  struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct fib_notifier_info *info, bool add)
 {
+	struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
 	struct netlink_ext_ack *extack = info->extack;
 	int err = 0;
 
@@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data,
 static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
 			     void *ptr)
 {
-	struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
-						  fib_nb);
 	struct fib_notifier_info *info = ptr;
 	int err = 0;
 
 	switch (event) {
 	case FIB_EVENT_RULE_ADD: /* fall through */
 	case FIB_EVENT_RULE_DEL:
-		err = nsim_fib_rule_event(data, info,
-					  event == FIB_EVENT_RULE_ADD);
+		err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
 		break;
 
 	case FIB_EVENT_ENTRY_ADD: /* fall through */
 	case FIB_EVENT_ENTRY_DEL:
-		err = nsim_fib_event(data, info,
-				     event == FIB_EVENT_ENTRY_ADD);
+		err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
 		break;
 	}
 
@@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
 /* inconsistent dump, trying again */
 static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
 {
-	struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
-						  fib_nb);
+	struct nsim_fib_data *data;
+	struct net *net;
+
+	rcu_read_lock();
+	for_each_net_rcu(net) {
+		data = net_generic(net, nsim_fib_net_id);
+
+		data->ipv4.fib.num = 0ULL;
+		data->ipv4.rules.num = 0ULL;
 
-	data->ipv4.fib.num = 0ULL;
-	data->ipv4.rules.num = 0ULL;
-	data->ipv6.fib.num = 0ULL;
-	data->ipv6.rules.num = 0ULL;
+		data->ipv6.fib.num = 0ULL;
+		data->ipv6.rules.num = 0ULL;
+	}
+	rcu_read_unlock();
 }
 
-struct nsim_fib_data *nsim_fib_create(void)
-{
-	struct nsim_fib_data *data;
-	int err;
+static struct notifier_block nsim_fib_nb = {
+	.notifier_call = nsim_fib_event_nb,
+};
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return ERR_PTR(-ENOMEM);
+/* Initialize per network namespace state */
+static int __net_init nsim_fib_netns_init(struct net *net)
+{
+	struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
 
 	data->ipv4.fib.max = (u64)-1;
 	data->ipv4.rules.max = (u64)-1;
@@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void)
 	data->ipv6.fib.max = (u64)-1;
 	data->ipv6.rules.max = (u64)-1;
 
-	data->fib_nb.notifier_call = nsim_fib_event_nb;
-	err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
-	if (err) {
-		pr_err("Failed to register fib notifier\n");
-		goto err_out;
-	}
+	return 0;
+}
 
-	return data;
+static struct pernet_operations nsim_fib_net_ops = {
+	.init = nsim_fib_netns_init,
+	.id = &nsim_fib_net_id,
+	.size = sizeof(struct nsim_fib_data),
+};
 
-err_out:
-	kfree(data);
-	return ERR_PTR(err);
+void nsim_fib_exit(void)
+{
+	unregister_pernet_subsys(&nsim_fib_net_ops);
+	unregister_fib_notifier(&nsim_fib_nb);
 }
 
-void nsim_fib_destroy(struct nsim_fib_data *data)
+int nsim_fib_init(void)
 {
-	unregister_fib_notifier(&data->fib_nb);
-	kfree(data);
+	int err;
+
+	err = register_pernet_subsys(&nsim_fib_net_ops);
+	if (err < 0) {
+		pr_err("Failed to register pernet subsystem\n");
+		goto err_out;
+	}
+
+	err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
+	if (err < 0) {
+		pr_err("Failed to register fib notifier\n");
+		goto err_out;
+	}
+
+err_out:
+	return err;
 }
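Note: the fib.c rework above follows the standard pernet-storage pattern: a struct pernet_operations with a non-NULL .id and a non-zero .size makes the core allocate one zeroed private area per network namespace, net_generic(net, id) retrieves it, and .init runs for each namespace as it comes up. A condensed sketch of the registration (all names hypothetical):

    #include <linux/module.h>
    #include <linux/types.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    struct my_net_state {
        u64 counter;
    };

    static unsigned int my_net_id;

    static int __net_init my_netns_init(struct net *net)
    {
        struct my_net_state *st = net_generic(net, my_net_id);

        st->counter = 0;    /* area is preallocated and zeroed per netns */
        return 0;
    }

    static struct pernet_operations my_net_ops = {
        .init = my_netns_init,
        .id   = &my_net_id,
        .size = sizeof(struct my_net_state),
    };

    static int __init my_mod_init(void)
    {
        return register_pernet_subsys(&my_net_ops);
    }

    static void __exit my_mod_exit(void)
    {
        unregister_pernet_subsys(&my_net_ops);
    }

    module_init(my_mod_init);
    module_exit(my_mod_exit);
    MODULE_LICENSE("GPL");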
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0740940f41b1..55f57f76d01b 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -357,12 +357,18 @@ static int __init nsim_module_init(void)
 	if (err)
 		goto err_dev_exit;
 
-	err = rtnl_link_register(&nsim_link_ops);
+	err = nsim_fib_init();
 	if (err)
 		goto err_bus_exit;
 
+	err = rtnl_link_register(&nsim_link_ops);
+	if (err)
+		goto err_fib_exit;
+
 	return 0;
 
+err_fib_exit:
+	nsim_fib_exit();
 err_bus_exit:
 	nsim_bus_exit();
 err_dev_exit:
@@ -373,6 +379,7 @@ err_dev_exit:
 static void __exit nsim_module_exit(void)
 {
 	rtnl_link_unregister(&nsim_link_ops);
+	nsim_fib_exit();
 	nsim_bus_exit();
 	nsim_dev_exit();
 }
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 79c05af2a7c0..9404637d34b7 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -169,12 +169,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
 int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
 		      unsigned int port_index);
 
-struct nsim_fib_data *nsim_fib_create(void);
-void nsim_fib_destroy(struct nsim_fib_data *fib_data);
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, u64 val,
+int nsim_fib_init(void);
+void nsim_fib_exit(void);
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
 		     struct netlink_ext_ack *extack);
 
 #if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 222ccd9ecfce..6ad8b1c63c34 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -257,36 +257,20 @@ static int at803x_config_init(struct phy_device *phydev)
 	 * after HW reset: RX delay enabled and TX delay disabled
 	 * after SW reset: RX delay enabled, while TX delay retains the
 	 * value before reset.
-	 *
-	 * So let's first disable the RX and TX delays in PHY and enable
-	 * them based on the mode selected (this also takes care of RGMII
-	 * mode where we expect delays to be disabled)
 	 */
-
-	ret = at803x_disable_rx_delay(phydev);
-	if (ret < 0)
-		return ret;
-	ret = at803x_disable_tx_delay(phydev);
-	if (ret < 0)
-		return ret;
-
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
-		/* If RGMII_ID or RGMII_RXID are specified enable RX delay,
-		 * otherwise keep it disabled
-		 */
+	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
 		ret = at803x_enable_rx_delay(phydev);
-		if (ret < 0)
-			return ret;
-	}
+	else
+		ret = at803x_disable_rx_delay(phydev);
+	if (ret < 0)
+		return ret;
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
-		/* If RGMII_ID or RGMII_TXID are specified enable TX delay,
-		 * otherwise keep it disabled
-		 */
+	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
 		ret = at803x_enable_tx_delay(phydev);
-	}
+	else
+		ret = at803x_disable_tx_delay(phydev);
 
 	return ret;
 }
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index b9d4145781ca..7935593debb1 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev)
 	int val, devad;
 	bool link = true;
 
+	if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+		if (val < 0)
+			return val;
+
+		/* Autoneg is being started, therefore disregard current
+		 * link status and report link as down.
+		 */
+		if (val & MDIO_AN_CTRL1_RESTART) {
+			phydev->link = 0;
+			return 0;
+		}
+	}
+
 	while (mmd_mask && link) {
 		devad = __ffs(mmd_mask);
 		mmd_mask &= ~BIT(devad);
@@ -509,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_status);
 
+/**
+ * genphy_c45_config_aneg - restart auto-negotiation or forced setup
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we force a configuration.
+ */
+int genphy_c45_config_aneg(struct phy_device *phydev)
+{
+	bool changed = false;
+	int ret;
+
+	if (phydev->autoneg == AUTONEG_DISABLE)
+		return genphy_c45_pma_setup_forced(phydev);
+
+	ret = genphy_c45_an_config_aneg(phydev);
+	if (ret < 0)
+		return ret;
+	if (ret > 0)
+		changed = true;
+
+	return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
+
 /* The gen10g_* functions are the old Clause 45 stub */
 
 int gen10g_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef7aa738e0dc..6b0f89369b46 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev)
 	 * allowed to call genphy_config_aneg()
 	 */
 	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
-		return -EOPNOTSUPP;
+		return genphy_c45_config_aneg(phydev);
 
 	return genphy_config_aneg(phydev);
 }
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7ddd91df99e3..27ebc2c6c2d0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1752,7 +1752,17 @@ EXPORT_SYMBOL(genphy_aneg_done);
  */
 int genphy_update_link(struct phy_device *phydev)
 {
-	int status;
+	int status = 0, bmcr;
+
+	bmcr = phy_read(phydev, MII_BMCR);
+	if (bmcr < 0)
+		return bmcr;
+
+	/* Autoneg is being started, therefore disregard BMSR value and
+	 * report link as down.
+	 */
+	if (bmcr & BMCR_ANRESTART)
+		goto done;
 
 	/* The link state is latched low so that momentary link
 	 * drops can be detected. Do not double-read the status
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index abfa0da9bbd2..e8089def5a46 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team)
 
 	team->dev->vlan_features = vlan_features;
 	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+				     NETIF_F_HW_VLAN_CTAG_TX |
+				     NETIF_F_HW_VLAN_STAG_TX |
 				     NETIF_F_GSO_UDP_L4;
 	team->dev->hard_header_len = max_hard_header_len;
 
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 5519248a791e..32b08b18e120 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
 	}
 	if (!timeout) {
 		dev_err(&udev->dev, "firmware not ready in time\n");
-		return -ETIMEDOUT;
+		ret = -ETIMEDOUT;
+		goto err;
 	}
 
 	/* enable ethernet mode (?) */
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index d62b6706a537..fc5895f85cee 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
 	status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
 					 usb_buf, 24);
 	if (status != 0)
-		return status;
+		goto out;
 
 	memcpy(usb_buf, init_msg_2, 12);
 	status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
 					 usb_buf, 28);
 	if (status != 0)
-		return status;
+		goto out;
 
 	memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
 	kfree(usb_buf);
 	return status;
 }
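Note: the two early returns above leaked usb_buf; funnelling every exit through one out: label puts the kfree() on all paths, success included. This single-exit shape is worth reaching for whenever a function allocates a scratch buffer up front (helpers below are hypothetical stubs):

    #include <linux/slab.h>
    #include <linux/types.h>

    static int first_step(u8 *buf)  { return 0; }   /* hypothetical */
    static int second_step(u8 *buf) { return 0; }   /* hypothetical */

    static int init_device(void)
    {
        u8 *buf;
        int status;

        buf = kmalloc(64, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        status = first_step(buf);
        if (status)
            goto out;

        status = second_step(buf);
    out:
        kfree(buf);     /* runs on success and on every failure path */
        return status;
    }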
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3d92ea6fcc02..f033fee225a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf,
 	ret = register_netdev(netdev);
 	if (ret != 0) {
 		netif_err(dev, probe, netdev, "couldn't register the device\n");
-		goto out3;
+		goto out4;
 	}
 
 	usb_set_intfdata(intf, dev);
@@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
 
 	ret = lan78xx_phy_init(dev);
 	if (ret < 0)
-		goto out4;
+		goto out5;
 
 	return 0;
 
-out4:
+out5:
 	unregister_netdev(netdev);
+out4:
+	usb_free_urb(dev->urb_intr);
 out3:
 	lan78xx_unbind(dev, intf);
 out2:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0cc03a9ff545..04137ac373b0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 	ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
 			      RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
 			      value, index, tmp, size, 500);
+	if (ret < 0)
+		memset(data, 0xff, size);
+	else
+		memcpy(data, tmp, size);
 
-	memcpy(data, tmp, size);
 	kfree(tmp);
 
 	return ret;
@@ -4018,8 +4021,7 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
 	unregister_pm_notifier(&tp->pm_notifier);
 #endif
-	if (!test_bit(RTL8152_UNPLUG, &tp->flags))
-		napi_disable(&tp->napi);
+	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
@@ -5350,7 +5352,6 @@ static int rtl8152_probe(struct usb_interface *intf,
 	return 0;
 
 out1:
-	netif_napi_del(&tp->napi);
 	usb_set_intfdata(intf, NULL);
 out:
 	free_netdev(netdev);
@@ -5365,7 +5366,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
 	if (tp) {
 		rtl_set_unplug(tp);
 
-		netif_napi_del(&tp->napi);
 		unregister_netdev(tp->netdev);
 		cancel_delayed_work_sync(&tp->hw_phy_work);
 		tp->rtl_ops.unload(tp);
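Note: before the get_registers() change above, a failed control transfer left the caller's buffer untouched, so callers that ignore the return code would act on stale contents. Filling the buffer with 0xff on error yields a deterministic pattern (a common convention, since all-ones is what reads from an absent device tend to look like). The bounce-buffer read, sketched with a hypothetical transfer helper:

    #include <linux/slab.h>
    #include <linux/string.h>

    static int bus_read(void *buf, size_t size) { return 0; }   /* hypothetical */

    static int read_registers(void *data, size_t size)
    {
        void *tmp;
        int ret;

        tmp = kmalloc(size, GFP_KERNEL);    /* DMA-safe bounce buffer */
        if (!tmp)
            return -ENOMEM;

        ret = bus_read(tmp, size);
        if (ret < 0)
            memset(data, 0xff, size);   /* deterministic on failure */
        else
            memcpy(data, tmp, size);

        kfree(tmp);
        return ret;
    }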
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e9fc168bb734..489cba9b284d 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
 			}
 			result = i2400m_barker_db_add(barker);
 			if (result < 0)
-				goto error_add;
+				goto error_parse_add;
 		}
 		kfree(options_orig);
 	}
 	return 0;
 
+error_parse_add:
 error_parse:
+	kfree(options_orig);
 error_add:
 	kfree(i2400m_barker_db);
 	return result;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 1f500cddb3a7..55b713255b8e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
 	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };
 
+const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
+	.name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
+	.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
+	.name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
+	.fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
 const struct iwl_cfg iwl22000_2ax_cfg_jf = {
 	.name = "Intel(R) Dual Band Wireless AX 22000",
 	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 1c1bf1b281cd..6c04f8223aff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
 extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
+extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
+extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
 extern const struct iwl_cfg killer1650x_2ax_cfg;
 extern const struct iwl_cfg killer1650w_2ax_cfg;
 extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index cb22d447fcb8..fe776e35b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 			cpu_to_le32(vif->bss_conf.use_short_slot ?
 				    MAC_FLG_SHORT_SLOT : 0);
 
-	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+	cmd->filter_flags = 0;
 
 	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
 		u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
@@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
 	/* We need the dtim_period to set the MAC as associated */
 	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
 	    !force_assoc_off) {
+		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+		u8 ap_sta_id = mvmvif->ap_sta_id;
 		u32 dtim_offs;
 
 		/*
@@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
 			       dtim_offs);
 
 		ctxt_sta->is_assoc = cpu_to_le32(1);
+
+		/*
+		 * allow multicast data frames only as long as the station is
+		 * authorized, i.e., GTK keys are already installed (if needed)
+		 */
+		if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+			struct ieee80211_sta *sta;
+
+			rcu_read_lock();
+
+			sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+			if (!IS_ERR_OR_NULL(sta)) {
+				struct iwl_mvm_sta *mvmsta =
+					iwl_mvm_sta_from_mac80211(sta);
+
+				if (mvmsta->sta_state ==
+				    IEEE80211_STA_AUTHORIZED)
+					cmd.filter_flags |=
+						cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+			}
+
+			rcu_read_unlock();
+		}
 	} else {
 		ctxt_sta->is_assoc = cpu_to_le32(0);
 
@@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
 				       MAC_FILTER_IN_CONTROL_AND_MGMT |
 				       MAC_FILTER_IN_BEACON |
 				       MAC_FILTER_IN_PROBE_REQUEST |
-				       MAC_FILTER_IN_CRC32);
+				       MAC_FILTER_IN_CRC32 |
+				       MAC_FILTER_ACCEPT_GRP);
 	ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
 
 	/* Allocate sniffer station */
@@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
 	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
-				       MAC_FILTER_IN_PROBE_REQUEST);
+				       MAC_FILTER_IN_PROBE_REQUEST |
+				       MAC_FILTER_ACCEPT_GRP);
 
 	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */
 	cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1c904b5226aa..a7bc00d1296f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 		/* enable beacon filtering */
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 
+		/*
+		 * Now that the station is authorized, i.e., keys were already
+		 * installed, need to indicate to the FW that
+		 * multicast data frames can be forwarded to the driver
+		 */
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
 		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
 				     true);
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
 		   new_state == IEEE80211_STA_ASSOC) {
+		/* Multicast data frames are no longer allowed */
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
 		/* disable beacon filtering */
 		ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
 		WARN_ON(ret &&
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index de711c1160d3..d9ed53b7c768 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1062,7 +1062,28 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
 		else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
 			iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+		else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
+			iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
+		else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
+			iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
 	}
+
+	/* same thing for QuZ... */
+	if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+		if (cfg == &iwl_ax101_cfg_qu_hr)
+			cfg = &iwl_ax101_cfg_quz_hr;
+		else if (cfg == &iwl_ax201_cfg_qu_hr)
+			cfg = &iwl_ax201_cfg_quz_hr;
+		else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+			cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+			cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+			cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+			cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+	}
+
 #endif
 
 	pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f5df5b370d78..db62c8314603 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3602,11 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
 		   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-		   ((trans->cfg != &iwl_ax200_cfg_cc &&
-		     trans->cfg != &killer1650x_2ax_cfg &&
-		     trans->cfg != &killer1650w_2ax_cfg &&
-		     trans->cfg != &iwl_ax201_cfg_quz_hr) ||
-		    trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+		   trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
 		u32 hw_status;
 
 		hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 38d110338987..9ef6b8fe03c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
 	u16 len = byte_cnt;
 	__le16 bc_ent;
 
-	if (trans_pcie->bc_table_dword)
-		len = DIV_ROUND_UP(len, 4);
-
-	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
+	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
 		return;
 
 	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
 	 */
 	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
-	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		/* Starting from 22560, the HW expects bytes */
+		WARN_ON(trans_pcie->bc_table_dword);
+		WARN_ON(len > 0x3FFF);
+		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
 		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
-	else
+	} else {
+		/* Until 22560, the HW expects DW */
+		WARN_ON(!trans_pcie->bc_table_dword);
+		len = DIV_ROUND_UP(len, 4);
+		WARN_ON(len > 0xFFF);
+		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
 		scd_bc_tbl->tfd_offset[idx] = bc_ent;
+	}
 }
 
 /*
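Note: the rewrite above makes the byte-count table encoding explicit per device family: 22560+ parts take the length in bytes in bits 0-13 with the fetch-chunk count at bit 14, earlier parts take the length in dwords in bits 0-11 with the count at bit 12. A worked example, assuming a 1000-byte TFD and num_fetch_chunks = 2:

    /* 22560+ (bytes, chunk count at bit 14):
     *	bc_ent = 1000 | (2 << 14) = 0x03e8 | 0x8000 = 0x83e8
     *
     * pre-22560 (dwords, chunk count at bit 12):
     *	len    = DIV_ROUND_UP(1000, 4) = 250
     *	bc_ent = 250 | (2 << 12) = 0x00fa | 0x2000 = 0x20fa
     */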
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 627ed1fc7b15..645f4d15fb61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
 	.release_buffered_frames = mt76_release_buffered_frames,
 };
 
-static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
 {
 	int err;
 
-	mt76x0_chip_onoff(dev, true, true);
+	mt76x0_chip_onoff(dev, true, reset);
 
 	if (!mt76x02_wait_for_mac(&dev->mt76))
 		return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
 	if (err < 0)
 		goto out_err;
 
-	err = mt76x0u_init_hardware(dev);
+	err = mt76x0u_init_hardware(dev, true);
 	if (err < 0)
 		goto out_err;
 
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
 	if (ret < 0)
 		goto err;
 
-	ret = mt76x0u_init_hardware(dev);
+	ret = mt76x0u_init_hardware(dev, false);
 	if (ret)
 		goto err;
 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index c9b957ac5733..ecbe78b8027b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -6095,6 +6095,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
 	}
 
 	/*
+	 * Clear encryption initialization vectors on start, but keep them
+	 * for watchdog reset. Otherwise we will have wrong IVs and not be
+	 * able to keep connections after reset.
+	 */
+	if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+		for (i = 0; i < 256; i++)
+			rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+
+	/*
 	 * Clear all beacons
 	 */
 	for (i = 0; i < 8; i++)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 7e43690a861c..2b216edd0c7d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
 	DEVICE_STATE_ENABLED_RADIO,
 	DEVICE_STATE_SCANNING,
 	DEVICE_STATE_FLUSHING,
+	DEVICE_STATE_RESET,
 
 	/*
 	 * Driver configuration
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 35414f97a978..9d158237ac67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 
 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
 {
-	int retval;
+	int retval = 0;
 
 	if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
 		/*
 		 * This is special case for ieee80211_restart_hw(), otherwise
 		 * mac80211 never call start() two times in row without stop();
 		 */
+		set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
 		rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
 		rt2x00lib_stop(rt2x00dev);
 	}
@@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
 	 */
 	retval = rt2x00lib_load_firmware(rt2x00dev);
 	if (retval)
-		return retval;
+		goto out;
 
 	/*
 	 * Initialize the device.
 	 */
 	retval = rt2x00lib_initialize(rt2x00dev);
 	if (retval)
-		return retval;
+		goto out;
 
 	rt2x00dev->intf_ap_count = 0;
 	rt2x00dev->intf_sta_count = 0;
@@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
 	/* Enable the radio */
 	retval = rt2x00lib_enable_radio(rt2x00dev);
 	if (retval)
-		return retval;
+		goto out;
 
 	set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
 
-	return 0;
+out:
+	clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+	return retval;
 }
 
 void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1d9940d4e8c7..c9262ffeefe4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
 			nskb = xenvif_alloc_skb(0);
 			if (unlikely(nskb == NULL)) {
+				skb_shinfo(skb)->nr_frags = 0;
 				kfree_skb(skb);
 				xenvif_tx_err(queue, &txreq, extra_count, idx);
 				if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 			/* Failure in xenvif_set_skb_gso is fatal. */
+			skb_shinfo(skb)->nr_frags = 0;
 			kfree_skb(skb);
 			kfree_skb(nskb);
 			break;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c258a1ce4b28..d3d6b7bd6903 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2257,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
 		.vid = 0x1179,
 		.mn = "THNSF5256GPUK TOSHIBA",
 		.quirks = NVME_QUIRK_NO_APST,
+	},
+	{
+		/*
+		 * This LiteON CL1-3D*-Q11 firmware version has a race
+		 * condition associated with actions related to suspend to idle
+		 * LiteON has resolved the problem in future firmware
+		 */
+		.vid = 0x14a4,
+		.fr = "22301111",
+		.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
 	}
 };
 
@@ -2597,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		goto out_free;
 	}
 
+	if (!(ctrl->ops->flags & NVME_F_FABRICS))
+		ctrl->cntlid = le16_to_cpu(id->cntlid);
+
 	if (!ctrl->identified) {
 		int i;
 
@@ -2697,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 			goto out_free;
 		}
 	} else {
-		ctrl->cntlid = le16_to_cpu(id->cntlid);
 		ctrl->hmpre = le32_to_cpu(id->hmpre);
 		ctrl->hmmin = le32_to_cpu(id->hmmin);
 		ctrl->hmminds = le32_to_cpu(id->hmminds);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 888d4543894e..af831d3d15d0 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -428,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 		srcu_read_unlock(&head->srcu, srcu_idx);
 	}
 
+	synchronize_srcu(&ns->head->srcu);
 	kblockd_schedule_work(&ns->head->requeue_work);
 }
 
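Note: the added synchronize_srcu() above blocks until every srcu_read_lock()/srcu_read_unlock() section that began before the call has finished, so the requeue work kicked on the next line appears intended to run only after readers that might still see the pre-update path state are done. The reader/updater pairing, sketched:

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);

    static void reader(void)
    {
        int idx;

        idx = srcu_read_lock(&my_srcu);
        /* ... dereference SRCU-protected state ... */
        srcu_read_unlock(&my_srcu, idx);
    }

    static void updater(void)
    {
        /* ... publish the new state ... */

        /* Wait for all readers that might still see the old state. */
        synchronize_srcu(&my_srcu);

        /* Old state now unobservable; safe to act on the switch. */
    }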
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 778b3a0b6adb..2d678fb968c7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -92,6 +92,11 @@ enum nvme_quirks {
 	 * Broken Write Zeroes.
 	 */
 	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),
+
+	/*
+	 * Force simple suspend/resume path.
+	 */
+	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),
 };
 
 /*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6bd9b1033965..732d5b63ec05 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2876,7 +2876,8 @@ static int nvme_suspend(struct device *dev)
 	 * state (which may not be possible if the link is up).
 	 */
 	if (pm_suspend_via_firmware() || !ctrl->npss ||
-	    !pcie_aspm_enabled(pdev)) {
+	    !pcie_aspm_enabled(pdev) ||
+	    (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
 		nvme_dev_disable(ndev, true);
 		return 0;
 	}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 208aacf39329..44c4ae1abd00 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
 	 */
 	if (ioread32(map + 0x2240c) & 0x2) {
 		pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
-		ret = pci_reset_function(pdev);
+		ret = pci_reset_bus(pdev);
 		if (ret < 0)
 			pci_err(pdev, "Failed to reset GPU: %d\n", ret);
 	}
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e504d255d5ce..430731cdf827 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
  */
 static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
 {
-	struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+	struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
 	struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
 	struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
 
@@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
  */
 static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
 {
-	struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+	struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
 	struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
 	struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
 
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 30de448de802..86d88aec94a1 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
 				USB_CH_IP_CUR_LVL_1P5;
 			break;
 		}
+		/* Else, fall through */
 	case USB_STAT_HM_IDGND:
 		dev_err(di->dev, "USB Type - Charging not allowed\n");
 		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c7ee07ce3615..28db887d38ed 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -629,6 +629,7 @@ struct qeth_seqno {
 struct qeth_reply {
 	struct list_head list;
 	struct completion received;
+	spinlock_t lock;
 	int (*callback)(struct qeth_card *, struct qeth_reply *,
 			unsigned long);
 	u32 seqno;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4d0caeebc802..6502b148541e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -544,6 +544,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 	if (reply) {
 		refcount_set(&reply->refcnt, 1);
 		init_completion(&reply->received);
+		spin_lock_init(&reply->lock);
 	}
 	return reply;
 }
@@ -799,6 +800,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 
 	if (!reply->callback) {
 		rc = 0;
+		goto no_callback;
+	}
+
+	spin_lock_irqsave(&reply->lock, flags);
+	if (reply->rc) {
+		/* Bail out when the requestor has already left: */
+		rc = reply->rc;
 	} else {
 		if (cmd) {
 			reply->offset = (u16)((char *)cmd - (char *)iob->data);
@@ -807,7 +815,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 			rc = reply->callback(card, reply, (unsigned long)iob);
 		}
 	}
+	spin_unlock_irqrestore(&reply->lock, flags);
 
+no_callback:
 	if (rc <= 0)
 		qeth_notify_reply(reply, rc);
 	qeth_put_reply(reply);
@@ -1749,6 +1759,16 @@ static int qeth_send_control_data(struct qeth_card *card,
 		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
 
 	qeth_dequeue_reply(card, reply);
+
+	if (reply_cb) {
+		/* Wait until the callback for a late reply has completed: */
+		spin_lock_irq(&reply->lock);
+		if (rc)
+			/* Zap any callback that's still pending: */
+			reply->rc = rc;
+		spin_unlock_irq(&reply->lock);
+	}
+
 	if (!rc)
 		rc = reply->rc;
 	qeth_put_reply(reply);
@@ -4354,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 	    get_user(req_len, &ureq->hdr.req_len))
 		return -EFAULT;
 
+	/* Sanitize user input, to avoid overflows in iob size calculation: */
+	if (req_len > QETH_BUFSIZE)
+		return -EINVAL;
+
 	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
 	if (!iob)
 		return -ENOMEM;
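The qeth hunks close a race between a late reply callback and a requestor that has already timed out: the reply carries its own spinlock, the waiter records its error under that lock, and the callback bails out if it finds the error already set. A userspace analogue with pthreads (a sketch; struct and function names are invented):

#include <pthread.h>
#include <stdio.h>

/* Analogue of qeth_reply: a result plus a lock that serializes the
 * late-callback path against the timed-out waiter. */
struct reply {
	pthread_mutex_t lock;
	int rc;			/* 0 = still usable, <0 = waiter gave up */
};

/* Waiter timed out: zap any callback that has not run yet. */
static void waiter_timeout(struct reply *r, int err)
{
	pthread_mutex_lock(&r->lock);
	r->rc = err;
	pthread_mutex_unlock(&r->lock);
}

/* Late callback: only touch the reply if the waiter is still there. */
static int reply_callback(struct reply *r)
{
	int rc;

	pthread_mutex_lock(&r->lock);
	if (r->rc)
		rc = r->rc;	/* bail out, requestor already left */
	else
		rc = 0;		/* ...process the reply here... */
	pthread_mutex_unlock(&r->lock);
	return rc;
}

int main(void)
{
	struct reply r = { PTHREAD_MUTEX_INITIALIZER, 0 };

	waiter_timeout(&r, -62 /* -ETIME */);
	printf("callback sees rc=%d\n", reply_callback(&r));
	return 0;
}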
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index abcad097ff2f..f47b4b281b14 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work)
 		pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
 		/* Fall through */
 #endif
+		/* Fall through - only for the #else condition above. */
 	default:
 		error = -ENXIO;
 		pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2c3bb8a966e5..bade2e025ecf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -824,6 +824,7 @@ struct lpfc_hba {
 	uint32_t cfg_cq_poll_threshold;
 	uint32_t cfg_cq_max_proc_limit;
 	uint32_t cfg_fcp_cpu_map;
+	uint32_t cfg_fcp_mq_threshold;
 	uint32_t cfg_hdw_queue;
 	uint32_t cfg_irq_chann;
 	uint32_t cfg_suppress_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ea62322ffe2b..8d8c495b5b60 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
 	     "Embed NVME Command in WQE");
 
 /*
+ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
+ * the driver will advertise it supports to the SCSI layer.
+ *
+ * 0    = Set nr_hw_queues by the number of CPUs or HW queues.
+ * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
+ *
+ * Value range is [0,128]. Default value is 8.
+ */
+LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
+	    LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
+	    "Set the number of SCSI Queues advertised");
+
+/*
  * lpfc_hdw_queue: Set the number of Hardware Queues the driver
  * will advertise it supports to the NVME and  SCSI layers. This also
  * will map to the number of CQ/WQ pairs the driver will create.
@@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_cq_poll_threshold,
 	&dev_attr_lpfc_cq_max_proc_limit,
 	&dev_attr_lpfc_fcp_cpu_map,
+	&dev_attr_lpfc_fcp_mq_threshold,
 	&dev_attr_lpfc_hdw_queue,
 	&dev_attr_lpfc_irq_chann,
 	&dev_attr_lpfc_suppress_rsp,
@@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	/* Initialize first burst. Target vs Initiator are different. */
 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
 	lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+	lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
 	lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
 	lpfc_irq_chann_init(phba, lpfc_irq_chann);
 	lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a7549ae32542..1ac98becb5ba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_cmd_len = 16;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
-			shost->nr_hw_queues = phba->cfg_hdw_queue;
-		else
-			shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+		if (!phba->cfg_fcp_mq_threshold ||
+		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+					    phba->cfg_fcp_mq_threshold);
 
 		shost->dma_boundary =
 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
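The lpfc hunk above clamps the user tunable to the hardware queue count and then caps the advertised nr_hw_queues at twice the NUMA node count. A stand-alone sketch of the same clamping arithmetic (values and names invented for illustration):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirror of the lpfc logic: 0 or an oversized threshold falls back to
 * the hardware queue count, and the advertised queue count is capped
 * at twice the number of NUMA nodes. */
static int nr_hw_queues(int threshold, int hdw_queues, int numa_nodes)
{
	if (!threshold || threshold > hdw_queues)
		threshold = hdw_queues;

	return MIN(2 * numa_nodes, threshold);
}

int main(void)
{
	/* e.g. threshold 8 (the default), 32 HW queues, 2 NUMA nodes */
	printf("nr_hw_queues = %d\n", nr_hw_queues(8, 32, 2));
	/* threshold 0 means "use the HW queue count" before capping */
	printf("nr_hw_queues = %d\n", nr_hw_queues(0, 32, 2));
	return 0;
}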
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3aeca387b22a..329f7aa7e169 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -44,6 +44,11 @@
 #define LPFC_HBA_HDWQ_MAX	128
 #define LPFC_HBA_HDWQ_DEF	0
 
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN	0
+#define LPFC_FCP_MQ_THRESHOLD_MAX	128
+#define LPFC_FCP_MQ_THRESHOLD_DEF	8
+
 /* Common buffer size to accomidate SCSI and NVME IO buffers */
 #define LPFC_COMMON_IO_BUF_SZ	768
 
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8d560c562e9c..6b7b390b2e52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
 	    vha->gnl.ldma);
 
+	vha->gnl.l = NULL;
+
 	vfree(vha->scan.l);
 
 	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2e58cff9d200..98e60a34afd9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3440,6 +3440,12 @@ skip_dpc:
 	return 0;
 
 probe_failed:
+	if (base_vha->gnl.l) {
+		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+				base_vha->gnl.l, base_vha->gnl.ldma);
+		base_vha->gnl.l = NULL;
+	}
+
 	if (base_vha->timer_active)
 		qla2x00_stop_timer(base_vha);
 	base_vha->flags.online = 0;
@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	if (!atomic_read(&pdev->enable_cnt)) {
 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
 		    base_vha->gnl.l, base_vha->gnl.ldma);
-
+		base_vha->gnl.l = NULL;
 		scsi_host_put(base_vha->host);
 		kfree(ha);
 		pci_set_drvdata(pdev, NULL);
@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	dma_free_coherent(&ha->pdev->dev,
 		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
 
+	base_vha->gnl.l = NULL;
+
 	vfree(base_vha->scan.l);
 
 	if (IS_QLAFX00(ha))
@@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 		    "Alloc failed for scan database.\n");
 		dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
 		    vha->gnl.l, vha->gnl.ldma);
+		vha->gnl.l = NULL;
 		scsi_remove_host(vha->host);
 		return NULL;
 	}
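All four qla2xxx hunks apply one idiom: after freeing a DMA buffer, the stale pointer is set to NULL so any later cleanup path can test it and not free it twice. A userspace sketch of the same discipline (names invented):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *buf;
};

/* Free the buffer and poison the pointer so a second call is a no-op. */
static void ctx_release(struct ctx *c)
{
	if (c->buf) {
		free(c->buf);
		c->buf = NULL;	/* the actual fix: no dangling pointer */
	}
}

int main(void)
{
	struct ctx c = { .buf = malloc(64) };

	ctx_release(&c);	/* frees */
	ctx_release(&c);	/* safe no-op instead of a double free */
	printf("buf=%p\n", c.buf);
	return 0;
}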
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e274053109d0..029da74bb2f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 					 struct ufs_vreg *vreg)
 {
+	if (!vreg)
+		return 0;
+
 	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
index de2e62c3310a..e3eb19b85fa4 100644
--- a/drivers/soc/ixp4xx/Kconfig
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
+if ARCH_IXP4XX || COMPILE_TEST
+
 menu "IXP4xx SoC drivers"
 
 config IXP4XX_QMGR
@@ -15,3 +17,5 @@ config IXP4XX_NPE
 	  and is automatically selected by Ethernet and HSS drivers.
 
 endmenu
+
+endif
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index bb77c220b6f8..ccc6d53fe788 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void)
 }
 
 #ifdef CONFIG_SUSPEND
-struct wkup_m3_wakeup_src rtc_wake_src(void)
+static struct wkup_m3_wakeup_src rtc_wake_src(void)
 {
 	u32 i;
 
@@ -157,7 +157,7 @@ static struct wkup_m3_wakeup_src rtc_wake_src(void)
 	return rtc_ext_wakeup;
 }
 
-int am33xx_rtc_only_idle(unsigned long wfi_flags)
+static int am33xx_rtc_only_idle(unsigned long wfi_flags)
 {
 	omap_rtc_power_off_program(&omap_rtc->dev);
 	am33xx_do_wfi_sram(wfi_flags);
@@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state)
 	if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
 		nvmem = devm_nvmem_device_get(&omap_rtc->dev,
 					      "omap_rtc_scratch0");
-		if (nvmem)
+		if (!IS_ERR(nvmem))
 			nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
 					   (void *)&rtc_magic_val);
 		rtc_only_idle = 1;
@@ -278,9 +278,12 @@ static void am33xx_pm_end(void)
 	struct nvmem_device *nvmem;
 
 	nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
+	if (IS_ERR(nvmem))
+		return;
+
 	m3_ipc->ops->finish_low_power(m3_ipc);
 	if (rtc_only_idle) {
-		if (retrigger_irq)
+		if (retrigger_irq) {
 			/*
 			 * 32 bits of Interrupt Set-Pending correspond to 32
 			 * 32 interrupts. Compute the bit offset of the
@@ -291,8 +294,10 @@ static void am33xx_pm_end(void)
 			writel_relaxed(1 << (retrigger_irq & 31),
 				       gic_dist_base + GIC_INT_SET_PENDING_BASE
 				       + retrigger_irq / 32 * 4);
-		nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
-				   (void *)&val);
+		}
+
+		nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+				   (void *)&val);
 	}
 
 	rtc_only_idle = 0;
@@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void)
 
 	nvmem = devm_nvmem_device_get(&omap_rtc->dev,
 				      "omap_rtc_scratch0");
-	if (nvmem) {
+	if (!IS_ERR(nvmem)) {
 		nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
 				  4, (void *)&rtc_magic_val);
 		if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
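The pm33xx fix matters because devm_nvmem_device_get() reports failure with an ERR_PTR-encoded pointer, not NULL, so the old `if (nvmem)` check always passed. A simplified userspace sketch of the encoding convention (constants and helpers modeled on, but not copied from, the kernel's err.h):

#include <stdio.h>

/* Simplified ERR_PTR convention: errors live in the top page of the
 * address space, so a failed lookup is non-NULL but still detectable. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *dev = ERR_PTR(-19);	/* -ENODEV */

	if (dev)			/* WRONG: passes even on failure */
		printf("NULL check says: valid\n");
	if (IS_ERR(dev))		/* right check, as in the pm33xx fix */
		printf("IS_ERR says: error %ld\n", PTR_ERR(dev));
	return 0;
}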
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 04eda111920e..661bb9358364 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 	struct se_cmd *se_cmd = cmd->se_cmd;
 	struct tcmu_dev *udev = cmd->tcmu_dev;
 	bool read_len_valid = false;
-	uint32_t read_len = se_cmd->data_length;
+	uint32_t read_len;
 
 	/*
 	 * cmd has been completed already from timeout, just reclaim
 	 * data area space and free cmd
 	 */
-	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+		WARN_ON_ONCE(se_cmd);
 		goto out;
+	}
 
 	list_del_init(&cmd->queue_entry);
 
@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 		goto done;
 	}
 
+	read_len = se_cmd->data_length;
 	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
 	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
 		read_len_valid = true;
@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 		 */
 		scsi_status = SAM_STAT_CHECK_CONDITION;
 		list_del_init(&cmd->queue_entry);
+		cmd->se_cmd = NULL;
 	} else {
 		list_del_init(&cmd->queue_entry);
 		idr_remove(&udev->commands, id);
@@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 
 		idr_remove(&udev->commands, i);
 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+			WARN_ON(!cmd->se_cmd);
 			list_del_init(&cmd->queue_entry);
 			if (err_level == 1) {
 				/*
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 053432d79bf7..8f18e7b6cadf 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 	struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
 	unsigned long flags;
 
-	spin_lock_irqsave(&ci->lock, flags);
-	ci->gadget.speed = USB_SPEED_UNKNOWN;
-	ci->remote_wakeup = 0;
-	ci->suspended = 0;
-	spin_unlock_irqrestore(&ci->lock, flags);
-
 	/* flush all endpoints */
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_fifo_flush(ep);
@@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
 		ci->status = NULL;
 	}
 
+	spin_lock_irqsave(&ci->lock, flags);
+	ci->gadget.speed = USB_SPEED_UNKNOWN;
+	ci->remote_wakeup = 0;
+	ci->suspended = 0;
+	spin_unlock_irqrestore(&ci->lock, flags);
+
 	return 0;
 }
 
@@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep)
 		return -EBUSY;
 
 	spin_lock_irqsave(hwep->lock, flags);
+	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return 0;
+	}
 
 	/* only internal SW should disable ctrl endpts */
 
@@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
 		return -EINVAL;
 
 	spin_lock_irqsave(hwep->lock, flags);
+	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return 0;
+	}
 	retval = _ep_queue(ep, req, gfp_flags);
 	spin_unlock_irqrestore(hwep->lock, flags);
 	return retval;
@@ -1415,8 +1423,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 		return -EINVAL;
 
 	spin_lock_irqsave(hwep->lock, flags);
-
-	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+	if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
+		hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
 	list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
 		dma_pool_free(hwep->td_pool, node->ptr, node->dma);
@@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
 	}
 
 	spin_lock_irqsave(hwep->lock, flags);
+	if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return;
+	}
 
 	hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
@@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
 	int ret = 0;
 
 	spin_lock_irqsave(&ci->lock, flags);
+	if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
+		spin_unlock_irqrestore(&ci->lock, flags);
+		return 0;
+	}
 	if (!ci->remote_wakeup) {
 		ret = -EOPNOTSUPP;
 		goto out;
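Every chipidea hunk inserts the same guard: take the endpoint lock, and if the gadget has already been torn down (speed reset to unknown), drop the lock and return before touching hardware. A pthread analogue of that check-state-under-lock pattern (types and names are illustrative):

#include <pthread.h>
#include <stdio.h>

enum speed { SPEED_UNKNOWN, SPEED_HIGH };

struct gadget {
	pthread_mutex_t lock;
	enum speed speed;
};

/* Guard pattern from the chipidea hunks: state is sampled under the
 * same lock that the teardown path holds while resetting it. */
static int ep_op(struct gadget *g)
{
	pthread_mutex_lock(&g->lock);
	if (g->speed == SPEED_UNKNOWN) {
		pthread_mutex_unlock(&g->lock);
		return 0;	/* nothing to do, device already gone */
	}
	/* ...touch the (still valid) hardware here... */
	pthread_mutex_unlock(&g->lock);
	return 1;
}

int main(void)
{
	struct gadget g = { PTHREAD_MUTEX_INITIALIZER, SPEED_UNKNOWN };

	printf("op ran: %d\n", ep_op(&g));
	return 0;
}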
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index a7824a51f86d..70afb2ca1eab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
 {
 	struct wdm_device *desc = file->private_data;
 
-	wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
+	wait_event(desc->wait,
+			/*
+			 * needs both flags. We cannot do with one
+			 * because resetting it would cause a race
+			 * with write() yet we need to signal
+			 * a disconnect
+			 */
+			!test_bit(WDM_IN_USE, &desc->flags) ||
+			test_bit(WDM_DISCONNECTING, &desc->flags));
 
 	/* cannot dereference desc->intf if WDM_DISCONNECTING */
-	if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
+	if (test_bit(WDM_DISCONNECTING, &desc->flags))
+		return -ENODEV;
+	if (desc->werr < 0)
 		dev_err(&desc->intf->dev, "Error in flush path: %d\n",
 			desc->werr);
 
@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
 	spin_lock_irqsave(&desc->iuspin, flags);
 	set_bit(WDM_DISCONNECTING, &desc->flags);
 	set_bit(WDM_READ, &desc->flags);
-	/* to terminate pending flushes */
-	clear_bit(WDM_IN_USE, &desc->flags);
 	spin_unlock_irqrestore(&desc->iuspin, flags);
 	wake_up_all(&desc->wait);
 	mutex_lock(&desc->rlock);
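The cdc-wdm fix stops clearing WDM_IN_USE on disconnect and instead widens the wait condition: a flusher must wake when either the transfer finishes or the device disappears. A condition-variable analogue of that compound wait (a sketch; the struct and error value are illustrative):

#include <pthread.h>
#include <stdio.h>

struct dev_state {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int in_use;		/* transfer in flight */
	int disconnected;	/* device gone */
};

/* flush(): sleep until the I/O drains OR the device disconnects;
 * waiting on only one of the two conditions can strand the sleeper. */
static int dev_flush(struct dev_state *d)
{
	pthread_mutex_lock(&d->lock);
	while (d->in_use && !d->disconnected)
		pthread_cond_wait(&d->wait, &d->lock);
	pthread_mutex_unlock(&d->lock);

	return d->disconnected ? -19 /* -ENODEV */ : 0;
}

int main(void)
{
	struct dev_state d = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 1
	};

	printf("flush -> %d\n", dev_flush(&d));
	return 0;
}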
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 7ff831f2fd21..dcd7066ffba2 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -2359,8 +2359,11 @@ static int usbtmc_probe(struct usb_interface *intf,
 		goto err_put;
 	}
 
+	retcode = -EINVAL;
 	data->bulk_in = bulk_in->bEndpointAddress;
 	data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
+	if (!data->wMaxPacketSize)
+		goto err_put;
 	dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
 
 	data->bulk_out = bulk_out->bEndpointAddress;
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 03bee698d7eb..9e26b0143a59 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		/* EHCI, OHCI */
 		hcd->rsrc_start = pci_resource_start(dev, 0);
 		hcd->rsrc_len = pci_resource_len(dev, 0);
-		if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
-				driver->description)) {
+		if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
+				hcd->rsrc_len, driver->description)) {
 			dev_dbg(&dev->dev, "controller already in use\n");
 			retval = -EBUSY;
 			goto put_hcd;
 		}
-		hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
+		hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
+				hcd->rsrc_len);
 		if (hcd->regs == NULL) {
 			dev_dbg(&dev->dev, "error mapping memory\n");
 			retval = -EFAULT;
-			goto release_mem_region;
+			goto put_hcd;
 		}
 
 	} else {
@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
 			hcd->rsrc_start = pci_resource_start(dev, region);
 			hcd->rsrc_len = pci_resource_len(dev, region);
-			if (request_region(hcd->rsrc_start, hcd->rsrc_len,
-					driver->description))
+			if (devm_request_region(&dev->dev, hcd->rsrc_start,
+					hcd->rsrc_len, driver->description))
 				break;
 		}
 		if (region == PCI_ROM_RESOURCE) {
@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	}
 
 	if (retval != 0)
-		goto unmap_registers;
+		goto put_hcd;
 	device_wakeup_enable(hcd->self.controller);
 
 	if (pci_dev_run_wake(dev))
 		pm_runtime_put_noidle(&dev->dev);
 	return retval;
 
-unmap_registers:
-	if (driver->flags & HCD_MEMORY) {
-		iounmap(hcd->regs);
-release_mem_region:
-		release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-	} else
-		release_region(hcd->rsrc_start, hcd->rsrc_len);
 put_hcd:
 	usb_put_hcd(hcd);
 disable_pci:
@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
 		dev_set_drvdata(&dev->dev, NULL);
 		up_read(&companions_rwsem);
 	}
-
-	if (hcd->driver->flags & HCD_MEMORY) {
-		iounmap(hcd->regs);
-		release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-	} else {
-		release_region(hcd->rsrc_start, hcd->rsrc_len);
-	}
-
 	usb_put_hcd(hcd);
 	pci_disable_device(dev);
 }
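The hcd-pci conversion swaps manual request/ioremap/release pairs for device-managed (devm) variants, which is why both error labels and the remove-path cleanup disappear: resources registered against the device are torn down automatically. A toy userspace model of the idea, assuming nothing beyond libc (the registry and names are invented):

#include <stdio.h>
#include <stdlib.h>

/* Toy version of devm: each resource registers a destructor with its
 * owning device, and one call releases everything in LIFO order. */
struct devres {
	void (*release)(void *);
	void *data;
	struct devres *next;
};

struct device {
	struct devres *res;
};

static void *devm_alloc(struct device *dev, size_t n)
{
	struct devres *dr = malloc(sizeof(*dr));
	void *p = malloc(n);

	dr->release = free;
	dr->data = p;
	dr->next = dev->res;
	dev->res = dr;
	return p;
}

static void device_release_all(struct device *dev)
{
	while (dev->res) {
		struct devres *dr = dev->res;

		dev->res = dr->next;
		dr->release(dr->data);	/* no per-error-path unwinding */
		free(dr);
	}
}

int main(void)
{
	struct device dev = { NULL };

	devm_alloc(&dev, 32);
	devm_alloc(&dev, 64);
	device_release_all(&dev);	/* frees both, LIFO */
	printf("released\n");
	return 0;
}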
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1eb8d17e19db..4de91653a2c7 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -419,8 +419,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
  * other cases where the next software may expect clean state from the
  * "firmware".  this is bus-neutral, unlike shutdown() methods.
  */
-static void
-ohci_shutdown (struct usb_hcd *hcd)
+static void _ohci_shutdown(struct usb_hcd *hcd)
 {
 	struct ohci_hcd *ohci;
 
@@ -436,6 +435,16 @@ ohci_shutdown (struct usb_hcd *hcd)
 	ohci->rh_state = OHCI_RH_HALTED;
 }
 
+static void ohci_shutdown(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ohci->lock, flags);
+	_ohci_shutdown(hcd);
+	spin_unlock_irqrestore(&ohci->lock, flags);
+}
+
 /*-------------------------------------------------------------------------*
  * HC functions
  *-------------------------------------------------------------------------*/
@@ -760,7 +769,7 @@ static void io_watchdog_func(struct timer_list *t)
 died:
 			usb_hc_died(ohci_to_hcd(ohci));
 			ohci_dump(ohci);
-			ohci_shutdown(ohci_to_hcd(ohci));
+			_ohci_shutdown(ohci_to_hcd(ohci));
 			goto done;
 	} else {
 		/* No write back because the done queue was empty */
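The ohci-hcd change splits shutdown into a bare _ohci_shutdown() and a locking ohci_shutdown() wrapper, so a caller that already holds the lock (the watchdog) can use the bare version without deadlocking. A pthread analogue of that locked-wrapper convention (names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Bare worker: caller must hold 'lock'. */
static void _do_shutdown(void)
{
	printf("shutting down (lock held by caller)\n");
}

/* Public wrapper: takes the lock itself. */
static void do_shutdown(void)
{
	pthread_mutex_lock(&lock);
	_do_shutdown();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	do_shutdown();		/* external caller */

	pthread_mutex_lock(&lock);
	_do_shutdown();		/* watchdog-style caller, lock already held */
	pthread_mutex_unlock(&lock);
	return 0;
}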
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 8616c52849c6..2b0ccd150209 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
 	return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
 		of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
 		of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
-		of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+		of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
 }
 
 static int xhci_rcar_is_gen3(struct device *dev)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index dafc65911fc0..2ff7c911fbd0 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1194,6 +1194,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
 
 	tegra_xusb_config(tegra, regs);
 
+	/*
+	 * The XUSB Falcon microcontroller can only address 40 bits, so set
+	 * the DMA mask accordingly.
+	 */
+	err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+		goto put_rpm;
+	}
+
 	err = tegra_xusb_load_firmware(tegra);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index cc794e25a0b6..1d9ce9cbc831 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
 
 static int auto_delink_en = 1;
 module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
+MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
 
 #ifdef CONFIG_REALTEK_AUTOPM
 static int ss_en = 1;
@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
 			goto INIT_FAIL;
 	}
 
-	if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
-	    CHECK_FW_VER(chip, 0x5901))
-		SET_AUTO_DELINK(chip);
-	if (STATUS_LEN(chip) == 16) {
-		if (SUPPORT_AUTO_DELINK(chip))
+	if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
+	    CHECK_PID(chip, 0x0159)) {
+		if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+		    CHECK_FW_VER(chip, 0x5901))
 			SET_AUTO_DELINK(chip);
+		if (STATUS_LEN(chip) == 16) {
+			if (SUPPORT_AUTO_DELINK(chip))
+				SET_AUTO_DELINK(chip);
+		}
 	}
 #ifdef CONFIG_REALTEK_AUTOPM
 	if (ss_en)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ea0d27a94afe..1cd9b6305b06 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,7 +2100,7 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
 		US_FL_IGNORE_RESIDUE ),
 
 /* Reported by Michael Büsch <m@bues.ch> */
-UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0117,
 		"JMicron",
 		"USB to ATA/ATAPI Bridge",
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 166b28562395..96562744101c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1439,7 +1439,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
 			else if ((pdo_min_voltage(pdo[i]) ==
 				  pdo_min_voltage(pdo[i - 1])) &&
 				 (pdo_max_voltage(pdo[i]) ==
-				  pdo_min_voltage(pdo[i - 1])))
+				  pdo_max_voltage(pdo[i - 1])))
 				return PDO_ERR_DUPE_PDO;
 			break;
 		/*
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 92f23e3bc27a..7cacae5a8797 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt)
 		case 'M':
 		case 'm':
 			size *= 1024;
+			/* Fall through */
 		case 'K':
 		case 'k':
 			size *= 1024;
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 4eacfb1ce1ac..eb729d704836 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
 		soft_margin = new_margin;
 		reload = soft_margin * (mem_fclk_21285 / 256);
 		watchdog_ping();
-		/* Fall */
+		/* Fall through */
 	case WDIOC_GETTIMEOUT:
 		ret = put_user(soft_margin, int_arg);
 		break;
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d262..fd5133e26a38 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 		cell = rcu_dereference_raw(net->ws_cell);
 		if (cell) {
 			afs_get_cell(cell);
+			ret = 0;
 			break;
 		}
 		ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 
 	done_seqretry(&net->cells_lock, seq);
 
+	if (ret != 0 && cell)
+		afs_put_cell(net, cell);
+
 	return ret == 0 ? cell : ERR_PTR(ret);
 }
 
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 81207dc3c997..139b4e3cc946 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -959,7 +959,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
 				 inode ? AFS_FS_I(inode) : NULL);
 	} else {
 		trace_afs_lookup(dvnode, &dentry->d_name,
-				 inode ? AFS_FS_I(inode) : NULL);
+				 IS_ERR_OR_NULL(inode) ? NULL
+				 : AFS_FS_I(inode));
 	}
 	return d;
 }
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 2575503170fc..ca2452806ebf 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -2171,7 +2171,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
 	       key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
 
 	size = round_up(acl->size, 4);
-	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
 				   sizeof(__be32) * 2 +
 				   sizeof(struct yfs_xdr_YFSFid) +
 				   sizeof(__be32) + size,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e078cc55b989..b3c8b886bf64 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ get_more_pages:
 			if (page_offset(page) >= ceph_wbc.i_size) {
 				dout("%p page eof %llu\n",
 				     page, ceph_wbc.i_size);
-				if (ceph_wbc.size_stable ||
-				    page_offset(page) >= i_size_read(inode))
+				if ((ceph_wbc.size_stable ||
+				    page_offset(page) >= i_size_read(inode)) &&
+				    clear_page_dirty_for_io(page))
 					mapping->a_ops->invalidatepage(page,
 								0, PAGE_SIZE);
 				unlock_page(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d98dcd976c80..ce0f5658720a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1301,6 +1301,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 {
 	struct ceph_inode_info *ci = cap->ci;
 	struct inode *inode = &ci->vfs_inode;
+	struct ceph_buffer *old_blob = NULL;
 	struct cap_msg_args arg;
 	int held, revoking;
 	int wake = 0;
@@ -1365,7 +1366,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 	ci->i_requested_max_size = arg.max_size;
 
 	if (flushing & CEPH_CAP_XATTR_EXCL) {
-		__ceph_build_xattrs_blob(ci);
+		old_blob = __ceph_build_xattrs_blob(ci);
 		arg.xattr_version = ci->i_xattrs.version;
 		arg.xattr_buf = ci->i_xattrs.blob;
 	} else {
@@ -1409,6 +1410,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
 
 	spin_unlock(&ci->i_ceph_lock);
 
+	ceph_buffer_put(old_blob);
+
 	ret = send_cap_msg(&arg);
 	if (ret < 0) {
 		dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 791f84a13bb8..18500edefc56 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -736,6 +736,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	int issued, new_issued, info_caps;
 	struct timespec64 mtime, atime, ctime;
 	struct ceph_buffer *xattr_blob = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	struct ceph_string *pool_ns = NULL;
 	struct ceph_cap *new_cap = NULL;
 	int err = 0;
@@ -881,7 +882,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
 		if (ci->i_xattrs.blob)
-			ceph_buffer_put(ci->i_xattrs.blob);
+			old_blob = ci->i_xattrs.blob;
 		ci->i_xattrs.blob = xattr_blob;
 		if (xattr_blob)
 			memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1022,8 +1023,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
 out:
 	if (new_cap)
 		ceph_put_cap(mdsc, new_cap);
-	if (xattr_blob)
-		ceph_buffer_put(xattr_blob);
+	ceph_buffer_put(old_blob);
+	ceph_buffer_put(xattr_blob);
 	ceph_put_string(pool_ns);
 	return err;
 }
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ac9b53b89365..5083e238ad15 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 		req->r_wait_for_completion = ceph_lock_wait_for_completion;
 
 	err = ceph_mdsc_do_request(mdsc, inode, req);
-
-	if (operation == CEPH_MDS_OP_GETFILELOCK) {
+	if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
 		fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
 		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
 			fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4c6494eb02b5..ccfcc66aaf44 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	struct inode *inode = &ci->vfs_inode;
 	struct ceph_cap_snap *capsnap;
 	struct ceph_snap_context *old_snapc, *new_snapc;
+	struct ceph_buffer *old_blob = NULL;
 	int used, dirty;
 
 	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	capsnap->gid = inode->i_gid;
 
 	if (dirty & CEPH_CAP_XATTR_EXCL) {
-		__ceph_build_xattrs_blob(ci);
+		old_blob = __ceph_build_xattrs_blob(ci);
 		capsnap->xattr_blob =
 			ceph_buffer_get(ci->i_xattrs.blob);
 		capsnap->xattr_version = ci->i_xattrs.version;
@@ -584,6 +585,7 @@ update_snapc:
 	}
 	spin_unlock(&ci->i_ceph_lock);
 
+	ceph_buffer_put(old_blob);
 	kfree(capsnap);
 	ceph_put_snap_context(old_snapc);
 }
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d2352fd95dbc..6b9f1ee7de85 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -926,7 +926,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
 int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
 ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
 extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
 extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
 extern const struct xattr_handler *ceph_xattr_handlers[];
 
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 37b458a9af3a..939eab7aa219 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -754,12 +754,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
 
 /*
  * If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place.  It returns the old i_xattrs.blob (or NULL) so
+ * that it can be freed by the caller as the i_ceph_lock is likely to be
+ * held.
  */
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 {
 	struct rb_node *p;
 	struct ceph_inode_xattr *xattr = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	void *dest;
 
 	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -790,12 +793,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
 
 		if (ci->i_xattrs.blob)
-			ceph_buffer_put(ci->i_xattrs.blob);
+			old_blob = ci->i_xattrs.blob;
 		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
 		ci->i_xattrs.prealloc_blob = NULL;
 		ci->i_xattrs.dirty = false;
 		ci->i_xattrs.version++;
 	}
+
+	return old_blob;
 }
 
 static inline int __get_request_mask(struct inode *in) {
@@ -1036,6 +1041,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
 	struct ceph_cap_flush *prealloc_cf = NULL;
+	struct ceph_buffer *old_blob = NULL;
 	int issued;
 	int err;
 	int dirty = 0;
@@ -1109,13 +1115,15 @@ retry:
 		struct ceph_buffer *blob;
 
 		spin_unlock(&ci->i_ceph_lock);
-		dout(" preaallocating new blob size=%d\n", required_blob_size);
+		ceph_buffer_put(old_blob); /* Shouldn't be required */
+		dout(" pre-allocating new blob size=%d\n", required_blob_size);
 		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
 		if (!blob)
 			goto do_sync_unlocked;
 		spin_lock(&ci->i_ceph_lock);
+		/* prealloc_blob can't be released while holding i_ceph_lock */
 		if (ci->i_xattrs.prealloc_blob)
-			ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+			old_blob = ci->i_xattrs.prealloc_blob;
 		ci->i_xattrs.prealloc_blob = blob;
 		goto retry;
 	}
@@ -1131,6 +1139,7 @@ retry:
 	}
 
 	spin_unlock(&ci->i_ceph_lock);
+	ceph_buffer_put(old_blob);
 	if (lock_snap_rwsem)
 		up_read(&mdsc->snap_rwsem);
 	if (dirty)
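The ceph series applies one technique across caps.c, inode.c, snap.c and xattr.c: instead of dropping a buffer reference while i_ceph_lock is held, each site detaches the old pointer under the lock and puts the reference only after unlocking. A pthread sketch of the defer-free-past-unlock pattern (struct and names invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct inode_info {
	pthread_mutex_t lock;
	void *blob;
};

/* Swap in a new blob under the lock; free the old one only after the
 * lock is dropped, since freeing may sleep or re-enter the lock. */
static void replace_blob(struct inode_info *ci, void *new_blob)
{
	void *old_blob;

	pthread_mutex_lock(&ci->lock);
	old_blob = ci->blob;	/* detach, do not free here */
	ci->blob = new_blob;
	pthread_mutex_unlock(&ci->lock);

	free(old_blob);		/* safe: no lock held anymore */
}

int main(void)
{
	struct inode_info ci = { PTHREAD_MUTEX_INITIALIZER, malloc(16) };

	replace_blob(&ci, malloc(32));
	free(ci.blob);
	return 0;
}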
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4b21a90015a9..99caf77df4a2 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.21"
+#define CIFS_VERSION   "2.22"
 #endif	/* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e23234207fc2..592a6cea2b79 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -579,6 +579,7 @@ extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
 			  unsigned int *len, unsigned int *offset);
 
 void extract_unc_hostname(const char *unc, const char **h, size_t *len);
+int copy_path_name(char *dst, const char *src);
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
 static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e2f95965065d..3907653e63c7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -942,10 +942,8 @@ PsxDelete:
 					      PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else { /* BB add path length overrun check */
-		name_len = strnlen(fileName, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->FileName, fileName, name_len);
+	} else {
+		name_len = copy_path_name(pSMB->FileName, fileName);
 	}
 
 	params = 6 + name_len;
@@ -1015,10 +1013,8 @@ DelFileRetry:
 					      remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve check for buffer overruns BB */
-		name_len = strnlen(name, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->fileName, name, name_len);
+	} else {
+		name_len = copy_path_name(pSMB->fileName, name);
 	}
 	pSMB->SearchAttributes =
 	    cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
@@ -1062,10 +1058,8 @@ RmDirRetry:
 					      remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve check for buffer overruns BB */
-		name_len = strnlen(name, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->DirName, name, name_len);
+	} else {
+		name_len = copy_path_name(pSMB->DirName, name);
 	}
 
 	pSMB->BufferFormat = 0x04;
@@ -1107,10 +1101,8 @@ MkDirRetry:
 					      remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve check for buffer overruns BB */
-		name_len = strnlen(name, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->DirName, name, name_len);
+	} else {
+		name_len = copy_path_name(pSMB->DirName, name);
 	}
 
 	pSMB->BufferFormat = 0x04;
@@ -1157,10 +1149,8 @@ PsxCreat:
 				       PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {	/* BB improve the check for buffer overruns BB */
-		name_len = strnlen(name, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->FileName, name, name_len);
+	} else {
+		name_len = copy_path_name(pSMB->FileName, name);
 	}
 
 	params = 6 + name_len;
@@ -1324,11 +1314,9 @@ OldOpenRetry:
 				      fileName, PATH_MAX, nls_codepage, remap);
 		name_len++;	/* trailing null */
 		name_len *= 2;
-	} else {		/* BB improve check for buffer overruns BB */
+	} else {
 		count = 0;	/* no pad */
-		name_len = strnlen(fileName, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->fileName, fileName, name_len);
+		name_len = copy_path_name(pSMB->fileName, fileName);
 	}
 	if (*pOplock & REQ_OPLOCK)
 		pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
@@ -1442,11 +1430,8 @@ openRetry:
 		/* BB improve check for buffer overruns BB */
 		/* no pad */
 		count = 0;
-		name_len = strnlen(path, PATH_MAX);
-		/* trailing null */
-		name_len++;
+		name_len = copy_path_name(req->fileName, path);
 		req->NameLength = cpu_to_le16(name_len);
-		strncpy(req->fileName, path, name_len);
 	}
 
 	if (*oplock & REQ_OPLOCK)
@@ -2812,15 +2797,10 @@ renameRetry:
 					remap);
 		name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
 		name_len2 *= 2;	/* convert to bytes */
-	} else {	/* BB improve the check for buffer overruns BB */
-		name_len = strnlen(from_name, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->OldFileName, from_name, name_len);
-		name_len2 = strnlen(to_name, PATH_MAX);
-		name_len2++;	/* trailing null */
+	} else {
+		name_len = copy_path_name(pSMB->OldFileName, from_name);
+		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
 		pSMB->OldFileName[name_len] = 0x04;	/* 2nd buffer format */
-		strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
-		name_len2++;	/* trailing null */
 		name_len2++;	/* signature byte */
 	}
 
@@ -2962,15 +2942,10 @@ copyRetry:
 					toName, PATH_MAX, nls_codepage, remap);
 		name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
 		name_len2 *= 2; /* convert to bytes */
-	} else { 	/* BB improve the check for buffer overruns BB */
-		name_len = strnlen(fromName, PATH_MAX);
-		name_len++;     /* trailing null */
-		strncpy(pSMB->OldFileName, fromName, name_len);
-		name_len2 = strnlen(toName, PATH_MAX);
-		name_len2++;    /* trailing null */
+	} else {
+		name_len = copy_path_name(pSMB->OldFileName, fromName);
 		pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
-		strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
-		name_len2++;    /* trailing null */
+		name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
 		name_len2++;    /* signature byte */
 	}
 
@@ -3021,10 +2996,8 @@ createSymLinkRetry:
 		name_len++;	/* trailing null */
 		name_len *= 2;
 
-	} else {	/* BB improve the check for buffer overruns BB */
-		name_len = strnlen(fromName, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->FileName, fromName, name_len);
+	} else {
+		name_len = copy_path_name(pSMB->FileName, fromName);
 	}
 	params = 6 + name_len;
 	pSMB->MaxSetupCount = 0;
@@ -3044,10 +3017,8 @@ createSymLinkRetry:
 					PATH_MAX, nls_codepage, remap);
 		name_len_target++;	/* trailing null */
 		name_len_target *= 2;
-	} else {	/* BB improve the check for buffer overruns BB */
-		name_len_target = strnlen(toName, PATH_MAX);
-		name_len_target++;	/* trailing null */
-		strncpy(data_offset, toName, name_len_target);
+	} else {
+		name_len_target = copy_path_name(data_offset, toName);
 	}
 
 	pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3109,10 +3080,8 @@ createHardLinkRetry:
 		name_len++;	/* trailing null */
 		name_len *= 2;
 
-	} else {	/* BB improve the check for buffer overruns BB */
-		name_len = strnlen(toName, PATH_MAX);
-		name_len++;	/* trailing null */
-		strncpy(pSMB->FileName, toName, name_len);
+	} else {
+		name_len = copy_path_name(pSMB->FileName, toName);
 	}
 	params = 6 + name_len;
 	pSMB->MaxSetupCount = 0;
@@ -3131,10 +3100,8 @@ createHardLinkRetry:
 					PATH_MAX, nls_codepage, remap);
 		name_len_target++;	/* trailing null */
 		name_len_target *= 2;
-	} else {	/* BB improve the check for buffer overruns BB */
-		name_len_target = strnlen(fromName, PATH_MAX);
-		name_len_target++;	/* trailing null */
-		strncpy(data_offset, fromName, name_len_target);
+	} else {
+		name_len_target = copy_path_name(data_offset, fromName);
 	}
 
 	pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3213,15 +3180,10 @@ winCreateHardLinkRetry:
3213 remap); 3180 remap);
3214 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 3181 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
3215 name_len2 *= 2; /* convert to bytes */ 3182 name_len2 *= 2; /* convert to bytes */
3216 } else { /* BB improve the check for buffer overruns BB */ 3183 } else {
3217 name_len = strnlen(from_name, PATH_MAX); 3184 name_len = copy_path_name(pSMB->OldFileName, from_name);
3218 name_len++; /* trailing null */
3219 strncpy(pSMB->OldFileName, from_name, name_len);
3220 name_len2 = strnlen(to_name, PATH_MAX);
3221 name_len2++; /* trailing null */
3222 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 3185 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
3223 strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2); 3186 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
3224 name_len2++; /* trailing null */
3225 name_len2++; /* signature byte */ 3187 name_len2++; /* signature byte */
3226 } 3188 }
3227 3189
@@ -3271,10 +3233,8 @@ querySymLinkRetry:
3271 remap); 3233 remap);
3272 name_len++; /* trailing null */ 3234 name_len++; /* trailing null */
3273 name_len *= 2; 3235 name_len *= 2;
3274 } else { /* BB improve the check for buffer overruns BB */ 3236 } else {
3275 name_len = strnlen(searchName, PATH_MAX); 3237 name_len = copy_path_name(pSMB->FileName, searchName);
3276 name_len++; /* trailing null */
3277 strncpy(pSMB->FileName, searchName, name_len);
3278 } 3238 }
3279 3239
3280 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3240 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3691,10 +3651,8 @@ queryAclRetry:
3691 name_len *= 2; 3651 name_len *= 2;
3692 pSMB->FileName[name_len] = 0; 3652 pSMB->FileName[name_len] = 0;
3693 pSMB->FileName[name_len+1] = 0; 3653 pSMB->FileName[name_len+1] = 0;
3694 } else { /* BB improve the check for buffer overruns BB */ 3654 } else {
3695 name_len = strnlen(searchName, PATH_MAX); 3655 name_len = copy_path_name(pSMB->FileName, searchName);
3696 name_len++; /* trailing null */
3697 strncpy(pSMB->FileName, searchName, name_len);
3698 } 3656 }
3699 3657
3700 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3658 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3776,10 +3734,8 @@ setAclRetry:
3776 PATH_MAX, nls_codepage, remap); 3734 PATH_MAX, nls_codepage, remap);
3777 name_len++; /* trailing null */ 3735 name_len++; /* trailing null */
3778 name_len *= 2; 3736 name_len *= 2;
3779 } else { /* BB improve the check for buffer overruns BB */ 3737 } else {
3780 name_len = strnlen(fileName, PATH_MAX); 3738 name_len = copy_path_name(pSMB->FileName, fileName);
3781 name_len++; /* trailing null */
3782 strncpy(pSMB->FileName, fileName, name_len);
3783 } 3739 }
3784 params = 6 + name_len; 3740 params = 6 + name_len;
3785 pSMB->MaxParameterCount = cpu_to_le16(2); 3741 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4184,9 +4140,7 @@ QInfRetry:
4184 name_len++; /* trailing null */ 4140 name_len++; /* trailing null */
4185 name_len *= 2; 4141 name_len *= 2;
4186 } else { 4142 } else {
4187 name_len = strnlen(search_name, PATH_MAX); 4143 name_len = copy_path_name(pSMB->FileName, search_name);
4188 name_len++; /* trailing null */
4189 strncpy(pSMB->FileName, search_name, name_len);
4190 } 4144 }
4191 pSMB->BufferFormat = 0x04; 4145 pSMB->BufferFormat = 0x04;
4192 name_len++; /* account for buffer type byte */ 4146 name_len++; /* account for buffer type byte */
@@ -4321,10 +4275,8 @@ QPathInfoRetry:
4321 PATH_MAX, nls_codepage, remap); 4275 PATH_MAX, nls_codepage, remap);
4322 name_len++; /* trailing null */ 4276 name_len++; /* trailing null */
4323 name_len *= 2; 4277 name_len *= 2;
4324 } else { /* BB improve the check for buffer overruns BB */ 4278 } else {
4325 name_len = strnlen(search_name, PATH_MAX); 4279 name_len = copy_path_name(pSMB->FileName, search_name);
4326 name_len++; /* trailing null */
4327 strncpy(pSMB->FileName, search_name, name_len);
4328 } 4280 }
4329 4281
4330 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4282 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4490,10 +4442,8 @@ UnixQPathInfoRetry:
4490 PATH_MAX, nls_codepage, remap); 4442 PATH_MAX, nls_codepage, remap);
4491 name_len++; /* trailing null */ 4443 name_len++; /* trailing null */
4492 name_len *= 2; 4444 name_len *= 2;
4493 } else { /* BB improve the check for buffer overruns BB */ 4445 } else {
4494 name_len = strnlen(searchName, PATH_MAX); 4446 name_len = copy_path_name(pSMB->FileName, searchName);
4495 name_len++; /* trailing null */
4496 strncpy(pSMB->FileName, searchName, name_len);
4497 } 4447 }
4498 4448
4499 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4449 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4593,17 +4543,16 @@ findFirstRetry:
4593 pSMB->FileName[name_len+1] = 0; 4543 pSMB->FileName[name_len+1] = 0;
4594 name_len += 2; 4544 name_len += 2;
4595 } 4545 }
4596 } else { /* BB add check for overrun of SMB buf BB */ 4546 } else {
4597 name_len = strnlen(searchName, PATH_MAX); 4547 name_len = copy_path_name(pSMB->FileName, searchName);
4598/* BB fix here and in unicode clause above ie
4599 if (name_len > buffersize-header)
4600 free buffer exit; BB */
4601 strncpy(pSMB->FileName, searchName, name_len);
4602 if (msearch) { 4548 if (msearch) {
4603 pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb); 4549 if (WARN_ON_ONCE(name_len > PATH_MAX-2))
4604 pSMB->FileName[name_len+1] = '*'; 4550 name_len = PATH_MAX-2;
4605 pSMB->FileName[name_len+2] = 0; 4551 /* overwrite nul byte */
4606 name_len += 3; 4552 pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
4553 pSMB->FileName[name_len] = '*';
4554 pSMB->FileName[name_len+1] = 0;
4555 name_len += 2;
4607 } 4556 }
4608 } 4557 }
4609 4558
@@ -4898,10 +4847,8 @@ GetInodeNumberRetry:
4898 remap); 4847 remap);
4899 name_len++; /* trailing null */ 4848 name_len++; /* trailing null */
4900 name_len *= 2; 4849 name_len *= 2;
4901 } else { /* BB improve the check for buffer overruns BB */ 4850 } else {
4902 name_len = strnlen(search_name, PATH_MAX); 4851 name_len = copy_path_name(pSMB->FileName, search_name);
4903 name_len++; /* trailing null */
4904 strncpy(pSMB->FileName, search_name, name_len);
4905 } 4852 }
4906 4853
4907 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 4854 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -5008,9 +4955,7 @@ getDFSRetry:
5008 name_len++; /* trailing null */ 4955 name_len++; /* trailing null */
5009 name_len *= 2; 4956 name_len *= 2;
5010 } else { /* BB improve the check for buffer overruns BB */ 4957 } else { /* BB improve the check for buffer overruns BB */
5011 name_len = strnlen(search_name, PATH_MAX); 4958 name_len = copy_path_name(pSMB->RequestFileName, search_name);
5012 name_len++; /* trailing null */
5013 strncpy(pSMB->RequestFileName, search_name, name_len);
5014 } 4959 }
5015 4960
5016 if (ses->server->sign) 4961 if (ses->server->sign)
@@ -5663,10 +5608,8 @@ SetEOFRetry:
5663 PATH_MAX, cifs_sb->local_nls, remap); 5608 PATH_MAX, cifs_sb->local_nls, remap);
5664 name_len++; /* trailing null */ 5609 name_len++; /* trailing null */
5665 name_len *= 2; 5610 name_len *= 2;
5666 } else { /* BB improve the check for buffer overruns BB */ 5611 } else {
5667 name_len = strnlen(file_name, PATH_MAX); 5612 name_len = copy_path_name(pSMB->FileName, file_name);
5668 name_len++; /* trailing null */
5669 strncpy(pSMB->FileName, file_name, name_len);
5670 } 5613 }
5671 params = 6 + name_len; 5614 params = 6 + name_len;
5672 data_count = sizeof(struct file_end_of_file_info); 5615 data_count = sizeof(struct file_end_of_file_info);
@@ -5959,10 +5902,8 @@ SetTimesRetry:
5959 PATH_MAX, nls_codepage, remap); 5902 PATH_MAX, nls_codepage, remap);
5960 name_len++; /* trailing null */ 5903 name_len++; /* trailing null */
5961 name_len *= 2; 5904 name_len *= 2;
5962 } else { /* BB improve the check for buffer overruns BB */ 5905 } else {
5963 name_len = strnlen(fileName, PATH_MAX); 5906 name_len = copy_path_name(pSMB->FileName, fileName);
5964 name_len++; /* trailing null */
5965 strncpy(pSMB->FileName, fileName, name_len);
5966 } 5907 }
5967 5908
5968 params = 6 + name_len; 5909 params = 6 + name_len;
@@ -6040,10 +5981,8 @@ SetAttrLgcyRetry:
6040 PATH_MAX, nls_codepage); 5981 PATH_MAX, nls_codepage);
6041 name_len++; /* trailing null */ 5982 name_len++; /* trailing null */
6042 name_len *= 2; 5983 name_len *= 2;
6043 } else { /* BB improve the check for buffer overruns BB */ 5984 } else {
6044 name_len = strnlen(fileName, PATH_MAX); 5985 name_len = copy_path_name(pSMB->fileName, fileName);
6045 name_len++; /* trailing null */
6046 strncpy(pSMB->fileName, fileName, name_len);
6047 } 5986 }
6048 pSMB->attr = cpu_to_le16(dos_attrs); 5987 pSMB->attr = cpu_to_le16(dos_attrs);
6049 pSMB->BufferFormat = 0x04; 5988 pSMB->BufferFormat = 0x04;
@@ -6203,10 +6142,8 @@ setPermsRetry:
6203 PATH_MAX, nls_codepage, remap); 6142 PATH_MAX, nls_codepage, remap);
6204 name_len++; /* trailing null */ 6143 name_len++; /* trailing null */
6205 name_len *= 2; 6144 name_len *= 2;
6206 } else { /* BB improve the check for buffer overruns BB */ 6145 } else {
6207 name_len = strnlen(file_name, PATH_MAX); 6146 name_len = copy_path_name(pSMB->FileName, file_name);
6208 name_len++; /* trailing null */
6209 strncpy(pSMB->FileName, file_name, name_len);
6210 } 6147 }
6211 6148
6212 params = 6 + name_len; 6149 params = 6 + name_len;
@@ -6298,10 +6235,8 @@ QAllEAsRetry:
6298 PATH_MAX, nls_codepage, remap); 6235 PATH_MAX, nls_codepage, remap);
6299 list_len++; /* trailing null */ 6236 list_len++; /* trailing null */
6300 list_len *= 2; 6237 list_len *= 2;
6301 } else { /* BB improve the check for buffer overruns BB */ 6238 } else {
6302 list_len = strnlen(searchName, PATH_MAX); 6239 list_len = copy_path_name(pSMB->FileName, searchName);
6303 list_len++; /* trailing null */
6304 strncpy(pSMB->FileName, searchName, list_len);
6305 } 6240 }
6306 6241
6307 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; 6242 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
@@ -6480,10 +6415,8 @@ SetEARetry:
6480 PATH_MAX, nls_codepage, remap); 6415 PATH_MAX, nls_codepage, remap);
6481 name_len++; /* trailing null */ 6416 name_len++; /* trailing null */
6482 name_len *= 2; 6417 name_len *= 2;
6483 } else { /* BB improve the check for buffer overruns BB */ 6418 } else {
6484 name_len = strnlen(fileName, PATH_MAX); 6419 name_len = copy_path_name(pSMB->FileName, fileName);
6485 name_len++; /* trailing null */
6486 strncpy(pSMB->FileName, fileName, name_len);
6487 } 6420 }
6488 6421
6489 params = 6 + name_len; 6422 params = 6 + name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a15a6e738eb5..5299effa6f7d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1113,7 +1113,7 @@ cifs_demultiplex_thread(void *p)
1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv); 1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1114 1114
1115 set_freezable(); 1115 set_freezable();
1116 allow_signal(SIGKILL); 1116 allow_kernel_signal(SIGKILL);
1117 while (server->tcpStatus != CifsExiting) { 1117 while (server->tcpStatus != CifsExiting) {
1118 if (try_to_freeze()) 1118 if (try_to_freeze())
1119 continue; 1119 continue;
@@ -2981,6 +2981,7 @@ static int
2981cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) 2981cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2982{ 2982{
2983 int rc = 0; 2983 int rc = 0;
2984 int is_domain = 0;
2984 const char *delim, *payload; 2985 const char *delim, *payload;
2985 char *desc; 2986 char *desc;
2986 ssize_t len; 2987 ssize_t len;
@@ -3028,6 +3029,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3028 rc = PTR_ERR(key); 3029 rc = PTR_ERR(key);
3029 goto out_err; 3030 goto out_err;
3030 } 3031 }
3032 is_domain = 1;
3031 } 3033 }
3032 3034
3033 down_read(&key->sem); 3035 down_read(&key->sem);
@@ -3085,6 +3087,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3085 goto out_key_put; 3087 goto out_key_put;
3086 } 3088 }
3087 3089
3090 /*
 3091	 * If we have a domain key then we must set the domainName in
 3092	 * the request.
3093 */
3094 if (is_domain && ses->domainName) {
3095 vol->domainname = kstrndup(ses->domainName,
3096 strlen(ses->domainName),
3097 GFP_KERNEL);
3098 if (!vol->domainname) {
3099 cifs_dbg(FYI, "Unable to allocate %zd bytes for "
3100 "domain\n", len);
3101 rc = -ENOMEM;
3102 kfree(vol->username);
3103 vol->username = NULL;
3104 kzfree(vol->password);
3105 vol->password = NULL;
3106 goto out_key_put;
3107 }
3108 }
3109
3088out_key_put: 3110out_key_put:
3089 up_read(&key->sem); 3111 up_read(&key->sem);
3090 key_put(key); 3112 key_put(key);
@@ -4209,16 +4231,19 @@ build_unc_path_to_root(const struct smb_vol *vol,
4209 strlen(vol->prepath) + 1 : 0; 4231 strlen(vol->prepath) + 1 : 0;
4210 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); 4232 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
4211 4233
4234 if (unc_len > MAX_TREE_SIZE)
4235 return ERR_PTR(-EINVAL);
4236
4212 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); 4237 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
4213 if (full_path == NULL) 4238 if (full_path == NULL)
4214 return ERR_PTR(-ENOMEM); 4239 return ERR_PTR(-ENOMEM);
4215 4240
4216 strncpy(full_path, vol->UNC, unc_len); 4241 memcpy(full_path, vol->UNC, unc_len);
4217 pos = full_path + unc_len; 4242 pos = full_path + unc_len;
4218 4243
4219 if (pplen) { 4244 if (pplen) {
4220 *pos = CIFS_DIR_SEP(cifs_sb); 4245 *pos = CIFS_DIR_SEP(cifs_sb);
4221 strncpy(pos + 1, vol->prepath, pplen); 4246 memcpy(pos + 1, vol->prepath, pplen);
4222 pos += pplen; 4247 pos += pplen;
4223 } 4248 }
4224 4249
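
Note: strnlen() with a limit of MAX_TREE_SIZE + 1 works as a sentinel; any string longer than MAX_TREE_SIZE yields MAX_TREE_SIZE + 1 and fails the new check, after which every length is known exactly and memcpy() is safe. A minimal sketch of the pattern, with hypothetical names:

        /* Reject over-long input before copying; after the check the
         * length is exact, so memcpy() replaces the old strncpy(). */
        static int copy_bounded(char *dst, const char *src, size_t limit)
        {
                size_t len = strnlen(src, limit + 1);

                if (len > limit)
                        return -EINVAL;
                memcpy(dst, src, len);
                dst[len] = '\0';
                return 0;
        }
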
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f26a48dd2e39..be424e81e3ad 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
69 return full_path; 69 return full_path;
70 70
71 if (dfsplen) 71 if (dfsplen)
72 strncpy(full_path, tcon->treeName, dfsplen); 72 memcpy(full_path, tcon->treeName, dfsplen);
73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb); 73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
74 strncpy(full_path + dfsplen + 1, vol->prepath, pplen); 74 memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); 75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
76 full_path[dfsplen + pplen] = 0; /* add trailing null */
77 return full_path; 76 return full_path;
78} 77}
79 78
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index f383877a6511..5ad83bdb9bea 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1011,3 +1011,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
1011 *h = unc; 1011 *h = unc;
1012 *len = end - unc; 1012 *len = end - unc;
1013} 1013}
1014
1015/**
1016 * copy_path_name - copy src path to dst, possibly truncating
1017 *
1018 * returns number of bytes written (including trailing nul)
1019 */
1020int copy_path_name(char *dst, const char *src)
1021{
1022 int name_len;
1023
1024 /*
1025 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1026 * will truncate and strlen(dst) will be PATH_MAX-1
1027 */
1028 name_len = strscpy(dst, src, PATH_MAX);
1029 if (WARN_ON_ONCE(name_len < 0))
1030 name_len = PATH_MAX-1;
1031
1032 /* we count the trailing nul */
1033 name_len++;
1034 return name_len;
1035}
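
For reference, a userspace sketch of the copy_path_name() contract; strscpy_sketch() stands in for the kernel's strscpy(), which returns strlen(dst) on success and -E2BIG on truncation, and the PATH_MAX value here is illustrative:

        #include <stdio.h>
        #include <string.h>

        #define PATH_MAX 4096

        /* Mimic strscpy(): copy at most count-1 bytes plus a nul; report
         * truncation with a negative return (the kernel uses -E2BIG). */
        static long strscpy_sketch(char *dst, const char *src, size_t count)
        {
                size_t len = strnlen(src, count);

                if (len == count) {
                        memcpy(dst, src, count - 1);
                        dst[count - 1] = '\0';
                        return -1;
                }
                memcpy(dst, src, len + 1);      /* includes the nul */
                return (long)len;
        }

        /* Same contract as the new helper: on truncation strlen(dst) is
         * PATH_MAX-1; the returned length always counts the trailing nul. */
        static int copy_path_name_sketch(char *dst, const char *src)
        {
                long name_len = strscpy_sketch(dst, src, PATH_MAX);

                if (name_len < 0)
                        name_len = PATH_MAX - 1;
                return (int)name_len + 1;
        }

        int main(void)
        {
                char buf[PATH_MAX];

                printf("%d\n", copy_path_name_sketch(buf, "\\dir\\file")); /* 10 */
                return 0;
        }
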
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index dcd49ad60c83..4c764ff7edd2 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -159,13 +159,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
159 const struct nls_table *nls_cp) 159 const struct nls_table *nls_cp)
160{ 160{
161 char *bcc_ptr = *pbcc_area; 161 char *bcc_ptr = *pbcc_area;
162 int len;
162 163
163 /* copy user */ 164 /* copy user */
164 /* BB what about null user mounts - check that we do this BB */ 165 /* BB what about null user mounts - check that we do this BB */
165 /* copy user */ 166 /* copy user */
166 if (ses->user_name != NULL) { 167 if (ses->user_name != NULL) {
167 strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); 168 len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
168 bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); 169 if (WARN_ON_ONCE(len < 0))
170 len = CIFS_MAX_USERNAME_LEN - 1;
171 bcc_ptr += len;
169 } 172 }
170 /* else null user mount */ 173 /* else null user mount */
171 *bcc_ptr = 0; 174 *bcc_ptr = 0;
@@ -173,8 +176,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
173 176
174 /* copy domain */ 177 /* copy domain */
175 if (ses->domainName != NULL) { 178 if (ses->domainName != NULL) {
176 strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 179 len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
177 bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 180 if (WARN_ON_ONCE(len < 0))
181 len = CIFS_MAX_DOMAINNAME_LEN - 1;
182 bcc_ptr += len;
178 } /* else we will send a null domain name 183 } /* else we will send a null domain name
179 so the server will default to its own domain */ 184 so the server will default to its own domain */
180 *bcc_ptr = 0; 185 *bcc_ptr = 0;
@@ -242,9 +247,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
242 247
243 kfree(ses->serverOS); 248 kfree(ses->serverOS);
244 249
245 ses->serverOS = kzalloc(len + 1, GFP_KERNEL); 250 ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
246 if (ses->serverOS) { 251 if (ses->serverOS) {
247 strncpy(ses->serverOS, bcc_ptr, len); 252 memcpy(ses->serverOS, bcc_ptr, len);
253 ses->serverOS[len] = 0;
248 if (strncmp(ses->serverOS, "OS/2", 4) == 0) 254 if (strncmp(ses->serverOS, "OS/2", 4) == 0)
249 cifs_dbg(FYI, "OS/2 server\n"); 255 cifs_dbg(FYI, "OS/2 server\n");
250 } 256 }
@@ -258,9 +264,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
258 264
259 kfree(ses->serverNOS); 265 kfree(ses->serverNOS);
260 266
261 ses->serverNOS = kzalloc(len + 1, GFP_KERNEL); 267 ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
262 if (ses->serverNOS) 268 if (ses->serverNOS) {
263 strncpy(ses->serverNOS, bcc_ptr, len); 269 memcpy(ses->serverNOS, bcc_ptr, len);
270 ses->serverNOS[len] = 0;
271 }
264 272
265 bcc_ptr += len + 1; 273 bcc_ptr += len + 1;
266 bleft -= len + 1; 274 bleft -= len + 1;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 24bbe3cb7ad4..cfb48bd088e1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -679,6 +679,13 @@ static void io_put_req(struct io_kiocb *req)
679 io_free_req(req); 679 io_free_req(req);
680} 680}
681 681
682static unsigned io_cqring_events(struct io_cq_ring *ring)
683{
684 /* See comment at the top of this file */
685 smp_rmb();
686 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
687}
688
682/* 689/*
683 * Find and free completed poll iocbs 690 * Find and free completed poll iocbs
684 */ 691 */
@@ -771,7 +778,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
771static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, 778static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
772 long min) 779 long min)
773{ 780{
774 while (!list_empty(&ctx->poll_list)) { 781 while (!list_empty(&ctx->poll_list) && !need_resched()) {
775 int ret; 782 int ret;
776 783
777 ret = io_do_iopoll(ctx, nr_events, min); 784 ret = io_do_iopoll(ctx, nr_events, min);
@@ -798,6 +805,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
798 unsigned int nr_events = 0; 805 unsigned int nr_events = 0;
799 806
800 io_iopoll_getevents(ctx, &nr_events, 1); 807 io_iopoll_getevents(ctx, &nr_events, 1);
808
809 /*
810 * Ensure we allow local-to-the-cpu processing to take place,
811 * in this case we need to ensure that we reap all events.
812 */
813 cond_resched();
801 } 814 }
802 mutex_unlock(&ctx->uring_lock); 815 mutex_unlock(&ctx->uring_lock);
803} 816}
@@ -805,11 +818,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
805static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, 818static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
806 long min) 819 long min)
807{ 820{
808 int ret = 0; 821 int iters, ret = 0;
809 822
823 /*
824 * We disallow the app entering submit/complete with polling, but we
825 * still need to lock the ring to prevent racing with polled issue
826 * that got punted to a workqueue.
827 */
828 mutex_lock(&ctx->uring_lock);
829
830 iters = 0;
810 do { 831 do {
811 int tmin = 0; 832 int tmin = 0;
812 833
834 /*
835 * Don't enter poll loop if we already have events pending.
836 * If we do, we can potentially be spinning for commands that
837 * already triggered a CQE (eg in error).
838 */
839 if (io_cqring_events(ctx->cq_ring))
840 break;
841
842 /*
843 * If a submit got punted to a workqueue, we can have the
844 * application entering polling for a command before it gets
845 * issued. That app will hold the uring_lock for the duration
846 * of the poll right here, so we need to take a breather every
847 * now and then to ensure that the issue has a chance to add
848 * the poll to the issued list. Otherwise we can spin here
849 * forever, while the workqueue is stuck trying to acquire the
850 * very same mutex.
851 */
852 if (!(++iters & 7)) {
853 mutex_unlock(&ctx->uring_lock);
854 mutex_lock(&ctx->uring_lock);
855 }
856
813 if (*nr_events < min) 857 if (*nr_events < min)
814 tmin = min - *nr_events; 858 tmin = min - *nr_events;
815 859
@@ -819,6 +863,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
819 ret = 0; 863 ret = 0;
820 } while (min && !*nr_events && !need_resched()); 864 } while (min && !*nr_events && !need_resched());
821 865
866 mutex_unlock(&ctx->uring_lock);
822 return ret; 867 return ret;
823} 868}
824 869
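
The lock-cycling breather above is a general pattern; here is a userspace analogue using pthreads, where poll_once() is a hypothetical stand-in for io_do_iopoll() and the back-to-back unlock/lock gives a blocked waiter a chance to take the mutex:

        #include <pthread.h>
        #include <stdbool.h>

        /* Spin-poll under a mutex, but cycle the lock on every 8th pass so
         * another thread blocked in pthread_mutex_lock() can get in; this
         * mirrors the "++iters & 7" breather in io_iopoll_check(). */
        static void poll_with_breather(pthread_mutex_t *lock,
                                       bool (*poll_once)(void *), void *arg)
        {
                int iters = 0;

                pthread_mutex_lock(lock);
                while (!poll_once(arg)) {
                        if (!(++iters & 7)) {
                                pthread_mutex_unlock(lock);
                                pthread_mutex_lock(lock);
                        }
                }
                pthread_mutex_unlock(lock);
        }
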
@@ -2280,15 +2325,7 @@ static int io_sq_thread(void *data)
2280 unsigned nr_events = 0; 2325 unsigned nr_events = 0;
2281 2326
2282 if (ctx->flags & IORING_SETUP_IOPOLL) { 2327 if (ctx->flags & IORING_SETUP_IOPOLL) {
2283 /*
2284 * We disallow the app entering submit/complete
2285 * with polling, but we still need to lock the
2286 * ring to prevent racing with polled issue
2287 * that got punted to a workqueue.
2288 */
2289 mutex_lock(&ctx->uring_lock);
2290 io_iopoll_check(ctx, &nr_events, 0); 2328 io_iopoll_check(ctx, &nr_events, 0);
2291 mutex_unlock(&ctx->uring_lock);
2292 } else { 2329 } else {
2293 /* 2330 /*
2294 * Normal IO, just pretend everything completed. 2331 * Normal IO, just pretend everything completed.
@@ -2433,13 +2470,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2433 return submit; 2470 return submit;
2434} 2471}
2435 2472
2436static unsigned io_cqring_events(struct io_cq_ring *ring)
2437{
2438 /* See comment at the top of this file */
2439 smp_rmb();
2440 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
2441}
2442
2443/* 2473/*
2444 * Wait until events become available, if we don't already have some. The 2474 * Wait until events become available, if we don't already have some. The
2445 * application must reap them itself, as they reside on the shared cq ring. 2475 * application must reap them itself, as they reside on the shared cq ring.
@@ -3190,9 +3220,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3190 min_complete = min(min_complete, ctx->cq_entries); 3220 min_complete = min(min_complete, ctx->cq_entries);
3191 3221
3192 if (ctx->flags & IORING_SETUP_IOPOLL) { 3222 if (ctx->flags & IORING_SETUP_IOPOLL) {
3193 mutex_lock(&ctx->uring_lock);
3194 ret = io_iopoll_check(ctx, &nr_events, min_complete); 3223 ret = io_iopoll_check(ctx, &nr_events, min_complete);
3195 mutex_unlock(&ctx->uring_lock);
3196 } else { 3224 } else {
3197 ret = io_cqring_wait(ctx, min_complete, sig, sigsz); 3225 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3198 } 3226 }
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8d501093660f..0adfd8840110 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode)) 1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
1488 nfs_file_set_open_context(file, ctx); 1488 nfs_file_set_open_context(file, ctx);
1489 else 1489 else
1490 err = -ESTALE; 1490 err = -EOPENSTALE;
1491out: 1491out:
1492 return err; 1492 return err;
1493} 1493}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..222d7115db71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
401 unsigned long bytes = 0; 401 unsigned long bytes = 0;
402 struct nfs_direct_req *dreq = hdr->dreq; 402 struct nfs_direct_req *dreq = hdr->dreq;
403 403
404 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
405 goto out_put;
406
407 spin_lock(&dreq->lock); 404 spin_lock(&dreq->lock);
408 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0)) 405 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
409 dreq->error = hdr->error; 406 dreq->error = hdr->error;
410 else 407
408 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
409 spin_unlock(&dreq->lock);
410 goto out_put;
411 }
412
413 if (hdr->good_bytes != 0)
411 nfs_direct_good_bytes(dreq, hdr); 414 nfs_direct_good_bytes(dreq, hdr);
412 415
416 if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
417 dreq->error = 0;
418
413 spin_unlock(&dreq->lock); 419 spin_unlock(&dreq->lock);
414 420
415 while (!list_empty(&hdr->pages)) { 421 while (!list_empty(&hdr->pages)) {
@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
782 bool request_commit = false; 788 bool request_commit = false;
783 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 789 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
784 790
785 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
786 goto out_put;
787
788 nfs_init_cinfo_from_dreq(&cinfo, dreq); 791 nfs_init_cinfo_from_dreq(&cinfo, dreq);
789 792
790 spin_lock(&dreq->lock); 793 spin_lock(&dreq->lock);
791 794
792 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) 795 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
793 dreq->error = hdr->error; 796 dreq->error = hdr->error;
794 if (dreq->error == 0) { 797
798 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
799 spin_unlock(&dreq->lock);
800 goto out_put;
801 }
802
803 if (hdr->good_bytes != 0) {
795 nfs_direct_good_bytes(dreq, hdr); 804 nfs_direct_good_bytes(dreq, hdr);
796 if (nfs_write_need_commit(hdr)) { 805 if (nfs_write_need_commit(hdr)) {
797 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) 806 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b04e20d28162..5657b7f2611f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/nfs_fs.h> 10#include <linux/nfs_fs.h>
11#include <linux/nfs_mount.h>
11#include <linux/nfs_page.h> 12#include <linux/nfs_page.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/sched/mm.h> 14#include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
928 pgm = &pgio->pg_mirrors[0]; 929 pgm = &pgio->pg_mirrors[0];
929 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 930 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
930 931
931 pgio->pg_maxretrans = io_maxretrans; 932 if (NFS_SERVER(pgio->pg_inode)->flags &
933 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
934 pgio->pg_maxretrans = io_maxretrans;
932 return; 935 return;
933out_nolseg: 936out_nolseg:
934 if (pgio->pg_error < 0) 937 if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
940 pgio->pg_lseg); 943 pgio->pg_lseg);
941 pnfs_put_lseg(pgio->pg_lseg); 944 pnfs_put_lseg(pgio->pg_lseg);
942 pgio->pg_lseg = NULL; 945 pgio->pg_lseg = NULL;
946 pgio->pg_maxretrans = 0;
943 nfs_pageio_reset_read_mds(pgio); 947 nfs_pageio_reset_read_mds(pgio);
944} 948}
945 949
@@ -1000,7 +1004,9 @@ retry:
1000 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; 1004 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
1001 } 1005 }
1002 1006
1003 pgio->pg_maxretrans = io_maxretrans; 1007 if (NFS_SERVER(pgio->pg_inode)->flags &
1008 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1009 pgio->pg_maxretrans = io_maxretrans;
1004 return; 1010 return;
1005 1011
1006out_mds: 1012out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
1010 pgio->pg_lseg); 1016 pgio->pg_lseg);
1011 pnfs_put_lseg(pgio->pg_lseg); 1017 pnfs_put_lseg(pgio->pg_lseg);
1012 pgio->pg_lseg = NULL; 1018 pgio->pg_lseg = NULL;
1019 pgio->pg_maxretrans = 0;
1013 nfs_pageio_reset_write_mds(pgio); 1020 nfs_pageio_reset_write_mds(pgio);
1014} 1021}
1015 1022
@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1148 break; 1155 break;
1149 case -NFS4ERR_RETRY_UNCACHED_REP: 1156 case -NFS4ERR_RETRY_UNCACHED_REP:
1150 break; 1157 break;
1151 case -EAGAIN:
1152 return -NFS4ERR_RESET_TO_PNFS;
1153 /* Invalidate Layout errors */ 1158 /* Invalidate Layout errors */
1154 case -NFS4ERR_PNFS_NO_LAYOUT: 1159 case -NFS4ERR_PNFS_NO_LAYOUT:
1155 case -ESTALE: /* mapped NFS4ERR_STALE */ 1160 case -ESTALE: /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1210 case -EBADHANDLE: 1215 case -EBADHANDLE:
1211 case -ELOOP: 1216 case -ELOOP:
1212 case -ENOSPC: 1217 case -ENOSPC:
1213 case -EAGAIN:
1214 break; 1218 break;
1215 case -EJUKEBOX: 1219 case -EJUKEBOX:
1216 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1220 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1445 ff_layout_read_prepare_common(task, hdr); 1449 ff_layout_read_prepare_common(task, hdr);
1446} 1450}
1447 1451
1448static void
1449ff_layout_io_prepare_transmit(struct rpc_task *task,
1450 void *data)
1451{
1452 struct nfs_pgio_header *hdr = data;
1453
1454 if (!pnfs_is_valid_lseg(hdr->lseg))
1455 rpc_exit(task, -EAGAIN);
1456}
1457
1458static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1452static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1459{ 1453{
1460 struct nfs_pgio_header *hdr = data; 1454 struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)
1740 1734
1741static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { 1735static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1742 .rpc_call_prepare = ff_layout_read_prepare_v3, 1736 .rpc_call_prepare = ff_layout_read_prepare_v3,
1743 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1744 .rpc_call_done = ff_layout_read_call_done, 1737 .rpc_call_done = ff_layout_read_call_done,
1745 .rpc_count_stats = ff_layout_read_count_stats, 1738 .rpc_count_stats = ff_layout_read_count_stats,
1746 .rpc_release = ff_layout_read_release, 1739 .rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1748 1741
1749static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { 1742static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1750 .rpc_call_prepare = ff_layout_read_prepare_v4, 1743 .rpc_call_prepare = ff_layout_read_prepare_v4,
1751 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1752 .rpc_call_done = ff_layout_read_call_done, 1744 .rpc_call_done = ff_layout_read_call_done,
1753 .rpc_count_stats = ff_layout_read_count_stats, 1745 .rpc_count_stats = ff_layout_read_count_stats,
1754 .rpc_release = ff_layout_read_release, 1746 .rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1756 1748
1757static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { 1749static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1758 .rpc_call_prepare = ff_layout_write_prepare_v3, 1750 .rpc_call_prepare = ff_layout_write_prepare_v3,
1759 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1760 .rpc_call_done = ff_layout_write_call_done, 1751 .rpc_call_done = ff_layout_write_call_done,
1761 .rpc_count_stats = ff_layout_write_count_stats, 1752 .rpc_count_stats = ff_layout_write_count_stats,
1762 .rpc_release = ff_layout_write_release, 1753 .rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1764 1755
1765static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { 1756static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1766 .rpc_call_prepare = ff_layout_write_prepare_v4, 1757 .rpc_call_prepare = ff_layout_write_prepare_v4,
1767 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1768 .rpc_call_done = ff_layout_write_call_done, 1758 .rpc_call_done = ff_layout_write_call_done,
1769 .rpc_count_stats = ff_layout_write_count_stats, 1759 .rpc_count_stats = ff_layout_write_count_stats,
1770 .rpc_release = ff_layout_write_release, 1760 .rpc_release = ff_layout_write_release,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a1758200b57..c764cfe456e5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,12 +1403,21 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1404 return 0; 1404 return 0;
1405 1405
1406 /* No fileid? Just exit */
1407 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
1408 return 0;
1406 /* Has the inode gone and changed behind our back? */ 1409 /* Has the inode gone and changed behind our back? */
1407 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) 1410 if (nfsi->fileid != fattr->fileid) {
1411 /* Is this perhaps the mounted-on fileid? */
1412 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1413 nfsi->fileid == fattr->mounted_on_fileid)
1414 return 0;
1408 return -ESTALE; 1415 return -ESTALE;
1416 }
1409 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) 1417 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
1410 return -ESTALE; 1418 return -ESTALE;
1411 1419
1420
1412 if (!nfs_file_has_buffered_writers(nfsi)) { 1421 if (!nfs_file_has_buffered_writers(nfsi)) {
1413 /* Verify a few of the more important attributes */ 1422 /* Verify a few of the more important attributes */
1414 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr)) 1423 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1777,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
1768EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc); 1777EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
1769 1778
1770 1779
1771static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
1772 struct nfs_fattr *fattr)
1773{
1774 bool ret1 = true, ret2 = true;
1775
1776 if (fattr->valid & NFS_ATTR_FATTR_FILEID)
1777 ret1 = (nfsi->fileid == fattr->fileid);
1778 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
1779 ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
1780 return ret1 || ret2;
1781}
1782
1783/* 1780/*
1784 * Many nfs protocol calls return the new file attributes after 1781 * Many nfs protocol calls return the new file attributes after
1785 * an operation. Here we update the inode to reflect the state 1782 * an operation. Here we update the inode to reflect the state
@@ -1810,7 +1807,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1810 nfs_display_fhandle_hash(NFS_FH(inode)), 1807 nfs_display_fhandle_hash(NFS_FH(inode)),
1811 atomic_read(&inode->i_count), fattr->valid); 1808 atomic_read(&inode->i_count), fattr->valid);
1812 1809
1813 if (!nfs_fileid_valid(nfsi, fattr)) { 1810 /* No fileid? Just exit */
1811 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
1812 return 0;
1813 /* Has the inode gone and changed behind our back? */
1814 if (nfsi->fileid != fattr->fileid) {
1815 /* Is this perhaps the mounted-on fileid? */
1816 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1817 nfsi->fileid == fattr->mounted_on_fileid)
1818 return 0;
1814 printk(KERN_ERR "NFS: server %s error: fileid changed\n" 1819 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
1815 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n", 1820 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
1816 NFS_SERVER(inode)->nfs_client->cl_hostname, 1821 NFS_SERVER(inode)->nfs_client->cl_hostname,
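
The same three-way test now appears in both nfs_check_inode_attributes() and nfs_update_inode(); written as a single predicate it reads as below (a sketch with a hypothetical name; the patch open-codes it at each call site):

        /* True when fattr cannot contradict the cached fileid: no fileid
         * supplied, the fileids match, or fattr carries this inode's
         * mounted-on fileid. Unlike the removed nfs_fileid_valid(), a
         * present but different fileid is no longer excused just because
         * the mounted-on fileid happens to be absent. */
        static bool nfs_fattr_fileid_consistent(const struct nfs_inode *nfsi,
                                                const struct nfs_fattr *fattr)
        {
                if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
                        return true;
                if (nfsi->fileid == fattr->fileid)
                        return true;
                return (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
                       nfsi->fileid == fattr->mounted_on_fileid;
        }
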
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a2346a2f8361..e64f810223be 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err)
775 } 775 }
776} 776}
777 777
778static inline bool nfs_error_is_fatal_on_server(int err)
779{
780 switch (err) {
781 case 0:
782 case -ERESTARTSYS:
783 case -EINTR:
784 return false;
785 }
786 return nfs_error_is_fatal(err);
787}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 96db471ca2e5..339663d04bf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
73 if (IS_ERR(inode)) { 73 if (IS_ERR(inode)) {
74 err = PTR_ERR(inode); 74 err = PTR_ERR(inode);
75 switch (err) { 75 switch (err) {
76 case -EPERM:
77 case -EACCES:
78 case -EDQUOT:
79 case -ENOSPC:
80 case -EROFS:
81 goto out_put_ctx;
82 default: 76 default:
77 goto out_put_ctx;
78 case -ENOENT:
79 case -ESTALE:
80 case -EISDIR:
81 case -ENOTDIR:
82 case -ELOOP:
83 goto out_drop; 83 goto out_drop;
84 } 84 }
85 } 85 }
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ed4e1b07447b..20b3717cd7ca 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
590 } 590 }
591 591
592 hdr->res.fattr = &hdr->fattr; 592 hdr->res.fattr = &hdr->fattr;
593 hdr->res.count = count; 593 hdr->res.count = 0;
594 hdr->res.eof = 0; 594 hdr->res.eof = 0;
595 hdr->res.verf = &hdr->verf; 595 hdr->res.verf = &hdr->verf;
596 nfs_fattr_init(&hdr->fattr); 596 nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, 1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1252 struct nfs_pgio_header *hdr) 1252 struct nfs_pgio_header *hdr)
1253{ 1253{
1254 LIST_HEAD(failed); 1254 LIST_HEAD(pages);
1255 1255
1256 desc->pg_io_completion = hdr->io_completion; 1256 desc->pg_io_completion = hdr->io_completion;
1257 desc->pg_dreq = hdr->dreq; 1257 desc->pg_dreq = hdr->dreq;
1258 while (!list_empty(&hdr->pages)) { 1258 list_splice_init(&hdr->pages, &pages);
1259 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 1259 while (!list_empty(&pages)) {
1260 struct nfs_page *req = nfs_list_entry(pages.next);
1260 1261
1261 if (!nfs_pageio_add_request(desc, req)) 1262 if (!nfs_pageio_add_request(desc, req))
1262 nfs_list_move_request(req, &failed); 1263 break;
1263 } 1264 }
1264 nfs_pageio_complete(desc); 1265 nfs_pageio_complete(desc);
1265 if (!list_empty(&failed)) { 1266 if (!list_empty(&pages)) {
1266 list_move(&failed, &hdr->pages); 1267 int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
1267 return desc->pg_error < 0 ? desc->pg_error : -EIO; 1268 hdr->completion_ops->error_cleanup(&pages, err);
1269 nfs_set_pgio_error(hdr, err, hdr->io_start);
1270 return err;
1268 } 1271 }
1269 return 0; 1272 return 0;
1270} 1273}
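
Note: the rewritten resend path takes ownership of the request list up front, so a failed re-queue leaves the unprocessed tail on a local list that can be cleaned up in one place. In outline, following the hunk above:

        LIST_HEAD(pages);

        list_splice_init(&hdr->pages, &pages);  /* take the list locally */
        while (!list_empty(&pages)) {
                struct nfs_page *req = nfs_list_entry(pages.next);

                if (!nfs_pageio_add_request(desc, req))
                        break;                  /* tail stays on "pages" */
        }
        nfs_pageio_complete(desc);
        if (!list_empty(&pages)) {
                int err = desc->pg_error < 0 ? desc->pg_error : -EIO;

                hdr->completion_ops->error_cleanup(&pages, err);
                nfs_set_pgio_error(hdr, err, hdr->io_start);
                return err;
        }
        return 0;
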
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index c0046c348910..82af4809b869 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
627 /* Add this address as an alias */ 627 /* Add this address as an alias */
628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
629 rpc_clnt_test_and_add_xprt, NULL); 629 rpc_clnt_test_and_add_xprt, NULL);
630 } else 630 continue;
631 clp = get_v3_ds_connect(mds_srv, 631 }
632 (struct sockaddr *)&da->da_addr, 632 clp = get_v3_ds_connect(mds_srv,
633 da->da_addrlen, IPPROTO_TCP, 633 (struct sockaddr *)&da->da_addr,
634 timeo, retrans); 634 da->da_addrlen, IPPROTO_TCP,
635 timeo, retrans);
636 if (IS_ERR(clp))
637 continue;
638 clp->cl_rpcclient->cl_softerr = 0;
639 clp->cl_rpcclient->cl_softrtry = 0;
635 } 640 }
636 641
637 if (IS_ERR(clp)) { 642 if (IS_ERR(clp)) {
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5552fa8b6e12..0f7288b94633 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
594 /* Emulate the eof flag, which isn't normally needed in NFSv2 594 /* Emulate the eof flag, which isn't normally needed in NFSv2
595 * as it is guaranteed to always return the file attributes 595 * as it is guaranteed to always return the file attributes
596 */ 596 */
597 if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) 597 if ((hdr->res.count == 0 && hdr->args.count > 0) ||
598 hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
598 hdr->res.eof = 1; 599 hdr->res.eof = 1;
599 } 600 }
600 return 0; 601 return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
615 616
616static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 617static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
617{ 618{
618 if (task->tk_status >= 0) 619 if (task->tk_status >= 0) {
620 hdr->res.count = hdr->args.count;
619 nfs_writeback_update_inode(hdr); 621 nfs_writeback_update_inode(hdr);
622 }
620 return 0; 623 return 0;
621} 624}
622 625
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c19841c82b6a..cfe0b586eadd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
91} 91}
92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); 92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
93 93
94static void nfs_readpage_release(struct nfs_page *req) 94static void nfs_readpage_release(struct nfs_page *req, int error)
95{ 95{
96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); 96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
97 struct page *page = req->wb_page;
97 98
98 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, 99 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
99 (unsigned long long)NFS_FILEID(inode), req->wb_bytes, 100 (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
100 (long long)req_offset(req)); 101 (long long)req_offset(req));
101 102
103 if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
104 SetPageError(page);
102 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { 105 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
103 if (PageUptodate(req->wb_page)) 106 struct address_space *mapping = page_file_mapping(page);
104 nfs_readpage_to_fscache(inode, req->wb_page, 0);
105 107
106 unlock_page(req->wb_page); 108 if (PageUptodate(page))
109 nfs_readpage_to_fscache(inode, page, 0);
110 else if (!PageError(page) && !PagePrivate(page))
111 generic_error_remove_page(mapping, page);
112 unlock_page(page);
107 } 113 }
108 nfs_release_request(req); 114 nfs_release_request(req);
109} 115}
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
131 &nfs_async_read_completion_ops); 137 &nfs_async_read_completion_ops);
132 if (!nfs_pageio_add_request(&pgio, new)) { 138 if (!nfs_pageio_add_request(&pgio, new)) {
133 nfs_list_remove_request(new); 139 nfs_list_remove_request(new);
134 nfs_readpage_release(new); 140 nfs_readpage_release(new, pgio.pg_error);
135 } 141 }
136 nfs_pageio_complete(&pgio); 142 nfs_pageio_complete(&pgio);
137 143
@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
153static void nfs_read_completion(struct nfs_pgio_header *hdr) 159static void nfs_read_completion(struct nfs_pgio_header *hdr)
154{ 160{
155 unsigned long bytes = 0; 161 unsigned long bytes = 0;
162 int error;
156 163
157 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 164 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
158 goto out; 165 goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
179 zero_user_segment(page, start, end); 186 zero_user_segment(page, start, end);
180 } 187 }
181 } 188 }
189 error = 0;
182 bytes += req->wb_bytes; 190 bytes += req->wb_bytes;
183 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { 191 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
184 if (bytes <= hdr->good_bytes) 192 if (bytes <= hdr->good_bytes)
185 nfs_page_group_set_uptodate(req); 193 nfs_page_group_set_uptodate(req);
194 else {
195 error = hdr->error;
196 xchg(&nfs_req_openctx(req)->error, error);
197 }
186 } else 198 } else
187 nfs_page_group_set_uptodate(req); 199 nfs_page_group_set_uptodate(req);
188 nfs_list_remove_request(req); 200 nfs_list_remove_request(req);
189 nfs_readpage_release(req); 201 nfs_readpage_release(req, error);
190 } 202 }
191out: 203out:
192 hdr->release(hdr); 204 hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
213 while (!list_empty(head)) { 225 while (!list_empty(head)) {
214 req = nfs_list_entry(head->next); 226 req = nfs_list_entry(head->next);
215 nfs_list_remove_request(req); 227 nfs_list_remove_request(req);
216 nfs_readpage_release(req); 228 nfs_readpage_release(req, error);
217 } 229 }
218} 230}
219 231
@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
337 goto out; 349 goto out;
338 } 350 }
339 351
352 xchg(&ctx->error, 0);
340 error = nfs_readpage_async(ctx, inode, page); 353 error = nfs_readpage_async(ctx, inode, page);
341 354 if (!error) {
355 error = wait_on_page_locked_killable(page);
356 if (!PageUptodate(page) && !error)
357 error = xchg(&ctx->error, 0);
358 }
342out: 359out:
343 put_nfs_open_context(ctx); 360 put_nfs_open_context(ctx);
344 return error; 361 return error;
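
Note: the synchronous read path now clears the per-context error before issuing I/O and uses xchg() as a fetch-and-clear afterwards, so only an error raised by this read is reported. In outline:

        xchg(&ctx->error, 0);                   /* drop any stale error */
        error = nfs_readpage_async(ctx, inode, page);
        if (!error) {
                error = wait_on_page_locked_killable(page);
                if (!PageUptodate(page) && !error)
                        error = xchg(&ctx->error, 0);   /* fetch-and-clear */
        }
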
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
372 zero_user_segment(page, len, PAGE_SIZE); 389 zero_user_segment(page, len, PAGE_SIZE);
373 if (!nfs_pageio_add_request(desc->pgio, new)) { 390 if (!nfs_pageio_add_request(desc->pgio, new)) {
374 nfs_list_remove_request(new); 391 nfs_list_remove_request(new);
375 nfs_readpage_release(new);
376 error = desc->pgio->pg_error; 392 error = desc->pgio->pg_error;
393 nfs_readpage_release(new, error);
377 goto out; 394 goto out;
378 } 395 }
379 return 0; 396 return 0;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 92d9cadc6102..85ca49549b39 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
58static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 58static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
59static const struct nfs_rw_ops nfs_rw_write_ops; 59static const struct nfs_rw_ops nfs_rw_write_ops;
60static void nfs_inode_remove_request(struct nfs_page *req);
60static void nfs_clear_request_commit(struct nfs_page *req); 61static void nfs_clear_request_commit(struct nfs_page *req);
61static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 62static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
62 struct inode *inode); 63 struct inode *inode);
@@ -591,23 +592,13 @@ release_request:
591 592
592static void nfs_write_error(struct nfs_page *req, int error) 593static void nfs_write_error(struct nfs_page *req, int error)
593{ 594{
595 nfs_set_pageerror(page_file_mapping(req->wb_page));
594 nfs_mapping_set_error(req->wb_page, error); 596 nfs_mapping_set_error(req->wb_page, error);
597 nfs_inode_remove_request(req);
595 nfs_end_page_writeback(req); 598 nfs_end_page_writeback(req);
596 nfs_release_request(req); 599 nfs_release_request(req);
597} 600}
598 601
599static bool
600nfs_error_is_fatal_on_server(int err)
601{
602 switch (err) {
603 case 0:
604 case -ERESTARTSYS:
605 case -EINTR:
606 return false;
607 }
608 return nfs_error_is_fatal(err);
609}
610
611/* 602/*
612 * Find an associated nfs write request, and prepare to flush it out 603 * Find an associated nfs write request, and prepare to flush it out
613 * May return an error if the user signalled nfs_wait_on_request(). 604 * May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
615static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 606static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
616 struct page *page) 607 struct page *page)
617{ 608{
618 struct address_space *mapping;
619 struct nfs_page *req; 609 struct nfs_page *req;
620 int ret = 0; 610 int ret = 0;
621 611
@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
630 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 620 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
631 621
632 /* If there is a fatal error that covers this write, just exit */ 622 /* If there is a fatal error that covers this write, just exit */
633 ret = 0; 623 ret = pgio->pg_error;
634 mapping = page_file_mapping(page); 624 if (nfs_error_is_fatal_on_server(ret))
635 if (test_bit(AS_ENOSPC, &mapping->flags) ||
636 test_bit(AS_EIO, &mapping->flags))
637 goto out_launder; 625 goto out_launder;
638 626
627 ret = 0;
639 if (!nfs_pageio_add_request(pgio, req)) { 628 if (!nfs_pageio_add_request(pgio, req)) {
640 ret = pgio->pg_error; 629 ret = pgio->pg_error;
641 /* 630 /*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
647 } else 636 } else
648 ret = -EAGAIN; 637 ret = -EAGAIN;
649 nfs_redirty_request(req); 638 nfs_redirty_request(req);
639 pgio->pg_error = 0;
650 } else 640 } else
651 nfs_add_stats(page_file_mapping(page)->host, 641 nfs_add_stats(page_file_mapping(page)->host,
652 NFSIOS_WRITEPAGES, 1); 642 NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
666 ret = nfs_page_async_flush(pgio, page); 656 ret = nfs_page_async_flush(pgio, page);
667 if (ret == -EAGAIN) { 657 if (ret == -EAGAIN) {
668 redirty_page_for_writepage(wbc, page); 658 redirty_page_for_writepage(wbc, page);
669 ret = 0; 659 ret = AOP_WRITEPAGE_ACTIVATE;
670 } 660 }
671 return ret; 661 return ret;
672} 662}
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
685 nfs_pageio_init_write(&pgio, inode, 0, 675 nfs_pageio_init_write(&pgio, inode, 0,
686 false, &nfs_async_write_completion_ops); 676 false, &nfs_async_write_completion_ops);
687 err = nfs_do_writepage(page, wbc, &pgio); 677 err = nfs_do_writepage(page, wbc, &pgio);
678 pgio.pg_error = 0;
688 nfs_pageio_complete(&pgio); 679 nfs_pageio_complete(&pgio);
689 if (err < 0) 680 if (err < 0)
690 return err; 681 return err;
691 if (pgio.pg_error < 0) 682 if (nfs_error_is_fatal(pgio.pg_error))
692 return pgio.pg_error; 683 return pgio.pg_error;
693 return 0; 684 return 0;
694} 685}
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
698 int ret; 689 int ret;
699 690
700 ret = nfs_writepage_locked(page, wbc); 691 ret = nfs_writepage_locked(page, wbc);
701 unlock_page(page); 692 if (ret != AOP_WRITEPAGE_ACTIVATE)
693 unlock_page(page);
702 return ret; 694 return ret;
703} 695}
704 696
@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
707 int ret; 699 int ret;
708 700
709 ret = nfs_do_writepage(page, wbc, data); 701 ret = nfs_do_writepage(page, wbc, data);
710 unlock_page(page); 702 if (ret != AOP_WRITEPAGE_ACTIVATE)
703 unlock_page(page);
711 return ret; 704 return ret;
712} 705}
713 706
@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
733 &nfs_async_write_completion_ops); 726 &nfs_async_write_completion_ops);
734 pgio.pg_io_completion = ioc; 727 pgio.pg_io_completion = ioc;
735 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 728 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
729 pgio.pg_error = 0;
736 nfs_pageio_complete(&pgio); 730 nfs_pageio_complete(&pgio);
737 nfs_io_completion_put(ioc); 731 nfs_io_completion_put(ioc);
738 732
739 if (err < 0) 733 if (err < 0)
740 goto out_err; 734 goto out_err;
741 err = pgio.pg_error; 735 err = pgio.pg_error;
742 if (err < 0) 736 if (nfs_error_is_fatal(err))
743 goto out_err; 737 goto out_err;
744 return 0; 738 return 0;
745out_err: 739out_err:
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 26ad75ae2be0..96352ab7bd81 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
571 */ 571 */
572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) 572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
573{ 573{
574 struct nfsd_net *nn = v; 574 struct nfsd_net *nn = m->private;
575 575
576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); 576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
577 seq_printf(m, "num entries: %u\n", 577 seq_printf(m, "num entries: %u\n",
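The one-line nfscache.c fix matters because single_open() stashes its data argument in m->private; the v parameter of a show callback is only an iterator token, not the private data. A sketch of the intended pairing, with hypothetical names:

    static int demo_stats_show(struct seq_file *m, void *v)
    {
            struct demo_net *nn = m->private;       /* not: = v */

            seq_printf(m, "max entries: %u\n", nn->max_entries);
            return 0;
    }

    static int demo_stats_open(struct inode *inode, struct file *file)
    {
            /* the third argument becomes m->private in demo_stats_show() */
            return single_open(file, demo_stats_show, inode->i_private);
    }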
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 13c548733860..3cf4f6aa48d6 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
1171 return inode; 1171 return inode;
1172} 1172}
1173 1173
1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
1175{ 1175{
1176 struct inode *inode; 1176 struct inode *inode;
1177 1177
1178 inode = nfsd_get_inode(dir->i_sb, mode); 1178 inode = nfsd_get_inode(dir->i_sb, mode);
1179 if (!inode) 1179 if (!inode)
1180 return -ENOMEM; 1180 return -ENOMEM;
1181 if (ncl) {
1182 inode->i_private = ncl;
1183 kref_get(&ncl->cl_ref);
1184 }
1181 d_add(dentry, inode); 1185 d_add(dentry, inode);
1182 inc_nlink(dir); 1186 inc_nlink(dir);
1183 fsnotify_mkdir(dir, dentry); 1187 fsnotify_mkdir(dir, dentry);
@@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc
1194 dentry = d_alloc_name(parent, name); 1198 dentry = d_alloc_name(parent, name);
1195 if (!dentry) 1199 if (!dentry)
1196 goto out_err; 1200 goto out_err;
1197 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600); 1201 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
1198 if (ret) 1202 if (ret)
1199 goto out_err; 1203 goto out_err;
1200 if (ncl) {
1201 d_inode(dentry)->i_private = ncl;
1202 kref_get(&ncl->cl_ref);
1203 }
1204out: 1204out:
1205 inode_unlock(dir); 1205 inode_unlock(dir);
1206 return dentry; 1206 return dentry;
1207out_err: 1207out_err:
1208 dput(dentry);
1208 dentry = ERR_PTR(ret); 1209 dentry = ERR_PTR(ret);
1209 goto out; 1210 goto out;
1210} 1211}
@@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode)
1214 struct nfsdfs_client *ncl = inode->i_private; 1215 struct nfsdfs_client *ncl = inode->i_private;
1215 1216
1216 inode->i_private = NULL; 1217 inode->i_private = NULL;
1217 synchronize_rcu();
1218 kref_put(&ncl->cl_ref, ncl->cl_release); 1218 kref_put(&ncl->cl_ref, ncl->cl_release);
1219} 1219}
1220 1220
1221
1222static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode) 1221static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
1223{ 1222{
1224 struct nfsdfs_client *nc = inode->i_private; 1223 struct nfsdfs_client *nc = inode->i_private;
@@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
1232{ 1231{
1233 struct nfsdfs_client *nc; 1232 struct nfsdfs_client *nc;
1234 1233
1235 rcu_read_lock(); 1234 inode_lock_shared(inode);
1236 nc = __get_nfsdfs_client(inode); 1235 nc = __get_nfsdfs_client(inode);
1237 rcu_read_unlock(); 1236 inode_unlock_shared(inode);
1238 return nc; 1237 return nc;
1239} 1238}
1240/* from __rpc_unlink */ 1239/* from __rpc_unlink */
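Three related fixes land in the nfsctl.c section: the nfsdfs_client reference is now attached to the inode before d_add() publishes the dentry, the error path gains the missing dput(), and the RCU read section (with its synchronize_rcu() on the clear side) is replaced by the inode lock. The getter pins i_private under inode_lock_shared() and takes a reference before dropping the lock, so the clearing path cannot free the object underneath it. A runnable userspace analogue of that handoff, using a rwlock and an atomic refcount (names invented):

    #include <pthread.h>
    #include <stdlib.h>

    struct client { int refs; };

    static pthread_rwlock_t slot_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct client *slot;

    /* Pin the pointer under the read lock and take a reference
     * before unlocking, so a concurrent clear cannot free it.
     */
    static struct client *slot_get(void)
    {
            struct client *c;

            pthread_rwlock_rdlock(&slot_lock);
            c = slot;
            if (c)
                    __atomic_add_fetch(&c->refs, 1, __ATOMIC_RELAXED);
            pthread_rwlock_unlock(&slot_lock);
            return c;
    }

    static void client_put(struct client *c)
    {
            if (c && __atomic_sub_fetch(&c->refs, 1, __ATOMIC_ACQ_REL) == 0)
                    free(c);
    }

    static void slot_clear(void)
    {
            struct client *c;

            pthread_rwlock_wrlock(&slot_lock);
            c = slot;
            slot = NULL;
            pthread_rwlock_unlock(&slot_lock);
            client_put(c);
    }

    int main(void)
    {
            slot = calloc(1, sizeof(*slot));
            slot->refs = 1;                 /* creator's reference */

            struct client *c = slot_get();  /* reader pins it */
            slot_clear();                   /* owner retracts and unrefs */
            client_put(c);                  /* reader drops its pin */
            return 0;
    }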
diff --git a/fs/read_write.c b/fs/read_write.c
index 1f5088dec566..5bbf587f5bc1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in,
1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; 1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
1812} 1812}
1813 1813
1814/* 1814/* Read a page's worth of file data into the page cache. */
1815 * Read a page's worth of file data into the page cache. Return the page
1816 * locked.
1817 */
1818static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) 1815static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1819{ 1816{
1820 struct page *page; 1817 struct page *page;
@@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1826 put_page(page); 1823 put_page(page);
1827 return ERR_PTR(-EIO); 1824 return ERR_PTR(-EIO);
1828 } 1825 }
1829 lock_page(page);
1830 return page; 1826 return page;
1831} 1827}
1832 1828
1833/* 1829/*
1830 * Lock two pages, ensuring that we lock in offset order if the pages are from
1831 * the same file.
1832 */
1833static void vfs_lock_two_pages(struct page *page1, struct page *page2)
1834{
1835 /* Always lock in order of increasing index. */
1836 if (page1->index > page2->index)
1837 swap(page1, page2);
1838
1839 lock_page(page1);
1840 if (page1 != page2)
1841 lock_page(page2);
1842}
1843
1844/* Unlock two pages, being careful not to unlock the same page twice. */
1845static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
1846{
1847 unlock_page(page1);
1848 if (page1 != page2)
1849 unlock_page(page2);
1850}
1851
1852/*
1834 * Compare extents of two files to see if they are the same. 1853 * Compare extents of two files to see if they are the same.
1835 * Caller must have locked both inodes to prevent write races. 1854 * Caller must have locked both inodes to prevent write races.
1836 */ 1855 */
@@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1867 dest_page = vfs_dedupe_get_page(dest, destoff); 1886 dest_page = vfs_dedupe_get_page(dest, destoff);
1868 if (IS_ERR(dest_page)) { 1887 if (IS_ERR(dest_page)) {
1869 error = PTR_ERR(dest_page); 1888 error = PTR_ERR(dest_page);
1870 unlock_page(src_page);
1871 put_page(src_page); 1889 put_page(src_page);
1872 goto out_error; 1890 goto out_error;
1873 } 1891 }
1892
1893 vfs_lock_two_pages(src_page, dest_page);
1894
1895 /*
1896 * Now that we've locked both pages, make sure they're still
1897 * mapped to the file data we're interested in. If not,
1898 * someone is invalidating pages on us and we lose.
1899 */
1900 if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
1901 src_page->mapping != src->i_mapping ||
1902 dest_page->mapping != dest->i_mapping) {
1903 same = false;
1904 goto unlock;
1905 }
1906
1874 src_addr = kmap_atomic(src_page); 1907 src_addr = kmap_atomic(src_page);
1875 dest_addr = kmap_atomic(dest_page); 1908 dest_addr = kmap_atomic(dest_page);
1876 1909
@@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1882 1915
1883 kunmap_atomic(dest_addr); 1916 kunmap_atomic(dest_addr);
1884 kunmap_atomic(src_addr); 1917 kunmap_atomic(src_addr);
1885 unlock_page(dest_page); 1918unlock:
1886 unlock_page(src_page); 1919 vfs_unlock_two_pages(src_page, dest_page);
1887 put_page(dest_page); 1920 put_page(dest_page);
1888 put_page(src_page); 1921 put_page(src_page);
1889 1922
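vfs_lock_two_pages() above is the standard deadlock-avoidance idiom: when two threads may each hold one lock while waiting on the other, imposing a global acquisition order (page index here; pointer value elsewhere in this diff) makes the cycle impossible, and the same-object check avoids a self-deadlock. The follow-up PageUptodate/mapping checks then guard against invalidation that raced in before the locks were taken. A runnable userspace analogue with two mutex-protected objects:

    #include <pthread.h>
    #include <stdio.h>

    struct obj { pthread_mutex_t lock; int val; };

    /* Always lock in order of increasing address; never lock twice. */
    static void lock_two(struct obj *a, struct obj *b)
    {
            if (a > b) { struct obj *t = a; a = b; b = t; }
            pthread_mutex_lock(&a->lock);
            if (a != b)
                    pthread_mutex_lock(&b->lock);
    }

    static void unlock_two(struct obj *a, struct obj *b)
    {
            pthread_mutex_unlock(&a->lock);
            if (a != b)
                    pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
            struct obj x = { PTHREAD_MUTEX_INITIALIZER, 1 };
            struct obj y = { PTHREAD_MUTEX_INITIALIZER, 2 };

            lock_two(&x, &y);       /* same order regardless of caller */
            printf("%d %d\n", x.val, y.val);
            unlock_two(&x, &y);
            return 0;
    }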
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 80d7301ab76d..c0b84e960b20 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -51,7 +51,7 @@
51static void shrink_liability(struct ubifs_info *c, int nr_to_write) 51static void shrink_liability(struct ubifs_info *c, int nr_to_write)
52{ 52{
53 down_read(&c->vfs_sb->s_umount); 53 down_read(&c->vfs_sb->s_umount);
54 writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE); 54 writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
55 up_read(&c->vfs_sb->s_umount); 55 up_read(&c->vfs_sb->s_umount);
56} 56}
57 57
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b52624e28fa1..3b4b4114f208 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph) 129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
130{ 130{
131 if (orph->del) { 131 if (orph->del) {
132 spin_unlock(&c->orphan_lock);
133 dbg_gen("deleted twice ino %lu", orph->inum); 132 dbg_gen("deleted twice ino %lu", orph->inum);
134 return; 133 return;
135 } 134 }
@@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
138 orph->del = 1; 137 orph->del = 1;
139 orph->dnext = c->orph_dnext; 138 orph->dnext = c->orph_dnext;
140 c->orph_dnext = orph; 139 c->orph_dnext = orph;
141 spin_unlock(&c->orphan_lock);
142 dbg_gen("delete later ino %lu", orph->inum); 140 dbg_gen("delete later ino %lu", orph->inum);
143 return; 141 return;
144 } 142 }
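The two deleted spin_unlock() calls fix a lock-balance bug: orphan_delete() runs with c->orphan_lock already held by its caller, so unlocking on the early-return paths released a lock the function did not own and set up a later double-unlock. The usual convention keeps each lock/unlock pair at one level, as in this runnable sketch:

    #include <pthread.h>

    struct orphan { int deleted; };

    static pthread_mutex_t orphan_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds orphan_lock; every path out must leave it held. */
    static void orphan_delete_locked(struct orphan *o)
    {
            if (o->deleted)
                    return;         /* no unlock here */
            o->deleted = 1;
    }

    static void orphan_drop(struct orphan *o)
    {
            pthread_mutex_lock(&orphan_lock);
            orphan_delete_locked(o);
            pthread_mutex_unlock(&orphan_lock);
    }

    int main(void)
    {
            struct orphan o = { 0 };

            orphan_drop(&o);
            orphan_drop(&o);        /* second call is a no-op, still balanced */
            return 0;
    }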
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 2c0803b0ac3a..8c1d571334bc 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -609,6 +609,10 @@ static int init_constants_early(struct ubifs_info *c)
609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
610 if (c->max_bu_buf_len > c->leb_size) 610 if (c->max_bu_buf_len > c->leb_size)
611 c->max_bu_buf_len = c->leb_size; 611 c->max_bu_buf_len = c->leb_size;
612
613 /* Log is ready, preserve one LEB for commits. */
614 c->min_log_bytes = c->leb_size;
615
612 return 0; 616 return 0;
613} 617}
614 618
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ccbdbd62f0d8..fe6d804a38dc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
880 /* len == 0 means wake all */ 880 /* len == 0 means wake all */
881 struct userfaultfd_wake_range range = { .len = 0, }; 881 struct userfaultfd_wake_range range = { .len = 0, };
882 unsigned long new_flags; 882 unsigned long new_flags;
883 bool still_valid;
883 884
884 WRITE_ONCE(ctx->released, true); 885 WRITE_ONCE(ctx->released, true);
885 886
@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
895 * taking the mmap_sem for writing. 896 * taking the mmap_sem for writing.
896 */ 897 */
897 down_write(&mm->mmap_sem); 898 down_write(&mm->mmap_sem);
898 if (!mmget_still_valid(mm)) 899 still_valid = mmget_still_valid(mm);
899 goto skip_mm;
900 prev = NULL; 900 prev = NULL;
901 for (vma = mm->mmap; vma; vma = vma->vm_next) { 901 for (vma = mm->mmap; vma; vma = vma->vm_next) {
902 cond_resched(); 902 cond_resched();
@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
907 continue; 907 continue;
908 } 908 }
909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); 909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
910 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, 910 if (still_valid) {
911 new_flags, vma->anon_vma, 911 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
912 vma->vm_file, vma->vm_pgoff, 912 new_flags, vma->anon_vma,
913 vma_policy(vma), 913 vma->vm_file, vma->vm_pgoff,
914 NULL_VM_UFFD_CTX); 914 vma_policy(vma),
915 if (prev) 915 NULL_VM_UFFD_CTX);
916 vma = prev; 916 if (prev)
917 else 917 vma = prev;
918 prev = vma; 918 else
919 prev = vma;
920 }
919 vma->vm_flags = new_flags; 921 vma->vm_flags = new_flags;
920 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 922 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
921 } 923 }
922skip_mm:
923 up_write(&mm->mmap_sem); 924 up_write(&mm->mmap_sem);
924 mmput(mm); 925 mmput(mm);
925wakeup: 926wakeup:
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7fcf7569743f..7bd7534f5051 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -547,63 +547,12 @@ xfs_file_compat_ioctl(
547 struct inode *inode = file_inode(filp); 547 struct inode *inode = file_inode(filp);
548 struct xfs_inode *ip = XFS_I(inode); 548 struct xfs_inode *ip = XFS_I(inode);
549 struct xfs_mount *mp = ip->i_mount; 549 struct xfs_mount *mp = ip->i_mount;
550 void __user *arg = (void __user *)p; 550 void __user *arg = compat_ptr(p);
551 int error; 551 int error;
552 552
553 trace_xfs_file_compat_ioctl(ip); 553 trace_xfs_file_compat_ioctl(ip);
554 554
555 switch (cmd) { 555 switch (cmd) {
556 /* No size or alignment issues on any arch */
557 case XFS_IOC_DIOINFO:
558 case XFS_IOC_FSGEOMETRY_V4:
559 case XFS_IOC_FSGEOMETRY:
560 case XFS_IOC_AG_GEOMETRY:
561 case XFS_IOC_FSGETXATTR:
562 case XFS_IOC_FSSETXATTR:
563 case XFS_IOC_FSGETXATTRA:
564 case XFS_IOC_FSSETDM:
565 case XFS_IOC_GETBMAP:
566 case XFS_IOC_GETBMAPA:
567 case XFS_IOC_GETBMAPX:
568 case XFS_IOC_FSCOUNTS:
569 case XFS_IOC_SET_RESBLKS:
570 case XFS_IOC_GET_RESBLKS:
571 case XFS_IOC_FSGROWFSLOG:
572 case XFS_IOC_GOINGDOWN:
573 case XFS_IOC_ERROR_INJECTION:
574 case XFS_IOC_ERROR_CLEARALL:
575 case FS_IOC_GETFSMAP:
576 case XFS_IOC_SCRUB_METADATA:
577 case XFS_IOC_BULKSTAT:
578 case XFS_IOC_INUMBERS:
579 return xfs_file_ioctl(filp, cmd, p);
580#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
581 /*
582 * These are handled fine if no alignment issues. To support x32
583 * which uses native 64-bit alignment we must emit these cases in
584 * addition to the ia-32 compat set below.
585 */
586 case XFS_IOC_ALLOCSP:
587 case XFS_IOC_FREESP:
588 case XFS_IOC_RESVSP:
589 case XFS_IOC_UNRESVSP:
590 case XFS_IOC_ALLOCSP64:
591 case XFS_IOC_FREESP64:
592 case XFS_IOC_RESVSP64:
593 case XFS_IOC_UNRESVSP64:
594 case XFS_IOC_FSGEOMETRY_V1:
595 case XFS_IOC_FSGROWFSDATA:
596 case XFS_IOC_FSGROWFSRT:
597 case XFS_IOC_ZERO_RANGE:
598#ifdef CONFIG_X86_X32
599 /*
600 * x32 special: this gets a different cmd number from the ia-32 compat
601 * case below; the associated data will match native 64-bit alignment.
602 */
603 case XFS_IOC_SWAPEXT:
604#endif
605 return xfs_file_ioctl(filp, cmd, p);
606#endif
607#if defined(BROKEN_X86_ALIGNMENT) 556#if defined(BROKEN_X86_ALIGNMENT)
608 case XFS_IOC_ALLOCSP_32: 557 case XFS_IOC_ALLOCSP_32:
609 case XFS_IOC_FREESP_32: 558 case XFS_IOC_FREESP_32:
@@ -705,6 +654,7 @@ xfs_file_compat_ioctl(
705 case XFS_IOC_FSSETDM_BY_HANDLE_32: 654 case XFS_IOC_FSSETDM_BY_HANDLE_32:
706 return xfs_compat_fssetdm_by_handle(filp, arg); 655 return xfs_compat_fssetdm_by_handle(filp, arg);
707 default: 656 default:
708 return -ENOIOCTLCMD; 657 /* try the native version */
658 return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
709 } 659 }
710} 660}
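Two things happen in the xfs_ioctl32.c hunk: the user pointer is now derived with compat_ptr(), which performs the architecture-correct 32-to-64-bit pointer conversion, and the long allowlist of pass-through commands is replaced by a default case that simply tries the native handler. A compact userspace sketch of that delegate-by-default dispatcher shape (command names are invented):

    #include <stdio.h>

    enum { CMD_PLAIN = 1, CMD_NEEDS_FIXUP = 2 };

    static long native_ioctl(unsigned int cmd, unsigned long arg)
    {
            printf("native: cmd=%u arg=%lu\n", cmd, arg);
            return 0;
    }

    static long compat_ioctl(unsigned int cmd, unsigned long arg)
    {
            switch (cmd) {
            case CMD_NEEDS_FIXUP:
                    /* translate the 32-bit layout, then delegate */
                    return native_ioctl(CMD_PLAIN, arg);
            default:
                    /* try the native version instead of failing outright */
                    return native_ioctl(cmd, arg);
            }
    }

    int main(void)
    {
            compat_ioctl(CMD_PLAIN, 42);
            compat_ioctl(CMD_NEEDS_FIXUP, 7);
            return 0;
    }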
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff3c1fae5357..fe285d123d69 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -793,6 +793,7 @@ xfs_setattr_nonsize(
793 793
794out_cancel: 794out_cancel:
795 xfs_trans_cancel(tp); 795 xfs_trans_cancel(tp);
796 xfs_iunlock(ip, XFS_ILOCK_EXCL);
796out_dqrele: 797out_dqrele:
797 xfs_qm_dqrele(udqp); 798 xfs_qm_dqrele(udqp);
798 xfs_qm_dqrele(gdqp); 799 xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0c954cad7449..a339bd5fa260 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,7 +32,7 @@ xfs_break_leased_layouts(
32 struct xfs_inode *ip = XFS_I(inode); 32 struct xfs_inode *ip = XFS_I(inode);
33 int error; 33 int error;
34 34
35 while ((error = break_layout(inode, false) == -EWOULDBLOCK)) { 35 while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
36 xfs_iunlock(ip, *iolock); 36 xfs_iunlock(ip, *iolock);
37 *did_unlock = true; 37 *did_unlock = true;
38 error = break_layout(inode, true); 38 error = break_layout(inode, true);
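The xfs_pnfs.c change is a pure operator-precedence fix: == binds tighter than =, so the old form assigned the boolean result of the comparison to error instead of the function's return value, and the loop condition tested the wrong thing. This tiny program shows the difference:

    #include <stdio.h>

    static int f(void) { return -11; }  /* stand-in for -EWOULDBLOCK */

    int main(void)
    {
            int error;

            error = (f() == -11);       /* buggy form: error becomes 1 */
            printf("without parens: error=%d\n", error);

            if ((error = f()) == -11)   /* fixed form: error becomes -11 */
                    printf("with parens:    error=%d\n", error);
            return 0;
    }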
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c4ec7afd1170..edbe37b7f636 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1190,11 +1190,11 @@ xfs_reflink_remap_blocks(
1190} 1190}
1191 1191
1192/* 1192/*
1193 * Grab the exclusive iolock for a data copy from src to dest, making 1193 * Grab the exclusive iolock for a data copy from src to dest, making sure to
1194 * sure to abide vfs locking order (lowest pointer value goes first) and 1194 * abide vfs locking order (lowest pointer value goes first) and breaking the
1195 * breaking the pnfs layout leases on dest before proceeding. The loop 1195 * layout leases before proceeding. The loop is needed because we cannot call
1196 * is needed because we cannot call the blocking break_layout() with the 1196 * the blocking break_layout() with the iolocks held, and therefore have to
1197 * src iolock held, and therefore have to back out both locks. 1197 * back out both locks.
1198 */ 1198 */
1199static int 1199static int
1200xfs_iolock_two_inodes_and_break_layout( 1200xfs_iolock_two_inodes_and_break_layout(
@@ -1203,33 +1203,44 @@ xfs_iolock_two_inodes_and_break_layout(
1203{ 1203{
1204 int error; 1204 int error;
1205 1205
1206retry: 1206 if (src > dest)
1207 if (src < dest) { 1207 swap(src, dest);
1208 inode_lock_shared(src);
1209 inode_lock_nested(dest, I_MUTEX_NONDIR2);
1210 } else {
1211 /* src >= dest */
1212 inode_lock(dest);
1213 }
1214 1208
1215 error = break_layout(dest, false); 1209retry:
1216 if (error == -EWOULDBLOCK) { 1210 /* Wait to break both inodes' layouts before we start locking. */
1217 inode_unlock(dest); 1211 error = break_layout(src, true);
1218 if (src < dest) 1212 if (error)
1219 inode_unlock_shared(src); 1213 return error;
1214 if (src != dest) {
1220 error = break_layout(dest, true); 1215 error = break_layout(dest, true);
1221 if (error) 1216 if (error)
1222 return error; 1217 return error;
1223 goto retry;
1224 } 1218 }
1219
1220 /* Lock one inode and make sure nobody got in and leased it. */
1221 inode_lock(src);
1222 error = break_layout(src, false);
1225 if (error) { 1223 if (error) {
1224 inode_unlock(src);
1225 if (error == -EWOULDBLOCK)
1226 goto retry;
1227 return error;
1228 }
1229
1230 if (src == dest)
1231 return 0;
1232
1233 /* Lock the other inode and make sure nobody got in and leased it. */
1234 inode_lock_nested(dest, I_MUTEX_NONDIR2);
1235 error = break_layout(dest, false);
1236 if (error) {
1237 inode_unlock(src);
1226 inode_unlock(dest); 1238 inode_unlock(dest);
1227 if (src < dest) 1239 if (error == -EWOULDBLOCK)
1228 inode_unlock_shared(src); 1240 goto retry;
1229 return error; 1241 return error;
1230 } 1242 }
1231 if (src > dest) 1243
1232 inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
1233 return 0; 1244 return 0;
1234} 1245}
1235 1246
@@ -1247,10 +1258,10 @@ xfs_reflink_remap_unlock(
1247 1258
1248 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); 1259 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
1249 if (!same_inode) 1260 if (!same_inode)
1250 xfs_iunlock(src, XFS_MMAPLOCK_SHARED); 1261 xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
1251 inode_unlock(inode_out); 1262 inode_unlock(inode_out);
1252 if (!same_inode) 1263 if (!same_inode)
1253 inode_unlock_shared(inode_in); 1264 inode_unlock(inode_in);
1254} 1265}
1255 1266
1256/* 1267/*
@@ -1325,7 +1336,7 @@ xfs_reflink_remap_prep(
1325 if (same_inode) 1336 if (same_inode)
1326 xfs_ilock(src, XFS_MMAPLOCK_EXCL); 1337 xfs_ilock(src, XFS_MMAPLOCK_EXCL);
1327 else 1338 else
1328 xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest, 1339 xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
1329 XFS_MMAPLOCK_EXCL); 1340 XFS_MMAPLOCK_EXCL);
1330 1341
1331 /* Check file eligibility and prepare for block sharing. */ 1342 /* Check file eligibility and prepare for block sharing. */
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 5e58bb29b1a3..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
30 30
31static inline void ceph_buffer_put(struct ceph_buffer *b) 31static inline void ceph_buffer_put(struct ceph_buffer *b)
32{ 32{
33 kref_put(&b->kref, ceph_buffer_release); 33 if (b)
34 kref_put(&b->kref, ceph_buffer_release);
34} 35}
35 36
36extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); 37extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
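The ceph_buffer_put() change makes the put NULL-tolerant, mirroring kfree(NULL): error paths can then release unconditionally without adding their own checks at every call site. A minimal single-threaded userspace equivalent (the refcount here is deliberately non-atomic):

    #include <stdlib.h>

    struct buf { int refs; void *data; };

    /* Safe to call with NULL, like free()/kfree(). */
    static void buf_put(struct buf *b)
    {
            if (!b)
                    return;
            if (--b->refs == 0) {
                    free(b->data);
                    free(b);
            }
    }

    int main(void)
    {
            struct buf *b = NULL;
            buf_put(b);                     /* no-op on NULL */

            b = calloc(1, sizeof(*b));
            b->refs = 1;
            b->data = malloc(16);
            buf_put(b);                     /* drops the last ref, frees */
            return 0;
    }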
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index c05d4e661489..03f8e98e3bcc 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
160static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, 160static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
161 gfp_t gfp) 161 gfp_t gfp)
162{ 162{
163 int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; 163 return NULL;
164 size_t align = get_order(PAGE_ALIGN(size));
165
166 return alloc_pages_node(node, gfp, align);
167} 164}
168 165
169static inline void dma_free_contiguous(struct device *dev, struct page *page, 166static inline void dma_free_contiguous(struct device *dev, struct page *page,
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 40915b461f18..f757a58191a6 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -241,30 +241,6 @@ static inline int irq_to_gpio(unsigned irq)
241 return -EINVAL; 241 return -EINVAL;
242} 242}
243 243
244static inline int
245gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
246 unsigned int gpio_offset, unsigned int pin_offset,
247 unsigned int npins)
248{
249 WARN_ON(1);
250 return -EINVAL;
251}
252
253static inline int
254gpiochip_add_pingroup_range(struct gpio_chip *chip,
255 struct pinctrl_dev *pctldev,
256 unsigned int gpio_offset, const char *pin_group)
257{
258 WARN_ON(1);
259 return -EINVAL;
260}
261
262static inline void
263gpiochip_remove_pin_ranges(struct gpio_chip *chip)
264{
265 WARN_ON(1);
266}
267
268static inline int devm_gpio_request(struct device *dev, unsigned gpio, 244static inline int devm_gpio_request(struct device *dev, unsigned gpio,
269 const char *label) 245 const char *label)
270{ 246{
diff --git a/include/linux/key.h b/include/linux/key.h
index 91f391cd272e..50028338a4cc 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -94,11 +94,11 @@ struct keyring_index_key {
94 union { 94 union {
95 struct { 95 struct {
96#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */ 96#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
97 u8 desc_len; 97 u16 desc_len;
98 char desc[sizeof(long) - 1]; /* First few chars of description */ 98 char desc[sizeof(long) - 2]; /* First few chars of description */
99#else 99#else
100 char desc[sizeof(long) - 1]; /* First few chars of description */ 100 char desc[sizeof(long) - 2]; /* First few chars of description */
101 u8 desc_len; 101 u16 desc_len;
102#endif 102#endif
103 }; 103 };
104 unsigned long x; 104 unsigned long x;
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index cbd9d8495690..88e1e6304a71 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
117unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, 117unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
118 resource_size_t hw_addr, resource_size_t size); 118 resource_size_t hw_addr, resource_size_t size);
119int logic_pio_register_range(struct logic_pio_hwaddr *newrange); 119int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
120void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
120resource_size_t logic_pio_to_hwaddr(unsigned long pio); 121resource_size_t logic_pio_to_hwaddr(unsigned long pio);
121unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); 122unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
122 123
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index ce9839c8bc1a..c2f056b5766d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -446,11 +446,11 @@ enum {
446}; 446};
447 447
448enum { 448enum {
449 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20, 449 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
450}; 450};
451 451
452enum { 452enum {
453 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20, 453 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
454}; 454};
455 455
456enum { 456enum {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ec571fd7fcf8..b8b570c30b5e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10054,9 +10054,8 @@ struct mlx5_ifc_tls_static_params_bits {
10054}; 10054};
10055 10055
10056struct mlx5_ifc_tls_progress_params_bits { 10056struct mlx5_ifc_tls_progress_params_bits {
10057 u8 valid[0x1]; 10057 u8 reserved_at_0[0x8];
10058 u8 reserved_at_1[0x7]; 10058 u8 tisn[0x18];
10059 u8 pd[0x18];
10060 10059
10061 u8 next_record_tcp_sn[0x20]; 10060 u8 next_record_tcp_sn[0x20];
10062 10061
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d77d717c620c..3f38c30d2f13 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -215,8 +215,9 @@ enum node_stat_item {
215 NR_INACTIVE_FILE, /* " " " " " */ 215 NR_INACTIVE_FILE, /* " " " " " */
216 NR_ACTIVE_FILE, /* " " " " " */ 216 NR_ACTIVE_FILE, /* " " " " " */
217 NR_UNEVICTABLE, /* " " " " " */ 217 NR_UNEVICTABLE, /* " " " " " */
218 NR_SLAB_RECLAIMABLE, 218 NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
219 NR_SLAB_UNRECLAIMABLE, 219 NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
220 * memcg_flush_percpu_vmstats() first. */
220 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ 221 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
221 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ 222 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
222 WORKINGSET_NODES, 223 WORKINGSET_NODES,
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index 7a6871ac8784..74c6f9241944 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -4,6 +4,9 @@
4 * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> 4 * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
5 */ 5 */
6 6
7#ifndef _NF_CONNTRACK_H323_TYPES_H
8#define _NF_CONNTRACK_H323_TYPES_H
9
7typedef struct TransportAddress_ipAddress { /* SEQUENCE */ 10typedef struct TransportAddress_ipAddress { /* SEQUENCE */
8 int options; /* No use */ 11 int options; /* No use */
9 unsigned int ip; 12 unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */
931 InfoRequestResponse infoRequestResponse; 934 InfoRequestResponse infoRequestResponse;
932 }; 935 };
933} RasMessage; 936} RasMessage;
937
938#endif /* _NF_CONNTRACK_H323_TYPES_H */
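The added guard is the standard protection for a header full of type definitions, which would otherwise produce redefinition errors if included twice. The same pattern, with a hypothetical name:

    /* demo_types.h */
    #ifndef DEMO_TYPES_H
    #define DEMO_TYPES_H

    typedef struct demo_addr {
            unsigned int ip;
    } demo_addr_t;

    #endif /* DEMO_TYPES_H */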
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 462b90b73f93..2fb9c8ffaf10 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1107,6 +1107,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
1107int genphy_c45_read_mdix(struct phy_device *phydev); 1107int genphy_c45_read_mdix(struct phy_device *phydev);
1108int genphy_c45_pma_read_abilities(struct phy_device *phydev); 1108int genphy_c45_pma_read_abilities(struct phy_device *phydev);
1109int genphy_c45_read_status(struct phy_device *phydev); 1109int genphy_c45_read_status(struct phy_device *phydev);
1110int genphy_c45_config_aneg(struct phy_device *phydev);
1110 1111
1111/* The gen10g_* functions are the old Clause 45 stub */ 1112/* The gen10g_* functions are the old Clause 45 stub */
1112int gen10g_config_aneg(struct phy_device *phydev); 1113int gen10g_config_aneg(struct phy_device *phydev);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b5d99482d3fe..1a5f88316b08 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -282,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
282extern void exit_signals(struct task_struct *tsk); 282extern void exit_signals(struct task_struct *tsk);
283extern void kernel_sigaction(int, __sighandler_t); 283extern void kernel_sigaction(int, __sighandler_t);
284 284
285#define SIG_KTHREAD ((__force __sighandler_t)2)
286#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
287
285static inline void allow_signal(int sig) 288static inline void allow_signal(int sig)
286{ 289{
287 /* 290 /*
@@ -289,7 +292,17 @@ static inline void allow_signal(int sig)
289 * know it'll be handled, so that they don't get converted to 292 * know it'll be handled, so that they don't get converted to
290 * SIGKILL or just silently dropped. 293 * SIGKILL or just silently dropped.
291 */ 294 */
292 kernel_sigaction(sig, (__force __sighandler_t)2); 295 kernel_sigaction(sig, SIG_KTHREAD);
296}
297
298static inline void allow_kernel_signal(int sig)
299{
300 /*
301 * Kernel threads handle their own signals. Let the signal code
302 * know signals sent by the kernel will be handled, so that they
303 * don't get silently dropped.
304 */
305 kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
293} 306}
294 307
295static inline void disallow_signal(int sig) 308static inline void disallow_signal(int sig)
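allow_signal() already installed the magic value 2 as a marker meaning, roughly, "a kernel thread will handle this signal itself"; the hunk names that value SIG_KTHREAD and adds SIG_KTHREAD_KERNEL so kernel-originated signals can be allowed separately. A sketch of why named sentinels beat bare casts scattered across call sites, using invented names:

    #include <stdio.h>

    typedef void (*demo_handler_t)(int);

    /* Named sentinels instead of bare magic-number casts. */
    #define DEMO_SIG_KTHREAD        ((demo_handler_t)2)
    #define DEMO_SIG_KTHREAD_KERNEL ((demo_handler_t)3)

    static void describe(demo_handler_t h)
    {
            if (h == DEMO_SIG_KTHREAD)
                    printf("kthread handles signals from any sender\n");
            else if (h == DEMO_SIG_KTHREAD_KERNEL)
                    printf("kthread handles kernel-sent signals only\n");
    }

    int main(void)
    {
            describe(DEMO_SIG_KTHREAD);
            describe(DEMO_SIG_KTHREAD_KERNEL);
            return 0;
    }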
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d8af86d995d6..ba5583522d24 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1374,6 +1374,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1374 to->l4_hash = from->l4_hash; 1374 to->l4_hash = from->l4_hash;
1375}; 1375};
1376 1376
1377static inline void skb_copy_decrypted(struct sk_buff *to,
1378 const struct sk_buff *from)
1379{
1380#ifdef CONFIG_TLS_DEVICE
1381 to->decrypted = from->decrypted;
1382#endif
1383}
1384
1377#ifdef NET_SKBUFF_DATA_USES_OFFSET 1385#ifdef NET_SKBUFF_DATA_USES_OFFSET
1378static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1386static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1379{ 1387{
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 97523818cb14..fc0bed59fc84 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -292,6 +292,9 @@ struct ucred {
292#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ 292#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
293#define MSG_EOF MSG_FIN 293#define MSG_EOF MSG_FIN
294#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ 294#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
295#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry
296 * plain text and require encryption
297 */
295 298
296#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ 299#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
297#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 300#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index baa3ecdb882f..27536b961552 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *);
98 98
99struct rpc_call_ops { 99struct rpc_call_ops {
100 void (*rpc_call_prepare)(struct rpc_task *, void *); 100 void (*rpc_call_prepare)(struct rpc_task *, void *);
101 void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
102 void (*rpc_call_done)(struct rpc_task *, void *); 101 void (*rpc_call_done)(struct rpc_task *, void *);
103 void (*rpc_count_stats)(struct rpc_task *, void *); 102 void (*rpc_count_stats)(struct rpc_task *, void *);
104 void (*rpc_release)(void *); 103 void (*rpc_release)(void *);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 7acb953298a7..84ff2844df2a 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -57,6 +57,7 @@ struct tk_read_base {
57 * @cs_was_changed_seq: The sequence number of clocksource change events 57 * @cs_was_changed_seq: The sequence number of clocksource change events
58 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second 58 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
59 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds 59 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
60 * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
60 * @cycle_interval: Number of clock cycles in one NTP interval 61 * @cycle_interval: Number of clock cycles in one NTP interval
61 * @xtime_interval: Number of clock shifted nano seconds in one NTP 62 * @xtime_interval: Number of clock shifted nano seconds in one NTP
62 * interval. 63 * interval.
@@ -84,6 +85,9 @@ struct tk_read_base {
84 * 85 *
85 * wall_to_monotonic is no longer the boot time, getboottime must be 86 * wall_to_monotonic is no longer the boot time, getboottime must be
86 * used instead. 87 * used instead.
88 *
89 * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
90 * accelerate the VDSO update for CLOCK_BOOTTIME.
87 */ 91 */
88struct timekeeper { 92struct timekeeper {
89 struct tk_read_base tkr_mono; 93 struct tk_read_base tkr_mono;
@@ -99,6 +103,7 @@ struct timekeeper {
99 u8 cs_was_changed_seq; 103 u8 cs_was_changed_seq;
100 ktime_t next_leap_ktime; 104 ktime_t next_leap_ktime;
101 u64 raw_sec; 105 u64 raw_sec;
106 struct timespec64 monotonic_to_boot;
102 107
103 /* The following members are for timekeeping internal use */ 108 /* The following members are for timekeeping internal use */
104 u64 cycle_interval; 109 u64 cycle_interval;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5150436783e8..30a8cdcfd4a4 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -548,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
548 548
549#define is_signed_type(type) (((type)(-1)) < (type)1) 549#define is_signed_type(type) (((type)(-1)) < (type)1)
550 550
551int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
551int trace_set_clr_event(const char *system, const char *event, int set); 552int trace_set_clr_event(const char *system, const char *event, int set);
552 553
553/* 554/*
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index f37d12877754..adcc6a97db61 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -308,6 +308,7 @@ do { \
308 \ 308 \
309 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 309 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
310 R##_e = X##_e; \ 310 R##_e = X##_e; \
311 /* Fall through */ \
311 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \ 312 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
312 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ 313 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
313 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ 314 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
@@ -318,6 +319,7 @@ do { \
318 \ 319 \
319 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \ 320 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
320 R##_e = Y##_e; \ 321 R##_e = Y##_e; \
322 /* Fall through */ \
321 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \ 323 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
322 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ 324 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
323 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ 325 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
@@ -415,6 +417,7 @@ do { \
415 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ 417 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
416 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ 418 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
417 R##_s = X##_s; \ 419 R##_s = X##_s; \
420 /* Fall through */ \
418 \ 421 \
419 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \ 422 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
420 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ 423 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
@@ -428,6 +431,7 @@ do { \
428 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ 431 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
429 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ 432 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
430 R##_s = Y##_s; \ 433 R##_s = Y##_s; \
434 /* Fall through */ \
431 \ 435 \
432 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \ 436 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
433 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 437 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
@@ -493,6 +497,7 @@ do { \
493 \ 497 \
494 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 498 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
495 FP_SET_EXCEPTION(FP_EX_DIVZERO); \ 499 FP_SET_EXCEPTION(FP_EX_DIVZERO); \
500 /* Fall through */ \
496 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ 501 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
497 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ 502 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
498 R##_c = FP_CLS_INF; \ 503 R##_c = FP_CLS_INF; \
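The /* Fall through */ comments added above are not cosmetic: with -Wimplicit-fallthrough, as enabled for kernel builds, GCC warns on any case label that flows into the next one unless the fall-through is annotated by such a comment or an attribute. A runnable example of the annotated form:

    #include <stdio.h>

    static const char *classify(int n)
    {
            switch (n) {
            case 0:
                    printf("zero is treated as small\n");
                    /* Fall through */
            case 1:
            case 2:
                    return "small";
            default:
                    return "large";
            }
    }

    int main(void)
    {
            printf("%s\n", classify(0));
            return 0;
    }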
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c61a1bf4e3de..3a1a72990fce 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -15,6 +15,7 @@
15struct tcf_idrinfo { 15struct tcf_idrinfo {
16 struct mutex lock; 16 struct mutex lock;
17 struct idr action_idr; 17 struct idr action_idr;
18 struct net *net;
18}; 19};
19 20
20struct tc_action_ops; 21struct tc_action_ops;
@@ -108,7 +109,7 @@ struct tc_action_net {
108}; 109};
109 110
110static inline 111static inline
111int tc_action_net_init(struct tc_action_net *tn, 112int tc_action_net_init(struct net *net, struct tc_action_net *tn,
112 const struct tc_action_ops *ops) 113 const struct tc_action_ops *ops)
113{ 114{
114 int err = 0; 115 int err = 0;
@@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn,
117 if (!tn->idrinfo) 118 if (!tn->idrinfo)
118 return -ENOMEM; 119 return -ENOMEM;
119 tn->ops = ops; 120 tn->ops = ops;
121 tn->idrinfo->net = net;
120 mutex_init(&tn->idrinfo->lock); 122 mutex_init(&tn->idrinfo->lock);
121 idr_init(&tn->idrinfo->action_idr); 123 idr_init(&tn->idrinfo->action_idr);
122 return err; 124 return err;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index becdad576859..3f62b347b04a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
206 unsigned int len) 206 unsigned int len)
207{ 207{
208 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len) 208 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
209 return -EINVAL; 209 return 0;
210 210
211 return pskb_may_pull(skb, len); 211 return pskb_may_pull(skb, len);
212} 212}
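The ipv6_mc_may_pull() fix is about the function's implicit contract: callers use it like pskb_may_pull(), treating the result as a boolean where nonzero means success. Returning -EINVAL on a too-short packet therefore read as "success"; returning 0 restores the intended semantics. An illustration of the contract:

    #include <stdio.h>

    /* Boolean-style contract: nonzero = ok to pull, 0 = reject. */
    static int may_pull_demo(unsigned int have, unsigned int need)
    {
            if (have < need)
                    return 0;       /* was -EINVAL, which is truthy */
            return 1;
    }

    int main(void)
    {
            if (!may_pull_demo(4, 8))
                    printf("short packet correctly rejected\n");
            return 0;
    }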
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ded574b32c20..ffc95b382eb5 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -278,6 +278,7 @@ struct hci_dev {
278 __u16 conn_info_min_age; 278 __u16 conn_info_min_age;
279 __u16 conn_info_max_age; 279 __u16 conn_info_max_age;
280 __u16 auth_payload_timeout; 280 __u16 auth_payload_timeout;
281 __u8 min_enc_key_size;
281 __u8 ssp_debug_mode; 282 __u8 ssp_debug_mode;
282 __u8 hw_error_code; 283 __u8 hw_error_code;
283 __u32 clock; 284 __u32 clock;
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 010f26b31c89..bac79e817776 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -171,7 +171,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
171void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, 171void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
172 struct sk_buff *parent); 172 struct sk_buff *parent);
173void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, 173void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
174 void *reasm_data); 174 void *reasm_data, bool try_coalesce);
175struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); 175struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
176 176
177#endif 177#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 4a9da951a794..ab40d7afdc54 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -52,7 +52,7 @@ struct bpf_prog;
52#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) 52#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
53 53
54struct net { 54struct net {
55 refcount_t passive; /* To decided when the network 55 refcount_t passive; /* To decide when the network
56 * namespace should be freed. 56 * namespace should be freed.
57 */ 57 */
58 refcount_t count; /* To decided when the network 58 refcount_t count; /* To decided when the network
@@ -61,7 +61,6 @@ struct net {
61 spinlock_t rules_mod_lock; 61 spinlock_t rules_mod_lock;
62 62
63 u32 hash_mix; 63 u32 hash_mix;
64 atomic64_t cookie_gen;
65 64
66 struct list_head list; /* list of network namespaces */ 65 struct list_head list; /* list of network namespaces */
67 struct list_head exit_list; /* To linked to call pernet exit 66 struct list_head exit_list; /* To linked to call pernet exit
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 9b624566b82d..475d6f28ca67 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -421,8 +421,7 @@ struct nft_set {
421 unsigned char *udata; 421 unsigned char *udata;
422 /* runtime data below here */ 422 /* runtime data below here */
423 const struct nft_set_ops *ops ____cacheline_aligned; 423 const struct nft_set_ops *ops ____cacheline_aligned;
424 u16 flags:13, 424 u16 flags:14,
425 bound:1,
426 genmask:2; 425 genmask:2;
427 u8 klen; 426 u8 klen;
428 u8 dlen; 427 u8 dlen;
@@ -1348,12 +1347,15 @@ struct nft_trans_rule {
1348struct nft_trans_set { 1347struct nft_trans_set {
1349 struct nft_set *set; 1348 struct nft_set *set;
1350 u32 set_id; 1349 u32 set_id;
1350 bool bound;
1351}; 1351};
1352 1352
1353#define nft_trans_set(trans) \ 1353#define nft_trans_set(trans) \
1354 (((struct nft_trans_set *)trans->data)->set) 1354 (((struct nft_trans_set *)trans->data)->set)
1355#define nft_trans_set_id(trans) \ 1355#define nft_trans_set_id(trans) \
1356 (((struct nft_trans_set *)trans->data)->set_id) 1356 (((struct nft_trans_set *)trans->data)->set_id)
1357#define nft_trans_set_bound(trans) \
1358 (((struct nft_trans_set *)trans->data)->bound)
1357 1359
1358struct nft_trans_chain { 1360struct nft_trans_chain {
1359 bool update; 1361 bool update;
@@ -1384,12 +1386,15 @@ struct nft_trans_table {
1384struct nft_trans_elem { 1386struct nft_trans_elem {
1385 struct nft_set *set; 1387 struct nft_set *set;
1386 struct nft_set_elem elem; 1388 struct nft_set_elem elem;
1389 bool bound;
1387}; 1390};
1388 1391
1389#define nft_trans_elem_set(trans) \ 1392#define nft_trans_elem_set(trans) \
1390 (((struct nft_trans_elem *)trans->data)->set) 1393 (((struct nft_trans_elem *)trans->data)->set)
1391#define nft_trans_elem(trans) \ 1394#define nft_trans_elem(trans) \
1392 (((struct nft_trans_elem *)trans->data)->elem) 1395 (((struct nft_trans_elem *)trans->data)->elem)
1396#define nft_trans_elem_set_bound(trans) \
1397 (((struct nft_trans_elem *)trans->data)->bound)
1393 1398
1394struct nft_trans_obj { 1399struct nft_trans_obj {
1395 struct nft_object *obj; 1400 struct nft_object *obj;
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 3196663a10e3..c8b9dec376f5 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -73,4 +73,6 @@ int nft_flow_rule_offload_commit(struct net *net);
73 (__reg)->key = __key; \ 73 (__reg)->key = __key; \
74 memset(&(__reg)->mask, 0xff, (__reg)->len); 74 memset(&(__reg)->mask, 0xff, (__reg)->len);
75 75
76int nft_chain_offload_priority(struct nft_base_chain *basechain);
77
76#endif 78#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e4650e5b64a1..b140c8f1be22 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -684,9 +684,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
684 const struct nla_policy *policy, 684 const struct nla_policy *policy,
685 struct netlink_ext_ack *extack) 685 struct netlink_ext_ack *extack)
686{ 686{
687 return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), 687 return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
688 nlmsg_attrlen(nlh, hdrlen), policy, 688 NL_VALIDATE_STRICT, extack);
689 NL_VALIDATE_STRICT, extack);
690} 689}
691 690
692/** 691/**
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 25f1f9a8419b..95f766c31c90 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
141 141
142 nh_grp = rcu_dereference_rtnl(nh->nh_grp); 142 nh_grp = rcu_dereference_rtnl(nh->nh_grp);
143 rc = nh_grp->num_nh; 143 rc = nh_grp->num_nh;
144 } else {
145 const struct nh_info *nhi;
146
147 nhi = rcu_dereference_rtnl(nh->nh_info);
148 if (nhi->reject_nh)
149 rc = 0;
150 } 144 }
151 145
152 return rc; 146 return rc;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e429809ca90d..98be18ef1ed3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -646,7 +646,7 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
646{ 646{
647 cls_common->chain_index = tp->chain->index; 647 cls_common->chain_index = tp->chain->index;
648 cls_common->protocol = tp->protocol; 648 cls_common->protocol = tp->protocol;
649 cls_common->prio = tp->prio; 649 cls_common->prio = tp->prio >> 16;
650 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) 650 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
651 cls_common->extack = extack; 651 cls_common->extack = extack;
652} 652}
diff --git a/include/net/psample.h b/include/net/psample.h
index 37a4df2325b2..6b578ce69cd8 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -11,6 +11,7 @@ struct psample_group {
11 u32 group_num; 11 u32 group_num;
12 u32 refcount; 12 u32 refcount;
13 u32 seq; 13 u32 seq;
14 struct rcu_head rcu;
14}; 15};
15 16
16struct psample_group *psample_group_get(struct net *net, u32 group_num); 17struct psample_group *psample_group_get(struct net *net, u32 group_num);
diff --git a/include/net/route.h b/include/net/route.h
index 630a0493f1f3..dfce19c9fa96 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt);
233 233
234int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, 234int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
235 u32 table_id, struct fib_info *fi, 235 u32 table_id, struct fib_info *fi,
236 int *fa_index, int fa_start); 236 int *fa_index, int fa_start, unsigned int flags);
237 237
238static inline void ip_rt_put(struct rtable *rt) 238static inline void ip_rt_put(struct rtable *rt)
239{ 239{
diff --git a/include/net/sock.h b/include/net/sock.h
index 228db3998e46..2c53f1a1d905 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2482,6 +2482,7 @@ static inline bool sk_fullsock(const struct sock *sk)
2482 2482
2483/* Checks if this SKB belongs to an HW offloaded socket 2483/* Checks if this SKB belongs to an HW offloaded socket
2484 * and whether any SW fallbacks are required based on dev. 2484 * and whether any SW fallbacks are required based on dev.
2485 * Check decrypted mark in case skb_orphan() cleared socket.
2485 */ 2486 */
2486static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, 2487static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2487 struct net_device *dev) 2488 struct net_device *dev)
@@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2489#ifdef CONFIG_SOCK_VALIDATE_XMIT 2490#ifdef CONFIG_SOCK_VALIDATE_XMIT
2490 struct sock *sk = skb->sk; 2491 struct sock *sk = skb->sk;
2491 2492
2492 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) 2493 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2493 skb = sk->sk_validate_xmit_skb(sk, dev, skb); 2494 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2495#ifdef CONFIG_TLS_DEVICE
2496 } else if (unlikely(skb->decrypted)) {
2497 pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
2498 kfree_skb(skb);
2499 skb = NULL;
2500#endif
2501 }
2494#endif 2502#endif
2495 2503
2496 return skb; 2504 return skb;
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index b0fc6b26bdf5..83df1ec6664e 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -105,8 +105,7 @@ struct rdma_restrack_entry {
105}; 105};
106 106
107int rdma_restrack_count(struct ib_device *dev, 107int rdma_restrack_count(struct ib_device *dev,
108 enum rdma_restrack_type type, 108 enum rdma_restrack_type type);
109 struct pid_namespace *ns);
110 109
111void rdma_restrack_kadd(struct rdma_restrack_entry *res); 110void rdma_restrack_kadd(struct rdma_restrack_entry *res);
112void rdma_restrack_uadd(struct rdma_restrack_entry *res); 111void rdma_restrack_uadd(struct rdma_restrack_entry *res);
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 50f49e043668..d1a93c73f006 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -46,7 +46,9 @@ struct mcip_cmd {
46#define CMD_IDU_ENABLE 0x71 46#define CMD_IDU_ENABLE 0x71
47#define CMD_IDU_DISABLE 0x72 47#define CMD_IDU_DISABLE 0x72
48#define CMD_IDU_SET_MODE 0x74 48#define CMD_IDU_SET_MODE 0x74
49#define CMD_IDU_READ_MODE 0x75
49#define CMD_IDU_SET_DEST 0x76 50#define CMD_IDU_SET_DEST 0x76
51#define CMD_IDU_ACK_CIRQ 0x79
50#define CMD_IDU_SET_MASK 0x7C 52#define CMD_IDU_SET_MASK 0x7C
51 53
52#define IDU_M_TRIG_LEVEL 0x0 54#define IDU_M_TRIG_LEVEL 0x0
@@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
119 __mcip_cmd(cmd, param); 121 __mcip_cmd(cmd, param);
120} 122}
121 123
124/*
125 * Read MCIP register
126 */
127static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param)
128{
129 __mcip_cmd(cmd, param);
130 return read_aux_reg(ARC_REG_MCIP_READBACK);
131}
132
122#endif 133#endif
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index cc1d060cbf13..a13a62db3565 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -23,20 +23,17 @@
23#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY 23#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
24 24
25enum rxrpc_skb_trace { 25enum rxrpc_skb_trace {
26 rxrpc_skb_rx_cleaned, 26 rxrpc_skb_cleaned,
27 rxrpc_skb_rx_freed, 27 rxrpc_skb_freed,
28 rxrpc_skb_rx_got, 28 rxrpc_skb_got,
29 rxrpc_skb_rx_lost, 29 rxrpc_skb_lost,
30 rxrpc_skb_rx_purged, 30 rxrpc_skb_new,
31 rxrpc_skb_rx_received, 31 rxrpc_skb_purged,
32 rxrpc_skb_rx_rotated, 32 rxrpc_skb_received,
33 rxrpc_skb_rx_seen, 33 rxrpc_skb_rotated,
34 rxrpc_skb_tx_cleaned, 34 rxrpc_skb_seen,
35 rxrpc_skb_tx_freed, 35 rxrpc_skb_unshared,
36 rxrpc_skb_tx_got, 36 rxrpc_skb_unshared_nomem,
37 rxrpc_skb_tx_new,
38 rxrpc_skb_tx_rotated,
39 rxrpc_skb_tx_seen,
40}; 37};
41 38
42enum rxrpc_local_trace { 39enum rxrpc_local_trace {
@@ -228,20 +225,17 @@ enum rxrpc_tx_point {
228 * Declare tracing information enums and their string mappings for display. 225 * Declare tracing information enums and their string mappings for display.
229 */ 226 */
230#define rxrpc_skb_traces \ 227#define rxrpc_skb_traces \
231 EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ 228 EM(rxrpc_skb_cleaned, "CLN") \
232 EM(rxrpc_skb_rx_freed, "Rx FRE") \ 229 EM(rxrpc_skb_freed, "FRE") \
233 EM(rxrpc_skb_rx_got, "Rx GOT") \ 230 EM(rxrpc_skb_got, "GOT") \
234 EM(rxrpc_skb_rx_lost, "Rx *L*") \ 231 EM(rxrpc_skb_lost, "*L*") \
235 EM(rxrpc_skb_rx_purged, "Rx PUR") \ 232 EM(rxrpc_skb_new, "NEW") \
236 EM(rxrpc_skb_rx_received, "Rx RCV") \ 233 EM(rxrpc_skb_purged, "PUR") \
237 EM(rxrpc_skb_rx_rotated, "Rx ROT") \ 234 EM(rxrpc_skb_received, "RCV") \
238 EM(rxrpc_skb_rx_seen, "Rx SEE") \ 235 EM(rxrpc_skb_rotated, "ROT") \
239 EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ 236 EM(rxrpc_skb_seen, "SEE") \
240 EM(rxrpc_skb_tx_freed, "Tx FRE") \ 237 EM(rxrpc_skb_unshared, "UNS") \
241 EM(rxrpc_skb_tx_got, "Tx GOT") \ 238 E_(rxrpc_skb_unshared_nomem, "US0")
242 EM(rxrpc_skb_tx_new, "Tx NEW") \
243 EM(rxrpc_skb_tx_rotated, "Tx ROT") \
244 E_(rxrpc_skb_tx_seen, "Tx SEE")
245 239
246#define rxrpc_local_traces \ 240#define rxrpc_local_traces \
247 EM(rxrpc_local_got, "GOT") \ 241 EM(rxrpc_local_got, "GOT") \
@@ -498,10 +492,10 @@ rxrpc_tx_points;
498#define E_(a, b) { a, b } 492#define E_(a, b) { a, b }
499 493
500TRACE_EVENT(rxrpc_local, 494TRACE_EVENT(rxrpc_local,
501 TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, 495 TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
502 int usage, const void *where), 496 int usage, const void *where),
503 497
504 TP_ARGS(local, op, usage, where), 498 TP_ARGS(local_debug_id, op, usage, where),
505 499
506 TP_STRUCT__entry( 500 TP_STRUCT__entry(
507 __field(unsigned int, local ) 501 __field(unsigned int, local )
@@ -511,7 +505,7 @@ TRACE_EVENT(rxrpc_local,
511 ), 505 ),
512 506
513 TP_fast_assign( 507 TP_fast_assign(
514 __entry->local = local->debug_id; 508 __entry->local = local_debug_id;
515 __entry->op = op; 509 __entry->op = op;
516 __entry->usage = usage; 510 __entry->usage = usage;
517 __entry->where = where; 511 __entry->where = where;
@@ -643,13 +637,14 @@ TRACE_EVENT(rxrpc_call,
643 637
644TRACE_EVENT(rxrpc_skb, 638TRACE_EVENT(rxrpc_skb,
645 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op, 639 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
646 int usage, int mod_count, const void *where), 640 int usage, int mod_count, u8 flags, const void *where),
647 641
648 TP_ARGS(skb, op, usage, mod_count, where), 642 TP_ARGS(skb, op, usage, mod_count, flags, where),
649 643
650 TP_STRUCT__entry( 644 TP_STRUCT__entry(
651 __field(struct sk_buff *, skb ) 645 __field(struct sk_buff *, skb )
652 __field(enum rxrpc_skb_trace, op ) 646 __field(enum rxrpc_skb_trace, op )
647 __field(u8, flags )
653 __field(int, usage ) 648 __field(int, usage )
654 __field(int, mod_count ) 649 __field(int, mod_count )
655 __field(const void *, where ) 650 __field(const void *, where )
@@ -657,14 +652,16 @@ TRACE_EVENT(rxrpc_skb,
657 652
658 TP_fast_assign( 653 TP_fast_assign(
659 __entry->skb = skb; 654 __entry->skb = skb;
655 __entry->flags = flags;
660 __entry->op = op; 656 __entry->op = op;
661 __entry->usage = usage; 657 __entry->usage = usage;
662 __entry->mod_count = mod_count; 658 __entry->mod_count = mod_count;
663 __entry->where = where; 659 __entry->where = where;
664 ), 660 ),
665 661
666 TP_printk("s=%p %s u=%d m=%d p=%pSR", 662 TP_printk("s=%p %cx %s u=%d m=%d p=%pSR",
667 __entry->skb, 663 __entry->skb,
664 __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R',
668 __print_symbolic(__entry->op, rxrpc_skb_traces), 665 __print_symbolic(__entry->op, rxrpc_skb_traces),
669 __entry->usage, 666 __entry->usage,
670 __entry->mod_count, 667 __entry->mod_count,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index fa1c753dcdbc..a5aa7d3ac6a1 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
1466 * If no cookie has been set yet, generate a new cookie. Once 1466 * If no cookie has been set yet, generate a new cookie. Once
1467 * generated, the socket cookie remains stable for the life of the 1467 * generated, the socket cookie remains stable for the life of the
1468 * socket. This helper can be useful for monitoring per socket 1468 * socket. This helper can be useful for monitoring per socket
1469 * networking traffic statistics as it provides a unique socket 1469 * networking traffic statistics as it provides a global socket
1470 * identifier per namespace. 1470 * identifier that can be assumed unique.
1471 * Return 1471 * Return
1472 * A 8-byte long non-decreasing number on success, or 0 if the 1472 * A 8-byte long non-decreasing number on success, or 0 if the
1473 * socket field is missing inside *skb*. 1473 * socket field is missing inside *skb*.
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index a18b719f49d4..784ba0b9690a 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -77,11 +77,6 @@
77 77
78#define JFFS2_ACL_VERSION 0x0001 78#define JFFS2_ACL_VERSION 0x0001
79 79
80// Maybe later...
81//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
82//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
83
84
85#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at 80#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at
86 mount time, don't wait for it to 81 mount time, don't wait for it to
87 happen later */ 82 happen later */
diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
index 5c8a4d760ee3..b5123ab8d54a 100644
--- a/include/uapi/linux/netfilter/xt_nfacct.h
+++ b/include/uapi/linux/netfilter/xt_nfacct.h
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
11 struct nf_acct *nfacct; 11 struct nf_acct *nfacct;
12}; 12};
13 13
14struct xt_nfacct_match_info_v1 {
15 char name[NFACCT_NAME_MAX];
16 struct nf_acct *nfacct __attribute__((aligned(8)));
17};
18
14#endif /* _XT_NFACCT_MATCH_H */ 19#endif /* _XT_NFACCT_MATCH_H */
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index fd6b5f66e2c5..cba368e55863 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -250,6 +250,7 @@ struct rds_info_rdma_connection {
250 __u32 rdma_mr_max; 250 __u32 rdma_mr_max;
251 __u32 rdma_mr_size; 251 __u32 rdma_mr_size;
252 __u8 tos; 252 __u8 tos;
253 __u8 sl;
253 __u32 cache_allocs; 254 __u32 cache_allocs;
254}; 255};
255 256
@@ -265,6 +266,7 @@ struct rds6_info_rdma_connection {
265 __u32 rdma_mr_max; 266 __u32 rdma_mr_max;
266 __u32 rdma_mr_size; 267 __u32 rdma_mr_size;
267 __u8 tos; 268 __u8 tos;
269 __u8 sl;
268 __u32 cache_allocs; 270 __u32 cache_allocs;
269}; 271};
270 272
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 8191a7db2777..66088a9e9b9e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
890 890
891static int bpf_jit_blind_insn(const struct bpf_insn *from, 891static int bpf_jit_blind_insn(const struct bpf_insn *from,
892 const struct bpf_insn *aux, 892 const struct bpf_insn *aux,
893 struct bpf_insn *to_buff) 893 struct bpf_insn *to_buff,
894 bool emit_zext)
894{ 895{
895 struct bpf_insn *to = to_buff; 896 struct bpf_insn *to = to_buff;
896 u32 imm_rnd = get_random_int(); 897 u32 imm_rnd = get_random_int();
@@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
1005 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ 1006 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1006 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1007 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1007 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1008 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1009 if (emit_zext)
1010 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1008 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1011 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1009 break; 1012 break;
1010 1013
@@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1088 insn[1].code == 0) 1091 insn[1].code == 0)
1089 memcpy(aux, insn, sizeof(aux)); 1092 memcpy(aux, insn, sizeof(aux));
1090 1093
1091 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); 1094 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1095 clone->aux->verifier_zext);
1092 if (!rewritten) 1096 if (!rewritten)
1093 continue; 1097 continue;
1094 1098
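For context, constant blinding stores an attacker-chosen immediate XORed with a per-rewrite random value and undoes the XOR at run time; the new emit_zext flag covers JITs where the verifier has elided zero-extension instructions, so the 32-bit XOR sequence must be followed by an explicit zext of the scratch register. A rough user-space illustration of the XOR round trip (a sketch of the idea, not the JIT code):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            uint32_t imm = 0xdeadbeef;           /* attacker-chosen constant */
            uint32_t rnd = (uint32_t)rand();     /* fresh random per rewrite */

            /* Only the blinded value is embedded in the program image. */
            uint32_t blinded = imm ^ rnd;

            /* Recovered at run time with a second XOR. Both ops are 32-bit,
             * so when the verifier has removed zero-extensions the JIT must
             * still emit an explicit zext of the scratch register (the
             * BPF_ZEXT_REG(BPF_REG_AX) added above). */
            uint64_t ax = (uint64_t)(blinded ^ rnd);

            printf("recovered %#llx\n", (unsigned long long)ax);
            return ax == imm ? 0 : 1;
    }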
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5d141f16f6fa..272071e9112f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1707 if (err) 1707 if (err)
1708 goto free_used_maps; 1708 goto free_used_maps;
1709 1709
1710 err = bpf_prog_new_fd(prog); 1710 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
1711 if (err < 0) { 1711 * effectively publicly exposed. However, retrieving via
1712 /* failed to allocate fd. 1712 * bpf_prog_get_fd_by_id() will take another reference,
1713 * bpf_prog_put() is needed because the above 1713 * therefore it cannot be gone underneath us.
1714 * bpf_prog_alloc_id() has published the prog 1714 *
1715 * to the userspace and the userspace may 1715 * Only for the time /after/ successful bpf_prog_new_fd()
1716 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID. 1716 * and before returning to userspace, we might just hold
1717 */ 1717 * one reference and any parallel close on that fd could
1718 bpf_prog_put(prog); 1718 * rip everything out. Hence, below notifications must
1719 return err; 1719 * happen before bpf_prog_new_fd().
1720 } 1720 *
1721 1721 * Also, any failure handling from this point onwards must
1722 * be using bpf_prog_put() given the program is exposed.
1723 */
1722 bpf_prog_kallsyms_add(prog); 1724 bpf_prog_kallsyms_add(prog);
1723 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 1725 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
1726
1727 err = bpf_prog_new_fd(prog);
1728 if (err < 0)
1729 bpf_prog_put(prog);
1724 return err; 1730 return err;
1725 1731
1726free_used_maps: 1732free_used_maps:
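The reordering in bpf_prog_load() follows a general publish/notify rule: everything that must reliably observe the program goes before bpf_prog_new_fd(), because once the fd exists a parallel close() can drop the last reference at any moment, and failure handling may only put the reference, never free directly. A toy single-threaded analogue of the corrected ordering (hypothetical names, heavily simplified):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refs; };                /* stand-in for struct bpf_prog */

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    free(o);                 /* last reference gone */
    }

    static int obj_new_fd(struct obj *o)     /* may fail, may race with close() */
    {
            (void)o;
            return -1;                       /* simulate fd allocation failure */
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));
            int err;

            if (!o)
                    return 1;
            o->refs = 1;                     /* our reference keeps it alive */

            /* Do every notification while our reference is still guaranteed;
             * after the fd exists, userspace could yank it away. */
            printf("kallsyms_add + perf_event notifications\n");

            err = obj_new_fd(o);
            if (err < 0)
                    obj_put(o);              /* already published: put, not free */
            return err < 0 ? 1 : 0;
    }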
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c84d83f86141..b5c14c9d7b98 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
985 reg->smax_value = S64_MAX; 985 reg->smax_value = S64_MAX;
986 reg->umin_value = 0; 986 reg->umin_value = 0;
987 reg->umax_value = U64_MAX; 987 reg->umax_value = U64_MAX;
988
989 /* constant backtracking is enabled for root only for now */
990 reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
991} 988}
992 989
993/* Mark a register as having a completely unknown (scalar) value. */ 990/* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
1014 __mark_reg_not_init(regs + regno); 1011 __mark_reg_not_init(regs + regno);
1015 return; 1012 return;
1016 } 1013 }
1017 __mark_reg_unknown(regs + regno); 1014 regs += regno;
1015 __mark_reg_unknown(regs);
1016 /* constant backtracking is enabled for root without bpf2bpf calls */
1017 regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
1018 true : false;
1018} 1019}
1019 1020
1020static void __mark_reg_not_init(struct bpf_reg_state *reg) 1021static void __mark_reg_not_init(struct bpf_reg_state *reg)
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 2bd410f934b3..69cfb4345388 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
230 */ 230 */
231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) 231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
232{ 232{
233 int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; 233 size_t count = size >> PAGE_SHIFT;
234 size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
235 size_t align = get_order(PAGE_ALIGN(size));
236 struct page *page = NULL; 234 struct page *page = NULL;
237 struct cma *cma = NULL; 235 struct cma *cma = NULL;
238 236
@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
243 241
244 /* CMA can be used only in the context which permits sleeping */ 242 /* CMA can be used only in the context which permits sleeping */
245 if (cma && gfpflags_allow_blocking(gfp)) { 243 if (cma && gfpflags_allow_blocking(gfp)) {
244 size_t align = get_order(size);
246 size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); 245 size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
247 246
248 page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN); 247 page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
249 } 248 }
250 249
251 /* Fallback allocation of normal pages */
252 if (!page)
253 page = alloc_pages_node(node, gfp, align);
254 return page; 250 return page;
255} 251}
256 252
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 795c9b095d75..706113c6bebc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
85struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, 85struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
86 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 86 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
87{ 87{
88 size_t alloc_size = PAGE_ALIGN(size);
89 int node = dev_to_node(dev);
88 struct page *page = NULL; 90 struct page *page = NULL;
89 u64 phys_mask; 91 u64 phys_mask;
90 92
@@ -95,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
95 gfp &= ~__GFP_ZERO; 97 gfp &= ~__GFP_ZERO;
96 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, 98 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
97 &phys_mask); 99 &phys_mask);
100 page = dma_alloc_contiguous(dev, alloc_size, gfp);
101 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
102 dma_free_contiguous(dev, page, alloc_size);
103 page = NULL;
104 }
98again: 105again:
99 page = dma_alloc_contiguous(dev, size, gfp); 106 if (!page)
107 page = alloc_pages_node(node, gfp, get_order(alloc_size));
100 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { 108 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
101 dma_free_contiguous(dev, page, size); 109 dma_free_contiguous(dev, page, size);
102 page = NULL; 110 page = NULL;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9484e88dabc2..9be995fc3c5a 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
295 } 295 }
296} 296}
297 297
298static void irq_sysfs_del(struct irq_desc *desc)
299{
300 /*
301 * If irq_sysfs_init() has not yet been invoked (early boot), then
302 * irq_kobj_base is NULL and the descriptor was never added.
303 * kobject_del() complains about a object with no parent, so make
304 * it conditional.
305 */
306 if (irq_kobj_base)
307 kobject_del(&desc->kobj);
308}
309
298static int __init irq_sysfs_init(void) 310static int __init irq_sysfs_init(void)
299{ 311{
300 struct irq_desc *desc; 312 struct irq_desc *desc;
@@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = {
325}; 337};
326 338
327static void irq_sysfs_add(int irq, struct irq_desc *desc) {} 339static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
340static void irq_sysfs_del(struct irq_desc *desc) {}
328 341
329#endif /* CONFIG_SYSFS */ 342#endif /* CONFIG_SYSFS */
330 343
@@ -438,7 +451,7 @@ static void free_desc(unsigned int irq)
438 * The sysfs entry must be serialized against a concurrent 451 * The sysfs entry must be serialized against a concurrent
439 * irq_sysfs_init() as well. 452 * irq_sysfs_init() as well.
440 */ 453 */
441 kobject_del(&desc->kobj); 454 irq_sysfs_del(desc);
442 delete_irq_desc(irq); 455 delete_irq_desc(irq);
443 456
444 /* 457 /*
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 95a260f9214b..136ce049c4ad 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
263{ 263{
264 char namebuf[KSYM_NAME_LEN]; 264 char namebuf[KSYM_NAME_LEN];
265 265
266 if (is_ksym_addr(addr)) 266 if (is_ksym_addr(addr)) {
267 return !!get_symbol_pos(addr, symbolsize, offset); 267 get_symbol_pos(addr, symbolsize, offset);
268 return 1;
269 }
268 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || 270 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
269 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); 271 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
270} 272}
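The old code returned !!get_symbol_pos(), but that value is an index into the symbol table, not a truth value, so a symbol stored at position 0 was misreported as a lookup failure. A toy illustration of the off-by-semantics bug (simplified model, hypothetical names):

    #include <stdio.h>

    /* Toy lookup: returns an index into a symbol table; index 0 is valid. */
    static unsigned long find_index(unsigned long addr)
    {
            return addr / 0x1000;        /* the first symbol lives at index 0 */
    }

    static int lookup_buggy(unsigned long addr)
    {
            return !!find_index(addr);   /* index 0 misread as "not found" */
    }

    static int lookup_fixed(unsigned long addr)
    {
            (void)find_index(addr);      /* the real code keeps size/offset */
            return 1;                    /* address already known in range */
    }

    int main(void)
    {
            unsigned long addr = 0x10;   /* inside the first symbol */

            printf("buggy=%d fixed=%d\n", lookup_buggy(addr), lookup_fixed(addr));
            return 0;
    }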
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9873fc627d61..d9770a5393c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
470 */ 470 */
471static void do_optimize_kprobes(void) 471static void do_optimize_kprobes(void)
472{ 472{
473 lockdep_assert_held(&text_mutex);
473 /* 474 /*
474 * The optimization/unoptimization refers online_cpus via 475 * The optimization/unoptimization refers online_cpus via
475 * stop_machine() and cpu-hotplug modifies online_cpus. 476 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
487 list_empty(&optimizing_list)) 488 list_empty(&optimizing_list))
488 return; 489 return;
489 490
490 mutex_lock(&text_mutex);
491 arch_optimize_kprobes(&optimizing_list); 491 arch_optimize_kprobes(&optimizing_list);
492 mutex_unlock(&text_mutex);
493} 492}
494 493
495/* 494/*
@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
500{ 499{
501 struct optimized_kprobe *op, *tmp; 500 struct optimized_kprobe *op, *tmp;
502 501
502 lockdep_assert_held(&text_mutex);
503 /* See comment in do_optimize_kprobes() */ 503 /* See comment in do_optimize_kprobes() */
504 lockdep_assert_cpus_held(); 504 lockdep_assert_cpus_held();
505 505
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
507 if (list_empty(&unoptimizing_list)) 507 if (list_empty(&unoptimizing_list))
508 return; 508 return;
509 509
510 mutex_lock(&text_mutex);
511 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); 510 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
512 /* Loop free_list for disarming */ 511 /* Loop free_list for disarming */
513 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 512 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
524 } else 523 } else
525 list_del_init(&op->list); 524 list_del_init(&op->list);
526 } 525 }
527 mutex_unlock(&text_mutex);
528} 526}
529 527
530/* Reclaim all kprobes on the free_list */ 528/* Reclaim all kprobes on the free_list */
@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
556{ 554{
557 mutex_lock(&kprobe_mutex); 555 mutex_lock(&kprobe_mutex);
558 cpus_read_lock(); 556 cpus_read_lock();
557 mutex_lock(&text_mutex);
559 /* Lock modules while optimizing kprobes */ 558 /* Lock modules while optimizing kprobes */
560 mutex_lock(&module_mutex); 559 mutex_lock(&module_mutex);
561 560
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
583 do_free_cleaned_kprobes(); 582 do_free_cleaned_kprobes();
584 583
585 mutex_unlock(&module_mutex); 584 mutex_unlock(&module_mutex);
585 mutex_unlock(&text_mutex);
586 cpus_read_unlock(); 586 cpus_read_unlock();
587 mutex_unlock(&kprobe_mutex); 587 mutex_unlock(&kprobe_mutex);
588 588
diff --git a/kernel/module.c b/kernel/module.c
index 5933395af9a0..9ee93421269c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -65,9 +65,9 @@
65/* 65/*
66 * Modules' sections will be aligned on page boundaries 66 * Modules' sections will be aligned on page boundaries
67 * to ensure complete separation of code and data, but 67 * to ensure complete separation of code and data, but
68 * only when CONFIG_STRICT_MODULE_RWX=y 68 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
69 */ 69 */
70#ifdef CONFIG_STRICT_MODULE_RWX 70#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
71# define debug_align(X) ALIGN(X, PAGE_SIZE) 71# define debug_align(X) ALIGN(X, PAGE_SIZE)
72#else 72#else
73# define debug_align(X) (X) 73# define debug_align(X) (X)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b037f195473..010d578118d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3904,7 +3904,7 @@ void __noreturn do_task_dead(void)
3904 3904
3905static inline void sched_submit_work(struct task_struct *tsk) 3905static inline void sched_submit_work(struct task_struct *tsk)
3906{ 3906{
3907 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3907 if (!tsk->state)
3908 return; 3908 return;
3909 3909
3910 /* 3910 /*
@@ -3920,6 +3920,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
3920 preempt_enable_no_resched(); 3920 preempt_enable_no_resched();
3921 } 3921 }
3922 3922
3923 if (tsk_is_pi_blocked(tsk))
3924 return;
3925
3923 /* 3926 /*
3924 * If we are going to sleep and we have plugged IO queued, 3927 * If we are going to sleep and we have plugged IO queued,
3925 * make sure to submit it to avoid deadlocks. 3928 * make sure to submit it to avoid deadlocks.
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 23fbbcc414d5..6e52b67b420e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock 1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1132 */ 1132 */
1133 if (kworker_to_destroy) { 1133 if (kworker_to_destroy) {
1134 /*
1135 * After the RCU grace period has expired, the worker
1136 * can no longer be found through group->poll_kworker.
1137 * But it might have been already scheduled before
1138 * that - deschedule it cleanly before destroying it.
1139 */
1134 kthread_cancel_delayed_work_sync(&group->poll_work); 1140 kthread_cancel_delayed_work_sync(&group->poll_work);
1141 atomic_set(&group->poll_scheduled, 0);
1142
1135 kthread_destroy_worker(kworker_to_destroy); 1143 kthread_destroy_worker(kworker_to_destroy);
1136 } 1144 }
1137 kfree(t); 1145 kfree(t);
diff --git a/kernel/signal.c b/kernel/signal.c
index e667be6907d7..534fec266a33 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -90,6 +90,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
90 handler == SIG_DFL && !(force && sig_kernel_only(sig))) 90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true; 91 return true;
92 92
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
93 return sig_handler_ignored(handler, sig); 98 return sig_handler_ignored(handler, sig);
94} 99}
95 100
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d911c8470149..ca69290bee2a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -146,6 +146,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) 146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
147{ 147{
148 tk->offs_boot = ktime_add(tk->offs_boot, delta); 148 tk->offs_boot = ktime_add(tk->offs_boot, delta);
149 /*
150 * Timespec representation for VDSO update to avoid 64bit division
151 * on every update.
152 */
153 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
149} 154}
150 155
151/* 156/*
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 8cf3596a4ce6..4bc37ac3bb05 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata,
17 struct timekeeper *tk) 17 struct timekeeper *tk)
18{ 18{
19 struct vdso_timestamp *vdso_ts; 19 struct vdso_timestamp *vdso_ts;
20 u64 nsec; 20 u64 nsec, sec;
21 21
22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; 22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; 23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask;
@@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata,
45 } 45 }
46 vdso_ts->nsec = nsec; 46 vdso_ts->nsec = nsec;
47 47
48 /* CLOCK_MONOTONIC_RAW */ 48 /* Copy MONOTONIC time for BOOTTIME */
49 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; 49 sec = vdso_ts->sec;
50 vdso_ts->sec = tk->raw_sec; 50 /* Add the boot offset */
51 vdso_ts->nsec = tk->tkr_raw.xtime_nsec; 51 sec += tk->monotonic_to_boot.tv_sec;
52 nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
52 53
53 /* CLOCK_BOOTTIME */ 54 /* CLOCK_BOOTTIME */
54 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; 55 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
55 vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 56 vdso_ts->sec = sec;
56 nsec = tk->tkr_mono.xtime_nsec; 57
57 nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
58 ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
59 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { 58 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
60 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); 59 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
61 vdso_ts->sec++; 60 vdso_ts->sec++;
62 } 61 }
63 vdso_ts->nsec = nsec; 62 vdso_ts->nsec = nsec;
64 63
64 /* CLOCK_MONOTONIC_RAW */
65 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
66 vdso_ts->sec = tk->raw_sec;
67 vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
68
65 /* CLOCK_TAI */ 69 /* CLOCK_TAI */
66 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; 70 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
67 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; 71 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
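The rewrite derives CLOCK_BOOTTIME from the just-computed CLOCK_MONOTONIC value plus the new tk->monotonic_to_boot timespec, keeping the hot path free of 64-bit divisions: the offset is added in the clocksource's shifted-nanosecond domain and then normalized by repeated subtraction. A small arithmetic sketch with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            unsigned int shift = 8;                  /* example clocksource shift */
            uint64_t sec = 12;                       /* CLOCK_MONOTONIC seconds */
            uint64_t nsec = 900000000ULL << shift;   /* shifted nanoseconds */

            /* Boot offset kept as a timespec so the update path needs
             * only an add and a shift, never a 64-bit division. */
            uint64_t boot_sec = 3, boot_nsec = 250000000ULL;

            sec += boot_sec;
            nsec += boot_nsec << shift;

            while (nsec >= (NSEC_PER_SEC << shift)) {
                    nsec -= NSEC_PER_SEC << shift;
                    sec++;
            }
            printf("CLOCK_BOOTTIME = %llu.%09llu\n",
                   (unsigned long long)sec,
                   (unsigned long long)(nsec >> shift));
            return 0;
    }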
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca34503f178..f9821a3374e9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3095,6 +3095,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
3095 hnd = &iter->probe_entry->hlist; 3095 hnd = &iter->probe_entry->hlist;
3096 3096
3097 hash = iter->probe->ops.func_hash->filter_hash; 3097 hash = iter->probe->ops.func_hash->filter_hash;
3098
3099 /*
3100 * A probe being registered may temporarily have an empty hash
3101 * and it's at the end of the func_probes list.
3102 */
3103 if (!hash || hash == EMPTY_HASH)
3104 return NULL;
3105
3098 size = 1 << hash->size_bits; 3106 size = 1 << hash->size_bits;
3099 3107
3100 retry: 3108 retry:
@@ -4320,12 +4328,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
4320 4328
4321 mutex_unlock(&ftrace_lock); 4329 mutex_unlock(&ftrace_lock);
4322 4330
4331 /*
4332 * Note, there's a small window here where the func_hash->filter_hash
4333 * may be NULL or empty. Need to be careful when reading the loop.
4334 */
4323 mutex_lock(&probe->ops.func_hash->regex_lock); 4335 mutex_lock(&probe->ops.func_hash->regex_lock);
4324 4336
4325 orig_hash = &probe->ops.func_hash->filter_hash; 4337 orig_hash = &probe->ops.func_hash->filter_hash;
4326 old_hash = *orig_hash; 4338 old_hash = *orig_hash;
4327 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4339 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4328 4340
4341 if (!hash) {
4342 ret = -ENOMEM;
4343 goto out;
4344 }
4345
4329 ret = ftrace_match_records(hash, glob, strlen(glob)); 4346 ret = ftrace_match_records(hash, glob, strlen(glob));
4330 4347
4331 /* Nothing found? */ 4348 /* Nothing found? */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 525a97fbbc60..563e80f9006a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1567,9 +1567,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1567 1567
1568/** 1568/**
1569 * update_max_tr_single - only copy one trace over, and reset the rest 1569 * update_max_tr_single - only copy one trace over, and reset the rest
1570 * @tr - tracer 1570 * @tr: tracer
1571 * @tsk - task with the latency 1571 * @tsk: task with the latency
1572 * @cpu - the cpu of the buffer to copy. 1572 * @cpu: the cpu of the buffer to copy.
1573 * 1573 *
1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr. 1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1575 */ 1575 */
@@ -1767,7 +1767,7 @@ static void __init apply_trace_boot_options(void);
1767 1767
1768/** 1768/**
1769 * register_tracer - register a tracer with the ftrace system. 1769 * register_tracer - register a tracer with the ftrace system.
1770 * @type - the plugin for the tracer 1770 * @type: the plugin for the tracer
1771 * 1771 *
1772 * Register a new plugin tracer. 1772 * Register a new plugin tracer.
1773 */ 1773 */
@@ -2230,9 +2230,9 @@ static bool tracing_record_taskinfo_skip(int flags)
2230/** 2230/**
2231 * tracing_record_taskinfo - record the task info of a task 2231 * tracing_record_taskinfo - record the task info of a task
2232 * 2232 *
2233 * @task - task to record 2233 * @task: task to record
2234 * @flags - TRACE_RECORD_CMDLINE for recording comm 2234 * @flags: TRACE_RECORD_CMDLINE for recording comm
2235 * - TRACE_RECORD_TGID for recording tgid 2235 * TRACE_RECORD_TGID for recording tgid
2236 */ 2236 */
2237void tracing_record_taskinfo(struct task_struct *task, int flags) 2237void tracing_record_taskinfo(struct task_struct *task, int flags)
2238{ 2238{
@@ -2258,10 +2258,10 @@ void tracing_record_taskinfo(struct task_struct *task, int flags)
2258/** 2258/**
2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch 2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2260 * 2260 *
2261 * @prev - previous task during sched_switch 2261 * @prev: previous task during sched_switch
2262 * @next - next task during sched_switch 2262 * @next: next task during sched_switch
2263 * @flags - TRACE_RECORD_CMDLINE for recording comm 2263 * @flags: TRACE_RECORD_CMDLINE for recording comm
2264 * TRACE_RECORD_TGID for recording tgid 2264 * TRACE_RECORD_TGID for recording tgid
2265 */ 2265 */
2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev, 2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2267 struct task_struct *next, int flags) 2267 struct task_struct *next, int flags)
@@ -3072,7 +3072,9 @@ static void trace_printk_start_stop_comm(int enabled)
3072 3072
3073/** 3073/**
3074 * trace_vbprintk - write binary msg to tracing buffer 3074 * trace_vbprintk - write binary msg to tracing buffer
3075 * 3075 * @ip: The address of the caller
3076 * @fmt: The string format to write to the buffer
3077 * @args: Arguments for @fmt
3076 */ 3078 */
3077int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3079int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3078{ 3080{
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c7506bc81b75..648930823b57 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -787,7 +787,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
787 return ret; 787 return ret;
788} 788}
789 789
790static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 790int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
791{ 791{
792 char *event = NULL, *sub = NULL, *match; 792 char *event = NULL, *sub = NULL, *match;
793 int ret; 793 int ret;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index dbef0d135075..fb6bfbc5bf86 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -895,7 +895,8 @@ void trace_probe_cleanup(struct trace_probe *tp)
895 for (i = 0; i < tp->nr_args; i++) 895 for (i = 0; i < tp->nr_args; i++)
896 traceprobe_free_probe_arg(&tp->args[i]); 896 traceprobe_free_probe_arg(&tp->args[i]);
897 897
898 kfree(call->class->system); 898 if (call->class)
899 kfree(call->class->system);
899 kfree(call->name); 900 kfree(call->name);
900 kfree(call->print_fmt); 901 kfree(call->print_fmt);
901} 902}
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 117ad0e7fbf4..70dab9ac7827 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
68{ 68{
69 size /= esize; 69 size /= esize;
70 70
71 size = roundup_pow_of_two(size); 71 if (!is_power_of_2(size))
72 size = rounddown_pow_of_two(size);
72 73
73 fifo->in = 0; 74 fifo->in = 0;
74 fifo->out = 0; 75 fifo->out = 0;
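Rounding a caller-supplied size up could make the fifo index past the end of the buffer that was actually allocated; rounding down to a power of two stays within it, trading a little capacity for safety. A quick demonstration:

    #include <stdio.h>

    static unsigned long rounddown_pow_of_two(unsigned long n)
    {
            while (n & (n - 1))
                    n &= n - 1;   /* clear lowest set bit until power of two */
            return n;
    }

    int main(void)
    {
            unsigned long bufsize = 100; /* caller allocated 100 elements */

            /* Rounding up to 128 would let the fifo address memory past
             * the real buffer; rounding down to 64 stays inside it. */
            printf("usable fifo size: %lu\n", rounddown_pow_of_two(bufsize));
            return 0;
    }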
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index feea48fd1a0d..905027574e5d 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
35 struct logic_pio_hwaddr *range; 35 struct logic_pio_hwaddr *range;
36 resource_size_t start; 36 resource_size_t start;
37 resource_size_t end; 37 resource_size_t end;
38 resource_size_t mmio_sz = 0; 38 resource_size_t mmio_end = 0;
39 resource_size_t iio_sz = MMIO_UPPER_LIMIT; 39 resource_size_t iio_sz = MMIO_UPPER_LIMIT;
40 int ret = 0; 40 int ret = 0;
41 41
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
46 end = new_range->hw_start + new_range->size; 46 end = new_range->hw_start + new_range->size;
47 47
48 mutex_lock(&io_range_mutex); 48 mutex_lock(&io_range_mutex);
49 list_for_each_entry_rcu(range, &io_range_list, list) { 49 list_for_each_entry(range, &io_range_list, list) {
50 if (range->fwnode == new_range->fwnode) { 50 if (range->fwnode == new_range->fwnode) {
51 /* range already there */ 51 /* range already there */
52 goto end_register; 52 goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
56 /* for MMIO ranges we need to check for overlap */ 56 /* for MMIO ranges we need to check for overlap */
57 if (start >= range->hw_start + range->size || 57 if (start >= range->hw_start + range->size ||
58 end < range->hw_start) { 58 end < range->hw_start) {
59 mmio_sz += range->size; 59 mmio_end = range->io_start + range->size;
60 } else { 60 } else {
61 ret = -EFAULT; 61 ret = -EFAULT;
62 goto end_register; 62 goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
69 69
70 /* range not registered yet, check for available space */ 70 /* range not registered yet, check for available space */
71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) { 71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
72 if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) { 72 if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
73 /* if it's too big check if 64K space can be reserved */ 73 /* if it's too big check if 64K space can be reserved */
74 if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) { 74 if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
75 ret = -E2BIG; 75 ret = -E2BIG;
76 goto end_register; 76 goto end_register;
77 } 77 }
78 new_range->size = SZ_64K; 78 new_range->size = SZ_64K;
79 pr_warn("Requested IO range too big, new size set to 64K\n"); 79 pr_warn("Requested IO range too big, new size set to 64K\n");
80 } 80 }
81 new_range->io_start = mmio_sz; 81 new_range->io_start = mmio_end;
82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) { 82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) { 83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
84 ret = -E2BIG; 84 ret = -E2BIG;
@@ -99,6 +99,20 @@ end_register:
99} 99}
100 100
101/** 101/**
102 * logic_pio_unregister_range - unregister a logical PIO range for a host
103 * @range: pointer to the IO range which has been already registered.
104 *
105 * Unregister a previously-registered IO range node.
106 */
107void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
108{
109 mutex_lock(&io_range_mutex);
110 list_del_rcu(&range->list);
111 mutex_unlock(&io_range_mutex);
112 synchronize_rcu();
113}
114
115/**
102 * find_io_range_by_fwnode - find logical PIO range for given FW node 116 * find_io_range_by_fwnode - find logical PIO range for given FW node
103 * @fwnode: FW node handle associated with logical PIO range 117 * @fwnode: FW node handle associated with logical PIO range
104 * 118 *
@@ -108,26 +122,38 @@ end_register:
108 */ 122 */
109struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode) 123struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
110{ 124{
111 struct logic_pio_hwaddr *range; 125 struct logic_pio_hwaddr *range, *found_range = NULL;
112 126
127 rcu_read_lock();
113 list_for_each_entry_rcu(range, &io_range_list, list) { 128 list_for_each_entry_rcu(range, &io_range_list, list) {
114 if (range->fwnode == fwnode) 129 if (range->fwnode == fwnode) {
115 return range; 130 found_range = range;
131 break;
132 }
116 } 133 }
117 return NULL; 134 rcu_read_unlock();
135
136 return found_range;
118} 137}
119 138
120/* Return a registered range given an input PIO token */ 139/* Return a registered range given an input PIO token */
121static struct logic_pio_hwaddr *find_io_range(unsigned long pio) 140static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
122{ 141{
123 struct logic_pio_hwaddr *range; 142 struct logic_pio_hwaddr *range, *found_range = NULL;
124 143
144 rcu_read_lock();
125 list_for_each_entry_rcu(range, &io_range_list, list) { 145 list_for_each_entry_rcu(range, &io_range_list, list) {
126 if (in_range(pio, range->io_start, range->size)) 146 if (in_range(pio, range->io_start, range->size)) {
127 return range; 147 found_range = range;
148 break;
149 }
128 } 150 }
129 pr_err("PIO entry token %lx invalid\n", pio); 151 rcu_read_unlock();
130 return NULL; 152
153 if (!found_range)
154 pr_err("PIO entry token 0x%lx invalid\n", pio);
155
156 return found_range;
131} 157}
132 158
133/** 159/**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
180{ 206{
181 struct logic_pio_hwaddr *range; 207 struct logic_pio_hwaddr *range;
182 208
209 rcu_read_lock();
183 list_for_each_entry_rcu(range, &io_range_list, list) { 210 list_for_each_entry_rcu(range, &io_range_list, list) {
184 if (range->flags != LOGIC_PIO_CPU_MMIO) 211 if (range->flags != LOGIC_PIO_CPU_MMIO)
185 continue; 212 continue;
186 if (in_range(addr, range->hw_start, range->size)) 213 if (in_range(addr, range->hw_start, range->size)) {
187 return addr - range->hw_start + range->io_start; 214 unsigned long cpuaddr;
215
216 cpuaddr = addr - range->hw_start + range->io_start;
217
218 rcu_read_unlock();
219 return cpuaddr;
220 }
188 } 221 }
189 pr_err("addr %llx not registered in io_range_list\n", 222 rcu_read_unlock();
190 (unsigned long long) addr); 223
224 pr_err("addr %pa not registered in io_range_list\n", &addr);
225
191 return ~0UL; 226 return ~0UL;
192} 227}
193 228
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 738065f765ab..de1f15969e27 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -32,6 +32,7 @@
32#include <linux/shmem_fs.h> 32#include <linux/shmem_fs.h>
33#include <linux/oom.h> 33#include <linux/oom.h>
34#include <linux/numa.h> 34#include <linux/numa.h>
35#include <linux/page_owner.h>
35 36
36#include <asm/tlb.h> 37#include <asm/tlb.h>
37#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
@@ -2516,6 +2517,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2516 } 2517 }
2517 2518
2518 ClearPageCompound(head); 2519 ClearPageCompound(head);
2520
2521 split_page_owner(head, HPAGE_PMD_ORDER);
2522
2519 /* See comment in __split_huge_page_tail() */ 2523 /* See comment in __split_huge_page_tail() */
2520 if (PageAnon(head)) { 2524 if (PageAnon(head)) {
2521 /* Additional pin to swap cache */ 2525 /* Additional pin to swap cache */
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 2277b82902d8..95d16a42db6b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -407,8 +407,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
407 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 407 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
408 return shadow_byte < 0 || 408 return shadow_byte < 0 ||
409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE; 409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
410 else 410
411 return tag != (u8)shadow_byte; 411 /* else CONFIG_KASAN_SW_TAGS: */
412 if ((u8)shadow_byte == KASAN_TAG_INVALID)
413 return true;
414 if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
415 return true;
416
417 return false;
412} 418}
413 419
414static bool __kasan_slab_free(struct kmem_cache *cache, void *object, 420static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
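Under CONFIG_KASAN_SW_TAGS the check now special-cases two tag values: the invalid tag marking freed memory, and the native kernel tag that must be allowed to touch any live object. A hedged user-space restatement of the predicate (the 0xFF/0xFE values mirror the kernel's tag constants):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KASAN_TAG_KERNEL  0xFF  /* native pointer tag, matches everything */
    #define KASAN_TAG_INVALID 0xFE  /* shadow value for freed/invalid memory */

    static bool shadow_invalid(uint8_t tag, int8_t shadow_byte)
    {
            if ((uint8_t)shadow_byte == KASAN_TAG_INVALID)
                    return true;
            /* A kernel-tagged pointer may access any live object. */
            if (tag != KASAN_TAG_KERNEL && tag != (uint8_t)shadow_byte)
                    return true;
            return false;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   shadow_invalid(0xFF, (int8_t)0xAB),  /* 0: kernel tag ok */
                   shadow_invalid(0xAB, (int8_t)0xAB),  /* 0: tags match */
                   shadow_invalid(0xAB, (int8_t)0xFE)); /* 1: freed memory */
            return 0;
    }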
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f5c0c517c49..9ec5e12486a7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
752 /* Update memcg */ 752 /* Update memcg */
753 __mod_memcg_state(memcg, idx, val); 753 __mod_memcg_state(memcg, idx, val);
754 754
755 /* Update lruvec */
756 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
757
755 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 758 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
756 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 759 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
757 struct mem_cgroup_per_node *pi; 760 struct mem_cgroup_per_node *pi;
758 761
759 /*
760 * Batch local counters to keep them in sync with
761 * the hierarchical ones.
762 */
763 __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
764 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 762 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
765 atomic_long_add(x, &pi->lruvec_stat[idx]); 763 atomic_long_add(x, &pi->lruvec_stat[idx]);
766 x = 0; 764 x = 0;
@@ -3260,6 +3258,72 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3260 } 3258 }
3261} 3259}
3262 3260
3261static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
3262{
3263 unsigned long stat[MEMCG_NR_STAT];
3264 struct mem_cgroup *mi;
3265 int node, cpu, i;
3266 int min_idx, max_idx;
3267
3268 if (slab_only) {
3269 min_idx = NR_SLAB_RECLAIMABLE;
3270 max_idx = NR_SLAB_UNRECLAIMABLE;
3271 } else {
3272 min_idx = 0;
3273 max_idx = MEMCG_NR_STAT;
3274 }
3275
3276 for (i = min_idx; i < max_idx; i++)
3277 stat[i] = 0;
3278
3279 for_each_online_cpu(cpu)
3280 for (i = min_idx; i < max_idx; i++)
3281 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3282
3283 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3284 for (i = min_idx; i < max_idx; i++)
3285 atomic_long_add(stat[i], &mi->vmstats[i]);
3286
3287 if (!slab_only)
3288 max_idx = NR_VM_NODE_STAT_ITEMS;
3289
3290 for_each_node(node) {
3291 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3292 struct mem_cgroup_per_node *pi;
3293
3294 for (i = min_idx; i < max_idx; i++)
3295 stat[i] = 0;
3296
3297 for_each_online_cpu(cpu)
3298 for (i = min_idx; i < max_idx; i++)
3299 stat[i] += per_cpu(
3300 pn->lruvec_stat_cpu->count[i], cpu);
3301
3302 for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3303 for (i = min_idx; i < max_idx; i++)
3304 atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3305 }
3306}
3307
3308static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3309{
3310 unsigned long events[NR_VM_EVENT_ITEMS];
3311 struct mem_cgroup *mi;
3312 int cpu, i;
3313
3314 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3315 events[i] = 0;
3316
3317 for_each_online_cpu(cpu)
3318 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3319 events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3320 cpu);
3321
3322 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3323 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3324 atomic_long_add(events[i], &mi->vmevents[i]);
3325}
3326
3263#ifdef CONFIG_MEMCG_KMEM 3327#ifdef CONFIG_MEMCG_KMEM
3264static int memcg_online_kmem(struct mem_cgroup *memcg) 3328static int memcg_online_kmem(struct mem_cgroup *memcg)
3265{ 3329{
@@ -3309,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
3309 if (!parent) 3373 if (!parent)
3310 parent = root_mem_cgroup; 3374 parent = root_mem_cgroup;
3311 3375
3376 /*
3377 * Deactivate and reparent kmem_caches. Then flush percpu
3378 * slab statistics to have precise values at the parent and
3379 * all ancestor levels. It's required to keep slab stats
3380 * accurate after the reparenting of kmem_caches.
3381 */
3312 memcg_deactivate_kmem_caches(memcg, parent); 3382 memcg_deactivate_kmem_caches(memcg, parent);
3383 memcg_flush_percpu_vmstats(memcg, true);
3313 3384
3314 kmemcg_id = memcg->kmemcg_id; 3385 kmemcg_id = memcg->kmemcg_id;
3315 BUG_ON(kmemcg_id < 0); 3386 BUG_ON(kmemcg_id < 0);
@@ -4682,6 +4753,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
4682{ 4753{
4683 int node; 4754 int node;
4684 4755
4756 /*
4757 * Flush percpu vmstats and vmevents to guarantee the value correctness
4758 * on parent's and all ancestor levels.
4759 */
4760 memcg_flush_percpu_vmstats(memcg, false);
4761 memcg_flush_percpu_vmevents(memcg);
4685 for_each_node(node) 4762 for_each_node(node)
4686 free_mem_cgroup_per_node_info(memcg, node); 4763 free_mem_cgroup_per_node_info(memcg, node);
4687 free_percpu(memcg->vmstats_percpu); 4764 free_percpu(memcg->vmstats_percpu);
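memcg_flush_percpu_vmstats() exists because the per-CPU deltas of a dying cgroup would otherwise be lost to its ancestors: the sums are folded into the atomic counters of the group itself and of every parent. A single-threaded miniature of that fold (two stats, four CPUs; a sketch, not the locking-aware kernel code):

    #include <stdio.h>

    #define NR_CPUS 4
    #define NR_STAT 2

    struct cg {
            long percpu[NR_CPUS][NR_STAT];
            long vmstats[NR_STAT];
            struct cg *parent;
    };

    /* Fold a dying group's per-CPU deltas into itself and every ancestor. */
    static void flush(struct cg *cg)
    {
            long stat[NR_STAT] = { 0 };

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    for (int i = 0; i < NR_STAT; i++)
                            stat[i] += cg->percpu[cpu][i];

            for (struct cg *mi = cg; mi; mi = mi->parent)
                    for (int i = 0; i < NR_STAT; i++)
                            mi->vmstats[i] += stat[i];
    }

    int main(void)
    {
            struct cg root = { 0 }, child = { .parent = &root };

            child.percpu[0][0] = 5;
            child.percpu[3][0] = 7;
            flush(&child);
            printf("child=%ld root=%ld\n", child.vmstats[0], root.vmstats[0]);
            return 0;
    }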
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 272c6de1bf4e..9c9194959271 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2238,27 +2238,12 @@ static int move_freepages(struct zone *zone,
2238 unsigned int order; 2238 unsigned int order;
2239 int pages_moved = 0; 2239 int pages_moved = 0;
2240 2240
2241#ifndef CONFIG_HOLES_IN_ZONE
2242 /*
2243 * page_zone is not safe to call in this context when
2244 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
2245 * anyway as we check zone boundaries in move_freepages_block().
2246 * Remove at a later date when no bug reports exist related to
2247 * grouping pages by mobility
2248 */
2249 VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
2250 pfn_valid(page_to_pfn(end_page)) &&
2251 page_zone(start_page) != page_zone(end_page));
2252#endif
2253 for (page = start_page; page <= end_page;) { 2241 for (page = start_page; page <= end_page;) {
2254 if (!pfn_valid_within(page_to_pfn(page))) { 2242 if (!pfn_valid_within(page_to_pfn(page))) {
2255 page++; 2243 page++;
2256 continue; 2244 continue;
2257 } 2245 }
2258 2246
2259 /* Make sure we are not inadvertently changing nodes */
2260 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2261
2262 if (!PageBuddy(page)) { 2247 if (!PageBuddy(page)) {
2263 /* 2248 /*
2264 * We assume that pages that could be isolated for 2249 * We assume that pages that could be isolated for
@@ -2273,6 +2258,10 @@ static int move_freepages(struct zone *zone,
2273 continue; 2258 continue;
2274 } 2259 }
2275 2260
2261 /* Make sure we are not inadvertently changing nodes */
2262 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2263 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2264
2276 order = page_order(page); 2265 order = page_order(page);
2277 move_to_free_area(page, &zone->free_area[order], migratetype); 2266 move_to_free_area(page, &zone->free_area[order], migratetype);
2278 page += 1 << order; 2267 page += 1 << order;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c77d1e3761a7..a6c5d0b28321 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3220,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3220 3220
3221#ifdef CONFIG_MEMCG 3221#ifdef CONFIG_MEMCG
3222 3222
3223/* Only used by soft limit reclaim. Do not reuse for anything else. */
3223unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 3224unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3224 gfp_t gfp_mask, bool noswap, 3225 gfp_t gfp_mask, bool noswap,
3225 pg_data_t *pgdat, 3226 pg_data_t *pgdat,
@@ -3235,7 +3236,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3235 }; 3236 };
3236 unsigned long lru_pages; 3237 unsigned long lru_pages;
3237 3238
3238 set_task_reclaim_state(current, &sc.reclaim_state); 3239 WARN_ON_ONCE(!current->reclaim_state);
3240
3239 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 3241 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3240 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 3242 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3241 3243
@@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3253 3255
3254 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 3256 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3255 3257
3256 set_task_reclaim_state(current, NULL);
3257 *nr_scanned = sc.nr_scanned; 3258 *nr_scanned = sc.nr_scanned;
3258 3259
3259 return sc.nr_reclaimed; 3260 return sc.nr_reclaimed;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index ed19d98c9dcd..75b7962439ff 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/wait.h>
44#include <linux/zpool.h> 45#include <linux/zpool.h>
45#include <linux/magic.h> 46#include <linux/magic.h>
46 47
@@ -145,6 +146,8 @@ struct z3fold_header {
145 * @release_wq: workqueue for safe page release 146 * @release_wq: workqueue for safe page release
146 * @work: work_struct for safe page release 147 * @work: work_struct for safe page release
147 * @inode: inode for z3fold pseudo filesystem 148 * @inode: inode for z3fold pseudo filesystem
149 * @destroying: bool to stop migration once we start destruction
150 * @isolated: int to count the number of pages currently in isolation
148 * 151 *
149 * This structure is allocated at pool creation time and maintains metadata 152 * This structure is allocated at pool creation time and maintains metadata
150 * pertaining to a particular z3fold pool. 153 * pertaining to a particular z3fold pool.
@@ -163,8 +166,11 @@ struct z3fold_pool {
163 const struct zpool_ops *zpool_ops; 166 const struct zpool_ops *zpool_ops;
164 struct workqueue_struct *compact_wq; 167 struct workqueue_struct *compact_wq;
165 struct workqueue_struct *release_wq; 168 struct workqueue_struct *release_wq;
169 struct wait_queue_head isolate_wait;
166 struct work_struct work; 170 struct work_struct work;
167 struct inode *inode; 171 struct inode *inode;
172 bool destroying;
173 int isolated;
168}; 174};
169 175
170/* 176/*
@@ -769,6 +775,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
769 goto out_c; 775 goto out_c;
770 spin_lock_init(&pool->lock); 776 spin_lock_init(&pool->lock);
771 spin_lock_init(&pool->stale_lock); 777 spin_lock_init(&pool->stale_lock);
778 init_waitqueue_head(&pool->isolate_wait);
772 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); 779 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
773 if (!pool->unbuddied) 780 if (!pool->unbuddied)
774 goto out_pool; 781 goto out_pool;
@@ -808,6 +815,15 @@ out:
808 return NULL; 815 return NULL;
809} 816}
810 817
818static bool pool_isolated_are_drained(struct z3fold_pool *pool)
819{
820 bool ret;
821
822 spin_lock(&pool->lock);
823 ret = pool->isolated == 0;
824 spin_unlock(&pool->lock);
825 return ret;
826}
811/** 827/**
812 * z3fold_destroy_pool() - destroys an existing z3fold pool 828 * z3fold_destroy_pool() - destroys an existing z3fold pool
813 * @pool: the z3fold pool to be destroyed 829 * @pool: the z3fold pool to be destroyed
@@ -817,6 +833,22 @@ out:
817static void z3fold_destroy_pool(struct z3fold_pool *pool) 833static void z3fold_destroy_pool(struct z3fold_pool *pool)
818{ 834{
819 kmem_cache_destroy(pool->c_handle); 835 kmem_cache_destroy(pool->c_handle);
836 /*
837 * We set pool->destroying under lock to ensure that
838 * z3fold_page_isolate() sees any changes to destroying. This way we
839 * avoid the need for any memory barriers.
840 */
841
842 spin_lock(&pool->lock);
843 pool->destroying = true;
844 spin_unlock(&pool->lock);
845
846 /*
847 * We need to ensure that no pages are being migrated while we destroy
848 * these workqueues, as migration can queue work on either of the
849 * workqueues.
850 */
851 wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
820 852
821 /* 853 /*
822 * We need to destroy pool->compact_wq before pool->release_wq, 854 * We need to destroy pool->compact_wq before pool->release_wq,
@@ -1307,6 +1339,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1307 return atomic64_read(&pool->pages_nr); 1339 return atomic64_read(&pool->pages_nr);
1308} 1340}
1309 1341
1342/*
1343 * z3fold_dec_isolated() expects to be called while pool->lock is held.
1344 */
1345static void z3fold_dec_isolated(struct z3fold_pool *pool)
1346{
1347 assert_spin_locked(&pool->lock);
1348 VM_BUG_ON(pool->isolated <= 0);
1349 pool->isolated--;
1350
1351 /*
1352 * If we have no more isolated pages, we have to see if
1353 * z3fold_destroy_pool() is waiting for a signal.
1354 */
1355 if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
1356 wake_up_all(&pool->isolate_wait);
1357}
1358
1359static void z3fold_inc_isolated(struct z3fold_pool *pool)
1360{
1361 pool->isolated++;
1362}
1363
1310static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) 1364static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1311{ 1365{
1312 struct z3fold_header *zhdr; 1366 struct z3fold_header *zhdr;
@@ -1333,6 +1387,34 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1333 spin_lock(&pool->lock); 1387 spin_lock(&pool->lock);
1334 if (!list_empty(&page->lru)) 1388 if (!list_empty(&page->lru))
1335 list_del(&page->lru); 1389 list_del(&page->lru);
1390 /*
1391 * We need to check for destruction while holding pool->lock, as
1392 * otherwise destruction could see 0 isolated pages, and
1393 * proceed.
1394 */
1395 if (unlikely(pool->destroying)) {
1396 spin_unlock(&pool->lock);
1397 /*
1398 * If this page isn't stale, somebody else holds a
1399 * reference to it. Let's drop our refcount so that they
1400 * can call the release logic.
1401 */
1402 if (unlikely(kref_put(&zhdr->refcount,
1403 release_z3fold_page_locked))) {
1404 /*
1405 * If we get here we have kref problems, so we
1406 * should freak out.
1407 */
1408 WARN(1, "Z3fold is experiencing kref problems\n");
1409 z3fold_page_unlock(zhdr);
1410 return false;
1411 }
1412 z3fold_page_unlock(zhdr);
1413 return false;
1414 }
1415
1416
1417 z3fold_inc_isolated(pool);
1336 spin_unlock(&pool->lock); 1418 spin_unlock(&pool->lock);
1337 z3fold_page_unlock(zhdr); 1419 z3fold_page_unlock(zhdr);
1338 return true; 1420 return true;
@@ -1401,6 +1483,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
1401 1483
1402 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); 1484 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1403 1485
1486 spin_lock(&pool->lock);
1487 z3fold_dec_isolated(pool);
1488 spin_unlock(&pool->lock);
1489
1404 page_mapcount_reset(page); 1490 page_mapcount_reset(page);
1405 put_page(page); 1491 put_page(page);
1406 return 0; 1492 return 0;
@@ -1420,10 +1506,14 @@ static void z3fold_page_putback(struct page *page)
1420 INIT_LIST_HEAD(&page->lru); 1506 INIT_LIST_HEAD(&page->lru);
1421 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { 1507 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1422 atomic64_dec(&pool->pages_nr); 1508 atomic64_dec(&pool->pages_nr);
1509 spin_lock(&pool->lock);
1510 z3fold_dec_isolated(pool);
1511 spin_unlock(&pool->lock);
1423 return; 1512 return;
1424 } 1513 }
1425 spin_lock(&pool->lock); 1514 spin_lock(&pool->lock);
1426 list_add(&page->lru, &pool->lru); 1515 list_add(&page->lru, &pool->lru);
1516 z3fold_dec_isolated(pool);
1427 spin_unlock(&pool->lock); 1517 spin_unlock(&pool->lock);
1428 z3fold_page_unlock(zhdr); 1518 z3fold_page_unlock(zhdr);
1429} 1519}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 57fbb7ced69f..e98bb6ab4f7e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -54,6 +54,7 @@
54#include <linux/mount.h> 54#include <linux/mount.h>
55#include <linux/pseudo_fs.h> 55#include <linux/pseudo_fs.h>
56#include <linux/migrate.h> 56#include <linux/migrate.h>
57#include <linux/wait.h>
57#include <linux/pagemap.h> 58#include <linux/pagemap.h>
58#include <linux/fs.h> 59#include <linux/fs.h>
59 60
@@ -268,6 +269,10 @@ struct zs_pool {
268#ifdef CONFIG_COMPACTION 269#ifdef CONFIG_COMPACTION
269 struct inode *inode; 270 struct inode *inode;
270 struct work_struct free_work; 271 struct work_struct free_work;
272 /* A wait queue for when migration races with async_free_zspage() */
273 struct wait_queue_head migration_wait;
274 atomic_long_t isolated_pages;
275 bool destroying;
271#endif 276#endif
272}; 277};
273 278
@@ -1862,6 +1867,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
1862 zspage->isolated--; 1867 zspage->isolated--;
1863} 1868}
1864 1869
1870static void putback_zspage_deferred(struct zs_pool *pool,
1871 struct size_class *class,
1872 struct zspage *zspage)
1873{
1874 enum fullness_group fg;
1875
1876 fg = putback_zspage(class, zspage);
1877 if (fg == ZS_EMPTY)
1878 schedule_work(&pool->free_work);
1879
1880}
1881
1882static inline void zs_pool_dec_isolated(struct zs_pool *pool)
1883{
1884 VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
1885 atomic_long_dec(&pool->isolated_pages);
1886 /*
1887 * There's no possibility of racing, since wait_for_isolated_drain()
1888 * checks the isolated count under &class->lock after enqueuing
1889 * on migration_wait.
1890 */
1891 if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
1892 wake_up_all(&pool->migration_wait);
1893}
1894
1865static void replace_sub_page(struct size_class *class, struct zspage *zspage, 1895static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1866 struct page *newpage, struct page *oldpage) 1896 struct page *newpage, struct page *oldpage)
1867{ 1897{
@@ -1931,6 +1961,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1931 */ 1961 */
1932 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { 1962 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
1933 get_zspage_mapping(zspage, &class_idx, &fullness); 1963 get_zspage_mapping(zspage, &class_idx, &fullness);
1964 atomic_long_inc(&pool->isolated_pages);
1934 remove_zspage(class, zspage, fullness); 1965 remove_zspage(class, zspage, fullness);
1935 } 1966 }
1936 1967
@@ -2030,8 +2061,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2030 * Page migration is done so let's putback isolated zspage to 2061 * Page migration is done so let's putback isolated zspage to
2031 * the list if @page is final isolated subpage in the zspage. 2062 * the list if @page is final isolated subpage in the zspage.
2032 */ 2063 */
2033 if (!is_zspage_isolated(zspage)) 2064 if (!is_zspage_isolated(zspage)) {
2034 putback_zspage(class, zspage); 2065 /*
2066 * We cannot race with zs_destroy_pool() here because we wait
2067 * for isolation to hit zero before we start destroying.
2068 * Also, we ensure that everyone can see pool->destroying before
2069 * we start waiting.
2070 */
2071 putback_zspage_deferred(pool, class, zspage);
2072 zs_pool_dec_isolated(pool);
2073 }
2035 2074
2036 reset_page(page); 2075 reset_page(page);
2037 put_page(page); 2076 put_page(page);
@@ -2077,13 +2116,12 @@ static void zs_page_putback(struct page *page)
2077 spin_lock(&class->lock); 2116 spin_lock(&class->lock);
2078 dec_zspage_isolation(zspage); 2117 dec_zspage_isolation(zspage);
2079 if (!is_zspage_isolated(zspage)) { 2118 if (!is_zspage_isolated(zspage)) {
2080 fg = putback_zspage(class, zspage);
2081 /* 2119 /*
2082 * Due to page_lock, we cannot free zspage immediately 2120 * Due to page_lock, we cannot free zspage immediately
2083 * so let's defer. 2121 * so let's defer.
2084 */ 2122 */
2085 if (fg == ZS_EMPTY) 2123 putback_zspage_deferred(pool, class, zspage);
2086 schedule_work(&pool->free_work); 2124 zs_pool_dec_isolated(pool);
2087 } 2125 }
2088 spin_unlock(&class->lock); 2126 spin_unlock(&class->lock);
2089} 2127}
@@ -2107,8 +2145,36 @@ static int zs_register_migration(struct zs_pool *pool)
2107 return 0; 2145 return 0;
2108} 2146}
2109 2147
2148static bool pool_isolated_are_drained(struct zs_pool *pool)
2149{
2150 return atomic_long_read(&pool->isolated_pages) == 0;
2151}
2152
2153/* Function for resolving migration */
2154static void wait_for_isolated_drain(struct zs_pool *pool)
2155{
2156
2157 /*
2158 * We're in the process of destroying the pool, so there are no
2159 * active allocations. zs_page_isolate() fails for completely free
2160 * zspages, so we need only wait for the zs_pool's isolated
2161 * count to hit zero.
2162 */
2163 wait_event(pool->migration_wait,
2164 pool_isolated_are_drained(pool));
2165}
2166
2110static void zs_unregister_migration(struct zs_pool *pool) 2167static void zs_unregister_migration(struct zs_pool *pool)
2111{ 2168{
2169 pool->destroying = true;
2170 /*
2171 * We need a memory barrier here to ensure global visibility of
2172 * pool->destroying. Thus pool->isolated pages will either be 0 in which
2173 * case we don't care, or it will be > 0 and pool->destroying will
2174 * ensure that we wake up once isolation hits 0.
2175 */
2176 smp_mb();
2177 wait_for_isolated_drain(pool); /* This can block */
2112 flush_work(&pool->free_work); 2178 flush_work(&pool->free_work);
2113 iput(pool->inode); 2179 iput(pool->inode);
2114} 2180}
@@ -2346,6 +2412,10 @@ struct zs_pool *zs_create_pool(const char *name)
2346 if (!pool->name) 2412 if (!pool->name)
2347 goto err; 2413 goto err;
2348 2414
2415#ifdef CONFIG_COMPACTION
2416 init_waitqueue_head(&pool->migration_wait);
2417#endif
2418
2349 if (create_cache(pool)) 2419 if (create_cache(pool))
2350 goto err; 2420 goto err;
2351 2421
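zs_unregister_migration() must not tear down pool structures while zs_page_migrate() or zs_page_putback() can still reference them, so destruction now waits for the isolated-page count to drain to zero. A pthread analogue of that handshake, where the mutex stands in for the barriers and class->lock the kernel relies on (a sketch, not the kernel's lock-free scheme):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
    static long isolated;
    static int destroying;

    static void dec_isolated(void)
    {
            pthread_mutex_lock(&lock);
            isolated--;
            /* Mirror zs_pool_dec_isolated(): only a destroyer cares. */
            if (isolated == 0 && destroying)
                    pthread_cond_broadcast(&drained);
            pthread_mutex_unlock(&lock);
    }

    static void *migrator(void *arg)
    {
            (void)arg;
            dec_isolated();
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            isolated = 1;     /* one page currently isolated */
            destroying = 1;   /* analogue of pool->destroying + smp_mb() */

            pthread_create(&t, NULL, migrator, NULL);

            pthread_mutex_lock(&lock);
            while (isolated != 0) /* wait_for_isolated_drain() analogue */
                    pthread_cond_wait(&drained, &lock);
            pthread_mutex_unlock(&lock);

            pthread_join(t, NULL);
            printf("safe to tear down the pool\n");
            return 0;
    }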
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 240ed70912d6..d78938e3e008 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
  * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm_packet: potential OGM in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
  */
-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-				      __be16 tvlv_len)
+static bool
+batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+			  const struct batadv_ogm_packet *ogm_packet)
 {
 	int next_buff_pos = 0;

-	next_buff_pos += buff_pos + BATADV_OGM_HLEN;
-	next_buff_pos += ntohs(tvlv_len);
+	/* check if there is enough space for the header */
+	next_buff_pos += buff_pos + sizeof(*ogm_packet);
+	if (next_buff_pos > packet_len)
+		return false;
+
+	/* check if there is enough space for the optional TVLV */
+	next_buff_pos += ntohs(ogm_packet->tvlv_len);

 	return (next_buff_pos <= packet_len) &&
 	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,

 	/* adjust all flags and log packets */
 	while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-					 batadv_ogm_packet->tvlv_len)) {
+					 batadv_ogm_packet)) {
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet
 		 */
@@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,

 	/* unpack the aggregated packets and process them one by one */
 	while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-					 ogm_packet->tvlv_len)) {
+					 ogm_packet)) {
 		batadv_iv_ogm_process(skb, ogm_offset, if_incoming);

 		ogm_offset += BATADV_OGM_HLEN;
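The two batman-adv fixes (here and in bat_v_ogm.c below) are the same pattern: prove that a full header fits in the buffer before dereferencing any of its fields, and only then account for the variable-length TVLV the header describes. A standalone sketch of that two-step check, with a made-up header layout (the real struct batadv_ogm_packet differs):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical reduced packet header; only tvlv_len matters here. */
    struct ogm_hdr {
        uint8_t  type;
        uint8_t  ttl;
        uint16_t tvlv_len;          /* big endian on the wire */
    };

    static bool aggr_packet_fits(int pos, int len, const struct ogm_hdr *h)
    {
        int next = pos + (int)sizeof(*h);

        if (next > len)
            return false;           /* header itself would overrun */
        next += ntohs(h->tvlv_len); /* safe to read only now */
        return next <= len;
    }

    int main(void)
    {
        unsigned char buf[64] = { 0 };
        struct ogm_hdr h = { .type = 1, .ttl = 5, .tvlv_len = htons(8) };

        memcpy(buf, &h, sizeof(h));
        printf("at 0:  %d\n", aggr_packet_fits(0, sizeof(buf),
                                               (struct ogm_hdr *)buf));
        /* Near the end, the header check alone rejects the access. */
        printf("at 62: %d\n", aggr_packet_fits(62, sizeof(buf),
                                               (struct ogm_hdr *)(buf + 62)));
        return 0;
    }

The old code read tvlv_len unconditionally, so a truncated trailing OGM let the parser index past the end of the skb.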
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index fad95ef64e01..bc06e3cdfa84 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
  * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
- * @tvlv_len: tvlv length of the previously considered OGM
+ * @ogm2_packet: potential OGM2 in buffer
  *
  * Return: true if there is enough space for another OGM, false otherwise.
 */
-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
-				     __be16 tvlv_len)
+static bool
+batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+			 const struct batadv_ogm2_packet *ogm2_packet)
 {
 	int next_buff_pos = 0;

-	next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
-	next_buff_pos += ntohs(tvlv_len);
+	/* check if there is enough space for the header */
+	next_buff_pos += buff_pos + sizeof(*ogm2_packet);
+	if (next_buff_pos > packet_len)
+		return false;
+
+	/* check if there is enough space for the optional TVLV */
+	next_buff_pos += ntohs(ogm2_packet->tvlv_len);

 	return (next_buff_pos <= packet_len) &&
 	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
 	ogm_packet = (struct batadv_ogm2_packet *)skb->data;

 	while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
-					ogm_packet->tvlv_len)) {
+					ogm_packet)) {
 		batadv_v_ogm_process(skb, ogm_offset, if_incoming);

 		ogm_offset += BATADV_OGM2_HLEN;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 67d7f83009ae..1d5bdf3a4b65 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -2303,7 +2303,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,

 	while (bucket_tmp < hash->size) {
 		if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
-						   *bucket, &idx_tmp))
+						   bucket_tmp, &idx_tmp))
 			break;

 		bucket_tmp++;
@@ -2420,8 +2420,10 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
 	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
 	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
 	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
-	batadv_mcast_want_rtr4_update(bat_priv, orig, BATADV_NO_FLAGS);
-	batadv_mcast_want_rtr6_update(bat_priv, orig, BATADV_NO_FLAGS);
+	batadv_mcast_want_rtr4_update(bat_priv, orig,
+				      BATADV_MCAST_WANT_NO_RTR4);
+	batadv_mcast_want_rtr6_update(bat_priv, orig,
+				      BATADV_MCAST_WANT_NO_RTR6);

 	spin_unlock_bh(&orig->mcast_handler_lock);
 }
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 6f08fd122a8d..7e052d6f759b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 {
 	struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);

-	return attr ? nla_get_u32(attr) : 0;
+	return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
 }

 /**
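The netlink fix above rejects attributes whose payload is not exactly four bytes before calling nla_get_u32(), so a malformed message cannot make the getter read past the attribute. The same defensive shape in plain C, with a hypothetical TLV type standing in for struct nlattr:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical TLV attribute; stands in for struct nlattr. */
    struct attr {
        uint16_t len;               /* payload length */
        const void *data;
    };

    /* Only trust the payload as a u32 when it is exactly sizeof(u32);
     * a shorter attribute would otherwise read adjacent bytes. */
    static uint32_t attr_get_u32_safe(const struct attr *a)
    {
        uint32_t v = 0;

        if (a && a->len == sizeof(v))
            memcpy(&v, a->data, sizeof(v));
        return v;
    }

    int main(void)
    {
        uint32_t raw = 42;
        struct attr ok = { sizeof(raw), &raw };
        struct attr bad = { 2, &raw }; /* malformed: too short */

        printf("%u %u\n", attr_get_u32_safe(&ok), attr_get_u32_safe(&bad));
        return 0;
    }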
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b9585e7d9d2e..04bc79359a17 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3202,6 +3202,7 @@ struct hci_dev *hci_alloc_dev(void)
 	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
 	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
 	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
+	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

 	mutex_init(&hdev->lock);
 	mutex_init(&hdev->req_lock);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index bb67f4a5479a..402e2cc54044 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -433,6 +433,35 @@ static int auto_accept_delay_set(void *data, u64 val)
 	return 0;
 }

+static int min_encrypt_key_size_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	if (val < 1 || val > 16)
+		return -EINVAL;
+
+	hci_dev_lock(hdev);
+	hdev->min_enc_key_size = val;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int min_encrypt_key_size_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock(hdev);
+	*val = hdev->min_enc_key_size;
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops,
+			min_encrypt_key_size_get,
+			min_encrypt_key_size_set, "%llu\n");
+
 static int auto_accept_delay_get(void *data, u64 *val)
 {
 	struct hci_dev *hdev = data;
@@ -545,6 +574,8 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
 	if (lmp_ssp_capable(hdev)) {
 		debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
 				    hdev, &ssp_debug_mode_fops);
+		debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs,
+				    hdev, &min_encrypt_key_size_fops);
 		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
 				    hdev, &auto_accept_delay_fops);
 	}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5abd423b55fa..8d889969ae7e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -101,6 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
 {
 	struct sk_buff *skb;
 	struct sock *sk = sock->sk;
+	int ret;

 	BT_DBG("session %p data %p size %d", session, data, size);

@@ -114,13 +115,17 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
 	}

 	skb_put_u8(skb, hdr);
-	if (data && size > 0)
+	if (data && size > 0) {
 		skb_put_data(skb, data, size);
+		ret = size;
+	} else {
+		ret = 0;
+	}

 	skb_queue_tail(transmit, skb);
 	wake_up_interruptible(sk_sleep(sk));

-	return 0;
+	return ret;
 }

 static int hidp_send_ctrl_message(struct hidp_session *session,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cc506fe99b4d..dfc1edb168b7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1361,7 +1361,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
 	 * actually encrypted before enforcing a key size.
 	 */
 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
-		hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
+		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
 }

 static void l2cap_do_start(struct l2cap_chan *chan)
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c8177a89f52c..4096d8a74a2b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
 			return NF_DROP;
 		}

-		ADD_COUNTER(*(counter_base + i), 1, skb->len);
+		ADD_COUNTER(*(counter_base + i), skb->len, 1);

 		/* these should only watch: not modify, nor tell us
 		 * what to do with the packet
@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
 			continue;
 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
 		for (i = 0; i < nentries; i++)
-			ADD_COUNTER(counters[i], counter_base[i].pcnt,
-				    counter_base[i].bcnt);
+			ADD_COUNTER(counters[i], counter_base[i].bcnt,
+				    counter_base[i].pcnt);
 	}
 }

@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,

 	/* we add to the counters of the first cpu */
 	for (i = 0; i < num_counters; i++)
-		ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
+		ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);

 	write_unlock_bh(&t->lock);
 	ret = 0;
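All three ebtables call sites had ADD_COUNTER()'s arguments transposed: the macro takes bytes before packets, so the old calls credited skb->len packets and one byte per frame. A toy reproduction of what the swap does to the statistics; the macro body here is assumed from the call sites, not copied from the kernel headers:

    #include <stdio.h>

    struct counter { unsigned long long pcnt, bcnt; };

    /* Sketch of the macro's shape: bytes first, packets second. */
    #define ADD_COUNTER(c, bytes, packets) \
        do { (c).bcnt += (bytes); (c).pcnt += (packets); } while (0)

    int main(void)
    {
        struct counter fixed = { 0, 0 }, swapped = { 0, 0 };
        unsigned int skb_len = 1500;

        ADD_COUNTER(fixed, skb_len, 1);   /* new call order */
        ADD_COUNTER(swapped, 1, skb_len); /* old, transposed order */
        printf("fixed:   %llu pkts, %llu bytes\n", fixed.pcnt, fixed.bcnt);
        printf("swapped: %llu pkts, %llu bytes\n", swapped.pcnt, swapped.bcnt);
        return 0;
    }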
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 1804e867f715..7c9e92b2f806 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -53,7 +53,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
 			goto err;

 		br_vlan_get_proto(br_dev, &p_proto);
-		nft_reg_store16(dest, p_proto);
+		nft_reg_store16(dest, htons(p_proto));
 		return;
 	}
 	default:
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 5d6724cee38f..4f75df40fb12 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
 	if (key) {
 		kfree(key->key);
 		key->key = NULL;
-		crypto_free_sync_skcipher(key->tfm);
-		key->tfm = NULL;
+		if (key->tfm) {
+			crypto_free_sync_skcipher(key->tfm);
+			key->tfm = NULL;
+		}
 	}
 }

diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0b2df09b2554..78ae6e8c953d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1496,7 +1496,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 	struct ceph_osds up, acting;
 	bool force_resend = false;
 	bool unpaused = false;
-	bool legacy_change;
+	bool legacy_change = false;
 	bool split = false;
 	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
 	bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1584,15 +1584,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 		t->osd = acting.primary;
 	}

-	if (unpaused || legacy_change || force_resend ||
-	    (split && con && CEPH_HAVE_FEATURE(con->peer_features,
-					       RESEND_ON_SPLIT)))
+	if (unpaused || legacy_change || force_resend || split)
 		ct_res = CALC_TARGET_NEED_RESEND;
 	else
 		ct_res = CALC_TARGET_NO_ACTION;

 out:
-	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
+	     legacy_change, force_resend, split, ct_res, t->osd);
 	return ct_res;
 }

diff --git a/net/core/filter.c b/net/core/filter.c
index 7878f918b8c0..4c6a252d4212 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8757,13 +8757,13 @@ sk_reuseport_is_valid_access(int off, int size,
 		return size == size_default;

 	/* Fields that allow narrowing */
-	case offsetof(struct sk_reuseport_md, eth_protocol):
+	case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
 		if (size < FIELD_SIZEOF(struct sk_buff, protocol))
 			return false;
 		/* fall through */
-	case offsetof(struct sk_reuseport_md, ip_protocol):
-	case offsetof(struct sk_reuseport_md, bind_inany):
-	case offsetof(struct sk_reuseport_md, len):
+	case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
+	case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
+	case bpf_ctx_range(struct sk_reuseport_md, len):
 		bpf_ctx_record_field_size(info, size_default);
 		return bpf_ctx_narrow_access_ok(off, size, size_default);

diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3e6fedb57bc1..2470b4b404e6 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
 		mutex_unlock(&flow_dissector_mutex);
 		return -ENOENT;
 	}
-	bpf_prog_put(attached);
 	RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
+	bpf_prog_put(attached);
 	mutex_unlock(&flow_dissector_mutex);
 	return 0;
 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2cf27da1baeb..849380a622ef 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, q_index);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
-		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
+		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
 			skb_queue_head(&npinfo->txq, skb);
 			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,

 				HARD_TX_UNLOCK(dev, txq);

-				if (status == NETDEV_TX_OK)
+				if (dev_xmit_complete(status))
 					break;

 			}
@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,

 	}

-	if (status != NETDEV_TX_OK) {
+	if (!dev_xmit_complete(status)) {
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
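The netpoll change stops retransmitting skbs the driver has already consumed: only a busy queue means "try again later", while dropped-but-consumed status codes must not be requeued (doing so double-frees or replays the buffer). A simplified model of that predicate; the constants below are stand-ins, not the kernel's NETDEV_TX_*/NET_XMIT_* values:

    #include <stdbool.h>
    #include <stdio.h>

    enum { TX_OK = 0x00, XMIT_DROP = 0x01, TX_BUSY = 0x10 };

    /* Mirrors the idea behind dev_xmit_complete(): the skb was
     * consumed unless the driver reported BUSY, so even a DROP
     * counts as "done" and must not be retransmitted. */
    static bool xmit_complete(int status)
    {
        return status != TX_BUSY;
    }

    int main(void)
    {
        int codes[] = { TX_OK, XMIT_DROP, TX_BUSY };

        for (int i = 0; i < 3; i++)
            printf("status %#04x -> %s\n", codes[i],
                   xmit_complete(codes[i]) ? "done" : "requeue");
        return 0;
    }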
diff --git a/net/core/sock.c b/net/core/sock.c
index d57b0cc995a0..545fac19a711 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1992,6 +1992,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 }
 EXPORT_SYMBOL(skb_set_owner_w);

+static bool can_skb_orphan_partial(const struct sk_buff *skb)
+{
+#ifdef CONFIG_TLS_DEVICE
+	/* Drivers depend on in-order delivery for crypto offload,
+	 * partial orphan breaks out-of-order-OK logic.
+	 */
+	if (skb->decrypted)
+		return false;
+#endif
+	return (skb->destructor == sock_wfree ||
+		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
+}
+
 /* This helper is used by netem, as it can hold packets in its
  * delay queue. We want to allow the owner socket to send more
  * packets, as if they were already TX completed by a typical driver.
@@ -2003,11 +2016,7 @@ void skb_orphan_partial(struct sk_buff *skb)
 	if (skb_is_tcp_pure_ack(skb))
 		return;

-	if (skb->destructor == sock_wfree
-#ifdef CONFIG_INET
-	    || skb->destructor == tcp_wfree
-#endif
-	    ) {
+	if (can_skb_orphan_partial(skb)) {
 		struct sock *sk = skb->sk;

 		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
@@ -3278,16 +3287,17 @@ static __init int net_inuse_init(void)

 core_initcall(net_inuse_init);

-static void assign_proto_idx(struct proto *prot)
+static int assign_proto_idx(struct proto *prot)
 {
 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
 		pr_err("PROTO_INUSE_NR exhausted\n");
-		return;
+		return -ENOSPC;
 	}

 	set_bit(prot->inuse_idx, proto_inuse_idx);
+	return 0;
 }

 static void release_proto_idx(struct proto *prot)
@@ -3296,8 +3306,9 @@ static void release_proto_idx(struct proto *prot)
 	clear_bit(prot->inuse_idx, proto_inuse_idx);
 }
 #else
-static inline void assign_proto_idx(struct proto *prot)
+static inline int assign_proto_idx(struct proto *prot)
 {
+	return 0;
 }

 static inline void release_proto_idx(struct proto *prot)
@@ -3346,6 +3357,8 @@ static int req_prot_init(const struct proto *prot)

 int proto_register(struct proto *prot, int alloc_slab)
 {
+	int ret = -ENOBUFS;
+
 	if (alloc_slab) {
 		prot->slab = kmem_cache_create_usercopy(prot->name,
 					prot->obj_size, 0,
@@ -3382,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab)
 	}

 	mutex_lock(&proto_list_mutex);
+	ret = assign_proto_idx(prot);
+	if (ret) {
+		mutex_unlock(&proto_list_mutex);
+		goto out_free_timewait_sock_slab_name;
+	}
 	list_add(&prot->node, &proto_list);
-	assign_proto_idx(prot);
 	mutex_unlock(&proto_list_mutex);
-	return 0;
+	return ret;

 out_free_timewait_sock_slab_name:
-	kfree(prot->twsk_prot->twsk_slab_name);
+	if (alloc_slab && prot->twsk_prot)
+		kfree(prot->twsk_prot->twsk_slab_name);
 out_free_request_sock_slab:
-	req_prot_cleanup(prot->rsk_prot);
+	if (alloc_slab) {
+		req_prot_cleanup(prot->rsk_prot);

-	kmem_cache_destroy(prot->slab);
-	prot->slab = NULL;
+		kmem_cache_destroy(prot->slab);
+		prot->slab = NULL;
+	}
 out:
-	return -ENOBUFS;
+	return ret;
 }
 EXPORT_SYMBOL(proto_register);

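With assign_proto_idx() now able to fail, proto_register()'s error labels become reachable even when alloc_slab is zero, which is why each unwind step above re-checks what was actually allocated. The shape of that fix in a self-contained sketch, using userspace malloc in place of the slab caches (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct proto { void *slab; char *twsk_name; };

    static int assign_idx(void) { return -1; /* force the failure path */ }

    static int register_proto(struct proto *p, int alloc_slab)
    {
        int ret = -1;

        if (alloc_slab) {
            p->slab = malloc(64);
            p->twsk_name = malloc(16);
            if (!p->slab || !p->twsk_name)
                goto out_free;
        }
        ret = assign_idx();
        if (ret)
            goto out_free;
        return 0;

    out_free:
        if (alloc_slab) {             /* only undo what was done */
            free(p->twsk_name);
            free(p->slab);
            p->slab = NULL;
        }
        return ret;
    }

    int main(void)
    {
        struct proto p = { 0 };

        printf("no slab:   %d\n", register_proto(&p, 0));
        printf("with slab: %d\n", register_proto(&p, 1));
        return 0;
    }

Without the alloc_slab guards, the no-slab failure path would free pointers that were never allocated.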
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 3312a5849a97..c13ffbd33d8d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,6 +19,7 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
 static DEFINE_MUTEX(sock_diag_table_mutex);
 static struct workqueue_struct *broadcast_wq;
+static atomic64_t cookie_gen;

 u64 sock_gen_cookie(struct sock *sk)
 {
@@ -27,7 +28,7 @@ u64 sock_gen_cookie(struct sock *sk)

 		if (res)
 			return res;
-		res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
+		res = atomic64_inc_return(&cookie_gen);
 		atomic64_cmpxchg(&sk->sk_cookie, 0, res);
 	}
 }
diff --git a/net/core/stream.c b/net/core/stream.c
index e94bb02a5629..4f1d4aa5fb38 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 	int err = 0;
 	long vm_wait = 0;
 	long current_timeo = *timeo_p;
-	bool noblock = (*timeo_p ? false : true);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);

 	if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)

 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 			goto do_error;
-		if (!*timeo_p) {
-			if (noblock)
-				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-			goto do_nonblock;
-		}
+		if (!*timeo_p)
+			goto do_eagain;
 		if (signal_pending(current))
 			goto do_interrupted;
 		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ out:
 do_error:
 	err = -EPIPE;
 	goto out;
-do_nonblock:
+do_eagain:
+	/* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
+	 * be generated later.
+	 * When TCP receives ACK packets that make room, tcp_check_space()
+	 * only calls tcp_new_space() if SOCK_NOSPACE is set.
+	 */
+	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 	err = -EAGAIN;
 	goto out;
 do_interrupted:
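The sk_stream_wait_memory() fix guarantees SOCK_NOSPACE is set on every -EAGAIN return, because the ACK-driven wakeup only fires when that flag was set; without it, an edge-triggered epoll user could stall forever waiting for EPOLLOUT. A toy model of the contract (illustrative names, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    static bool nospace;

    static int try_send(bool memory_free)
    {
        if (memory_free)
            return 0;
        nospace = true;  /* the fix: always set before returning EAGAIN */
        return -11;      /* -EAGAIN */
    }

    /* tcp_check_space() analogue: wakeup happens only if the flag
     * was set when the sender bailed out. */
    static void ack_makes_room(void)
    {
        if (nospace) {
            nospace = false;
            puts("EPOLLOUT generated");
        }
    }

    int main(void)
    {
        printf("send: %d\n", try_send(false));
        ack_makes_room();
        return 0;
    }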
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 4ec5b7f85d51..09d9286b27cc 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
 {
 	int port;

+	if (!ds->ops->port_mdb_add)
+		return;
+
 	for_each_set_bit(port, bitmap, ds->num_ports)
 		ds->ops->port_mdb_add(ds, port, mdb);
 }
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 6ebbd799c4eb..67a1bc635a7b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -28,6 +28,7 @@
  *
  * RSV - VID[9]:
  * To be used for further expansion of SWITCH_ID or for other purposes.
+ * Must be transmitted as zero and ignored on receive.
  *
  * SWITCH_ID - VID[8:6]:
  * Index of switch within DSA tree. Must be between 0 and
@@ -35,6 +36,7 @@
  *
  * RSV - VID[5:4]:
  * To be used for further expansion of PORT or for other purposes.
+ * Must be transmitted as zero and ignored on receive.
  *
  * PORT - VID[3:0]:
  * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index e4aba5d485be..bbe9b3b2d395 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -170,7 +170,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
 	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
 	if (!reasm_data)
 		goto out_oom;
-	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

 	skb->dev = ldev;
 	skb->tstamp = fq->q.stamp;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index dacbd58e1799..badc5cfe4dc6 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = {

 static int __init af_ieee802154_init(void)
 {
-	int rc = -EINVAL;
+	int rc;

 	rc = proto_register(&ieee802154_raw_prot, 1);
 	if (rc)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b2b3d291ab0..1ab2fb6bb37d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,

 		if (filter->dump_exceptions) {
 			err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
-						 &i_fa, s_fa);
+						 &i_fa, s_fa, flags);
 			if (err < 0)
 				goto stop;
 		}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 1510e951f451..4298aae74e0e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,

 	if (!rt)
 		goto out;
-	net = dev_net(rt->dst.dev);
+
+	if (rt->dst.dev)
+		net = dev_net(rt->dst.dev);
+	else if (skb_in->dev)
+		net = dev_net(skb_in->dev);
+	else
+		goto out;

 	/*
 	 * Find the original header. It is expected to be valid, of course.
@@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb)
 		return false;
 	}

-	icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
+	icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
 	return true;
 }

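Two distinct icmp.c fixes: __icmp_send() now tolerates a route without a device by falling back to skb_in->dev, and icmp_redirect() converts the on-wire gateway address to host byte order before handing it on. The second is a plain endianness bug; a short demonstration of what the missing ntohl() costs on a little-endian host:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 192.168.0.1 as it sits in the ICMP header (network order). */
        uint32_t wire = htonl(0xC0A80001);

        printf("raw bits as host int: %#010x\n", (unsigned)wire);
        printf("after ntohl():        %#010x\n", (unsigned)ntohl(wire));
        return 0;
    }

On little-endian machines the first line prints the byte-swapped value, which is what the redirect handler was previously comparing against routes.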
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 180f6896b98b..480d0b22db1a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);

 void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 {
-	__ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+	__ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ip_mc_inc_group);

@@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
 	iml->sflist = NULL;
 	iml->sfmode = mode;
 	rcu_assign_pointer(inet->mc_list, iml);
-	__ip_mc_inc_group(in_dev, addr, mode);
+	____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
 	err = 0;
 done:
 	return err;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index a999451345f9..10d31733297d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -475,11 +475,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
 EXPORT_SYMBOL(inet_frag_reasm_prepare);

 void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
-			    void *reasm_data)
+			    void *reasm_data, bool try_coalesce)
 {
 	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
 	struct rb_node *rbn;
 	struct sk_buff *fp;
+	int sum_truesize;

 	skb_push(head, head->data - skb_network_header(head));

@@ -487,25 +488,41 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
 	fp = FRAG_CB(head)->next_frag;
 	rbn = rb_next(&head->rbnode);
 	rb_erase(&head->rbnode, &q->rb_fragments);
+
+	sum_truesize = head->truesize;
 	while (rbn || fp) {
 		/* fp points to the next sk_buff in the current run;
 		 * rbn points to the next run.
 		 */
 		/* Go through the current run. */
 		while (fp) {
-			*nextp = fp;
-			nextp = &fp->next;
-			fp->prev = NULL;
-			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
-			fp->sk = NULL;
-			head->data_len += fp->len;
-			head->len += fp->len;
+			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
+			bool stolen;
+			int delta;
+
+			sum_truesize += fp->truesize;
 			if (head->ip_summed != fp->ip_summed)
 				head->ip_summed = CHECKSUM_NONE;
 			else if (head->ip_summed == CHECKSUM_COMPLETE)
 				head->csum = csum_add(head->csum, fp->csum);
-			head->truesize += fp->truesize;
-			fp = FRAG_CB(fp)->next_frag;
+
+			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
+							     &delta)) {
+				kfree_skb_partial(fp, stolen);
+			} else {
+				fp->prev = NULL;
+				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+				fp->sk = NULL;
+
+				head->data_len += fp->len;
+				head->len += fp->len;
+				head->truesize += fp->truesize;
+
+				*nextp = fp;
+				nextp = &fp->next;
+			}
+
+			fp = next_frag;
 		}
 		/* Move to the next run. */
 		if (rbn) {
@@ -516,7 +533,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
 			rbn = rbnext;
 		}
 	}
-	sub_frag_mem_limit(q->fqdir, head->truesize);
+	sub_frag_mem_limit(q->fqdir, sum_truesize);

 	*nextp = NULL;
 	skb_mark_not_on_list(head);
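With optional coalescing, a merged fragment is freed via kfree_skb_partial() instead of being linked into the frag list, so crediting the memory limit with head->truesize alone would under-account. The loop therefore keeps a running sum over every consumed fragment; the bookkeeping in isolation, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        int head_truesize = 512;
        int frag_truesize[] = { 256, 384, 128 };
        int sum_truesize = head_truesize;

        /* Every fragment is consumed, whether merged into head or
         * linked behind it, so every truesize joins the credit. */
        for (int i = 0; i < 3; i++)
            sum_truesize += frag_truesize[i];
        printf("memory to credit back: %d\n", sum_truesize);
        return 0;
    }

The follow-up hunks below wire the new flag through: IPv4 enables coalescing only for local delivery, IPv6 reassembly enables it, and the netfilter/6lowpan paths keep it off.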
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4385eb9e781f..cfeb8890f94e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -393,6 +393,11 @@ err:
 	return err;
 }

+static bool ip_frag_coalesce_ok(const struct ipq *qp)
+{
+	return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
+}
+
 /* Build a new IP datagram from all its fragments. */
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 			 struct sk_buff *prev_tail, struct net_device *dev)
@@ -421,7 +426,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	if (len > 65535)
 		goto out_oversize;

-	inet_frag_reasm_finish(&qp->q, skb, reasm_data);
+	inet_frag_reasm_finish(&qp->q, skb, reasm_data,
+			       ip_frag_coalesce_ok(qp));

 	skb->dev = dev;
 	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 517300d587a7..b6a6f18c3dd1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
 /* called with rcu_read_lock held */
 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
-			struct sk_buff *skb, u32 portid, u32 seq)
+			struct sk_buff *skb, u32 portid, u32 seq,
+			unsigned int flags)
 {
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
@@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	u32 error;
 	u32 metrics[RTAX_MAX];

-	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
+	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
 	if (!nlh)
 		return -EMSGSIZE;

@@ -2860,7 +2861,7 @@ nla_put_failure:
 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
 			    struct netlink_callback *cb, u32 table_id,
 			    struct fnhe_hash_bucket *bucket, int genid,
-			    int *fa_index, int fa_start)
+			    int *fa_index, int fa_start, unsigned int flags)
 {
 	int i;

@@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
 					   table_id, NULL, skb,
 					   NETLINK_CB(cb->skb).portid,
-					   cb->nlh->nlmsg_seq);
+					   cb->nlh->nlmsg_seq, flags);
 			if (err)
 				return err;
 next:
@@ -2904,7 +2905,7 @@ next:

 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		       u32 table_id, struct fib_info *fi,
-		       int *fa_index, int fa_start)
+		       int *fa_index, int fa_start, unsigned int flags)
 {
 	struct net *net = sock_net(cb->skb->sk);
 	int nhsel, genid = fnhe_genid(net);
@@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		err = 0;
 		if (bucket)
 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
-					       genid, fa_index, fa_start);
+					       genid, fa_index, fa_start,
+					       flags);
 		rcu_read_unlock();
 		if (err)
 			return err;
@@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 				fl4.flowi4_tos, res.fi, 0);
 	} else {
 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
-				   NETLINK_CB(in_skb).portid,
-				   nlh->nlmsg_seq, 0);
+				   NETLINK_CB(in_skb).portid,
+				   nlh->nlmsg_seq, 0);
 	}
 	if (err < 0)
 		goto errout_rcu;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 776905899ac0..61082065b26a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 	return mss_now;
 }

+/* In some cases, both sendpage() and sendmsg() could have added
+ * an skb to the write queue, but failed adding payload on it.
+ * We need to remove it to consume less memory, but more
+ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
+ * users.
+ */
+static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (skb && !skb->len) {
+		tcp_unlink_write_queue(skb, sk);
+		if (tcp_write_queue_empty(sk))
+			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+		sk_wmem_free_skb(sk, skb);
+	}
+}
+
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 			 size_t size, int flags)
 {
@@ -984,6 +1000,9 @@ new_segment:
 			if (!skb)
 				goto wait_for_memory;

+#ifdef CONFIG_TLS_DEVICE
+			skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
 			skb_entail(sk, skb);
 			copy = size_goal;
 		}
@@ -1061,6 +1080,7 @@ out:
 	return copied;

 do_error:
+	tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
 	if (copied)
 		goto out;
 out_err:
@@ -1385,18 +1405,11 @@ out_nopush:
 	sock_zerocopy_put(uarg);
 	return copied + copied_syn;

+do_error:
+	skb = tcp_write_queue_tail(sk);
 do_fault:
-	if (!skb->len) {
-		tcp_unlink_write_queue(skb, sk);
-		/* It is the one place in all of TCP, except connection
-		 * reset, where we can be unlinking the send_head.
-		 */
-		if (tcp_write_queue_empty(sk))
-			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
-		sk_wmem_free_skb(sk, skb);
-	}
+	tcp_remove_empty_skb(sk, skb);

-do_error:
 	if (copied + copied_syn)
 		goto out;
 out_err:
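The tcp.c hunks fold two duplicated error paths into tcp_remove_empty_skb(): both sendmsg() and sendpage() can leave a zero-length skb queued after a failure, and both now unlink it through one helper so edge-triggered epoll users still get EPOLLOUT. A sketch of the refactor's shape, with types reduced to the bare minimum (not the kernel's):

    #include <stdio.h>

    struct skb { int len; };
    struct sock { struct skb *tail; };

    /* Shared cleanup: drop a queued-but-empty tail skb. */
    static void remove_empty_tail(struct sock *sk)
    {
        if (sk->tail && sk->tail->len == 0) {
            printf("unlinking empty tail skb\n");
            sk->tail = NULL;  /* stand-in for unlink + free */
        }
    }

    static int sendmsg_error(struct sock *sk)  { remove_empty_tail(sk); return -14; }
    static int sendpage_error(struct sock *sk) { remove_empty_tail(sk); return -12; }

    int main(void)
    {
        struct skb empty = { 0 };
        struct sock sk = { &empty };

        printf("sendmsg:  %d\n", sendmsg_error(&sk));
        sk.tail = &empty;
        printf("sendpage: %d\n", sendpage_error(&sk));
        return 0;
    }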
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3d1e15401384..8a56e09cfb0e 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -398,10 +398,14 @@ more_data:
 static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	struct sk_msg tmp, *msg_tx = NULL;
-	int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
 	int copied = 0, err = 0;
 	struct sk_psock *psock;
 	long timeo;
+	int flags;
+
+	/* Don't let internal do_tcp_sendpages() flags through */
+	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
+	flags |= MSG_NO_SHARED_FRAGS;

 	psock = sk_psock_get(sk);
 	if (unlikely(!psock))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6e4afc48d7bb..8a645f304e6c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1320,6 +1320,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
 	if (!buff)
 		return -ENOMEM; /* We'll just try again later. */
+	skb_copy_decrypted(buff, skb);

 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
@@ -1874,6 +1875,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
 		return -ENOMEM;
+	skb_copy_decrypted(buff, skb);

 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
@@ -2051,7 +2053,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
 		if (len <= skb->len)
 			break;

-		if (unlikely(TCP_SKB_CB(skb)->eor))
+		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
 			return false;

 		len -= skb->len;
@@ -2143,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	sk_mem_charge(sk, nskb->truesize);

 	skb = tcp_send_head(sk);
+	skb_copy_decrypted(nskb, skb);

 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
@@ -2167,6 +2170,7 @@ static int tcp_mtu_probe(struct sock *sk)
 			 * we need to propagate it to the new skb.
 			 */
 			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
+			tcp_skb_collapse_tstamp(nskb, skb);
 			tcp_unlink_write_queue(skb, sk);
 			sk_wmem_free_skb(sk, skb);
 		} else {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index dc73888c7859..6a576ff92c39 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
 	if (!idev) {
 		idev = ipv6_add_dev(dev);
 		if (IS_ERR(idev))
-			return NULL;
+			return idev;
 	}

 	if (dev->flags&IFF_UP)
@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
 	int err = 0;

 	if (addr_type == IPV6_ADDR_ANY ||
-	    addr_type & IPV6_ADDR_MULTICAST ||
+	    (addr_type & IPV6_ADDR_MULTICAST &&
+	     !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
 	    (!(idev->dev->flags & IFF_LOOPBACK) &&
 	     !netif_is_l3_master(idev->dev) &&
 	     addr_type & IPV6_ADDR_LOOPBACK))
@@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
 	ASSERT_RTNL();

 	idev = ipv6_find_idev(dev);
-	if (!idev)
-		return ERR_PTR(-ENOBUFS);
+	if (IS_ERR(idev))
+		return idev;

 	if (idev->cnf.disable_ipv6)
 		return ERR_PTR(-EACCES);
@@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev)
 	ASSERT_RTNL();

 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev)
 	 */

 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev)
 	ASSERT_RTNL();

 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;

 	idev = ipv6_find_idev(dev);
-	if (!idev)
-		return -ENOBUFS;
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);

 	if (!ipv6_allow_optimistic_dad(net, idev))
 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7f3f13c37916..eaa4c2cc2fbb 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 	if (pmc) {
 		im->idev = pmc->idev;
 		if (im->mca_sfmode == MCAST_INCLUDE) {
-			im->mca_tomb = pmc->mca_tomb;
-			im->mca_sources = pmc->mca_sources;
+			swap(im->mca_tomb, pmc->mca_tomb);
+			swap(im->mca_sources, pmc->mca_sources);
 			for (psf = im->mca_sources; psf; psf = psf->sf_next)
 				psf->sf_crcount = idev->mc_qrv;
 		} else {
 			im->mca_crcount = idev->mc_qrv;
 		}
 		in6_dev_put(pmc->idev);
+		ip6_mc_clear_src(pmc);
 		kfree(pmc);
 	}
 	spin_unlock_bh(&im->mca_lock);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 0f82c150543b..fed9666a2f7d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,

 	skb_reset_transport_header(skb);

-	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);

 	skb->ignore_df = 1;
 	skb->dev = dev;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index ca05b16f1bb9..1f5d4d196dcc 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -282,7 +282,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,

 	skb_reset_transport_header(skb);

-	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);

 	skb->dev = dev;
 	ipv6_hdr(skb)->payload_len = htons(payload_len);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4d458067d80d..111c400199ec 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1546,6 +1546,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (is_multicast_ether_addr(mac))
 		return -EINVAL;

+	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+	    sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    !sdata->u.mgd.associated)
+		return -EINVAL;
+
 	sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
 	if (!sta)
 		return -ENOMEM;
@@ -1553,10 +1558,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 		sta->sta.tdls = true;

-	if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    !sdata->u.mgd.associated)
-		return -EINVAL;
-
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
 		sta_info_free(local, sta);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3c1ab870fefe..768d14c9a716 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2447,11 +2447,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
 		      skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
 		     sdata->control_port_over_nl80211)) {
 		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-		bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
+		bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);

 		cfg80211_rx_control_port(dev, skb, noencrypt);
 		dev_kfree_skb(skb);
 	} else {
+		memset(skb->cb, 0, sizeof(skb->cb));
+
 		/* deliver to local stack */
 		if (rx->napi)
 			napi_gro_receive(rx->napi, skb);
@@ -2546,8 +2548,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)

 	if (skb) {
 		skb->protocol = eth_type_trans(skb, dev);
-		memset(skb->cb, 0, sizeof(skb->cb));
-
 		ieee80211_deliver_skb_to_local_stack(skb, rx);
 	}

diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index d25e91d7bdc1..44b675016393 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
 	mpls_stats_inc_outucastpkts(out_dev, skb);

 	if (rt) {
-		if (rt->rt_gw_family == AF_INET)
-			err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
-					 skb);
-		else if (rt->rt_gw_family == AF_INET6)
+		if (rt->rt_gw_family == AF_INET6)
 			err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
 					 skb);
+		else
+			err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
+					 skb);
 	} else if (rt6) {
 		if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
 			/* 6PE (RFC 4798) */
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 5c3fad8cba57..0187e65176c0 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
 	checksum = ncsi_calculate_checksum((unsigned char *)h,
 					   sizeof(*h) + nca->payload);
 	pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
-		    nca->payload);
+		    ALIGN(nca->payload, 4));
 	*pchecksum = htonl(checksum);
 }

@@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)

 int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
 {
+	struct ncsi_cmd_handler *nch = NULL;
 	struct ncsi_request *nr;
+	unsigned char type;
 	struct ethhdr *eh;
-	struct ncsi_cmd_handler *nch = NULL;
 	int i, ret;

+	/* Use OEM generic handler for Netlink request */
+	if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
+		type = NCSI_PKT_CMD_OEM;
+	else
+		type = nca->type;
+
 	/* Search for the handler */
 	for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
-		if (ncsi_cmd_handlers[i].type == nca->type) {
+		if (ncsi_cmd_handlers[i].type == type) {
 			if (ncsi_cmd_handlers[i].handler)
 				nch = &ncsi_cmd_handlers[i];
 			else
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 7581bf919885..d876bd55f356 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 	if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
 	    ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
 		netdev_dbg(nr->ndp->ndev.dev,
-			   "NCSI: non zero response/reason code\n");
+			   "NCSI: non zero response/reason code %04xh, %04xh\n",
+			   ntohs(h->code), ntohs(h->reason));
 		return -EPERM;
 	}
 
@@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 	 * sender doesn't support checksum according to NCSI
 	 * specification.
 	 */
-	pchecksum = (__be32 *)((void *)(h + 1) + payload - 4);
+	pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
 	if (ntohl(*pchecksum) == 0)
 		return 0;
 
@@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 					   sizeof(*h) + payload - 4);
 
 	if (*pchecksum != htonl(checksum)) {
-		netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n");
+		netdev_dbg(nr->ndp->ndev.dev,
+			   "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
+			   *pchecksum, htonl(checksum));
 		return -EINVAL;
 	}
 
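Both NCSI changes above place the trailing checksum on a 4-byte boundary past the payload, so the command and response sides agree on its offset. A standalone check of the offset arithmetic (the 16-byte header size is a stand-in, not taken from the NCSI structures):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* as in the kernel */

int main(void)
{
	unsigned int hdr = 16;	/* stand-in for sizeof(struct ncsi_pkt_hdr) */
	unsigned int payloads[] = { 4, 5, 7, 8 };

	for (int i = 0; i < 4; i++)
		printf("payload %u -> checksum offset %u\n",
		       payloads[i], hdr + ALIGN(payloads[i], 4));
	/* 4 -> 20, 5 -> 24, 7 -> 24, 8 -> 24: unaligned payloads are padded out. */
	return 0;
}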
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a542761e90d1..81a8ef42b88d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  * table location, we assume id gets exposed to userspace.
  *
  * Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
  *
  * 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
@@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
 	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
 
 	a = (unsigned long)ct;
-	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-	c = (unsigned long)ct->ext;
-	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
 				   &ct_id_seed);
 #ifdef CONFIG_64BIT
 	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
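After this change nf_ct_get_id() only feeds values that stay fixed for the entry's whole lifetime into the keyed hash: ct->ext, which can be reallocated, is gone, and only the original-direction tuple is hashed. A toy sketch of the shape of the computation (splitmix64 standing in for the kernel's keyed siphash):

#include <stdint.h>
#include <stdio.h>

static uint64_t mix64(uint64_t x)	/* splitmix64 finalizer, illustrative */
{
	x ^= x >> 30; x *= 0xbf58476d1ce4e5b9ULL;
	x ^= x >> 27; x *= 0x94d049bb133111ebULL;
	return x ^ (x >> 31);
}

struct toy_ct {	/* only fields that never change after confirmation */
	const void *addr, *master, *netns;
	uint64_t orig_tuple_hash;
};

static uint32_t toy_ct_get_id(const struct toy_ct *ct)
{
	return (uint32_t)(mix64((uintptr_t)ct->addr) ^
			  mix64((uintptr_t)ct->master) ^
			  mix64((uintptr_t)ct->netns) ^
			  mix64(ct->orig_tuple_hash));
}

int main(void)
{
	struct toy_ct ct = { &ct, NULL, (void *)0x1234, 42 };

	printf("id=%08x\n", toy_ct_get_id(&ct));
	return 0;
}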
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 0ecb3e289ef2..8d96738b7dfd 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen,
 		i++;
 	}
 
-	pr_debug("Skipped up to `%c'!\n", skip);
+	pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
 
 	*numoff = i;
 	*numlen = getnum(data + i, dlen - i, cmd, term, numoff);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index e0d392cb3075..0006503d2da9 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -1037,9 +1037,14 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
 	table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
 	table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
 	table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
+	table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
+	table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
 #endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+	table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
+#endif
 	table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
 	table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
 	table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index e3d797252a98..80a8f9ae4c93 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -111,15 +111,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)
 
-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+	return (__s32)(timeout - (u32)jiffies);
+}
+
+static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 {
 	const struct nf_conntrack_l4proto *l4proto;
+	int l4num = nf_ct_protonum(ct);
 	unsigned int timeout;
-	int l4num;
-
-	l4num = nf_ct_protonum(ct);
-	if (l4num == IPPROTO_TCP)
-		flow_offload_fixup_tcp(&ct->proto.tcp);
 
 	l4proto = nf_ct_l4proto_find(l4num);
 	if (!l4proto)
@@ -132,7 +133,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
 	else
 		return;
 
-	ct->timeout = nfct_time_stamp + timeout;
+	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
+		ct->timeout = nfct_time_stamp + timeout;
+}
+
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+	if (nf_ct_protonum(ct) == IPPROTO_TCP)
+		flow_offload_fixup_tcp(&ct->proto.tcp);
+}
+
+static void flow_offload_fixup_ct(struct nf_conn *ct)
+{
+	flow_offload_fixup_ct_state(ct);
+	flow_offload_fixup_ct_timeout(ct);
 }
 
 void flow_offload_free(struct flow_offload *flow)
@@ -208,6 +222,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
 
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+	return nf_flow_timeout_delta(flow->timeout) <= 0;
+}
+
 static void flow_offload_del(struct nf_flowtable *flow_table,
 			     struct flow_offload *flow)
 {
@@ -223,6 +242,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 	e = container_of(flow, struct flow_offload_entry, flow);
 	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
 
+	if (nf_flow_has_expired(flow))
+		flow_offload_fixup_ct(e->ct);
+	else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+		flow_offload_fixup_ct_timeout(e->ct);
+
 	flow_offload_free(flow);
 }
 
@@ -298,11 +322,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
 	return err;
 }
 
-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
-{
-	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
-}
-
 static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
 	struct nf_flowtable *flow_table = data;
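nf_flow_timeout_delta() above is the usual jiffies wrap-safe comparison: subtract as u32, reinterpret as signed, and let the sign carry "before/after". A standalone demonstration near the 32-bit wrap point:

#include <stdint.h>
#include <stdio.h>

static int32_t timeout_delta(uint32_t timeout, uint32_t now)
{
	return (int32_t)(timeout - now);	/* stays correct across a u32 wrap */
}

int main(void)
{
	uint32_t now = 0xffffffb0u;		/* counter about to wrap */
	uint32_t timeout = now + 100;		/* wraps to a small value */

	printf("delta=%d expired=%d\n",
	       timeout_delta(timeout, now),	/* 100: still in the future */
	       timeout_delta(timeout, now) <= 0);
	return 0;
}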
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index cdfc33517e85..b9e7dd6e60ce 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -214,6 +214,24 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 	return true;
 }
 
+static int nf_flow_offload_dst_check(struct dst_entry *dst)
+{
+	if (unlikely(dst_xfrm(dst)))
+		return dst_check(dst, 0) ? 0 : -1;
+
+	return 0;
+}
+
+static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+				      const struct nf_hook_state *state,
+				      struct dst_entry *dst)
+{
+	skb_orphan(skb);
+	skb_dst_set_noref(skb, dst);
+	dst_output(state->net, state->sk, skb);
+	return NF_STOLEN;
+}
+
 unsigned int
 nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 			const struct nf_hook_state *state)
@@ -254,12 +272,25 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
 	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
 		return NF_ACCEPT;
 
+	if (nf_flow_offload_dst_check(&rt->dst)) {
+		flow_offload_teardown(flow);
+		return NF_ACCEPT;
+	}
+
 	if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
 		return NF_DROP;
 
 	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
 	iph = ip_hdr(skb);
 	ip_decrease_ttl(iph);
+	skb->tstamp = 0;
+
+	if (unlikely(dst_xfrm(&rt->dst))) {
+		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+		IPCB(skb)->iif = skb->dev->ifindex;
+		IPCB(skb)->flags = IPSKB_FORWARDED;
+		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+	}
 
 	skb->dev = outdev;
 	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 				sizeof(*ip6h)))
 		return NF_ACCEPT;
 
+	if (nf_flow_offload_dst_check(&rt->dst)) {
+		flow_offload_teardown(flow);
+		return NF_ACCEPT;
+	}
+
 	if (skb_try_make_writable(skb, sizeof(*ip6h)))
 		return NF_DROP;
 
@@ -476,6 +512,14 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
 	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
 	ip6h = ipv6_hdr(skb);
 	ip6h->hop_limit--;
+	skb->tstamp = 0;
+
+	if (unlikely(dst_xfrm(&rt->dst))) {
+		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+		IP6CB(skb)->iif = skb->dev->ifindex;
+		IP6CB(skb)->flags = IP6SKB_FORWARDED;
+		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+	}
 
 	skb->dev = outdev;
 	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 605a7cfe7ca7..d47469f824a1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -138,9 +138,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
 		return;
 
 	list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
-		if (trans->msg_type == NFT_MSG_NEWSET &&
-		    nft_trans_set(trans) == set) {
-			set->bound = true;
+		switch (trans->msg_type) {
+		case NFT_MSG_NEWSET:
+			if (nft_trans_set(trans) == set)
+				nft_trans_set_bound(trans) = true;
+			break;
+		case NFT_MSG_NEWSETELEM:
+			if (nft_trans_elem_set(trans) == set)
+				nft_trans_elem_set_bound(trans) = true;
 			break;
 		}
 	}
@@ -1662,6 +1667,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 
 		chain->flags |= NFT_BASE_CHAIN | flags;
 		basechain->policy = NF_ACCEPT;
+		if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+		    nft_chain_offload_priority(basechain) < 0)
+			return -EOPNOTSUPP;
+
 		flow_block_init(&basechain->flow_block);
 	} else {
 		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
@@ -6906,7 +6915,7 @@ static int __nf_tables_abort(struct net *net)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
-			if (nft_trans_set(trans)->bound) {
+			if (nft_trans_set_bound(trans)) {
 				nft_trans_destroy(trans);
 				break;
 			}
@@ -6918,7 +6927,7 @@ static int __nf_tables_abort(struct net *net)
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSETELEM:
-			if (nft_trans_elem_set(trans)->bound) {
+			if (nft_trans_elem_set_bound(trans)) {
 				nft_trans_destroy(trans);
 				break;
 			}
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 64f5fd5f240e..c0d18c1d77ac 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -103,10 +103,11 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
 }
 
 static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
-					 __be16 proto,
+					 __be16 proto, int priority,
 					 struct netlink_ext_ack *extack)
 {
 	common->protocol = proto;
+	common->prio = priority;
 	common->extack = extack;
 }
 
@@ -124,6 +125,15 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
 	return 0;
 }
 
+int nft_chain_offload_priority(struct nft_base_chain *basechain)
+{
+	if (basechain->ops.priority <= 0 ||
+	    basechain->ops.priority > USHRT_MAX)
+		return -1;
+
+	return 0;
+}
+
 static int nft_flow_offload_rule(struct nft_trans *trans,
 				 enum flow_cls_command command)
 {
@@ -142,7 +152,8 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
 	if (flow)
 		proto = flow->proto;
 
-	nft_flow_offload_common_init(&cls_flow.common, proto, &extack);
+	nft_flow_offload_common_init(&cls_flow.common, proto,
+				     basechain->ops.priority, &extack);
 	cls_flow.command = command;
 	cls_flow.cookie = (unsigned long) rule;
 	if (flow)
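nft_chain_offload_priority() exists because the offload path apparently can only represent chain priorities that fit an unsigned 16-bit range, hence the 1..USHRT_MAX bound above. A standalone check of the same condition:

#include <limits.h>
#include <stdio.h>

static int offload_priority_ok(int priority)
{
	return priority > 0 && priority <= USHRT_MAX;	/* nonzero, fits a u16 */
}

int main(void)
{
	printf("%d %d %d\n",
	       offload_priority_ok(-300),	/* 0: negative basechain priority */
	       offload_priority_ok(10),		/* 1: offloadable */
	       offload_priority_ok(70000));	/* 0: would truncate in 16 bits */
	return 0;
}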
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index aa5f571d4361..01705ad74a9a 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 {
 	struct nft_flow_offload *priv = nft_expr_priv(expr);
 	struct nf_flowtable *flowtable = &priv->flowtable->data;
+	struct tcphdr _tcph, *tcph = NULL;
 	enum ip_conntrack_info ctinfo;
 	struct nf_flow_route route;
 	struct flow_offload *flow;
 	enum ip_conntrack_dir dir;
-	bool is_tcp = false;
 	struct nf_conn *ct;
 	int ret;
 
@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 
 	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
 	case IPPROTO_TCP:
-		is_tcp = true;
+		tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
+					  sizeof(_tcph), &_tcph);
+		if (unlikely(!tcph || tcph->fin || tcph->rst))
+			goto out;
 		break;
 	case IPPROTO_UDP:
 		break;
@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 	if (!flow)
 		goto err_flow_alloc;
 
-	if (is_tcp) {
+	if (tcph) {
 		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
 		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
 	}
@@ -146,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
 	return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
+static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
+	[NFTA_FLOW_TABLE_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_NAME_MAXLEN - 1 },
+};
+
 static int nft_flow_offload_init(const struct nft_ctx *ctx,
 				 const struct nft_expr *expr,
 				 const struct nlattr * const tb[])
@@ -204,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
 static struct nft_expr_type nft_flow_offload_type __read_mostly = {
 	.name		= "flow_offload",
 	.ops		= &nft_flow_offload_ops,
+	.policy		= nft_flow_offload_policy,
 	.maxattr	= NFTA_FLOW_MAX,
 	.owner		= THIS_MODULE,
 };
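The new nft_flow_offload_policy caps NFTA_FLOW_TABLE_NAME at NFT_NAME_MAXLEN - 1 bytes, so oversized names are rejected during attribute validation before the expression's init path sees them. A toy validator showing the effect of such a .len bound (simplified, not the kernel's nla_validate()):

#include <stdio.h>
#include <string.h>

#define TOY_NAME_MAXLEN 256	/* stand-in for NFT_NAME_MAXLEN */

static int validate_name_attr(const void *attr, size_t len)
{
	(void)attr;
	return len <= TOY_NAME_MAXLEN - 1 ? 0 : -1;	/* policy .len check */
}

int main(void)
{
	char huge[512];

	memset(huge, 'A', sizeof(huge));
	printf("%d %d\n",
	       validate_name_attr("flowtable0", 10),	/* 0: accepted */
	       validate_name_attr(huge, sizeof(huge)));	/* -1: over .len */
	return 0;
}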
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index d0ab1adf5bff..5aab6df74e0f 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
 	nfnl_acct_put(info->nfacct);
 }
 
-static struct xt_match nfacct_mt_reg __read_mostly = {
-	.name       = "nfacct",
-	.family     = NFPROTO_UNSPEC,
-	.checkentry = nfacct_mt_checkentry,
-	.match      = nfacct_mt,
-	.destroy    = nfacct_mt_destroy,
-	.matchsize  = sizeof(struct xt_nfacct_match_info),
-	.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
-	.me         = THIS_MODULE,
+static struct xt_match nfacct_mt_reg[] __read_mostly = {
+	{
+		.name       = "nfacct",
+		.revision   = 0,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info),
+		.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "nfacct",
+		.revision   = 1,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info_v1),
+		.usersize   = offsetof(struct xt_nfacct_match_info_v1, nfacct),
+		.me         = THIS_MODULE,
+	},
 };
 
 static int __init nfacct_mt_init(void)
 {
-	return xt_register_match(&nfacct_mt_reg);
+	return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 static void __exit nfacct_mt_exit(void)
 {
-	xt_unregister_match(&nfacct_mt_reg);
+	xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 module_init(nfacct_mt_init);
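Converting nfacct_mt_reg to an array lets revision 0 and revision 1 of the match coexist under one name, each with its own matchsize; the xt core picks the entry whose name and revision match what userspace requested. A simplified model of that lookup (sizes are illustrative, not the real structure layouts):

#include <stdio.h>
#include <string.h>

struct toy_match { const char *name; int revision; size_t matchsize; };

static const struct toy_match reg[] = {
	{ "nfacct", 0, 32 },	/* stand-in for sizeof(struct xt_nfacct_match_info) */
	{ "nfacct", 1, 40 },	/* stand-in for the _v1 layout */
};

static const struct toy_match *find_match(const char *name, int revision)
{
	for (size_t i = 0; i < sizeof(reg) / sizeof(reg[0]); i++)
		if (!strcmp(reg[i].name, name) && reg[i].revision == revision)
			return &reg[i];
	return NULL;
}

int main(void)
{
	const struct toy_match *m = find_match("nfacct", 1);

	printf("%s rev %d matchsize %zu\n", m->name, m->revision, m->matchsize);
	return 0;
}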
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index ead7c6022208..b92b22ce8abd 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
 	if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
 	    (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
 	     info->invert & XT_PHYSDEV_OP_BRIDGED) &&
-	    par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
-	    (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
+	    par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
 		pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
-		if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
-			return -EINVAL;
+		return -EINVAL;
 	}
 
 	if (!brnf_probed) {
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 848c6eb55064..05249eb45082 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
 	struct md_mark mark;
 	struct md_labels labels;
 	char timeout[CTNL_TIMEOUT_NAME_MAX];
+	struct nf_ct_timeout *nf_ct_timeout;
 #if IS_ENABLED(CONFIG_NF_NAT)
 	struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
 #endif
@@ -524,6 +525,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
 		return -EPFNOSUPPORT;
 	}
 
+	/* The key extracted from the fragment that completed this datagram
+	 * likely didn't have an L4 header, so regenerate it.
+	 */
+	ovs_flow_key_update_l3l4(skb, key);
+
 	key->ip.frag = OVS_FRAG_TYPE_NONE;
 	skb_clear_hash(skb);
 	skb->ignore_df = 1;
@@ -697,6 +703,14 @@ static bool skb_nfct_cached(struct net *net,
 		if (help && rcu_access_pointer(help->helper) != info->helper)
 			return false;
 	}
+	if (info->nf_ct_timeout) {
+		struct nf_conn_timeout *timeout_ext;
+
+		timeout_ext = nf_ct_timeout_find(ct);
+		if (!timeout_ext || info->nf_ct_timeout !=
+		    rcu_dereference(timeout_ext->timeout))
+			return false;
+	}
 	/* Force conntrack entry direction to the current packet? */
 	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
 		/* Delete the conntrack entry if confirmed, else just release
@@ -1565,7 +1579,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 		case OVS_CT_ATTR_TIMEOUT:
 			memcpy(info->timeout, nla_data(a), nla_len(a));
 			if (!memchr(info->timeout, '\0', nla_len(a))) {
-				OVS_NLERR(log, "Invalid conntrack helper");
+				OVS_NLERR(log, "Invalid conntrack timeout");
 				return -EINVAL;
 			}
 			break;
@@ -1657,6 +1671,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
 				      ct_info.timeout))
 			pr_info_ratelimited("Failed to associated timeout "
 					    "policy `%s'\n", ct_info.timeout);
+		else
+			ct_info.nf_ct_timeout = rcu_dereference(
+				nf_ct_timeout_find(ct_info.ct)->timeout);
+
 	}
 
 	if (helper) {
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index bc89e16e0505..9d81d2c7bf82 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -523,78 +523,15 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
 }
 
 /**
- * key_extract - extracts a flow key from an Ethernet frame.
+ * key_extract_l3l4 - extracts L3/L4 header information.
  * @skb: sk_buff that contains the frame, with skb->data pointing to the
- *       Ethernet header
+ *       L3 header
  * @key: output flow key
  *
- * The caller must ensure that skb->len >= ETH_HLEN.
- *
- * Returns 0 if successful, otherwise a negative errno value.
- *
- * Initializes @skb header fields as follows:
- *
- *    - skb->mac_header: the L2 header.
- *
- *    - skb->network_header: just past the L2 header, or just past the
- *      VLAN header, to the first byte of the L2 payload.
- *
- *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
- *      on output, then just past the IP header, if one is present and
- *      of a correct length, otherwise the same as skb->network_header.
- *      For other key->eth.type values it is left untouched.
- *
- *    - skb->protocol: the type of the data starting at skb->network_header.
- *      Equals to key->eth.type.
  */
-static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
 {
 	int error;
-	struct ethhdr *eth;
-
-	/* Flags are always used as part of stats */
-	key->tp.flags = 0;
-
-	skb_reset_mac_header(skb);
-
-	/* Link layer. */
-	clear_vlan(key);
-	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
-		if (unlikely(eth_type_vlan(skb->protocol)))
-			return -EINVAL;
-
-		skb_reset_network_header(skb);
-		key->eth.type = skb->protocol;
-	} else {
-		eth = eth_hdr(skb);
-		ether_addr_copy(key->eth.src, eth->h_source);
-		ether_addr_copy(key->eth.dst, eth->h_dest);
-
-		__skb_pull(skb, 2 * ETH_ALEN);
-		/* We are going to push all headers that we pull, so no need to
-		 * update skb->csum here.
-		 */
-
-		if (unlikely(parse_vlan(skb, key)))
-			return -ENOMEM;
-
-		key->eth.type = parse_ethertype(skb);
-		if (unlikely(key->eth.type == htons(0)))
-			return -ENOMEM;
-
-		/* Multiple tagged packets need to retain TPID to satisfy
-		 * skb_vlan_pop(), which will later shift the ethertype into
-		 * skb->protocol.
-		 */
-		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
-			skb->protocol = key->eth.cvlan.tpid;
-		else
-			skb->protocol = key->eth.type;
-
-		skb_reset_network_header(skb);
-		__skb_push(skb, skb->data - skb_mac_header(skb));
-	}
-	skb_reset_mac_len(skb);
 
 	/* Network layer. */
 	if (key->eth.type == htons(ETH_P_IP)) {
@@ -623,6 +560,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 			offset = nh->frag_off & htons(IP_OFFSET);
 			if (offset) {
 				key->ip.frag = OVS_FRAG_TYPE_LATER;
+				memset(&key->tp, 0, sizeof(key->tp));
 				return 0;
 			}
 			if (nh->frag_off & htons(IP_MF) ||
@@ -740,8 +678,10 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 			return error;
 	}
 
-	if (key->ip.frag == OVS_FRAG_TYPE_LATER)
+	if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
+		memset(&key->tp, 0, sizeof(key->tp));
 		return 0;
+	}
 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
 		key->ip.frag = OVS_FRAG_TYPE_FIRST;
 
@@ -788,6 +728,92 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 	return 0;
 }
 
+/**
+ * key_extract - extracts a flow key from an Ethernet frame.
+ * @skb: sk_buff that contains the frame, with skb->data pointing to the
+ *       Ethernet header
+ * @key: output flow key
+ *
+ * The caller must ensure that skb->len >= ETH_HLEN.
+ *
+ * Returns 0 if successful, otherwise a negative errno value.
+ *
+ * Initializes @skb header fields as follows:
+ *
+ *    - skb->mac_header: the L2 header.
+ *
+ *    - skb->network_header: just past the L2 header, or just past the
+ *      VLAN header, to the first byte of the L2 payload.
+ *
+ *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
+ *      on output, then just past the IP header, if one is present and
+ *      of a correct length, otherwise the same as skb->network_header.
+ *      For other key->eth.type values it is left untouched.
+ *
+ *    - skb->protocol: the type of the data starting at skb->network_header.
+ *      Equals to key->eth.type.
+ */
+static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
+{
+	struct ethhdr *eth;
+
+	/* Flags are always used as part of stats */
+	key->tp.flags = 0;
+
+	skb_reset_mac_header(skb);
+
+	/* Link layer. */
+	clear_vlan(key);
+	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
+		if (unlikely(eth_type_vlan(skb->protocol)))
+			return -EINVAL;
+
+		skb_reset_network_header(skb);
+		key->eth.type = skb->protocol;
+	} else {
+		eth = eth_hdr(skb);
+		ether_addr_copy(key->eth.src, eth->h_source);
+		ether_addr_copy(key->eth.dst, eth->h_dest);
+
+		__skb_pull(skb, 2 * ETH_ALEN);
+		/* We are going to push all headers that we pull, so no need to
+		 * update skb->csum here.
+		 */
+
+		if (unlikely(parse_vlan(skb, key)))
+			return -ENOMEM;
+
+		key->eth.type = parse_ethertype(skb);
+		if (unlikely(key->eth.type == htons(0)))
+			return -ENOMEM;
+
+		/* Multiple tagged packets need to retain TPID to satisfy
+		 * skb_vlan_pop(), which will later shift the ethertype into
+		 * skb->protocol.
+		 */
+		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
+			skb->protocol = key->eth.cvlan.tpid;
+		else
+			skb->protocol = key->eth.type;
+
+		skb_reset_network_header(skb);
+		__skb_push(skb, skb->data - skb_mac_header(skb));
+	}
+
+	skb_reset_mac_len(skb);
+
+	/* Fill out L3/L4 key info, if any */
+	return key_extract_l3l4(skb, key);
+}
+
+/* In the case of conntrack fragment handling it expects L3 headers,
+ * add a helper.
+ */
+int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
+{
+	return key_extract_l3l4(skb, key);
+}
+
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
 {
 	int res;
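The refactor splits key extraction into an L2 stage and a shared L3/L4 tail, so handle_fragments() can refresh just the transport part of a key once a datagram is reassembled. A toy sketch of the layering (simplified types, not sw_flow_key):

#include <stdio.h>

struct toy_key { int eth_type, ip_proto, tp_dst; };

static int extract_l3l4(struct toy_key *key)
{
	key->ip_proto = 6;	/* pretend we parsed a TCP header */
	key->tp_dst = 80;
	return 0;
}

static int extract(struct toy_key *key)
{
	key->eth_type = 0x0800;		/* pretend we parsed Ethernet/VLAN */
	return extract_l3l4(key);	/* shared tail, as in key_extract() */
}

int main(void)
{
	struct toy_key key = { 0 };

	extract(&key);		/* full parse for a fresh frame */
	extract_l3l4(&key);	/* L3/L4-only refresh after defragmentation */
	printf("eth_type=%x tp_dst=%d\n", key.eth_type, key.tp_dst);
	return 0;
}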
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index a5506e2d4b7a..b830d5ff7af4 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -270,6 +270,7 @@ void ovs_flow_stats_clear(struct sw_flow *);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
+int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key);
 int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 			 struct sk_buff *skb,
 			 struct sw_flow_key *key);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8d54f3047768..e2742b006d25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2618,6 +2618,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
 	mutex_lock(&po->pg_vec_lock);
 
+	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+	 * we need to confirm it under protection of pg_vec_lock.
+	 */
+	if (unlikely(!po->tx_ring.pg_vec)) {
+		err = -EBUSY;
+		goto out;
+	}
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
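The added test is the classic check-then-recheck pattern: packet_sendmsg() peeked at tx_ring.pg_vec without the lock, so tpacket_snd() must confirm the ring still exists once pg_vec_lock is held. A standalone sketch of the idiom (pthread mutex standing in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pg_vec_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pg_vec;	/* may be torn down by another thread */

static int use_tx_ring(void)
{
	pthread_mutex_lock(&pg_vec_lock);
	if (!pg_vec) {	/* authoritative recheck under the lock */
		pthread_mutex_unlock(&pg_vec_lock);
		return -1;	/* ring vanished after the lockless peek */
	}
	/* ... safe to touch the ring here ... */
	pthread_mutex_unlock(&pg_vec_lock);
	return 0;
}

int main(void)
{
	printf("%d\n", use_tx_ring());	/* -1: ring was never set up */
	return 0;
}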
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 841f198ea1a8..66e4b61a350d 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group)
 {
 	psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
 	list_del(&group->list);
-	kfree(group);
+	kfree_rcu(group, rcu);
 }
 
 static struct psample_group *
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ec05d91aa9a2..45acab2de0cf 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
 				    void *buffer)
 {
 	struct rds_info_rdma_connection *iinfo = buffer;
-	struct rds_ib_connection *ic;
+	struct rds_ib_connection *ic = conn->c_transport_data;
 
 	/* We will only ever look at IB transports */
 	if (conn->c_trans != &rds_ib_transport)
@@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
 
 	iinfo->src_addr = conn->c_laddr.s6_addr32[3];
 	iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
-	iinfo->tos = conn->c_tos;
+	if (ic) {
+		iinfo->tos = conn->c_tos;
+		iinfo->sl = ic->i_sl;
+	}
 
 	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
 	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
 		struct rds_ib_device *rds_ibdev;
 
-		ic = conn->c_transport_data;
-
 		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
 			       (union ib_gid *)&iinfo->dst_gid);
 
@@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 				     void *buffer)
 {
 	struct rds6_info_rdma_connection *iinfo6 = buffer;
-	struct rds_ib_connection *ic;
+	struct rds_ib_connection *ic = conn->c_transport_data;
 
 	/* We will only ever look at IB transports */
 	if (conn->c_trans != &rds_ib_transport)
@@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 
 	iinfo6->src_addr = conn->c_laddr;
 	iinfo6->dst_addr = conn->c_faddr;
+	if (ic) {
+		iinfo6->tos = conn->c_tos;
+		iinfo6->sl = ic->i_sl;
+	}
 
 	memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
 	memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
 		struct rds_ib_device *rds_ibdev;
 
-		ic = conn->c_transport_data;
 		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
 			       (union ib_gid *)&iinfo6->dst_gid);
 		rds_ibdev = ic->rds_ibdev;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 303c6ee8bdb7..f2b558e8b5ea 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -220,6 +220,7 @@ struct rds_ib_connection {
 	/* Send/Recv vectors */
 	int			i_scq_vector;
 	int			i_rcq_vector;
+	u8			i_sl;
 };
 
 /* This assumes that atomic_t is at least 32 bits */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index fddaa09f7b0d..233f1368162b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 		  RDS_PROTOCOL_MINOR(conn->c_version),
 		  ic->i_flowctl ? ", flow control" : "");
 
+	/* receive sl from the peer */
+	ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+
 	atomic_set(&ic->i_cq_quiesce, 0);
 
 	/* Init rings and fill recv. this needs to wait until protocol
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9986d6065c4d..5f741e51b4ba 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id;
 static struct rdma_cm_id *rds6_rdma_listen_id;
 #endif
 
+/* Per IB specification 7.7.3, service level is a 4-bit field. */
+#define TOS_TO_SL(tos)		((tos) & 0xF)
+
 static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 					  struct rdma_cm_event *event,
 					  bool isv6)
@@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 			struct rds_ib_connection *ibic;
 
 			ibic = conn->c_transport_data;
-			if (ibic && ibic->i_cm_id == cm_id)
+			if (ibic && ibic->i_cm_id == cm_id) {
+				cm_id->route.path_rec[0].sl =
+					TOS_TO_SL(conn->c_tos);
 				ret = trans->cm_initiate_connect(cm_id, isv6);
-			else
+			} else {
 				rds_conn_drop(conn);
+			}
 		}
 		break;
 
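A standalone check of the TOS_TO_SL() mapping defined above: the IB service level is a 4-bit field, so only the low nibble of the connection's TOS survives:

#include <stdio.h>

#define TOS_TO_SL(tos)	((tos) & 0xF)	/* as defined in the patch */

int main(void)
{
	printf("%d %d %d\n",
	       TOS_TO_SL(0x00),		/* 0 */
	       TOS_TO_SL(0x0b),		/* 11 */
	       TOS_TO_SL(0xfc));	/* 12: bits above the nibble are masked */
	return 0;
}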
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 853de4876088..a42ba7fa06d5 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -811,6 +811,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
 
 	minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
 	minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
+	minfo6.tos = inc->i_conn->c_tos;
 
 	if (flip) {
 		minfo6.laddr = *daddr;
@@ -824,6 +825,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
 		minfo6.fport = inc->i_hdr.h_dport;
 	}
 
+	minfo6.flags = 0;
+
 	rds_info_copy(iter, &minfo6, sizeof(minfo6));
 }
 #endif
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d09eaf153544..d72ddb67bb74 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
 service_in_use:
 	write_unlock(&local->services_lock);
-	rxrpc_put_local(local);
+	rxrpc_unuse_local(local);
 	ret = -EADDRINUSE;
 error_unlock:
 	release_sock(&rx->sk);
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(rxrpc_kernel_check_life);
  */
 void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
 {
-	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
 			  rxrpc_propose_ack_ping_for_check_life);
 	rxrpc_send_ack_packet(call, true, NULL);
 }
@@ -862,7 +862,6 @@ static void rxrpc_sock_destructor(struct sock *sk)
 static int rxrpc_release_sock(struct sock *sk)
 {
 	struct rxrpc_sock *rx = rxrpc_sk(sk);
-	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
 
 	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
 
@@ -898,10 +897,8 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_release_calls_on_socket(rx);
 	flush_workqueue(rxrpc_workqueue);
 	rxrpc_purge_queue(&sk->sk_receive_queue);
-	rxrpc_queue_work(&rxnet->service_conn_reaper);
-	rxrpc_queue_work(&rxnet->client_conn_reaper);
 
-	rxrpc_put_local(rx->local);
+	rxrpc_unuse_local(rx->local);
 	rx->local = NULL;
 	key_put(rx->key);
 	rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 822f45386e31..8051dfdcf26d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -185,11 +185,17 @@ struct rxrpc_host_header {
  * - max 48 bytes (struct sk_buff::cb)
  */
 struct rxrpc_skb_priv {
-	union {
-		u8	nr_jumbo;	/* Number of jumbo subpackets */
-	};
+	atomic_t	nr_ring_pins;	/* Number of rxtx ring pins */
+	u8		nr_subpackets;	/* Number of subpackets */
+	u8		rx_flags;	/* Received packet flags */
+#define RXRPC_SKB_INCL_LAST	0x01	/* - Includes last packet */
+#define RXRPC_SKB_TX_BUFFER	0x02	/* - Is transmit buffer */
 	union {
 		int	remain;		/* amount of space remaining for next write */
+
+		/* List of requested ACKs on subpackets */
+		unsigned long	rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
+					   BITS_PER_LONG];
 	};
 
 	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
@@ -254,7 +260,8 @@ struct rxrpc_security {
  */
 struct rxrpc_local {
 	struct rcu_head	rcu;
-	atomic_t	usage;
+	atomic_t	active_users;	/* Number of users of the local endpoint */
+	atomic_t	usage;		/* Number of references to the structure */
 	struct rxrpc_net *rxnet;	/* The network ns in which this resides */
 	struct list_head link;
 	struct socket	*socket;	/* my UDP socket */
@@ -612,8 +619,7 @@ struct rxrpc_call {
 #define RXRPC_TX_ANNO_LAST	0x04
 #define RXRPC_TX_ANNO_RESENT	0x08
 
-#define RXRPC_RX_ANNO_JUMBO	0x3f		/* Jumbo subpacket number + 1 if not zero */
-#define RXRPC_RX_ANNO_JLAST	0x40		/* Set if last element of a jumbo packet */
+#define RXRPC_RX_ANNO_SUBPACKET	0x3f		/* Subpacket number in jumbogram */
 #define RXRPC_RX_ANNO_VERIFIED	0x80		/* Set if verified and decrypted */
 	rxrpc_seq_t		tx_hard_ack;	/* Dead slot in buffer; the first transmitted but
 						 * not hard-ACK'd packet follows this.
@@ -649,7 +655,6 @@ struct rxrpc_call {
 
 	/* receive-phase ACK management */
 	u8			ackr_reason;	/* reason to ACK */
-	u16			ackr_skew;	/* skew on packet being ACK'd */
 	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
 	rxrpc_serial_t		ackr_first_seq;	/* first sequence number received */
 	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
@@ -743,7 +748,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
 		       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
 
@@ -905,6 +910,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *);
 void rxrpc_put_client_conn(struct rxrpc_connection *);
 void rxrpc_discard_expired_client_conns(struct work_struct *);
 void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
+void rxrpc_clean_up_local_conns(struct rxrpc_local *);
 
 /*
  * conn_event.c
@@ -1002,6 +1008,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
 void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
@@ -1103,6 +1111,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
 void rxrpc_packet_destructor(struct sk_buff *);
 void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
 void rxrpc_purge_queue(struct sk_buff_head *);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index bc2adeb3acb9..cedbbb3a7c2e 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -43,8 +43,7 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
  * propose an ACK be sent
  */
 static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
-				u16 skew, u32 serial, bool immediate,
-				bool background,
+				u32 serial, bool immediate, bool background,
 				enum rxrpc_propose_ack_trace why)
 {
 	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
@@ -69,14 +68,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
 			outcome = rxrpc_propose_ack_update;
 			call->ackr_serial = serial;
-			call->ackr_skew = skew;
 		}
 		if (!immediate)
 			goto trace;
 	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
 		call->ackr_reason = ack_reason;
 		call->ackr_serial = serial;
-		call->ackr_skew = skew;
 	} else {
 		outcome = rxrpc_propose_ack_subsume;
 	}
@@ -137,11 +134,11 @@ trace:
  * propose an ACK be sent, locking the call structure
  */
 void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
-		       u16 skew, u32 serial, bool immediate, bool background,
+		       u32 serial, bool immediate, bool background,
 		       enum rxrpc_propose_ack_trace why)
 {
 	spin_lock_bh(&call->lock);
-	__rxrpc_propose_ACK(call, ack_reason, skew, serial,
+	__rxrpc_propose_ACK(call, ack_reason, serial,
 			    immediate, background, why);
 	spin_unlock_bh(&call->lock);
 }
@@ -202,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 			continue;
 
 		skb = call->rxtx_buffer[ix];
-		rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+		rxrpc_see_skb(skb, rxrpc_skb_seen);
 
 		if (anno_type == RXRPC_TX_ANNO_UNACK) {
 			if (ktime_after(skb->tstamp, max_age)) {
@@ -239,7 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 		ack_ts = ktime_sub(now, call->acks_latest_ts);
 		if (ktime_to_ns(ack_ts) < call->peer->rtt)
 			goto out;
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
 				  rxrpc_propose_ack_ping_for_lost_ack);
 		rxrpc_send_ack_packet(call, true, NULL);
 		goto out;
@@ -258,18 +255,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 			continue;
 
 		skb = call->rxtx_buffer[ix];
-		rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+		rxrpc_get_skb(skb, rxrpc_skb_got);
 		spin_unlock_bh(&call->lock);
 
 		if (rxrpc_send_data_packet(call, skb, true) < 0) {
-			rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+			rxrpc_free_skb(skb, rxrpc_skb_freed);
 			return;
 		}
 
 		if (rxrpc_is_client_call(call))
 			rxrpc_expose_client_call(call);
 
-		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+		rxrpc_free_skb(skb, rxrpc_skb_freed);
 		spin_lock_bh(&call->lock);
 
 		/* We need to clear the retransmit state, but there are two
@@ -372,7 +369,7 @@ recheck_state:
 	if (time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
 		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
 				  rxrpc_propose_ack_ping_for_keepalive);
 		set_bit(RXRPC_CALL_EV_PING, &call->events);
 	}
@@ -407,7 +404,7 @@ recheck_state:
 	send_ack = NULL;
 	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
 		call->acks_lost_top = call->tx_top;
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
 				  rxrpc_propose_ack_ping_for_lost_ack);
 		send_ack = &call->acks_lost_ping;
 	}
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 217b12be9e08..014548c259ce 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -422,6 +422,19 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 }
 
 /*
+ * Clean up the RxTx skb ring.
+ */
+static void rxrpc_cleanup_ring(struct rxrpc_call *call)
+{
+	int i;
+
+	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
+		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
+		call->rxtx_buffer[i] = NULL;
+	}
+}
+
+/*
  * Detach a call from its owning socket.
  */
 void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
@@ -429,7 +442,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 	const void *here = __builtin_return_address(0);
 	struct rxrpc_connection *conn = call->conn;
 	bool put = false;
-	int i;
 
 	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
 
@@ -479,13 +491,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 	if (conn)
 		rxrpc_disconnect_call(call);
 
-	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
-		rxrpc_free_skb(call->rxtx_buffer[i],
-			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
-				rxrpc_skb_rx_cleaned));
-		call->rxtx_buffer[i] = NULL;
-	}
-
+	rxrpc_cleanup_ring(call);
 	_leave("");
 }
 
@@ -568,8 +574,6 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
  */
 void rxrpc_cleanup_call(struct rxrpc_call *call)
 {
-	int i;
-
 	_net("DESTROY CALL %d", call->debug_id);
 
 	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
@@ -580,13 +584,8 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
 	ASSERTCMP(call->conn, ==, NULL);
 
-	/* Clean up the Rx/Tx buffer */
-	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
-		rxrpc_free_skb(call->rxtx_buffer[i],
-			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
-				rxrpc_skb_rx_cleaned));
-
-	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
+	rxrpc_cleanup_ring(call);
+	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
 
 	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
 }
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index aea82f909c60..3f1da1b49f69 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -1162,3 +1162,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
1162 1162
1163 _leave(""); 1163 _leave("");
1164} 1164}
1165
1166/*
1167 * Clean up the client connections on a local endpoint.
1168 */
1169void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
1170{
1171 struct rxrpc_connection *conn, *tmp;
1172 struct rxrpc_net *rxnet = local->rxnet;
1173 unsigned int nr_active;
1174 LIST_HEAD(graveyard);
1175
1176 _enter("");
1177
1178 spin_lock(&rxnet->client_conn_cache_lock);
1179 nr_active = rxnet->nr_active_client_conns;
1180
1181 list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
1182 cache_link) {
1183 if (conn->params.local == local) {
1184 ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
1185
1186 trace_rxrpc_client(conn, -1, rxrpc_client_discard);
1187 if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
1188 BUG();
1189 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
1190 list_move(&conn->cache_link, &graveyard);
1191 nr_active--;
1192 }
1193 }
1194
1195 rxnet->nr_active_client_conns = nr_active;
1196 spin_unlock(&rxnet->client_conn_cache_lock);
1197 ASSERTCMP(nr_active, >=, 0);
1198
1199 while (!list_empty(&graveyard)) {
1200 conn = list_entry(graveyard.next,
1201 struct rxrpc_connection, cache_link);
1202 list_del_init(&conn->cache_link);
1203
1204 rxrpc_put_connection(conn);
1205 }
1206
1207 _leave(" [culled]");
1208}
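
rxrpc_clean_up_local_conns() follows a common teardown shape: unlink every matching entry while holding the cache lock, park it on a private graveyard list, and only do the final puts after dropping the lock, since rxrpc_put_connection() may do heavyweight work. A minimal userspace sketch of that shape (illustrative names, not kernel API):

    #include <pthread.h>
    #include <stdlib.h>

    struct conn {
            struct conn *next;
            int owner;              /* stands in for conn->params.local */
    };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct conn *idle_conns;

    static void put_conn(struct conn *conn) { free(conn); }

    void clean_up_conns(int owner)
    {
            struct conn **pp, *conn, *graveyard = NULL;

            pthread_mutex_lock(&cache_lock);
            for (pp = &idle_conns; (conn = *pp) != NULL; ) {
                    if (conn->owner == owner) {
                            *pp = conn->next;       /* unlink from cache */
                            conn->next = graveyard; /* move to graveyard */
                            graveyard = conn;
                    } else {
                            pp = &conn->next;
                    }
            }
            pthread_mutex_unlock(&cache_lock);

            while (graveyard) {     /* final puts, outside the lock */
                    conn = graveyard;
                    graveyard = conn->next;
                    put_conn(conn);
            }
    }
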
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index df6624c140be..a1ceef4f5cd0 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -472,7 +472,7 @@ void rxrpc_process_connection(struct work_struct *work)
472 /* go through the conn-level event packets, releasing the ref on this 472 /* go through the conn-level event packets, releasing the ref on this
473 * connection that each one has when we've finished with it */ 473 * connection that each one has when we've finished with it */
474 while ((skb = skb_dequeue(&conn->rx_queue))) { 474 while ((skb = skb_dequeue(&conn->rx_queue))) {
475 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 475 rxrpc_see_skb(skb, rxrpc_skb_seen);
476 ret = rxrpc_process_event(conn, skb, &abort_code); 476 ret = rxrpc_process_event(conn, skb, &abort_code);
477 switch (ret) { 477 switch (ret) {
478 case -EPROTO: 478 case -EPROTO:
@@ -484,7 +484,7 @@ void rxrpc_process_connection(struct work_struct *work)
484 goto requeue_and_leave; 484 goto requeue_and_leave;
485 case -ECONNABORTED: 485 case -ECONNABORTED:
486 default: 486 default:
487 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 487 rxrpc_free_skb(skb, rxrpc_skb_freed);
488 break; 488 break;
489 } 489 }
490 } 490 }
@@ -501,6 +501,6 @@ requeue_and_leave:
501protocol_error: 501protocol_error:
502 if (rxrpc_abort_connection(conn, ret, abort_code) < 0) 502 if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
503 goto requeue_and_leave; 503 goto requeue_and_leave;
504 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 504 rxrpc_free_skb(skb, rxrpc_skb_freed);
505 goto out; 505 goto out;
506} 506}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 434ef392212b..ed05b6922132 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -398,7 +398,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) 398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
399 continue; 399 continue;
400 400
401 if (rxnet->live) { 401 if (rxnet->live && !conn->params.local->dead) {
402 idle_timestamp = READ_ONCE(conn->idle_timestamp); 402 idle_timestamp = READ_ONCE(conn->idle_timestamp);
403 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; 403 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
404 if (conn->params.local->service_closed) 404 if (conn->params.local->service_closed)
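
The added !conn->params.local->dead test makes the reaper treat connections on a dying endpoint as immediately expired, which rxrpc_local_destroyer() depends on when it invokes the reaper directly (see the local_object.c hunks below). A simplified stand-alone model of the check (the kernel compares jiffies with time_after_eq(); plain arithmetic stands in for that here):

    #include <stdbool.h>

    static bool should_reap(bool net_live, bool local_dead, unsigned long now,
                            unsigned long idle_timestamp, unsigned long expiry)
    {
            unsigned long expire_at = now;  /* dead endpoint: reap at once */

            if (net_live && !local_dead)
                    expire_at = idle_timestamp + expiry;

            return now >= expire_at;
    }
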
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c..d122c53c8697 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -196,15 +196,14 @@ send_extra_data:
196 * Ping the other end to fill our RTT cache and to retrieve the rwind 196 * Ping the other end to fill our RTT cache and to retrieve the rwind
197 * and MTU parameters. 197 * and MTU parameters.
198 */ 198 */
199static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, 199static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
200 int skew)
201{ 200{
202 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 201 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
203 ktime_t now = skb->tstamp; 202 ktime_t now = skb->tstamp;
204 203
205 if (call->peer->rtt_usage < 3 || 204 if (call->peer->rtt_usage < 3 ||
206 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 205 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
207 rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, 206 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
208 true, true, 207 true, true,
209 rxrpc_propose_ack_ping_for_params); 208 rxrpc_propose_ack_ping_for_params);
210} 209}
@@ -234,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
234 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK; 233 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
235 skb = call->rxtx_buffer[ix]; 234 skb = call->rxtx_buffer[ix];
236 annotation = call->rxtx_annotations[ix]; 235 annotation = call->rxtx_annotations[ix];
237 rxrpc_see_skb(skb, rxrpc_skb_tx_rotated); 236 rxrpc_see_skb(skb, rxrpc_skb_rotated);
238 call->rxtx_buffer[ix] = NULL; 237 call->rxtx_buffer[ix] = NULL;
239 call->rxtx_annotations[ix] = 0; 238 call->rxtx_annotations[ix] = 0;
240 skb->next = list; 239 skb->next = list;
@@ -259,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
259 skb = list; 258 skb = list;
260 list = skb->next; 259 list = skb->next;
261 skb_mark_not_on_list(skb); 260 skb_mark_not_on_list(skb);
262 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 261 rxrpc_free_skb(skb, rxrpc_skb_freed);
263 } 262 }
264 263
265 return rot_last; 264 return rot_last;
@@ -348,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
348} 347}
349 348
350/* 349/*
351 * Scan a jumbo packet to validate its structure and to work out how many 350 * Scan a data packet to validate its structure and to work out how many
352 * subpackets it contains. 351 * subpackets it contains.
353 * 352 *
354 * A jumbo packet is a collection of consecutive packets glued together with 353 * A jumbo packet is a collection of consecutive packets glued together with
@@ -359,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
359 * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any 358 * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any
360 * size. 359 * size.
361 */ 360 */
362static bool rxrpc_validate_jumbo(struct sk_buff *skb) 361static bool rxrpc_validate_data(struct sk_buff *skb)
363{ 362{
364 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 363 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
365 unsigned int offset = sizeof(struct rxrpc_wire_header); 364 unsigned int offset = sizeof(struct rxrpc_wire_header);
366 unsigned int len = skb->len; 365 unsigned int len = skb->len;
367 int nr_jumbo = 1;
368 u8 flags = sp->hdr.flags; 366 u8 flags = sp->hdr.flags;
369 367
370 do { 368 for (;;) {
371 nr_jumbo++; 369 if (flags & RXRPC_REQUEST_ACK)
370 __set_bit(sp->nr_subpackets, sp->rx_req_ack);
371 sp->nr_subpackets++;
372
373 if (!(flags & RXRPC_JUMBO_PACKET))
374 break;
375
372 if (len - offset < RXRPC_JUMBO_SUBPKTLEN) 376 if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
373 goto protocol_error; 377 goto protocol_error;
374 if (flags & RXRPC_LAST_PACKET) 378 if (flags & RXRPC_LAST_PACKET)
@@ -377,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb)
377 if (skb_copy_bits(skb, offset, &flags, 1) < 0) 381 if (skb_copy_bits(skb, offset, &flags, 1) < 0)
378 goto protocol_error; 382 goto protocol_error;
379 offset += sizeof(struct rxrpc_jumbo_header); 383 offset += sizeof(struct rxrpc_jumbo_header);
380 } while (flags & RXRPC_JUMBO_PACKET); 384 }
381 385
382 sp->nr_jumbo = nr_jumbo; 386 if (flags & RXRPC_LAST_PACKET)
387 sp->rx_flags |= RXRPC_SKB_INCL_LAST;
383 return true; 388 return true;
384 389
385protocol_error: 390protocol_error:
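
rxrpc_validate_data() now performs the jumbo walk exactly once, at input time, recording the subpacket count, a per-subpacket REQUEST_ACK bitmap (sp->rx_req_ack) and whether the final subpacket closes the call (RXRPC_SKB_INCL_LAST); those fields are assumed to be added to struct rxrpc_skb_priv elsewhere in the series. The walk itself reduces to this stand-alone sketch, with sizes taken from net/rxrpc/protocol.h:

    #include <stddef.h>
    #include <stdint.h>

    #define JUMBO_DATALEN   1412    /* RXRPC_JUMBO_DATALEN */
    #define JUMBO_HDRLEN    4       /* sizeof(struct rxrpc_jumbo_header) */
    #define JUMBO_SUBPKTLEN (JUMBO_DATALEN + JUMBO_HDRLEN)
    #define F_LAST          0x04    /* RXRPC_LAST_PACKET */
    #define F_JUMBO         0x20    /* RXRPC_JUMBO_PACKET */

    /* Count the subpackets glued into one DATA packet: each non-terminal
     * subpacket occupies a fixed JUMBO_SUBPKTLEN, and the 4-byte jumbo
     * header trailing its payload carries the next subpacket's flags.
     * Returns the subpacket count, or -1 on a malformed packet.
     * pkt/len cover the bytes after the wire header.
     */
    static int count_subpackets(const uint8_t *pkt, size_t len, uint8_t flags)
    {
            size_t offset = 0;
            int n = 0;

            for (;;) {
                    n++;
                    if (!(flags & F_JUMBO))
                            return n;       /* terminal subpacket */
                    if (flags & F_LAST)
                            return -1;      /* LAST only on terminal */
                    if (len - offset < JUMBO_SUBPKTLEN)
                            return -1;
                    flags = pkt[offset + JUMBO_DATALEN];  /* next flags */
                    offset += JUMBO_SUBPKTLEN;
            }
    }
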
@@ -400,10 +405,10 @@ protocol_error:
400 * (that information is encoded in the ACK packet). 405 * (that information is encoded in the ACK packet).
401 */ 406 */
402static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, 407static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
403 u8 annotation, bool *_jumbo_bad) 408 bool is_jumbo, bool *_jumbo_bad)
404{ 409{
405 /* Discard normal packets that are duplicates. */ 410 /* Discard normal packets that are duplicates. */
406 if (annotation == 0) 411 if (is_jumbo)
407 return; 412 return;
408 413
409 /* Skip jumbo subpackets that are duplicates. When we've had three or 414 /* Skip jumbo subpackets that are duplicates. When we've had three or
@@ -417,30 +422,30 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
417} 422}
418 423
419/* 424/*
420 * Process a DATA packet, adding the packet to the Rx ring. 425 * Process a DATA packet, adding the packet to the Rx ring. The caller's
426 * packet ref must be passed on or discarded.
421 */ 427 */
422static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, 428static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
423 u16 skew)
424{ 429{
425 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 430 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
426 enum rxrpc_call_state state; 431 enum rxrpc_call_state state;
427 unsigned int offset = sizeof(struct rxrpc_wire_header); 432 unsigned int j;
428 unsigned int ix;
429 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; 433 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
430 rxrpc_seq_t seq = sp->hdr.seq, hard_ack; 434 rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
431 bool immediate_ack = false, jumbo_bad = false, queued; 435 bool immediate_ack = false, jumbo_bad = false;
432 u16 len; 436 u8 ack = 0;
433 u8 ack = 0, flags, annotation = 0;
434 437
435 _enter("{%u,%u},{%u,%u}", 438 _enter("{%u,%u},{%u,%u}",
436 call->rx_hard_ack, call->rx_top, skb->len, seq); 439 call->rx_hard_ack, call->rx_top, skb->len, seq0);
437 440
438 _proto("Rx DATA %%%u { #%u f=%02x }", 441 _proto("Rx DATA %%%u { #%u f=%02x n=%u }",
439 sp->hdr.serial, seq, sp->hdr.flags); 442 sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
440 443
441 state = READ_ONCE(call->state); 444 state = READ_ONCE(call->state);
442 if (state >= RXRPC_CALL_COMPLETE) 445 if (state >= RXRPC_CALL_COMPLETE) {
446 rxrpc_free_skb(skb, rxrpc_skb_freed);
443 return; 447 return;
448 }
444 449
445 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { 450 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
446 unsigned long timo = READ_ONCE(call->next_req_timo); 451 unsigned long timo = READ_ONCE(call->next_req_timo);
@@ -465,156 +470,157 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
465 !rxrpc_receiving_reply(call)) 470 !rxrpc_receiving_reply(call))
466 goto unlock; 471 goto unlock;
467 472
468 call->ackr_prev_seq = seq; 473 call->ackr_prev_seq = seq0;
469
470 hard_ack = READ_ONCE(call->rx_hard_ack); 474 hard_ack = READ_ONCE(call->rx_hard_ack);
471 if (after(seq, hard_ack + call->rx_winsize)) {
472 ack = RXRPC_ACK_EXCEEDS_WINDOW;
473 ack_serial = serial;
474 goto ack;
475 }
476 475
477 flags = sp->hdr.flags; 476 if (sp->nr_subpackets > 1) {
478 if (flags & RXRPC_JUMBO_PACKET) {
479 if (call->nr_jumbo_bad > 3) { 477 if (call->nr_jumbo_bad > 3) {
480 ack = RXRPC_ACK_NOSPACE; 478 ack = RXRPC_ACK_NOSPACE;
481 ack_serial = serial; 479 ack_serial = serial;
482 goto ack; 480 goto ack;
483 } 481 }
484 annotation = 1;
485 } 482 }
486 483
487next_subpacket: 484 for (j = 0; j < sp->nr_subpackets; j++) {
488 queued = false; 485 rxrpc_serial_t serial = sp->hdr.serial + j;
489 ix = seq & RXRPC_RXTX_BUFF_MASK; 486 rxrpc_seq_t seq = seq0 + j;
490 len = skb->len; 487 unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
491 if (flags & RXRPC_JUMBO_PACKET) 488 bool terminal = (j == sp->nr_subpackets - 1);
492 len = RXRPC_JUMBO_DATALEN; 489 bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
493 490 u8 flags, annotation = j;
494 if (flags & RXRPC_LAST_PACKET) { 491
495 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 492 _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
496 seq != call->rx_top) { 493 j, serial, seq, terminal, last);
497 rxrpc_proto_abort("LSN", call, seq); 494
498 goto unlock; 495 if (last) {
499 } 496 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
500 } else { 497 seq != call->rx_top) {
501 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 498 rxrpc_proto_abort("LSN", call, seq);
502 after_eq(seq, call->rx_top)) { 499 goto unlock;
503 rxrpc_proto_abort("LSA", call, seq); 500 }
504 goto unlock; 501 } else {
502 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
503 after_eq(seq, call->rx_top)) {
504 rxrpc_proto_abort("LSA", call, seq);
505 goto unlock;
506 }
505 } 507 }
506 }
507
508 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
509 if (before_eq(seq, hard_ack)) {
510 ack = RXRPC_ACK_DUPLICATE;
511 ack_serial = serial;
512 goto skip;
513 }
514 508
515 if (flags & RXRPC_REQUEST_ACK && !ack) { 509 flags = 0;
516 ack = RXRPC_ACK_REQUESTED; 510 if (last)
517 ack_serial = serial; 511 flags |= RXRPC_LAST_PACKET;
518 } 512 if (!terminal)
513 flags |= RXRPC_JUMBO_PACKET;
514 if (test_bit(j, sp->rx_req_ack))
515 flags |= RXRPC_REQUEST_ACK;
516 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
519 517
520 if (call->rxtx_buffer[ix]) { 518 if (before_eq(seq, hard_ack)) {
521 rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
522 if (ack != RXRPC_ACK_DUPLICATE) {
523 ack = RXRPC_ACK_DUPLICATE; 519 ack = RXRPC_ACK_DUPLICATE;
524 ack_serial = serial; 520 ack_serial = serial;
521 continue;
525 } 522 }
526 immediate_ack = true;
527 goto skip;
528 }
529
530 /* Queue the packet. We use a couple of memory barriers here as need
531 * to make sure that rx_top is perceived to be set after the buffer
532 * pointer and that the buffer pointer is set after the annotation and
533 * the skb data.
534 *
535 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
536 * and also rxrpc_fill_out_ack().
537 */
538 rxrpc_get_skb(skb, rxrpc_skb_rx_got);
539 call->rxtx_annotations[ix] = annotation;
540 smp_wmb();
541 call->rxtx_buffer[ix] = skb;
542 if (after(seq, call->rx_top)) {
543 smp_store_release(&call->rx_top, seq);
544 } else if (before(seq, call->rx_top)) {
545 /* Send an immediate ACK if we fill in a hole */
546 if (!ack) {
547 ack = RXRPC_ACK_DELAY;
548 ack_serial = serial;
549 }
550 immediate_ack = true;
551 }
552 if (flags & RXRPC_LAST_PACKET) {
553 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
554 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
555 } else {
556 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
557 }
558 queued = true;
559 523
560 if (after_eq(seq, call->rx_expect_next)) { 524 if (call->rxtx_buffer[ix]) {
561 if (after(seq, call->rx_expect_next)) { 525 rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
562 _net("OOS %u > %u", seq, call->rx_expect_next); 526 &jumbo_bad);
563 ack = RXRPC_ACK_OUT_OF_SEQUENCE; 527 if (ack != RXRPC_ACK_DUPLICATE) {
564 ack_serial = serial; 528 ack = RXRPC_ACK_DUPLICATE;
529 ack_serial = serial;
530 }
531 immediate_ack = true;
532 continue;
565 } 533 }
566 call->rx_expect_next = seq + 1;
567 }
568 534
569skip:
570 offset += len;
571 if (flags & RXRPC_JUMBO_PACKET) {
572 if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
573 rxrpc_proto_abort("XJF", call, seq);
574 goto unlock;
575 }
576 offset += sizeof(struct rxrpc_jumbo_header);
577 seq++;
578 serial++;
579 annotation++;
580 if (flags & RXRPC_JUMBO_PACKET)
581 annotation |= RXRPC_RX_ANNO_JLAST;
582 if (after(seq, hard_ack + call->rx_winsize)) { 535 if (after(seq, hard_ack + call->rx_winsize)) {
583 ack = RXRPC_ACK_EXCEEDS_WINDOW; 536 ack = RXRPC_ACK_EXCEEDS_WINDOW;
584 ack_serial = serial; 537 ack_serial = serial;
585 if (!jumbo_bad) { 538 if (flags & RXRPC_JUMBO_PACKET) {
586 call->nr_jumbo_bad++; 539 if (!jumbo_bad) {
587 jumbo_bad = true; 540 call->nr_jumbo_bad++;
541 jumbo_bad = true;
542 }
588 } 543 }
544
589 goto ack; 545 goto ack;
590 } 546 }
591 547
592 _proto("Rx DATA Jumbo %%%u", serial); 548 if (flags & RXRPC_REQUEST_ACK && !ack) {
593 goto next_subpacket; 549 ack = RXRPC_ACK_REQUESTED;
594 } 550 ack_serial = serial;
551 }
552
553 /* Queue the packet. We use a couple of memory barriers here as need
554 * to make sure that rx_top is perceived to be set after the buffer
555 * pointer and that the buffer pointer is set after the annotation and
556 * the skb data.
557 *
558 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
559 * and also rxrpc_fill_out_ack().
560 */
561 if (!terminal)
562 rxrpc_get_skb(skb, rxrpc_skb_got);
563 call->rxtx_annotations[ix] = annotation;
564 smp_wmb();
565 call->rxtx_buffer[ix] = skb;
566 if (after(seq, call->rx_top)) {
567 smp_store_release(&call->rx_top, seq);
568 } else if (before(seq, call->rx_top)) {
569 /* Send an immediate ACK if we fill in a hole */
570 if (!ack) {
571 ack = RXRPC_ACK_DELAY;
572 ack_serial = serial;
573 }
574 immediate_ack = true;
575 }
595 576
596 if (queued && flags & RXRPC_LAST_PACKET && !ack) { 577 if (terminal) {
597 ack = RXRPC_ACK_DELAY; 578 /* From this point on, we're not allowed to touch the
598 ack_serial = serial; 579 * packet any longer as its ref now belongs to the Rx
580 * ring.
581 */
582 skb = NULL;
583 }
584
585 if (last) {
586 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
587 if (!ack) {
588 ack = RXRPC_ACK_DELAY;
589 ack_serial = serial;
590 }
591 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
592 } else {
593 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
594 }
595
596 if (after_eq(seq, call->rx_expect_next)) {
597 if (after(seq, call->rx_expect_next)) {
598 _net("OOS %u > %u", seq, call->rx_expect_next);
599 ack = RXRPC_ACK_OUT_OF_SEQUENCE;
600 ack_serial = serial;
601 }
602 call->rx_expect_next = seq + 1;
603 }
599 } 604 }
600 605
601ack: 606ack:
602 if (ack) 607 if (ack)
603 rxrpc_propose_ACK(call, ack, skew, ack_serial, 608 rxrpc_propose_ACK(call, ack, ack_serial,
604 immediate_ack, true, 609 immediate_ack, true,
605 rxrpc_propose_ack_input_data); 610 rxrpc_propose_ack_input_data);
606 else 611 else
607 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, 612 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
608 false, true, 613 false, true,
609 rxrpc_propose_ack_input_data); 614 rxrpc_propose_ack_input_data);
610 615
611 if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) { 616 if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
612 trace_rxrpc_notify_socket(call->debug_id, serial); 617 trace_rxrpc_notify_socket(call->debug_id, serial);
613 rxrpc_notify_socket(call); 618 rxrpc_notify_socket(call);
614 } 619 }
615 620
616unlock: 621unlock:
617 spin_unlock(&call->input_lock); 622 spin_unlock(&call->input_lock);
623 rxrpc_free_skb(skb, rxrpc_skb_freed);
618 _leave(" [queued]"); 624 _leave(" [queued]");
619} 625}
620 626
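
Inside the per-subpacket loop of rxrpc_input_data(), the wire flags for subpacket j are no longer re-read from the packet; they are rebuilt from the metadata recorded at validation time. In isolation (illustrative types — the kernel keeps rx_req_ack as a bitmap in struct rxrpc_skb_priv):

    #include <stdbool.h>
    #include <stdint.h>

    #define F_REQUEST_ACK   0x02    /* RXRPC_REQUEST_ACK */
    #define F_LAST_PACKET   0x04    /* RXRPC_LAST_PACKET */
    #define F_JUMBO_PACKET  0x20    /* RXRPC_JUMBO_PACKET */
    #define SKB_INCL_LAST   0x01    /* stands in for RXRPC_SKB_INCL_LAST */

    struct parsed {                 /* stands in for rxrpc_skb_priv */
            uint8_t  nr_subpackets;
            uint8_t  rx_flags;
            uint64_t rx_req_ack;    /* bit j = subpacket j wants an ACK */
    };

    /* Rebuild the wire flags for subpacket j, as the loop above does,
     * instead of re-reading each jumbo header under the call lock.
     */
    static uint8_t subpacket_flags(const struct parsed *sp, unsigned int j)
    {
            bool terminal = (j == sp->nr_subpackets - 1);
            bool last = terminal && (sp->rx_flags & SKB_INCL_LAST);
            uint8_t flags = 0;

            if (last)
                    flags |= F_LAST_PACKET;
            if (!terminal)
                    flags |= F_JUMBO_PACKET;
            if (sp->rx_req_ack & (1ULL << j))
                    flags |= F_REQUEST_ACK;
            return flags;
    }
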
@@ -822,8 +828,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
822 * soft-ACK means that the packet may be discarded and retransmission 828 * soft-ACK means that the packet may be discarded and retransmission
823 * requested. A phase is complete when all packets are hard-ACK'd. 829 * requested. A phase is complete when all packets are hard-ACK'd.
824 */ 830 */
825static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, 831static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
826 u16 skew)
827{ 832{
828 struct rxrpc_ack_summary summary = { 0 }; 833 struct rxrpc_ack_summary summary = { 0 };
829 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 834 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -867,11 +872,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
867 if (buf.ack.reason == RXRPC_ACK_PING) { 872 if (buf.ack.reason == RXRPC_ACK_PING) {
868 _proto("Rx ACK %%%u PING Request", sp->hdr.serial); 873 _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
869 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, 874 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
870 skew, sp->hdr.serial, true, true, 875 sp->hdr.serial, true, true,
871 rxrpc_propose_ack_respond_to_ping); 876 rxrpc_propose_ack_respond_to_ping);
872 } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { 877 } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
873 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, 878 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
874 skew, sp->hdr.serial, true, true, 879 sp->hdr.serial, true, true,
875 rxrpc_propose_ack_respond_to_ack); 880 rxrpc_propose_ack_respond_to_ack);
876 } 881 }
877 882
@@ -948,7 +953,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
948 RXRPC_TX_ANNO_LAST && 953 RXRPC_TX_ANNO_LAST &&
949 summary.nr_acks == call->tx_top - hard_ack && 954 summary.nr_acks == call->tx_top - hard_ack &&
950 rxrpc_is_client_call(call)) 955 rxrpc_is_client_call(call))
951 rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, 956 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
952 false, true, 957 false, true,
953 rxrpc_propose_ack_ping_for_lost_reply); 958 rxrpc_propose_ack_ping_for_lost_reply);
954 959
@@ -1004,7 +1009,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
1004 * Process an incoming call packet. 1009 * Process an incoming call packet.
1005 */ 1010 */
1006static void rxrpc_input_call_packet(struct rxrpc_call *call, 1011static void rxrpc_input_call_packet(struct rxrpc_call *call,
1007 struct sk_buff *skb, u16 skew) 1012 struct sk_buff *skb)
1008{ 1013{
1009 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 1014 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
1010 unsigned long timo; 1015 unsigned long timo;
@@ -1023,11 +1028,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
1023 1028
1024 switch (sp->hdr.type) { 1029 switch (sp->hdr.type) {
1025 case RXRPC_PACKET_TYPE_DATA: 1030 case RXRPC_PACKET_TYPE_DATA:
1026 rxrpc_input_data(call, skb, skew); 1031 rxrpc_input_data(call, skb);
1027 break; 1032 goto no_free;
1028 1033
1029 case RXRPC_PACKET_TYPE_ACK: 1034 case RXRPC_PACKET_TYPE_ACK:
1030 rxrpc_input_ack(call, skb, skew); 1035 rxrpc_input_ack(call, skb);
1031 break; 1036 break;
1032 1037
1033 case RXRPC_PACKET_TYPE_BUSY: 1038 case RXRPC_PACKET_TYPE_BUSY:
@@ -1051,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
1051 break; 1056 break;
1052 } 1057 }
1053 1058
1059 rxrpc_free_skb(skb, rxrpc_skb_freed);
1060no_free:
1054 _leave(""); 1061 _leave("");
1055} 1062}
1056 1063
@@ -1108,8 +1115,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
1108{ 1115{
1109 _enter("%p,%p", local, skb); 1116 _enter("%p,%p", local, skb);
1110 1117
1111 skb_queue_tail(&local->event_queue, skb); 1118 if (rxrpc_get_local_maybe(local)) {
1112 rxrpc_queue_local(local); 1119 skb_queue_tail(&local->event_queue, skb);
1120 rxrpc_queue_local(local);
1121 } else {
1122 rxrpc_free_skb(skb, rxrpc_skb_freed);
1123 }
1113} 1124}
1114 1125
1115/* 1126/*
@@ -1119,8 +1130,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
1119{ 1130{
1120 CHECK_SLAB_OKAY(&local->usage); 1131 CHECK_SLAB_OKAY(&local->usage);
1121 1132
1122 skb_queue_tail(&local->reject_queue, skb); 1133 if (rxrpc_get_local_maybe(local)) {
1123 rxrpc_queue_local(local); 1134 skb_queue_tail(&local->reject_queue, skb);
1135 rxrpc_queue_local(local);
1136 } else {
1137 rxrpc_free_skb(skb, rxrpc_skb_freed);
1138 }
1124} 1139}
1125 1140
1126/* 1141/*
@@ -1173,7 +1188,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1173 struct rxrpc_peer *peer = NULL; 1188 struct rxrpc_peer *peer = NULL;
1174 struct rxrpc_sock *rx = NULL; 1189 struct rxrpc_sock *rx = NULL;
1175 unsigned int channel; 1190 unsigned int channel;
1176 int skew = 0;
1177 1191
1178 _enter("%p", udp_sk); 1192 _enter("%p", udp_sk);
1179 1193
@@ -1184,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1184 if (skb->tstamp == 0) 1198 if (skb->tstamp == 0)
1185 skb->tstamp = ktime_get_real(); 1199 skb->tstamp = ktime_get_real();
1186 1200
1187 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 1201 rxrpc_new_skb(skb, rxrpc_skb_received);
1188 1202
1189 skb_pull(skb, sizeof(struct udphdr)); 1203 skb_pull(skb, sizeof(struct udphdr));
1190 1204
@@ -1201,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1201 static int lose; 1215 static int lose;
1202 if ((lose++ & 7) == 7) { 1216 if ((lose++ & 7) == 7) {
1203 trace_rxrpc_rx_lose(sp); 1217 trace_rxrpc_rx_lose(sp);
1204 rxrpc_free_skb(skb, rxrpc_skb_rx_lost); 1218 rxrpc_free_skb(skb, rxrpc_skb_lost);
1205 return 0; 1219 return 0;
1206 } 1220 }
1207 } 1221 }
@@ -1233,9 +1247,26 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1233 if (sp->hdr.callNumber == 0 || 1247 if (sp->hdr.callNumber == 0 ||
1234 sp->hdr.seq == 0) 1248 sp->hdr.seq == 0)
1235 goto bad_message; 1249 goto bad_message;
1236 if (sp->hdr.flags & RXRPC_JUMBO_PACKET && 1250 if (!rxrpc_validate_data(skb))
1237 !rxrpc_validate_jumbo(skb))
1238 goto bad_message; 1251 goto bad_message;
1252
1253 /* Unshare the packet so that it can be modified for in-place
1254 * decryption.
1255 */
1256 if (sp->hdr.securityIndex != 0) {
1257 struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
1258 if (!nskb) {
1259 rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
1260 goto out;
1261 }
1262
1263 if (nskb != skb) {
1264 rxrpc_eaten_skb(skb, rxrpc_skb_received);
1265 skb = nskb;
1266 rxrpc_new_skb(skb, rxrpc_skb_unshared);
1267 sp = rxrpc_skb(skb);
1268 }
1269 }
1239 break; 1270 break;
1240 1271
1241 case RXRPC_PACKET_TYPE_CHALLENGE: 1272 case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -1301,15 +1332,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1301 goto out; 1332 goto out;
1302 } 1333 }
1303 1334
1304 /* Note the serial number skew here */ 1335 if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
1305 skew = (int)sp->hdr.serial - (int)conn->hi_serial; 1336 conn->hi_serial = sp->hdr.serial;
1306 if (skew >= 0) {
1307 if (skew > 0)
1308 conn->hi_serial = sp->hdr.serial;
1309 } else {
1310 skew = -skew;
1311 skew = min(skew, 65535);
1312 }
1313 1337
1314 /* Call-bound packets are routed by connection channel. */ 1338 /* Call-bound packets are routed by connection channel. */
1315 channel = sp->hdr.cid & RXRPC_CHANNELMASK; 1339 channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -1372,15 +1396,18 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1372 call = rxrpc_new_incoming_call(local, rx, skb); 1396 call = rxrpc_new_incoming_call(local, rx, skb);
1373 if (!call) 1397 if (!call)
1374 goto reject_packet; 1398 goto reject_packet;
1375 rxrpc_send_ping(call, skb, skew); 1399 rxrpc_send_ping(call, skb);
1376 mutex_unlock(&call->user_mutex); 1400 mutex_unlock(&call->user_mutex);
1377 } 1401 }
1378 1402
1379 rxrpc_input_call_packet(call, skb, skew); 1403 /* Process a call packet; this either discards or passes on the ref
1380 goto discard; 1404 * elsewhere.
1405 */
1406 rxrpc_input_call_packet(call, skb);
1407 goto out;
1381 1408
1382discard: 1409discard:
1383 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1410 rxrpc_free_skb(skb, rxrpc_skb_freed);
1384out: 1411out:
1385 trace_rxrpc_rx_done(0, 0); 1412 trace_rxrpc_rx_done(0, 0);
1386 return 0; 1413 return 0;
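
With the skew calculation gone, the only serial bookkeeping left at this point is tracking the highest serial received on the connection. Restated as a stand-alone function (the kernel writes the comparison as a difference of int casts, which is wrap-safe for 32-bit serials):

    #include <stdint.h>

    /* Serial numbers wrap, so "newer" is decided by a signed difference
     * rather than a plain comparison.
     */
    static uint32_t note_rx_serial(uint32_t hi_serial, uint32_t serial)
    {
            if ((int32_t)(serial - hi_serial) > 0) /* wrap-safe serial > hi */
                    return serial;
            return hi_serial;
    }
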
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index e93a78f7c05e..3ce6d628cd75 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -90,7 +90,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
90 if (skb) { 90 if (skb) {
91 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 91 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
92 92
93 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 93 rxrpc_see_skb(skb, rxrpc_skb_seen);
94 _debug("{%d},{%u}", local->debug_id, sp->hdr.type); 94 _debug("{%d},{%u}", local->debug_id, sp->hdr.type);
95 95
96 switch (sp->hdr.type) { 96 switch (sp->hdr.type) {
@@ -108,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
108 break; 108 break;
109 } 109 }
110 110
111 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 111 rxrpc_free_skb(skb, rxrpc_skb_freed);
112 } 112 }
113 113
114 _leave(""); 114 _leave("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b1c71bad510b..36587260cabd 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
79 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); 79 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
80 if (local) { 80 if (local) {
81 atomic_set(&local->usage, 1); 81 atomic_set(&local->usage, 1);
82 atomic_set(&local->active_users, 1);
82 local->rxnet = rxnet; 83 local->rxnet = rxnet;
83 INIT_LIST_HEAD(&local->link); 84 INIT_LIST_HEAD(&local->link);
84 INIT_WORK(&local->processor, rxrpc_local_processor); 85 INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
92 local->debug_id = atomic_inc_return(&rxrpc_debug_id); 93 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
93 memcpy(&local->srx, srx, sizeof(*srx)); 94 memcpy(&local->srx, srx, sizeof(*srx));
94 local->srx.srx_service = 0; 95 local->srx.srx_service = 0;
95 trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); 96 trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
96 } 97 }
97 98
98 _leave(" = %p", local); 99 _leave(" = %p", local);
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
266 * bind the transport socket may still fail if we're attempting 267 * bind the transport socket may still fail if we're attempting
267 * to use a local address that the dying object is still using. 268 * to use a local address that the dying object is still using.
268 */ 269 */
269 if (!rxrpc_get_local_maybe(local)) { 270 if (!rxrpc_use_local(local))
270 cursor = cursor->next;
271 list_del_init(&local->link);
272 break; 271 break;
273 }
274 272
275 age = "old"; 273 age = "old";
276 goto found; 274 goto found;
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
284 if (ret < 0) 282 if (ret < 0)
285 goto sock_error; 283 goto sock_error;
286 284
287 list_add_tail(&local->link, cursor); 285 if (cursor != &rxnet->local_endpoints)
286 list_replace_init(cursor, &local->link);
287 else
288 list_add_tail(&local->link, cursor);
288 age = "new"; 289 age = "new";
289 290
290found: 291found:
@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
320 int n; 321 int n;
321 322
322 n = atomic_inc_return(&local->usage); 323 n = atomic_inc_return(&local->usage);
323 trace_rxrpc_local(local, rxrpc_local_got, n, here); 324 trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
324 return local; 325 return local;
325} 326}
326 327
@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
334 if (local) { 335 if (local) {
335 int n = atomic_fetch_add_unless(&local->usage, 1, 0); 336 int n = atomic_fetch_add_unless(&local->usage, 1, 0);
336 if (n > 0) 337 if (n > 0)
337 trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); 338 trace_rxrpc_local(local->debug_id, rxrpc_local_got,
339 n + 1, here);
338 else 340 else
339 local = NULL; 341 local = NULL;
340 } 342 }
@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
342} 344}
343 345
344/* 346/*
345 * Queue a local endpoint. 347 * Queue a local endpoint and pass the caller's reference to the work item.
346 */ 348 */
347void rxrpc_queue_local(struct rxrpc_local *local) 349void rxrpc_queue_local(struct rxrpc_local *local)
348{ 350{
349 const void *here = __builtin_return_address(0); 351 const void *here = __builtin_return_address(0);
352 unsigned int debug_id = local->debug_id;
353 int n = atomic_read(&local->usage);
350 354
351 if (rxrpc_queue_work(&local->processor)) 355 if (rxrpc_queue_work(&local->processor))
352 trace_rxrpc_local(local, rxrpc_local_queued, 356 trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
353 atomic_read(&local->usage), here); 357 else
354} 358 rxrpc_put_local(local);
355
356/*
357 * A local endpoint reached its end of life.
358 */
359static void __rxrpc_put_local(struct rxrpc_local *local)
360{
361 _enter("%d", local->debug_id);
362 rxrpc_queue_work(&local->processor);
363} 359}
364 360
365/* 361/*
@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
372 368
373 if (local) { 369 if (local) {
374 n = atomic_dec_return(&local->usage); 370 n = atomic_dec_return(&local->usage);
375 trace_rxrpc_local(local, rxrpc_local_put, n, here); 371 trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
376 372
377 if (n == 0) 373 if (n == 0)
378 __rxrpc_put_local(local); 374 call_rcu(&local->rcu, rxrpc_local_rcu);
375 }
376}
377
378/*
379 * Start using a local endpoint.
380 */
381struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
382{
383 unsigned int au;
384
385 local = rxrpc_get_local_maybe(local);
386 if (!local)
387 return NULL;
388
389 au = atomic_fetch_add_unless(&local->active_users, 1, 0);
390 if (au == 0) {
391 rxrpc_put_local(local);
392 return NULL;
393 }
394
395 return local;
396}
397
398/*
399 * Cease using a local endpoint. Once the number of active users reaches 0, we
400 * start the closure of the transport in the work processor.
401 */
402void rxrpc_unuse_local(struct rxrpc_local *local)
403{
404 unsigned int au;
405
406 if (local) {
407 au = atomic_dec_return(&local->active_users);
408 if (au == 0)
409 rxrpc_queue_local(local);
410 else
411 rxrpc_put_local(local);
379 } 412 }
380} 413}
381 414
@@ -393,21 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
393 426
394 _enter("%d", local->debug_id); 427 _enter("%d", local->debug_id);
395 428
396 /* We can get a race between an incoming call packet queueing the
397 * processor again and the work processor starting the destruction
398 * process which will shut down the UDP socket.
399 */
400 if (local->dead) {
401 _leave(" [already dead]");
402 return;
403 }
404 local->dead = true; 429 local->dead = true;
405 430
406 mutex_lock(&rxnet->local_mutex); 431 mutex_lock(&rxnet->local_mutex);
407 list_del_init(&local->link); 432 list_del_init(&local->link);
408 mutex_unlock(&rxnet->local_mutex); 433 mutex_unlock(&rxnet->local_mutex);
409 434
410 ASSERT(RB_EMPTY_ROOT(&local->client_conns)); 435 rxrpc_clean_up_local_conns(local);
436 rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
411 ASSERT(!local->service); 437 ASSERT(!local->service);
412 438
413 if (socket) { 439 if (socket) {
@@ -422,13 +448,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
422 */ 448 */
423 rxrpc_purge_queue(&local->reject_queue); 449 rxrpc_purge_queue(&local->reject_queue);
424 rxrpc_purge_queue(&local->event_queue); 450 rxrpc_purge_queue(&local->event_queue);
425
426 _debug("rcu local %d", local->debug_id);
427 call_rcu(&local->rcu, rxrpc_local_rcu);
428} 451}
429 452
430/* 453/*
431 * Process events on an endpoint 454 * Process events on an endpoint. The work item carries a ref which
455 * we must release.
432 */ 456 */
433static void rxrpc_local_processor(struct work_struct *work) 457static void rxrpc_local_processor(struct work_struct *work)
434{ 458{
@@ -436,13 +460,15 @@ static void rxrpc_local_processor(struct work_struct *work)
436 container_of(work, struct rxrpc_local, processor); 460 container_of(work, struct rxrpc_local, processor);
437 bool again; 461 bool again;
438 462
439 trace_rxrpc_local(local, rxrpc_local_processing, 463 trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
440 atomic_read(&local->usage), NULL); 464 atomic_read(&local->usage), NULL);
441 465
442 do { 466 do {
443 again = false; 467 again = false;
444 if (atomic_read(&local->usage) == 0) 468 if (atomic_read(&local->active_users) == 0) {
445 return rxrpc_local_destroyer(local); 469 rxrpc_local_destroyer(local);
470 break;
471 }
446 472
447 if (!skb_queue_empty(&local->reject_queue)) { 473 if (!skb_queue_empty(&local->reject_queue)) {
448 rxrpc_reject_packets(local); 474 rxrpc_reject_packets(local);
@@ -454,6 +480,8 @@ static void rxrpc_local_processor(struct work_struct *work)
454 again = true; 480 again = true;
455 } 481 }
456 } while (again); 482 } while (again);
483
484 rxrpc_put_local(local);
457} 485}
458 486
459/* 487/*
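
The local_object.c changes split one refcount into two: usage keeps the structure's memory alive (the final put now goes straight to call_rcu()), while the new active_users counts users of the endpoint's transport socket (the final unuse queues the processor, which runs rxrpc_local_destroyer()). A stand-alone C11 sketch of the pattern (illustrative; the kernel versions also emit tracepoints):

    #include <stdatomic.h>
    #include <stddef.h>

    struct local {
            atomic_uint usage;          /* memory lifetime */
            atomic_uint active_users;   /* transport lifetime */
    };

    /* Models atomic_fetch_add_unless(): add 1 unless already 0;
     * returns the old value.
     */
    static unsigned int get_unless_zero(atomic_uint *v)
    {
            unsigned int old = atomic_load(v);

            while (old != 0 &&
                   !atomic_compare_exchange_weak(v, &old, old + 1))
                    ;
            return old;
    }

    static struct local *use_local(struct local *l)
    {
            if (!get_unless_zero(&l->usage))
                    return NULL;            /* already being freed */
            if (!get_unless_zero(&l->active_users)) {
                    /* transport shutting down; drop the usage ref */
                    atomic_fetch_sub(&l->usage, 1);
                    return NULL;
            }
            return l;
    }

    static void unuse_local(struct local *l)
    {
            /* The last unuse hands its usage ref to the work item that
             * tears the transport down; otherwise just drop the ref.
             */
            if (atomic_fetch_sub(&l->active_users, 1) != 1)
                    atomic_fetch_sub(&l->usage, 1);
            /* else: the queued destroyer owns the remaining ref */
    }
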
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 948e3fe249ec..935bb60fff56 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -87,7 +87,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
87 *_top = top; 87 *_top = top;
88 88
89 pkt->ack.bufferSpace = htons(8); 89 pkt->ack.bufferSpace = htons(8);
90 pkt->ack.maxSkew = htons(call->ackr_skew); 90 pkt->ack.maxSkew = htons(0);
91 pkt->ack.firstPacket = htonl(hard_ack + 1); 91 pkt->ack.firstPacket = htonl(hard_ack + 1);
92 pkt->ack.previousPacket = htonl(call->ackr_prev_seq); 92 pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
93 pkt->ack.serial = htonl(serial); 93 pkt->ack.serial = htonl(serial);
@@ -228,7 +228,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
228 if (ping) 228 if (ping)
229 clear_bit(RXRPC_CALL_PINGING, &call->flags); 229 clear_bit(RXRPC_CALL_PINGING, &call->flags);
230 rxrpc_propose_ACK(call, pkt->ack.reason, 230 rxrpc_propose_ACK(call, pkt->ack.reason,
231 ntohs(pkt->ack.maxSkew),
232 ntohl(pkt->ack.serial), 231 ntohl(pkt->ack.serial),
233 false, true, 232 false, true,
234 rxrpc_propose_ack_retry_tx); 233 rxrpc_propose_ack_retry_tx);
@@ -566,7 +565,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
566 memset(&whdr, 0, sizeof(whdr)); 565 memset(&whdr, 0, sizeof(whdr));
567 566
568 while ((skb = skb_dequeue(&local->reject_queue))) { 567 while ((skb = skb_dequeue(&local->reject_queue))) {
569 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 568 rxrpc_see_skb(skb, rxrpc_skb_seen);
570 sp = rxrpc_skb(skb); 569 sp = rxrpc_skb(skb);
571 570
572 switch (skb->mark) { 571 switch (skb->mark) {
@@ -582,7 +581,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
582 ioc = 2; 581 ioc = 2;
583 break; 582 break;
584 default: 583 default:
585 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 584 rxrpc_free_skb(skb, rxrpc_skb_freed);
586 continue; 585 continue;
587 } 586 }
588 587
@@ -607,7 +606,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
607 rxrpc_tx_point_reject); 606 rxrpc_tx_point_reject);
608 } 607 }
609 608
610 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 609 rxrpc_free_skb(skb, rxrpc_skb_freed);
611 } 610 }
612 611
613 _leave(""); 612 _leave("");
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 7666ec72d37e..c97ebdc043e4 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -163,11 +163,11 @@ void rxrpc_error_report(struct sock *sk)
163 _leave("UDP socket errqueue empty"); 163 _leave("UDP socket errqueue empty");
164 return; 164 return;
165 } 165 }
166 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 166 rxrpc_new_skb(skb, rxrpc_skb_received);
167 serr = SKB_EXT_ERR(skb); 167 serr = SKB_EXT_ERR(skb);
168 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { 168 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
169 _leave("UDP empty message"); 169 _leave("UDP empty message");
170 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 170 rxrpc_free_skb(skb, rxrpc_skb_freed);
171 return; 171 return;
172 } 172 }
173 173
@@ -177,7 +177,7 @@ void rxrpc_error_report(struct sock *sk)
177 peer = NULL; 177 peer = NULL;
178 if (!peer) { 178 if (!peer) {
179 rcu_read_unlock(); 179 rcu_read_unlock();
180 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 180 rxrpc_free_skb(skb, rxrpc_skb_freed);
181 _leave(" [no peer]"); 181 _leave(" [no peer]");
182 return; 182 return;
183 } 183 }
@@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk)
189 serr->ee.ee_code == ICMP_FRAG_NEEDED)) { 189 serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
190 rxrpc_adjust_mtu(peer, serr); 190 rxrpc_adjust_mtu(peer, serr);
191 rcu_read_unlock(); 191 rcu_read_unlock();
192 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 192 rxrpc_free_skb(skb, rxrpc_skb_freed);
193 rxrpc_put_peer(peer); 193 rxrpc_put_peer(peer);
194 _leave(" [MTU update]"); 194 _leave(" [MTU update]");
195 return; 195 return;
@@ -197,7 +197,7 @@ void rxrpc_error_report(struct sock *sk)
197 197
198 rxrpc_store_error(peer, serr); 198 rxrpc_store_error(peer, serr);
199 rcu_read_unlock(); 199 rcu_read_unlock();
200 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 200 rxrpc_free_skb(skb, rxrpc_skb_freed);
201 rxrpc_put_peer(peer); 201 rxrpc_put_peer(peer);
202 202
203 _leave(""); 203 _leave("");
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 99ce322d7caa..49bb972539aa 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -89,6 +89,15 @@ struct rxrpc_jumbo_header {
89#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ 89#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
90#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) 90#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
91 91
92/*
93 * The maximum number of subpackets that can possibly fit in a UDP packet is:
94 *
95 * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1
96 * = ((65535 - 28 - 28) / 1416) + 1
97 * = 46 non-terminal packets and 1 terminal packet.
98 */
99#define RXRPC_MAX_NR_JUMBO 47
100
92/*****************************************************************************/ 101/*****************************************************************************/
93/* 102/*
94 * on-the-wire Rx ACK packet data payload 103 * on-the-wire Rx ACK packet data payload
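
The new RXRPC_MAX_NR_JUMBO bound can be restated as a compile-time check; the following is hypothetical, not part of the kernel source, and just re-derives the comment's arithmetic:

    #include <assert.h>

    #define RXRPC_JUMBO_DATALEN     1412
    #define RXRPC_JUMBO_HDRLEN      4   /* sizeof(struct rxrpc_jumbo_header) */
    #define RXRPC_JUMBO_SUBPKTLEN   (RXRPC_JUMBO_DATALEN + RXRPC_JUMBO_HDRLEN)
    #define RXRPC_MAX_NR_JUMBO      47

    /* 65535-byte max IP datagram, minus IP and UDP headers, divided into
     * 1416-byte non-terminal subpackets, plus the terminal subpacket.
     */
    static_assert(((65535 - 28 - 28) / RXRPC_JUMBO_SUBPKTLEN) + 1
                  == RXRPC_MAX_NR_JUMBO, "jumbo bound");
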
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 5abf46cf9e6c..3b0becb12041 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -141,7 +141,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
141 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); 141 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
142 142
143 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { 143 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
144 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true, 144 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
145 rxrpc_propose_ack_terminal_ack); 145 rxrpc_propose_ack_terminal_ack);
146 //rxrpc_send_ack_packet(call, false, NULL); 146 //rxrpc_send_ack_packet(call, false, NULL);
147 } 147 }
@@ -159,7 +159,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
159 call->state = RXRPC_CALL_SERVER_ACK_REQUEST; 159 call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
160 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; 160 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
161 write_unlock_bh(&call->state_lock); 161 write_unlock_bh(&call->state_lock);
162 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, 162 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
163 rxrpc_propose_ack_processing_op); 163 rxrpc_propose_ack_processing_op);
164 break; 164 break;
165 default: 165 default:
@@ -177,7 +177,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
177 struct sk_buff *skb; 177 struct sk_buff *skb;
178 rxrpc_serial_t serial; 178 rxrpc_serial_t serial;
179 rxrpc_seq_t hard_ack, top; 179 rxrpc_seq_t hard_ack, top;
180 u8 flags; 180 bool last = false;
181 u8 subpacket;
181 int ix; 182 int ix;
182 183
183 _enter("%d", call->debug_id); 184 _enter("%d", call->debug_id);
@@ -189,30 +190,32 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
189 hard_ack++; 190 hard_ack++;
190 ix = hard_ack & RXRPC_RXTX_BUFF_MASK; 191 ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
191 skb = call->rxtx_buffer[ix]; 192 skb = call->rxtx_buffer[ix];
192 rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); 193 rxrpc_see_skb(skb, rxrpc_skb_rotated);
193 sp = rxrpc_skb(skb); 194 sp = rxrpc_skb(skb);
194 flags = sp->hdr.flags; 195
195 serial = sp->hdr.serial; 196 subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
196 if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) 197 serial = sp->hdr.serial + subpacket;
197 serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; 198
199 if (subpacket == sp->nr_subpackets - 1 &&
200 sp->rx_flags & RXRPC_SKB_INCL_LAST)
201 last = true;
198 202
199 call->rxtx_buffer[ix] = NULL; 203 call->rxtx_buffer[ix] = NULL;
200 call->rxtx_annotations[ix] = 0; 204 call->rxtx_annotations[ix] = 0;
201 /* Barrier against rxrpc_input_data(). */ 205 /* Barrier against rxrpc_input_data(). */
202 smp_store_release(&call->rx_hard_ack, hard_ack); 206 smp_store_release(&call->rx_hard_ack, hard_ack);
203 207
204 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 208 rxrpc_free_skb(skb, rxrpc_skb_freed);
205 209
206 _debug("%u,%u,%02x", hard_ack, top, flags);
207 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); 210 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
208 if (flags & RXRPC_LAST_PACKET) { 211 if (last) {
209 rxrpc_end_rx_phase(call, serial); 212 rxrpc_end_rx_phase(call, serial);
210 } else { 213 } else {
211 /* Check to see if there's an ACK that needs sending. */ 214 /* Check to see if there's an ACK that needs sending. */
212 if (after_eq(hard_ack, call->ackr_consumed + 2) || 215 if (after_eq(hard_ack, call->ackr_consumed + 2) ||
213 after_eq(top, call->ackr_seen + 2) || 216 after_eq(top, call->ackr_seen + 2) ||
214 (hard_ack == top && after(hard_ack, call->ackr_consumed))) 217 (hard_ack == top && after(hard_ack, call->ackr_consumed)))
215 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, 218 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
216 true, true, 219 true, true,
217 rxrpc_propose_ack_rotate_rx); 220 rxrpc_propose_ack_rotate_rx);
218 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) 221 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
@@ -233,18 +236,19 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
233 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 236 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
234 rxrpc_seq_t seq = sp->hdr.seq; 237 rxrpc_seq_t seq = sp->hdr.seq;
235 u16 cksum = sp->hdr.cksum; 238 u16 cksum = sp->hdr.cksum;
239 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
236 240
237 _enter(""); 241 _enter("");
238 242
239 /* For all but the head jumbo subpacket, the security checksum is in a 243 /* For all but the head jumbo subpacket, the security checksum is in a
240 * jumbo header immediately prior to the data. 244 * jumbo header immediately prior to the data.
241 */ 245 */
242 if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) { 246 if (subpacket > 0) {
243 __be16 tmp; 247 __be16 tmp;
244 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) 248 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
245 BUG(); 249 BUG();
246 cksum = ntohs(tmp); 250 cksum = ntohs(tmp);
247 seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; 251 seq += subpacket;
248 } 252 }
249 253
250 return call->conn->security->verify_packet(call, skb, offset, len, 254 return call->conn->security->verify_packet(call, skb, offset, len,
@@ -265,19 +269,18 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
265 u8 *_annotation, 269 u8 *_annotation,
266 unsigned int *_offset, unsigned int *_len) 270 unsigned int *_offset, unsigned int *_len)
267{ 271{
272 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
268 unsigned int offset = sizeof(struct rxrpc_wire_header); 273 unsigned int offset = sizeof(struct rxrpc_wire_header);
269 unsigned int len; 274 unsigned int len;
270 int ret; 275 int ret;
271 u8 annotation = *_annotation; 276 u8 annotation = *_annotation;
277 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
272 278
273 /* Locate the subpacket */ 279 /* Locate the subpacket */
280 offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
274 len = skb->len - offset; 281 len = skb->len - offset;
275 if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) { 282 if (subpacket < sp->nr_subpackets - 1)
276 offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * 283 len = RXRPC_JUMBO_DATALEN;
277 RXRPC_JUMBO_SUBPKTLEN);
278 len = (annotation & RXRPC_RX_ANNO_JLAST) ?
279 skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
280 }
281 284
282 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { 285 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
283 ret = rxrpc_verify_packet(call, skb, annotation, offset, len); 286 ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -303,6 +306,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
303{ 306{
304 struct rxrpc_skb_priv *sp; 307 struct rxrpc_skb_priv *sp;
305 struct sk_buff *skb; 308 struct sk_buff *skb;
309 rxrpc_serial_t serial;
306 rxrpc_seq_t hard_ack, top, seq; 310 rxrpc_seq_t hard_ack, top, seq;
307 size_t remain; 311 size_t remain;
308 bool last; 312 bool last;
@@ -336,12 +340,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
336 break; 340 break;
337 } 341 }
338 smp_rmb(); 342 smp_rmb();
339 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 343 rxrpc_see_skb(skb, rxrpc_skb_seen);
340 sp = rxrpc_skb(skb); 344 sp = rxrpc_skb(skb);
341 345
342 if (!(flags & MSG_PEEK)) 346 if (!(flags & MSG_PEEK)) {
347 serial = sp->hdr.serial;
348 serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
343 trace_rxrpc_receive(call, rxrpc_receive_front, 349 trace_rxrpc_receive(call, rxrpc_receive_front,
344 sp->hdr.serial, seq); 350 serial, seq);
351 }
345 352
346 if (msg) 353 if (msg)
347 sock_recv_timestamp(msg, sock->sk, skb); 354 sock_recv_timestamp(msg, sock->sk, skb);
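
With a jumbo packet now kept as a single skb for its whole lifetime, the subpacket index in the annotation is all rxrpc_locate_data() needs. The offset/length selection reduces to this stand-alone sketch (28-byte wire header assumed, per struct rxrpc_wire_header):

    #include <stddef.h>

    #define WIRE_HDRLEN     28      /* sizeof(struct rxrpc_wire_header) */
    #define JUMBO_DATALEN   1412
    #define JUMBO_SUBPKTLEN 1416

    /* Locate subpacket j of n inside one DATA skb: every subpacket but
     * the last is exactly JUMBO_DATALEN long; the last runs to the end
     * of the packet. Illustrative stand-in on byte counts only.
     */
    static void locate_subpacket(size_t skb_len, unsigned int j,
                                 unsigned int n, size_t *offset, size_t *len)
    {
            *offset = WIRE_HDRLEN + j * JUMBO_SUBPKTLEN;
            *len = skb_len - *offset;
            if (j < n - 1)
                    *len = JUMBO_DATALEN;
    }
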
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index ae8cd8926456..c60c520fde7c 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -187,10 +187,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
187 struct rxrpc_skb_priv *sp; 187 struct rxrpc_skb_priv *sp;
188 struct rxrpc_crypt iv; 188 struct rxrpc_crypt iv;
189 struct scatterlist sg[16]; 189 struct scatterlist sg[16];
190 struct sk_buff *trailer;
191 unsigned int len; 190 unsigned int len;
192 u16 check; 191 u16 check;
193 int nsg;
194 int err; 192 int err;
195 193
196 sp = rxrpc_skb(skb); 194 sp = rxrpc_skb(skb);
@@ -214,15 +212,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
214 crypto_skcipher_encrypt(req); 212 crypto_skcipher_encrypt(req);
215 213
216 /* we want to encrypt the skbuff in-place */ 214 /* we want to encrypt the skbuff in-place */
217 nsg = skb_cow_data(skb, 0, &trailer); 215 err = -EMSGSIZE;
218 err = -ENOMEM; 216 if (skb_shinfo(skb)->nr_frags > 16)
219 if (nsg < 0 || nsg > 16)
220 goto out; 217 goto out;
221 218
222 len = data_size + call->conn->size_align - 1; 219 len = data_size + call->conn->size_align - 1;
223 len &= ~(call->conn->size_align - 1); 220 len &= ~(call->conn->size_align - 1);
224 221
225 sg_init_table(sg, nsg); 222 sg_init_table(sg, ARRAY_SIZE(sg));
226 err = skb_to_sgvec(skb, sg, 0, len); 223 err = skb_to_sgvec(skb, sg, 0, len);
227 if (unlikely(err < 0)) 224 if (unlikely(err < 0))
228 goto out; 225 goto out;
@@ -319,11 +316,10 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
319 struct rxkad_level1_hdr sechdr; 316 struct rxkad_level1_hdr sechdr;
320 struct rxrpc_crypt iv; 317 struct rxrpc_crypt iv;
321 struct scatterlist sg[16]; 318 struct scatterlist sg[16];
322 struct sk_buff *trailer;
323 bool aborted; 319 bool aborted;
324 u32 data_size, buf; 320 u32 data_size, buf;
325 u16 check; 321 u16 check;
326 int nsg, ret; 322 int ret;
327 323
328 _enter(""); 324 _enter("");
329 325
@@ -336,11 +332,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
336 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 332 /* Decrypt the skbuff in-place. TODO: We really want to decrypt
337 * directly into the target buffer. 333 * directly into the target buffer.
338 */ 334 */
339 nsg = skb_cow_data(skb, 0, &trailer); 335 sg_init_table(sg, ARRAY_SIZE(sg));
340 if (nsg < 0 || nsg > 16)
341 goto nomem;
342
343 sg_init_table(sg, nsg);
344 ret = skb_to_sgvec(skb, sg, offset, 8); 336 ret = skb_to_sgvec(skb, sg, offset, 8);
345 if (unlikely(ret < 0)) 337 if (unlikely(ret < 0))
346 return ret; 338 return ret;
@@ -388,10 +380,6 @@ protocol_error:
388 if (aborted) 380 if (aborted)
389 rxrpc_send_abort_packet(call); 381 rxrpc_send_abort_packet(call);
390 return -EPROTO; 382 return -EPROTO;
391
392nomem:
393 _leave(" = -ENOMEM");
394 return -ENOMEM;
395} 383}
396 384
397/* 385/*
@@ -406,7 +394,6 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
406 struct rxkad_level2_hdr sechdr; 394 struct rxkad_level2_hdr sechdr;
407 struct rxrpc_crypt iv; 395 struct rxrpc_crypt iv;
408 struct scatterlist _sg[4], *sg; 396 struct scatterlist _sg[4], *sg;
409 struct sk_buff *trailer;
410 bool aborted; 397 bool aborted;
411 u32 data_size, buf; 398 u32 data_size, buf;
412 u16 check; 399 u16 check;
@@ -423,12 +410,11 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
423 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 410 /* Decrypt the skbuff in-place. TODO: We really want to decrypt
424 * directly into the target buffer. 411 * directly into the target buffer.
425 */ 412 */
426 nsg = skb_cow_data(skb, 0, &trailer);
427 if (nsg < 0)
428 goto nomem;
429
430 sg = _sg; 413 sg = _sg;
431 if (unlikely(nsg > 4)) { 414 nsg = skb_shinfo(skb)->nr_frags;
415 if (nsg <= 4) {
416 nsg = 4;
417 } else {
432 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); 418 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
433 if (!sg) 419 if (!sg)
434 goto nomem; 420 goto nomem;
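
Because secured packets are unshared as they arrive (see the input.c hunk above), rxkad no longer needs skb_cow_data() to flatten the skb before in-place crypto and can size its scatterlist straight from the fragment count. Condensed from the hunks above (kernel context assumed, not freestanding):

    /* Prefer the small on-stack array; fall back to a heap allocation
     * when the skb carries more page fragments than it can describe.
     */
    struct scatterlist _sg[4], *sg = _sg;
    int nsg = skb_shinfo(skb)->nr_frags;

    if (nsg <= 4) {
            nsg = 4;                /* the on-stack array suffices */
    } else {
            sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
            if (!sg)
                    return -ENOMEM; /* the kernel jumps to nomem */
    }
    sg_init_table(sg, nsg);
    err = skb_to_sgvec(skb, sg, offset, len); /* entries used, or < 0 */
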
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bae14438f869..6a1547b270fe 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -176,7 +176,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
176 skb->tstamp = ktime_get_real(); 176 skb->tstamp = ktime_get_real();
177 177
178 ix = seq & RXRPC_RXTX_BUFF_MASK; 178 ix = seq & RXRPC_RXTX_BUFF_MASK;
179 rxrpc_get_skb(skb, rxrpc_skb_tx_got); 179 rxrpc_get_skb(skb, rxrpc_skb_got);
180 call->rxtx_annotations[ix] = annotation; 180 call->rxtx_annotations[ix] = annotation;
181 smp_wmb(); 181 smp_wmb();
182 call->rxtx_buffer[ix] = skb; 182 call->rxtx_buffer[ix] = skb;
@@ -248,7 +248,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
248 } 248 }
249 249
250out: 250out:
251 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 251 rxrpc_free_skb(skb, rxrpc_skb_freed);
252 _leave(" = %d", ret); 252 _leave(" = %d", ret);
253 return ret; 253 return ret;
254} 254}
@@ -289,7 +289,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
289 289
290 skb = call->tx_pending; 290 skb = call->tx_pending;
291 call->tx_pending = NULL; 291 call->tx_pending = NULL;
292 rxrpc_see_skb(skb, rxrpc_skb_tx_seen); 292 rxrpc_see_skb(skb, rxrpc_skb_seen);
293 293
294 copied = 0; 294 copied = 0;
295 do { 295 do {
@@ -336,7 +336,9 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
336 if (!skb) 336 if (!skb)
337 goto maybe_error; 337 goto maybe_error;
338 338
339 rxrpc_new_skb(skb, rxrpc_skb_tx_new); 339 sp = rxrpc_skb(skb);
340 sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
341 rxrpc_new_skb(skb, rxrpc_skb_new);
340 342
341 _debug("ALLOC SEND %p", skb); 343 _debug("ALLOC SEND %p", skb);
342 344
@@ -346,7 +348,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
346 skb_reserve(skb, call->conn->security_size); 348 skb_reserve(skb, call->conn->security_size);
347 skb->len += call->conn->security_size; 349 skb->len += call->conn->security_size;
348 350
349 sp = rxrpc_skb(skb);
350 sp->remain = chunk; 351 sp->remain = chunk;
351 if (sp->remain > skb_tailroom(skb)) 352 if (sp->remain > skb_tailroom(skb))
352 sp->remain = skb_tailroom(skb); 353 sp->remain = skb_tailroom(skb);
@@ -439,7 +440,7 @@ out:
439 return ret; 440 return ret;
440 441
441call_terminated: 442call_terminated:
442 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 443 rxrpc_free_skb(skb, rxrpc_skb_freed);
443 _leave(" = %d", call->error); 444 _leave(" = %d", call->error);
444 return call->error; 445 return call->error;
445 446
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 9ad5045b7c2f..0348d2bf6f7d 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -14,7 +14,8 @@
14#include <net/af_rxrpc.h> 14#include <net/af_rxrpc.h>
15#include "ar-internal.h" 15#include "ar-internal.h"
16 16
17#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) 17#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
18#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
18 19
19/* 20/*
20 * Note the allocation or reception of a socket buffer. 21 * Note the allocation or reception of a socket buffer.
@@ -22,8 +23,9 @@
22void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 23void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
23{ 24{
24 const void *here = __builtin_return_address(0); 25 const void *here = __builtin_return_address(0);
25 int n = atomic_inc_return(select_skb_count(op)); 26 int n = atomic_inc_return(select_skb_count(skb));
26 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 27 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
28 rxrpc_skb(skb)->rx_flags, here);
27} 29}
28 30
29/* 31/*
@@ -33,8 +35,9 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
33{ 35{
34 const void *here = __builtin_return_address(0); 36 const void *here = __builtin_return_address(0);
35 if (skb) { 37 if (skb) {
36 int n = atomic_read(select_skb_count(op)); 38 int n = atomic_read(select_skb_count(skb));
37 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 39 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
40 rxrpc_skb(skb)->rx_flags, here);
38 } 41 }
39} 42}
40 43
@@ -44,12 +47,23 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
44void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 47void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
45{ 48{
46 const void *here = __builtin_return_address(0); 49 const void *here = __builtin_return_address(0);
47 int n = atomic_inc_return(select_skb_count(op)); 50 int n = atomic_inc_return(select_skb_count(skb));
48 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 51 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
52 rxrpc_skb(skb)->rx_flags, here);
49 skb_get(skb); 53 skb_get(skb);
50} 54}
51 55
52/* 56/*
57 * Note the dropping of a ref on a socket buffer by the core.
58 */
59void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
60{
61 const void *here = __builtin_return_address(0);
62 int n = atomic_inc_return(&rxrpc_n_rx_skbs);
63 trace_rxrpc_skb(skb, op, 0, n, 0, here);
64}
65
66/*
53 * Note the destruction of a socket buffer. 67 * Note the destruction of a socket buffer.
54 */ 68 */
55void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 69void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
@@ -58,8 +72,9 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
58 if (skb) { 72 if (skb) {
59 int n; 73 int n;
60 CHECK_SLAB_OKAY(&skb->users); 74 CHECK_SLAB_OKAY(&skb->users);
61 n = atomic_dec_return(select_skb_count(op)); 75 n = atomic_dec_return(select_skb_count(skb));
62 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 76 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
77 rxrpc_skb(skb)->rx_flags, here);
63 kfree_skb(skb); 78 kfree_skb(skb);
64 } 79 }
65} 80}
@@ -72,9 +87,10 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
72 const void *here = __builtin_return_address(0); 87 const void *here = __builtin_return_address(0);
73 struct sk_buff *skb; 88 struct sk_buff *skb;
74 while ((skb = skb_dequeue((list))) != NULL) { 89 while ((skb = skb_dequeue((list))) != NULL) {
75 int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged)); 90 int n = atomic_dec_return(select_skb_count(skb));
76 trace_rxrpc_skb(skb, rxrpc_skb_rx_purged, 91 trace_rxrpc_skb(skb, rxrpc_skb_purged,
77 refcount_read(&skb->users), n, here); 92 refcount_read(&skb->users), n,
93 rxrpc_skb(skb)->rx_flags, here);
78 kfree_skb(skb); 94 kfree_skb(skb);
79 } 95 }
80} 96}
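Note: with the rxrpc_skb_tx_*/rxrpc_skb_rx_* trace enums merged, skbuff.c can no longer infer the direction from the op, so select_skb_count() now reads a per-buffer flag instead; rxrpc_purge_queue() likewise counts each skb against whichever side it actually belongs to. A compilable userspace model of that selection, with SKB_TX_BUFFER standing in for RXRPC_SKB_TX_BUFFER:

#include <stdio.h>

#define SKB_TX_BUFFER 0x1	/* stands in for RXRPC_SKB_TX_BUFFER */

struct fake_skb { unsigned int rx_flags; };

static int n_tx_skbs, n_rx_skbs;

/* pick the counter from the buffer itself, not from the trace op */
static int *select_skb_count(const struct fake_skb *skb)
{
	return (skb->rx_flags & SKB_TX_BUFFER) ? &n_tx_skbs : &n_rx_skbs;
}

int main(void)
{
	struct fake_skb tx = { .rx_flags = SKB_TX_BUFFER };
	struct fake_skb rx = { .rx_flags = 0 };

	++*select_skb_count(&tx);
	++*select_skb_count(&rx);
	printf("tx=%d rx=%d\n", n_tx_skbs, n_rx_skbs);
	return 0;
}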
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index fd1f7e799e23..04b7bd4ec751 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -422,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net)
422{ 422{
423 struct tc_action_net *tn = net_generic(net, bpf_net_id); 423 struct tc_action_net *tn = net_generic(net, bpf_net_id);
424 424
425 return tc_action_net_init(tn, &act_bpf_ops); 425 return tc_action_net_init(net, tn, &act_bpf_ops);
426} 426}
427 427
428static void __net_exit bpf_exit_net(struct list_head *net_list) 428static void __net_exit bpf_exit_net(struct list_head *net_list)
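Note: this is the first of a long run of identical one-line changes below: tc_action_net_init() gains the struct net it is initializing, which lets the action infrastructure remember the namespace and hand it back at teardown (the act_ipt hunk further down reads it as a->idrinfo->net). A toy model of threading the handle through, with deliberately simplified struct layouts that do not match the real tcf_idrinfo:

#include <stdio.h>

struct net { int id; };
struct tcf_idrinfo { struct net *net; };	/* layout simplified */
struct tc_action_net { struct tcf_idrinfo idr; };

static int tc_action_net_init(struct net *net, struct tc_action_net *tn)
{
	tn->idr.net = net;	/* remembered for namespace-aware teardown */
	return 0;
}

int main(void)
{
	struct net init_net = { .id = 1 };
	struct tc_action_net tn;

	tc_action_net_init(&init_net, &tn);
	printf("teardown can use netns id %d\n", tn.idr.net->id);
	return 0;
}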
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 32ac04d77a45..2b43cacf82af 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -231,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net)
231{ 231{
232 struct tc_action_net *tn = net_generic(net, connmark_net_id); 232 struct tc_action_net *tn = net_generic(net, connmark_net_id);
233 233
234 return tc_action_net_init(tn, &act_connmark_ops); 234 return tc_action_net_init(net, tn, &act_connmark_ops);
235} 235}
236 236
237static void __net_exit connmark_exit_net(struct list_head *net_list) 237static void __net_exit connmark_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 9b9288267a54..d3cfad88dc3a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -714,7 +714,7 @@ static __net_init int csum_init_net(struct net *net)
714{ 714{
715 struct tc_action_net *tn = net_generic(net, csum_net_id); 715 struct tc_action_net *tn = net_generic(net, csum_net_id);
716 716
717 return tc_action_net_init(tn, &act_csum_ops); 717 return tc_action_net_init(net, tn, &act_csum_ops);
718} 718}
719 719
720static void __net_exit csum_exit_net(struct list_head *net_list) 720static void __net_exit csum_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 33a1a7406e87..cdd6f3818097 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -939,7 +939,7 @@ static __net_init int ct_init_net(struct net *net)
939 tn->labels = true; 939 tn->labels = true;
940 } 940 }
941 941
942 return tc_action_net_init(&tn->tn, &act_ct_ops); 942 return tc_action_net_init(net, &tn->tn, &act_ct_ops);
943} 943}
944 944
945static void __net_exit ct_exit_net(struct list_head *net_list) 945static void __net_exit ct_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 06ef74b74911..0dbcfd1dca7b 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -376,7 +376,7 @@ static __net_init int ctinfo_init_net(struct net *net)
376{ 376{
377 struct tc_action_net *tn = net_generic(net, ctinfo_net_id); 377 struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
378 378
379 return tc_action_net_init(tn, &act_ctinfo_ops); 379 return tc_action_net_init(net, tn, &act_ctinfo_ops);
380} 380}
381 381
382static void __net_exit ctinfo_exit_net(struct list_head *net_list) 382static void __net_exit ctinfo_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 8f0140c6ca58..324f1d1f6d47 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -278,7 +278,7 @@ static __net_init int gact_init_net(struct net *net)
278{ 278{
279 struct tc_action_net *tn = net_generic(net, gact_net_id); 279 struct tc_action_net *tn = net_generic(net, gact_net_id);
280 280
281 return tc_action_net_init(tn, &act_gact_ops); 281 return tc_action_net_init(net, tn, &act_gact_ops);
282} 282}
283 283
284static void __net_exit gact_exit_net(struct list_head *net_list) 284static void __net_exit gact_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 92ee853d43e6..3a31e241c647 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -890,7 +890,7 @@ static __net_init int ife_init_net(struct net *net)
890{ 890{
891 struct tc_action_net *tn = net_generic(net, ife_net_id); 891 struct tc_action_net *tn = net_generic(net, ife_net_id);
892 892
893 return tc_action_net_init(tn, &act_ife_ops); 893 return tc_action_net_init(net, tn, &act_ife_ops);
894} 894}
895 895
896static void __net_exit ife_exit_net(struct list_head *net_list) 896static void __net_exit ife_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index ce2c30a591d2..214a03d405cf 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
61 return 0; 61 return 0;
62} 62}
63 63
64static void ipt_destroy_target(struct xt_entry_target *t) 64static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
65{ 65{
66 struct xt_tgdtor_param par = { 66 struct xt_tgdtor_param par = {
67 .target = t->u.kernel.target, 67 .target = t->u.kernel.target,
68 .targinfo = t->data, 68 .targinfo = t->data,
69 .family = NFPROTO_IPV4, 69 .family = NFPROTO_IPV4,
70 .net = net,
70 }; 71 };
71 if (par.target->destroy != NULL) 72 if (par.target->destroy != NULL)
72 par.target->destroy(&par); 73 par.target->destroy(&par);
@@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a)
78 struct tcf_ipt *ipt = to_ipt(a); 79 struct tcf_ipt *ipt = to_ipt(a);
79 80
80 if (ipt->tcfi_t) { 81 if (ipt->tcfi_t) {
81 ipt_destroy_target(ipt->tcfi_t); 82 ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
82 kfree(ipt->tcfi_t); 83 kfree(ipt->tcfi_t);
83 } 84 }
84 kfree(ipt->tcfi_tname); 85 kfree(ipt->tcfi_tname);
@@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
180 181
181 spin_lock_bh(&ipt->tcf_lock); 182 spin_lock_bh(&ipt->tcf_lock);
182 if (ret != ACT_P_CREATED) { 183 if (ret != ACT_P_CREATED) {
183 ipt_destroy_target(ipt->tcfi_t); 184 ipt_destroy_target(ipt->tcfi_t, net);
184 kfree(ipt->tcfi_tname); 185 kfree(ipt->tcfi_tname);
185 kfree(ipt->tcfi_t); 186 kfree(ipt->tcfi_t);
186 } 187 }
@@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net)
350{ 351{
351 struct tc_action_net *tn = net_generic(net, ipt_net_id); 352 struct tc_action_net *tn = net_generic(net, ipt_net_id);
352 353
353 return tc_action_net_init(tn, &act_ipt_ops); 354 return tc_action_net_init(net, tn, &act_ipt_ops);
354} 355}
355 356
356static void __net_exit ipt_exit_net(struct list_head *net_list) 357static void __net_exit ipt_exit_net(struct list_head *net_list)
@@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net)
399{ 400{
400 struct tc_action_net *tn = net_generic(net, xt_net_id); 401 struct tc_action_net *tn = net_generic(net, xt_net_id);
401 402
402 return tc_action_net_init(tn, &act_xt_ops); 403 return tc_action_net_init(net, tn, &act_xt_ops);
403} 404}
404 405
405static void __net_exit xt_exit_net(struct list_head *net_list) 406static void __net_exit xt_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index be3f88dfc37e..9d1bf508075a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -453,7 +453,7 @@ static __net_init int mirred_init_net(struct net *net)
453{ 453{
454 struct tc_action_net *tn = net_generic(net, mirred_net_id); 454 struct tc_action_net *tn = net_generic(net, mirred_net_id);
455 455
456 return tc_action_net_init(tn, &act_mirred_ops); 456 return tc_action_net_init(net, tn, &act_mirred_ops);
457} 457}
458 458
459static void __net_exit mirred_exit_net(struct list_head *net_list) 459static void __net_exit mirred_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 0f299e3b618c..e168df0e008a 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -375,7 +375,7 @@ static __net_init int mpls_init_net(struct net *net)
375{ 375{
376 struct tc_action_net *tn = net_generic(net, mpls_net_id); 376 struct tc_action_net *tn = net_generic(net, mpls_net_id);
377 377
378 return tc_action_net_init(tn, &act_mpls_ops); 378 return tc_action_net_init(net, tn, &act_mpls_ops);
379} 379}
380 380
381static void __net_exit mpls_exit_net(struct list_head *net_list) 381static void __net_exit mpls_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 7b858c11b1b5..ea4c5359e7df 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -327,7 +327,7 @@ static __net_init int nat_init_net(struct net *net)
327{ 327{
328 struct tc_action_net *tn = net_generic(net, nat_net_id); 328 struct tc_action_net *tn = net_generic(net, nat_net_id);
329 329
330 return tc_action_net_init(tn, &act_nat_ops); 330 return tc_action_net_init(net, tn, &act_nat_ops);
331} 331}
332 332
333static void __net_exit nat_exit_net(struct list_head *net_list) 333static void __net_exit nat_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17360c6faeaa..cdfaa79382a2 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -498,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net)
498{ 498{
499 struct tc_action_net *tn = net_generic(net, pedit_net_id); 499 struct tc_action_net *tn = net_generic(net, pedit_net_id);
500 500
501 return tc_action_net_init(tn, &act_pedit_ops); 501 return tc_action_net_init(net, tn, &act_pedit_ops);
502} 502}
503 503
504static void __net_exit pedit_exit_net(struct list_head *net_list) 504static void __net_exit pedit_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 49cec3e64a4d..6315e0f8d26e 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -371,7 +371,7 @@ static __net_init int police_init_net(struct net *net)
371{ 371{
372 struct tc_action_net *tn = net_generic(net, police_net_id); 372 struct tc_action_net *tn = net_generic(net, police_net_id);
373 373
374 return tc_action_net_init(tn, &act_police_ops); 374 return tc_action_net_init(net, tn, &act_police_ops);
375} 375}
376 376
377static void __net_exit police_exit_net(struct list_head *net_list) 377static void __net_exit police_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 595308d60133..10229124a992 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
102 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); 102 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
103 s->rate = rate; 103 s->rate = rate;
104 s->psample_group_num = psample_group_num; 104 s->psample_group_num = psample_group_num;
105 RCU_INIT_POINTER(s->psample_group, psample_group); 105 rcu_swap_protected(s->psample_group, psample_group,
106 lockdep_is_held(&s->tcf_lock));
106 107
107 if (tb[TCA_SAMPLE_TRUNC_SIZE]) { 108 if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
108 s->truncate = true; 109 s->truncate = true;
109 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); 110 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
110 } 111 }
111 spin_unlock_bh(&s->tcf_lock); 112 spin_unlock_bh(&s->tcf_lock);
113
114 if (psample_group)
115 psample_group_put(psample_group);
112 if (goto_ch) 116 if (goto_ch)
113 tcf_chain_put_by_act(goto_ch); 117 tcf_chain_put_by_act(goto_ch);
114 118
@@ -265,7 +269,7 @@ static __net_init int sample_init_net(struct net *net)
265{ 269{
266 struct tc_action_net *tn = net_generic(net, sample_net_id); 270 struct tc_action_net *tn = net_generic(net, sample_net_id);
267 271
268 return tc_action_net_init(tn, &act_sample_ops); 272 return tc_action_net_init(net, tn, &act_sample_ops);
269} 273}
270 274
271static void __net_exit sample_exit_net(struct list_head *net_list) 275static void __net_exit sample_exit_net(struct list_head *net_list)
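Note: beyond the signature change, act_sample's init path now swaps the new psample group in under tcf_lock with rcu_swap_protected() and drops the reference on whatever it displaced only after unlocking; the old RCU_INIT_POINTER() overwrote the pointer and leaked the previous group's reference on replace. A lock-elided userspace caricature of the swap-then-put ordering:

#include <stdio.h>

#define swap_ptrs(a, b) do {	\
	__typeof__(a) _t = (a);	\
	(a) = (b);		\
	(b) = _t;		\
} while (0)

struct group { int refs; };

static void group_put(struct group *g)
{
	if (g && --g->refs == 0)
		printf("old group freed\n");
}

int main(void)
{
	struct group oldg = { .refs = 1 }, newg = { .refs = 1 };
	struct group *current_group = &oldg;	/* s->psample_group */
	struct group *incoming = &newg;		/* freshly looked-up group */

	/* spin_lock_bh(&s->tcf_lock) would be held here */
	swap_ptrs(current_group, incoming);	/* publish the new group */
	/* spin_unlock_bh(&s->tcf_lock) */

	group_put(incoming);	/* incoming now holds the *old* pointer */
	printf("active group refs=%d\n", current_group->refs);
	return 0;
}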
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 33aefa25b545..6120e56117ca 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -232,7 +232,7 @@ static __net_init int simp_init_net(struct net *net)
232{ 232{
233 struct tc_action_net *tn = net_generic(net, simp_net_id); 233 struct tc_action_net *tn = net_generic(net, simp_net_id);
234 234
235 return tc_action_net_init(tn, &act_simp_ops); 235 return tc_action_net_init(net, tn, &act_simp_ops);
236} 236}
237 237
238static void __net_exit simp_exit_net(struct list_head *net_list) 238static void __net_exit simp_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index b100870f02a6..6a8d3337c577 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
307 return tcf_idr_search(tn, a, index); 307 return tcf_idr_search(tn, a, index);
308} 308}
309 309
310static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
311{
312 return nla_total_size(sizeof(struct tc_skbedit))
313 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
314 + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
315 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
316 + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
317 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
318 + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
319}
320
310static struct tc_action_ops act_skbedit_ops = { 321static struct tc_action_ops act_skbedit_ops = {
311 .kind = "skbedit", 322 .kind = "skbedit",
312 .id = TCA_ID_SKBEDIT, 323 .id = TCA_ID_SKBEDIT,
@@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = {
316 .init = tcf_skbedit_init, 327 .init = tcf_skbedit_init,
317 .cleanup = tcf_skbedit_cleanup, 328 .cleanup = tcf_skbedit_cleanup,
318 .walk = tcf_skbedit_walker, 329 .walk = tcf_skbedit_walker,
330 .get_fill_size = tcf_skbedit_get_fill_size,
319 .lookup = tcf_skbedit_search, 331 .lookup = tcf_skbedit_search,
320 .size = sizeof(struct tcf_skbedit), 332 .size = sizeof(struct tcf_skbedit),
321}; 333};
@@ -324,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net)
324{ 336{
325 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 337 struct tc_action_net *tn = net_generic(net, skbedit_net_id);
326 338
327 return tc_action_net_init(tn, &act_skbedit_ops); 339 return tc_action_net_init(net, tn, &act_skbedit_ops);
328} 340}
329 341
330static void __net_exit skbedit_exit_net(struct list_head *net_list) 342static void __net_exit skbedit_exit_net(struct list_head *net_list)
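Note: tcf_skbedit_get_fill_size() lets the action core preallocate a netlink buffer large enough for a dump instead of growing it mid-fill. The arithmetic is a sum of nla_total_size() terms, one per attribute (the parms term for struct tc_skbedit is omitted below to avoid guessing its size). A standalone sketch of the accounting; the 4-byte header and alignment mirror netlink's rules:

#include <stdio.h>
#include <stdint.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	4

static size_t nla_total_size(size_t payload)
{
	return NLA_ALIGN(payload) + NLA_HDRLEN;
}

int main(void)
{
	size_t sz = nla_total_size(sizeof(uint32_t))	/* PRIORITY */
		  + nla_total_size(sizeof(uint16_t))	/* QUEUE_MAPPING */
		  + nla_total_size(sizeof(uint32_t))	/* MARK */
		  + nla_total_size(sizeof(uint16_t))	/* PTYPE */
		  + nla_total_size(sizeof(uint32_t))	/* MASK */
		  + nla_total_size(sizeof(uint64_t));	/* FLAGS (64-bit) */

	printf("attribute budget: %zu bytes\n", sz);
	return 0;
}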
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 7da3518e18ef..888437f97ba6 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -287,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net)
287{ 287{
288 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 288 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
289 289
290 return tc_action_net_init(tn, &act_skbmod_ops); 290 return tc_action_net_init(net, tn, &act_skbmod_ops);
291} 291}
292 292
293static void __net_exit skbmod_exit_net(struct list_head *net_list) 293static void __net_exit skbmod_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 6d0debdc9b97..2f83a79f76aa 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -600,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
600{ 600{
601 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 601 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
602 602
603 return tc_action_net_init(tn, &act_tunnel_key_ops); 603 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
604} 604}
605 605
606static void __net_exit tunnel_key_exit_net(struct list_head *net_list) 606static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index a3c9eea1ee8a..287a30bf8930 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -334,7 +334,7 @@ static __net_init int vlan_init_net(struct net *net)
334{ 334{
335 struct tc_action_net *tn = net_generic(net, vlan_net_id); 335 struct tc_action_net *tn = net_generic(net, vlan_net_id);
336 336
337 return tc_action_net_init(tn, &act_vlan_ops); 337 return tc_action_net_init(net, tn, &act_vlan_ops);
338} 338}
339 339
340static void __net_exit vlan_exit_net(struct list_head *net_list) 340static void __net_exit vlan_exit_net(struct list_head *net_list)
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 732e109c3055..810645b5c086 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
181 s64 credits; 181 s64 credits;
182 int len; 182 int len;
183 183
184 if (atomic64_read(&q->port_rate) == -1) {
185 WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
186 return NULL;
187 }
188
189 if (q->credits < 0) { 184 if (q->credits < 0) {
190 credits = timediff_to_credits(now - q->last, q->idleslope); 185 credits = timediff_to_credits(now - q->last, q->idleslope);
191 186
@@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
303static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) 298static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
304{ 299{
305 struct ethtool_link_ksettings ecmd; 300 struct ethtool_link_ksettings ecmd;
301 int speed = SPEED_10;
306 int port_rate = -1; 302 int port_rate = -1;
303 int err;
304
305 err = __ethtool_get_link_ksettings(dev, &ecmd);
306 if (err < 0)
307 goto skip;
308
309 if (ecmd.base.speed != SPEED_UNKNOWN)
310 speed = ecmd.base.speed;
307 311
308 if (!__ethtool_get_link_ksettings(dev, &ecmd) && 312skip:
309 ecmd.base.speed != SPEED_UNKNOWN) 313 port_rate = speed * 1000 * BYTES_PER_KBIT;
310 port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
311 314
312 atomic64_set(&q->port_rate, port_rate); 315 atomic64_set(&q->port_rate, port_rate);
313 netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", 316 netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
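Note: the cbs change removes the WARN_ONCE in dequeue by making port_rate always well defined: if __ethtool_get_link_ksettings() fails or reports SPEED_UNKNOWN, the qdisc now assumes 10 Mbit/s rather than storing -1. The derivation as a runnable sketch; BYTES_PER_KBIT mirrors the kernel's 1000/8 definition:

#include <stdio.h>

#define SPEED_10	10
#define SPEED_UNKNOWN	(-1)
#define BYTES_PER_KBIT	(1000LL / 8)

static long long cbs_port_rate(int ethtool_err, int link_speed)
{
	int speed = SPEED_10;	/* conservative default */

	if (!ethtool_err && link_speed != SPEED_UNKNOWN)
		speed = link_speed;	/* Mbit/s */

	return speed * 1000 * BYTES_PER_KBIT;	/* bytes per second */
}

int main(void)
{
	printf("no link info: %lld B/s\n", cbs_port_rate(-1, 0));
	printf("1G link:      %lld B/s\n", cbs_port_rate(0, 1000));
	return 0;
}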
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 11c03cf4aa74..137db1cbde85 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -624,8 +624,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
624 624
625 err = skb_array_produce(q, skb); 625 err = skb_array_produce(q, skb);
626 626
627 if (unlikely(err)) 627 if (unlikely(err)) {
628 return qdisc_drop_cpu(skb, qdisc, to_free); 628 if (qdisc_is_percpu_stats(qdisc))
629 return qdisc_drop_cpu(skb, qdisc, to_free);
630 else
631 return qdisc_drop(skb, qdisc, to_free);
632 }
629 633
630 qdisc_update_stats_at_enqueue(qdisc, pkt_len); 634 qdisc_update_stats_at_enqueue(qdisc, pkt_len);
631 return NET_XMIT_SUCCESS; 635 return NET_XMIT_SUCCESS;
@@ -688,11 +692,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
688 kfree_skb(skb); 692 kfree_skb(skb);
689 } 693 }
690 694
691 for_each_possible_cpu(i) { 695 if (qdisc_is_percpu_stats(qdisc)) {
692 struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); 696 for_each_possible_cpu(i) {
697 struct gnet_stats_queue *q;
693 698
694 q->backlog = 0; 699 q = per_cpu_ptr(qdisc->cpu_qstats, i);
695 q->qlen = 0; 700 q->backlog = 0;
701 q->qlen = 0;
702 }
696 } 703 }
697} 704}
698 705
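Note: pfifo_fast is no longer guaranteed to have per-CPU qstats (it can be attached where a single shared counter set is used), so the enqueue drop path and reset both check qdisc_is_percpu_stats() before touching qdisc->cpu_qstats. The shape of that guard, in plain C with a boolean standing in for the real predicate:

#include <stdio.h>
#include <stdbool.h>

struct fake_qdisc {
	bool percpu_stats;	/* stands in for qdisc_is_percpu_stats() */
};

static int drop_packet(const struct fake_qdisc *q)
{
	if (q->percpu_stats)
		printf("qdisc_drop_cpu(): bump this CPU's counters\n");
	else
		printf("qdisc_drop(): bump the shared counters\n");
	return 1;	/* NET_XMIT_DROP, roughly */
}

int main(void)
{
	struct fake_qdisc with = { .percpu_stats = true };
	struct fake_qdisc without = { .percpu_stats = false };

	drop_packet(&with);
	drop_packet(&without);
	return 0;
}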
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c39db507ba3f..8d8bc2ec5cd6 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -477,11 +477,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
477 u32 gate_mask; 477 u32 gate_mask;
478 int i; 478 int i;
479 479
480 if (atomic64_read(&q->picos_per_byte) == -1) {
481 WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
482 return NULL;
483 }
484
485 rcu_read_lock(); 480 rcu_read_lock();
486 entry = rcu_dereference(q->current_entry); 481 entry = rcu_dereference(q->current_entry);
487 /* if there's no entry, it means that the schedule didn't 482 /* if there's no entry, it means that the schedule didn't
@@ -958,12 +953,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
958 struct taprio_sched *q) 953 struct taprio_sched *q)
959{ 954{
960 struct ethtool_link_ksettings ecmd; 955 struct ethtool_link_ksettings ecmd;
961 int picos_per_byte = -1; 956 int speed = SPEED_10;
957 int picos_per_byte;
958 int err;
962 959
963 if (!__ethtool_get_link_ksettings(dev, &ecmd) && 960 err = __ethtool_get_link_ksettings(dev, &ecmd);
964 ecmd.base.speed != SPEED_UNKNOWN) 961 if (err < 0)
965 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, 962 goto skip;
966 ecmd.base.speed * 1000 * 1000); 963
964 if (ecmd.base.speed != SPEED_UNKNOWN)
965 speed = ecmd.base.speed;
966
967skip:
968 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
969 speed * 1000 * 1000);
967 970
968 atomic64_set(&q->picos_per_byte, picos_per_byte); 971 atomic64_set(&q->picos_per_byte, picos_per_byte);
969 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", 972 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1195,7 +1198,8 @@ unlock:
1195 spin_unlock_bh(qdisc_lock(sch)); 1198 spin_unlock_bh(qdisc_lock(sch));
1196 1199
1197free_sched: 1200free_sched:
1198 kfree(new_admin); 1201 if (new_admin)
1202 call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1199 1203
1200 return err; 1204 return err;
1201} 1205}
@@ -1248,6 +1252,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1248 */ 1252 */
1249 q->clockid = -1; 1253 q->clockid = -1;
1250 1254
1255 spin_lock(&taprio_list_lock);
1256 list_add(&q->taprio_list, &taprio_list);
1257 spin_unlock(&taprio_list_lock);
1258
1251 if (sch->parent != TC_H_ROOT) 1259 if (sch->parent != TC_H_ROOT)
1252 return -EOPNOTSUPP; 1260 return -EOPNOTSUPP;
1253 1261
@@ -1265,10 +1273,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1265 if (!opt) 1273 if (!opt)
1266 return -EINVAL; 1274 return -EINVAL;
1267 1275
1268 spin_lock(&taprio_list_lock);
1269 list_add(&q->taprio_list, &taprio_list);
1270 spin_unlock(&taprio_list_lock);
1271
1272 for (i = 0; i < dev->num_tx_queues; i++) { 1276 for (i = 0; i < dev->num_tx_queues; i++) {
1273 struct netdev_queue *dev_queue; 1277 struct netdev_queue *dev_queue;
1274 struct Qdisc *qdisc; 1278 struct Qdisc *qdisc;
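Note: two of the taprio hunks are ordering fixes rather than behaviour changes: q is linked onto taprio_list before any early return in taprio_init() so taprio_destroy() may always unlink it, and the free_sched error path now goes through call_rcu() because readers may still hold an RCU reference to new_admin. The list-ordering half, reduced to a runnable doubly-linked-list toy:

#include <stdio.h>

struct node { struct node *prev, *next; };

static struct node head = { &head, &head };

static void list_add(struct node *n)
{
	n->next = head.next;
	n->prev = &head;
	head.next->prev = n;
	head.next = n;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* init links first, validates second; any error still leaves q linked */
static int taprio_init_toy(struct node *q, int bad_parent)
{
	list_add(q);
	if (bad_parent)
		return -95;	/* -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	struct node q;
	int err = taprio_init_toy(&q, 1);

	if (err)
		list_del(&q);	/* destroy(): always safe to unlink now */
	printf("err=%d, list empty=%d\n", err, head.next == &head);
	return 0;
}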
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index a554d6d15d1b..1cf5bb5b73c4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
546 */ 546 */
547 if (net->sctp.pf_enable && 547 if (net->sctp.pf_enable &&
548 (transport->state == SCTP_ACTIVE) && 548 (transport->state == SCTP_ACTIVE) &&
549 (asoc->pf_retrans < transport->pathmaxrxt) && 549 (transport->error_count < transport->pathmaxrxt) &&
550 (transport->error_count > asoc->pf_retrans)) { 550 (transport->error_count > asoc->pf_retrans)) {
551 551
552 sctp_assoc_control_transport(asoc, transport, 552 sctp_assoc_control_transport(asoc, transport,
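Note: the sctp fix makes the potentially-failed window a property of the transport alone: a strike moves a transport to PF only while pf_retrans < error_count < pathmaxrxt, where the left-hand comparison previously used asoc->pf_retrans against pathmaxrxt by mistake. The predicate, runnable:

#include <stdio.h>
#include <stdbool.h>

static bool enters_pf(int error_count, int pf_retrans, int pathmaxrxt)
{
	return error_count < pathmaxrxt && error_count > pf_retrans;
}

int main(void)
{
	/* pf_retrans = 0, pathmaxrxt = 5: strikes 1..4 are PF, 5 fails */
	for (int e = 1; e <= 5; e++)
		printf("error_count=%d -> PF=%d\n", e, enters_pf(e, 0, 5));
	return 0;
}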
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 25946604af85..e83cdaa2ab76 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
316 nstr_list[i] = htons(str_list[i]); 316 nstr_list[i] = htons(str_list[i]);
317 317
318 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) { 318 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
319 kfree(nstr_list);
319 retval = -EAGAIN; 320 retval = -EAGAIN;
320 goto out; 321 goto out;
321 } 322 }
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f0de323d15d6..6c8f09c1ce51 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
76 DEFINE_WAIT_FUNC(wait, woken_wake_function); 76 DEFINE_WAIT_FUNC(wait, woken_wake_function);
77 struct smc_connection *conn = &smc->conn; 77 struct smc_connection *conn = &smc->conn;
78 struct sock *sk = &smc->sk; 78 struct sock *sk = &smc->sk;
79 bool noblock;
80 long timeo; 79 long timeo;
81 int rc = 0; 80 int rc = 0;
82 81
83 /* similar to sk_stream_wait_memory */ 82 /* similar to sk_stream_wait_memory */
84 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 83 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
85 noblock = timeo ? false : true;
86 add_wait_queue(sk_sleep(sk), &wait); 84 add_wait_queue(sk_sleep(sk), &wait);
87 while (1) { 85 while (1) {
88 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 86 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
97 break; 95 break;
98 } 96 }
99 if (!timeo) { 97 if (!timeo) {
100 if (noblock) 98 /* ensure EPOLLOUT is subsequently generated */
101 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 99 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
102 rc = -EAGAIN; 100 rc = -EAGAIN;
103 break; 101 break;
104 } 102 }
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d8679b6027e9..a07b516e503a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1970,6 +1970,7 @@ call_bind(struct rpc_task *task)
1970static void 1970static void
1971call_bind_status(struct rpc_task *task) 1971call_bind_status(struct rpc_task *task)
1972{ 1972{
1973 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1973 int status = -EIO; 1974 int status = -EIO;
1974 1975
1975 if (rpc_task_transmitted(task)) { 1976 if (rpc_task_transmitted(task)) {
@@ -1977,14 +1978,15 @@ call_bind_status(struct rpc_task *task)
1977 return; 1978 return;
1978 } 1979 }
1979 1980
1980 if (task->tk_status >= 0) { 1981 dprint_status(task);
1981 dprint_status(task); 1982 trace_rpc_bind_status(task);
1983 if (task->tk_status >= 0)
1984 goto out_next;
1985 if (xprt_bound(xprt)) {
1982 task->tk_status = 0; 1986 task->tk_status = 0;
1983 task->tk_action = call_connect; 1987 goto out_next;
1984 return;
1985 } 1988 }
1986 1989
1987 trace_rpc_bind_status(task);
1988 switch (task->tk_status) { 1990 switch (task->tk_status) {
1989 case -ENOMEM: 1991 case -ENOMEM:
1990 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); 1992 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
@@ -2003,6 +2005,9 @@ call_bind_status(struct rpc_task *task)
2003 task->tk_rebind_retry--; 2005 task->tk_rebind_retry--;
2004 rpc_delay(task, 3*HZ); 2006 rpc_delay(task, 3*HZ);
2005 goto retry_timeout; 2007 goto retry_timeout;
2008 case -ENOBUFS:
2009 rpc_delay(task, HZ >> 2);
2010 goto retry_timeout;
2006 case -EAGAIN: 2011 case -EAGAIN:
2007 goto retry_timeout; 2012 goto retry_timeout;
2008 case -ETIMEDOUT: 2013 case -ETIMEDOUT:
@@ -2026,7 +2031,6 @@ call_bind_status(struct rpc_task *task)
2026 case -ENETDOWN: 2031 case -ENETDOWN:
2027 case -EHOSTUNREACH: 2032 case -EHOSTUNREACH:
2028 case -ENETUNREACH: 2033 case -ENETUNREACH:
2029 case -ENOBUFS:
2030 case -EPIPE: 2034 case -EPIPE:
2031 dprintk("RPC: %5u remote rpcbind unreachable: %d\n", 2035 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
2032 task->tk_pid, task->tk_status); 2036 task->tk_pid, task->tk_status);
@@ -2043,7 +2047,9 @@ call_bind_status(struct rpc_task *task)
2043 2047
2044 rpc_call_rpcerror(task, status); 2048 rpc_call_rpcerror(task, status);
2045 return; 2049 return;
2046 2050out_next:
2051 task->tk_action = call_connect;
2052 return;
2047retry_timeout: 2053retry_timeout:
2048 task->tk_status = 0; 2054 task->tk_status = 0;
2049 task->tk_action = call_bind; 2055 task->tk_action = call_bind;
@@ -2090,6 +2096,7 @@ call_connect(struct rpc_task *task)
2090static void 2096static void
2091call_connect_status(struct rpc_task *task) 2097call_connect_status(struct rpc_task *task)
2092{ 2098{
2099 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2093 struct rpc_clnt *clnt = task->tk_client; 2100 struct rpc_clnt *clnt = task->tk_client;
2094 int status = task->tk_status; 2101 int status = task->tk_status;
2095 2102
@@ -2099,8 +2106,17 @@ call_connect_status(struct rpc_task *task)
2099 } 2106 }
2100 2107
2101 dprint_status(task); 2108 dprint_status(task);
2102
2103 trace_rpc_connect_status(task); 2109 trace_rpc_connect_status(task);
2110
2111 if (task->tk_status == 0) {
2112 clnt->cl_stats->netreconn++;
2113 goto out_next;
2114 }
2115 if (xprt_connected(xprt)) {
2116 task->tk_status = 0;
2117 goto out_next;
2118 }
2119
2104 task->tk_status = 0; 2120 task->tk_status = 0;
2105 switch (status) { 2121 switch (status) {
2106 case -ECONNREFUSED: 2122 case -ECONNREFUSED:
@@ -2117,8 +2133,6 @@ call_connect_status(struct rpc_task *task)
2117 case -ENETDOWN: 2133 case -ENETDOWN:
2118 case -ENETUNREACH: 2134 case -ENETUNREACH:
2119 case -EHOSTUNREACH: 2135 case -EHOSTUNREACH:
2120 case -EADDRINUSE:
2121 case -ENOBUFS:
2122 case -EPIPE: 2136 case -EPIPE:
2123 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, 2137 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
2124 task->tk_rqstp->rq_connect_cookie); 2138 task->tk_rqstp->rq_connect_cookie);
@@ -2127,17 +2141,20 @@ call_connect_status(struct rpc_task *task)
2127 /* retry with existing socket, after a delay */ 2141 /* retry with existing socket, after a delay */
2128 rpc_delay(task, 3*HZ); 2142 rpc_delay(task, 3*HZ);
2129 /* fall through */ 2143 /* fall through */
2144 case -EADDRINUSE:
2130 case -ENOTCONN: 2145 case -ENOTCONN:
2131 case -EAGAIN: 2146 case -EAGAIN:
2132 case -ETIMEDOUT: 2147 case -ETIMEDOUT:
2133 goto out_retry; 2148 goto out_retry;
2134 case 0: 2149 case -ENOBUFS:
2135 clnt->cl_stats->netreconn++; 2150 rpc_delay(task, HZ >> 2);
2136 task->tk_action = call_transmit; 2151 goto out_retry;
2137 return;
2138 } 2152 }
2139 rpc_call_rpcerror(task, status); 2153 rpc_call_rpcerror(task, status);
2140 return; 2154 return;
2155out_next:
2156 task->tk_action = call_transmit;
2157 return;
2141out_retry: 2158out_retry:
2142 /* Check for timeouts before looping back to call_bind */ 2159 /* Check for timeouts before looping back to call_bind */
2143 task->tk_action = call_bind; 2160 task->tk_action = call_bind;
@@ -2365,7 +2382,7 @@ call_status(struct rpc_task *task)
2365 case -ECONNABORTED: 2382 case -ECONNABORTED:
2366 case -ENOTCONN: 2383 case -ENOTCONN:
2367 rpc_force_rebind(clnt); 2384 rpc_force_rebind(clnt);
2368 /* fall through */ 2385 break;
2369 case -EADDRINUSE: 2386 case -EADDRINUSE:
2370 rpc_delay(task, 3*HZ); 2387 rpc_delay(task, 3*HZ);
2371 /* fall through */ 2388 /* fall through */
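Note: in both call_bind_status() and call_connect_status(), -ENOBUFS moves out of the fatal bucket and into a delayed retry: the task sleeps a quarter second (HZ >> 2 jiffies) and tries again, on the theory that buffer exhaustion is transient. A toy classifier with the same shape; the HZ value here is illustrative:

#include <errno.h>
#include <stdio.h>

#define HZ 100	/* illustrative tick rate */

static const char *classify(int status)
{
	switch (status) {
	case -ENOBUFS:
		printf("rpc_delay(%d jiffies), then retry\n", HZ >> 2);
		return "retry";
	case -EAGAIN:
	case -ETIMEDOUT:
		return "retry";
	default:
		return "fail the RPC";
	}
}

int main(void)
{
	printf("-ENOBUFS   -> %s\n", classify(-ENOBUFS));
	printf("-EHOSTDOWN -> %s\n", classify(-EHOSTDOWN));
	return 0;
}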
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 783748dc5e6f..2e71f5455c6c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1408,13 +1408,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1408 status = -EBADMSG; 1408 status = -EBADMSG;
1409 goto out_dequeue; 1409 goto out_dequeue;
1410 } 1410 }
1411 if (task->tk_ops->rpc_call_prepare_transmit) {
1412 task->tk_ops->rpc_call_prepare_transmit(task,
1413 task->tk_calldata);
1414 status = task->tk_status;
1415 if (status < 0)
1416 goto out_dequeue;
1417 }
1418 if (RPC_SIGNALLED(task)) { 1411 if (RPC_SIGNALLED(task)) {
1419 status = -ERESTARTSYS; 1412 status = -ERESTARTSYS;
1420 goto out_dequeue; 1413 goto out_dequeue;
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d00913..0f1eaed1bd1b 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
75 tipc_set_node_id(net, node_id); 75 tipc_set_node_id(net, node_id);
76 } 76 }
77 tn->trial_addr = addr; 77 tn->trial_addr = addr;
78 tn->addr_trial_end = jiffies;
78 pr_info("32-bit node address hash set to %x\n", addr); 79 pr_info("32-bit node address hash set to %x\n", addr);
79} 80}
80 81
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 66d3a07bc571..c2c5c53cad22 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -106,8 +106,6 @@ struct tipc_stats {
106 * @transmitq: queue for sent, non-acked messages 106 * @transmitq: queue for sent, non-acked messages
107 * @backlogq: queue for messages waiting to be sent 107 * @backlogq: queue for messages waiting to be sent
108 * @snt_nxt: next sequence number to use for outbound messages 108 * @snt_nxt: next sequence number to use for outbound messages
109 * @prev_from: sequence number of most previous retransmission request
110 * @stale_limit: time when repeated identical retransmits must force link reset
111 * @ackers: # of peers that needs to ack each packet before it can be released 109 * @ackers: # of peers that needs to ack each packet before it can be released
112 * @acked: # last packet acked by a certain peer. Used for broadcast. 110 * @acked: # last packet acked by a certain peer. Used for broadcast.
113 * @rcv_nxt: next sequence number to expect for inbound messages 111 * @rcv_nxt: next sequence number to expect for inbound messages
@@ -164,9 +162,7 @@ struct tipc_link {
164 u16 limit; 162 u16 limit;
165 } backlog[5]; 163 } backlog[5];
166 u16 snd_nxt; 164 u16 snd_nxt;
167 u16 prev_from;
168 u16 window; 165 u16 window;
169 unsigned long stale_limit;
170 166
171 /* Reception */ 167 /* Reception */
172 u16 rcv_nxt; 168 u16 rcv_nxt;
@@ -1044,47 +1040,53 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
1044 * link_retransmit_failure() - Detect repeated retransmit failures 1040 * link_retransmit_failure() - Detect repeated retransmit failures
1045 * @l: tipc link sender 1041 * @l: tipc link sender
1046 * @r: tipc link receiver (= l in case of unicast) 1042 * @r: tipc link receiver (= l in case of unicast)
1047 * @from: seqno of the 1st packet in retransmit request
1048 * @rc: returned code 1043 * @rc: returned code
1049 * 1044 *
1050 * Return: true if the repeated retransmit failures happens, otherwise 1045 * Return: true if the repeated retransmit failures happens, otherwise
1051 * false 1046 * false
1052 */ 1047 */
1053static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, 1048static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1054 u16 from, int *rc) 1049 int *rc)
1055{ 1050{
1056 struct sk_buff *skb = skb_peek(&l->transmq); 1051 struct sk_buff *skb = skb_peek(&l->transmq);
1057 struct tipc_msg *hdr; 1052 struct tipc_msg *hdr;
1058 1053
1059 if (!skb) 1054 if (!skb)
1060 return false; 1055 return false;
1061 hdr = buf_msg(skb);
1062 1056
1063 /* Detect repeated retransmit failures on same packet */ 1057 if (!TIPC_SKB_CB(skb)->retr_cnt)
1064 if (r->prev_from != from) { 1058 return false;
1065 r->prev_from = from;
1066 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
1067 } else if (time_after(jiffies, r->stale_limit)) {
1068 pr_warn("Retransmission failure on link <%s>\n", l->name);
1069 link_print(l, "State of link ");
1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1071 msg_user(hdr), msg_type(hdr), msg_size(hdr),
1072 msg_errcode(hdr));
1073 pr_info("sqno %u, prev: %x, src: %x\n",
1074 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1075
1076 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1077 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1078 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1079 1059
1080 if (link_is_bc_sndlink(l)) 1060 if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1081 *rc = TIPC_LINK_DOWN_EVT; 1061 msecs_to_jiffies(r->tolerance)))
1062 return false;
1063
1064 hdr = buf_msg(skb);
1065 if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1066 return false;
1082 1067
1068 pr_warn("Retransmission failure on link <%s>\n", l->name);
1069 link_print(l, "State of link ");
1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1071 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1072 pr_info("sqno %u, prev: %x, dest: %x\n",
1073 msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1074 pr_info("retr_stamp %d, retr_cnt %d\n",
1075 jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1076 TIPC_SKB_CB(skb)->retr_cnt);
1077
1078 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1079 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1080 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1081
1082 if (link_is_bc_sndlink(l)) {
1083 r->state = LINK_RESET;
1084 *rc = TIPC_LINK_DOWN_EVT;
1085 } else {
1083 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1086 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1084 return true;
1085 } 1087 }
1086 1088
1087 return false; 1089 return true;
1088} 1090}
1089 1091
1090/* tipc_link_bc_retrans() - retransmit zero or more packets 1092/* tipc_link_bc_retrans() - retransmit zero or more packets
@@ -1110,7 +1112,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1110 1112
1111 trace_tipc_link_retrans(r, from, to, &l->transmq); 1113 trace_tipc_link_retrans(r, from, to, &l->transmq);
1112 1114
1113 if (link_retransmit_failure(l, r, from, &rc)) 1115 if (link_retransmit_failure(l, r, &rc))
1114 return rc; 1116 return rc;
1115 1117
1116 skb_queue_walk(&l->transmq, skb) { 1118 skb_queue_walk(&l->transmq, skb) {
@@ -1119,11 +1121,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1119 continue; 1121 continue;
1120 if (more(msg_seqno(hdr), to)) 1122 if (more(msg_seqno(hdr), to))
1121 break; 1123 break;
1122 if (link_is_bc_sndlink(l)) { 1124
1123 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1125 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1124 continue; 1126 continue;
1125 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 1127 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1126 }
1127 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC); 1128 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
1128 if (!_skb) 1129 if (!_skb)
1129 return 0; 1130 return 0;
@@ -1133,6 +1134,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1133 _skb->priority = TC_PRIO_CONTROL; 1134 _skb->priority = TC_PRIO_CONTROL;
1134 __skb_queue_tail(xmitq, _skb); 1135 __skb_queue_tail(xmitq, _skb);
1135 l->stats.retransmitted++; 1136 l->stats.retransmitted++;
1137
1138 /* Increase actual retrans counter & mark first time */
1139 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1140 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1136 } 1141 }
1137 return 0; 1142 return 0;
1138} 1143}
@@ -1357,12 +1362,10 @@ static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1357 struct tipc_msg *hdr; 1362 struct tipc_msg *hdr;
1358 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 1363 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1359 u16 ack = l->rcv_nxt - 1; 1364 u16 ack = l->rcv_nxt - 1;
1365 bool passed = false;
1360 u16 seqno, n = 0; 1366 u16 seqno, n = 0;
1361 int rc = 0; 1367 int rc = 0;
1362 1368
1363 if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
1364 return rc;
1365
1366 skb_queue_walk_safe(&l->transmq, skb, tmp) { 1369 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1367 seqno = buf_seqno(skb); 1370 seqno = buf_seqno(skb);
1368 1371
@@ -1372,12 +1375,17 @@ next_gap_ack:
1372 __skb_unlink(skb, &l->transmq); 1375 __skb_unlink(skb, &l->transmq);
1373 kfree_skb(skb); 1376 kfree_skb(skb);
1374 } else if (less_eq(seqno, acked + gap)) { 1377 } else if (less_eq(seqno, acked + gap)) {
1375 /* retransmit skb */ 1378 /* First, check if repeated retrans failures occur */
1379 if (!passed && link_retransmit_failure(l, l, &rc))
1380 return rc;
1381 passed = true;
1382
1383 /* retransmit skb if unrestricted */
1376 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1384 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1377 continue; 1385 continue;
1378 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; 1386 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1379 1387 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
1380 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); 1388 GFP_ATOMIC);
1381 if (!_skb) 1389 if (!_skb)
1382 continue; 1390 continue;
1383 hdr = buf_msg(_skb); 1391 hdr = buf_msg(_skb);
@@ -1386,6 +1394,10 @@ next_gap_ack:
1386 _skb->priority = TC_PRIO_CONTROL; 1394 _skb->priority = TC_PRIO_CONTROL;
1387 __skb_queue_tail(xmitq, _skb); 1395 __skb_queue_tail(xmitq, _skb);
1388 l->stats.retransmitted++; 1396 l->stats.retransmitted++;
1397
1398 /* Increase actual retrans counter & mark first time */
1399 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1400 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1389 } else { 1401 } else {
1390 /* retry with Gap ACK blocks if any */ 1402 /* retry with Gap ACK blocks if any */
1391 if (!ga || n >= ga->gack_cnt) 1403 if (!ga || n >= ga->gack_cnt)
@@ -2577,7 +2589,7 @@ int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2577 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); 2589 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2578 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); 2590 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2579 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); 2591 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2580 i += scnprintf(buf + i, sz - i, " %u", l->prev_from); 2592 i += scnprintf(buf + i, sz - i, " %u", 0);
2581 i += scnprintf(buf + i, sz - i, " %u", 0); 2593 i += scnprintf(buf + i, sz - i, " %u", 0);
2582 i += scnprintf(buf + i, sz - i, " %u", l->acked); 2594 i += scnprintf(buf + i, sz - i, " %u", l->acked);
2583 2595
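Note: the tipc rework replaces the per-link prev_from/stale_limit heuristic with per-packet state, carried in the two tipc_skb_cb fields added in the msg.h hunk just below: retr_cnt counts retransmissions and retr_stamp records the first one, so link_retransmit_failure() trips only when the head packet has actually been retransmitted and that first attempt is older than the peer's tolerance. A mocked-jiffies model of the test:

#include <stdio.h>
#include <stdbool.h>

struct pkt {
	unsigned int retr_cnt;		/* like TIPC_SKB_CB(skb)->retr_cnt */
	unsigned long retr_stamp;	/* like ...->retr_stamp */
};

static unsigned long jiffies;	/* mocked tick counter */

static void retransmit(struct pkt *p)
{
	if (!p->retr_cnt++)
		p->retr_stamp = jiffies;	/* stamp the first attempt only */
}

static bool retransmit_failure(const struct pkt *head, unsigned long tolerance)
{
	if (!head->retr_cnt)
		return false;	/* never retransmitted: cannot be stale */
	return jiffies > head->retr_stamp + tolerance;
}

int main(void)
{
	struct pkt head_pkt = { 0, 0 };

	retransmit(&head_pkt);		/* first retransmit at t = 0 */
	jiffies = 150;
	retransmit(&head_pkt);		/* stamp is not refreshed */
	printf("failed=%d\n", retransmit_failure(&head_pkt, 100));	/* 1 */
	return 0;
}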
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index da509f0eb9ca..d7ebc9e955f6 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -102,13 +102,15 @@ struct plist;
102#define TIPC_MEDIA_INFO_OFFSET 5 102#define TIPC_MEDIA_INFO_OFFSET 5
103 103
104struct tipc_skb_cb { 104struct tipc_skb_cb {
105 u32 bytes_read;
106 u32 orig_member;
107 struct sk_buff *tail; 105 struct sk_buff *tail;
108 unsigned long nxt_retr; 106 unsigned long nxt_retr;
109 bool validated; 107 unsigned long retr_stamp;
108 u32 bytes_read;
109 u32 orig_member;
110 u16 chain_imp; 110 u16 chain_imp;
111 u16 ackers; 111 u16 ackers;
112 u16 retr_cnt;
113 bool validated;
112}; 114};
113 115
114#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 116#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 7c0b2b778703..43922d86e510 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -373,9 +373,9 @@ static int tls_push_data(struct sock *sk,
373 struct tls_context *tls_ctx = tls_get_ctx(sk); 373 struct tls_context *tls_ctx = tls_get_ctx(sk);
374 struct tls_prot_info *prot = &tls_ctx->prot_info; 374 struct tls_prot_info *prot = &tls_ctx->prot_info;
375 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); 375 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
376 int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
377 int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); 376 int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
378 struct tls_record_info *record = ctx->open_record; 377 struct tls_record_info *record = ctx->open_record;
378 int tls_push_record_flags;
379 struct page_frag *pfrag; 379 struct page_frag *pfrag;
380 size_t orig_size = size; 380 size_t orig_size = size;
381 u32 max_open_record_len; 381 u32 max_open_record_len;
@@ -390,6 +390,9 @@ static int tls_push_data(struct sock *sk,
390 if (sk->sk_err) 390 if (sk->sk_err)
391 return -sk->sk_err; 391 return -sk->sk_err;
392 392
393 flags |= MSG_SENDPAGE_DECRYPTED;
394 tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
395
393 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 396 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
394 if (tls_is_partially_sent_record(tls_ctx)) { 397 if (tls_is_partially_sent_record(tls_ctx)) {
395 rc = tls_push_partial_record(sk, tls_ctx, flags); 398 rc = tls_push_partial_record(sk, tls_ctx, flags);
@@ -576,7 +579,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
576 gfp_t sk_allocation = sk->sk_allocation; 579 gfp_t sk_allocation = sk->sk_allocation;
577 580
578 sk->sk_allocation = GFP_ATOMIC; 581 sk->sk_allocation = GFP_ATOMIC;
579 tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL); 582 tls_push_partial_record(sk, ctx,
583 MSG_DONTWAIT | MSG_NOSIGNAL |
584 MSG_SENDPAGE_DECRYPTED);
580 sk->sk_allocation = sk_allocation; 585 sk->sk_allocation = sk_allocation;
581 } 586 }
582} 587}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 9cbbae606ced..43252a801c3f 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -308,6 +308,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
308 if (free_ctx) 308 if (free_ctx)
309 icsk->icsk_ulp_data = NULL; 309 icsk->icsk_ulp_data = NULL;
310 sk->sk_prot = ctx->sk_proto; 310 sk->sk_prot = ctx->sk_proto;
311 if (sk->sk_write_space == tls_write_space)
312 sk->sk_write_space = ctx->sk_write_space;
311 write_unlock_bh(&sk->sk_callback_lock); 313 write_unlock_bh(&sk->sk_callback_lock);
312 release_sock(sk); 314 release_sock(sk);
313 if (ctx->tx_conf == TLS_SW) 315 if (ctx->tx_conf == TLS_SW)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4831ad745f91..327479ce69f5 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)
2788 2788
2789 /* When last_request->processed becomes true this will be rescheduled */ 2789 /* When last_request->processed becomes true this will be rescheduled */
2790 if (lr && !lr->processed) { 2790 if (lr && !lr->processed) {
2791 reg_process_hint(lr); 2791 pr_debug("Pending regulatory request, waiting for it to be processed...\n");
2792 return; 2792 return;
2793 } 2793 }
2794 2794
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0e35b7b9e35..e74837824cea 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -233,25 +233,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
233 233
234 switch (params->cipher) { 234 switch (params->cipher) {
235 case WLAN_CIPHER_SUITE_TKIP: 235 case WLAN_CIPHER_SUITE_TKIP:
236 /* Extended Key ID can only be used with CCMP/GCMP ciphers */
237 if ((pairwise && key_idx) ||
238 params->mode != NL80211_KEY_RX_TX)
239 return -EINVAL;
240 break;
236 case WLAN_CIPHER_SUITE_CCMP: 241 case WLAN_CIPHER_SUITE_CCMP:
237 case WLAN_CIPHER_SUITE_CCMP_256: 242 case WLAN_CIPHER_SUITE_CCMP_256:
238 case WLAN_CIPHER_SUITE_GCMP: 243 case WLAN_CIPHER_SUITE_GCMP:
239 case WLAN_CIPHER_SUITE_GCMP_256: 244 case WLAN_CIPHER_SUITE_GCMP_256:
240 /* IEEE802.11-2016 allows only 0 and - when using Extended Key 245 /* IEEE802.11-2016 allows only 0 and - when supporting
241 * ID - 1 as index for pairwise keys. 246 * Extended Key ID - 1 as index for pairwise keys.
242 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when 247 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
243 * the driver supports Extended Key ID. 248 * the driver supports Extended Key ID.
244 * @NL80211_KEY_SET_TX can't be set when installing and 249 * @NL80211_KEY_SET_TX can't be set when installing and
245 * validating a key. 250 * validating a key.
246 */ 251 */
247 if (params->mode == NL80211_KEY_NO_TX) { 252 if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
248 if (!wiphy_ext_feature_isset(&rdev->wiphy, 253 params->mode == NL80211_KEY_SET_TX)
249 NL80211_EXT_FEATURE_EXT_KEY_ID)) 254 return -EINVAL;
250 return -EINVAL; 255 if (wiphy_ext_feature_isset(&rdev->wiphy,
251 else if (!pairwise || key_idx < 0 || key_idx > 1) 256 NL80211_EXT_FEATURE_EXT_KEY_ID)) {
257 if (pairwise && (key_idx < 0 || key_idx > 1))
252 return -EINVAL; 258 return -EINVAL;
253 } else if ((pairwise && key_idx) || 259 } else if (pairwise && key_idx) {
254 params->mode == NL80211_KEY_SET_TX) {
255 return -EINVAL; 260 return -EINVAL;
256 } 261 }
257 break; 262 break;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 83de74ca729a..688aac7a6943 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -365,7 +365,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
365 umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL); 365 umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
366 if (!umem->pages) { 366 if (!umem->pages) {
367 err = -ENOMEM; 367 err = -ENOMEM;
368 goto out_account; 368 goto out_pin;
369 } 369 }
370 370
371 for (i = 0; i < umem->npgs; i++) 371 for (i = 0; i < umem->npgs; i++)
@@ -373,6 +373,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
373 373
374 return 0; 374 return 0;
375 375
376out_pin:
377 xdp_umem_unpin_pages(umem);
376out_account: 378out_account:
377 xdp_umem_unaccount_pages(umem); 379 xdp_umem_unaccount_pages(umem);
378 return err; 380 return err;
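Note: the xdp_umem fix is a classic unwind-ordering bug: failing to allocate umem->pages used to jump straight to out_account, leaking the pinned pages. With the new out_pin label the error path releases resources in reverse order of acquisition. The pattern, modeled with printf stand-ins for the kernel helpers:

#include <stdio.h>

static int umem_reg(int fail_pin, int fail_pages)
{
	int err;

	printf("account pages\n");
	if (fail_pin) {
		err = -1;
		goto out_account;
	}
	printf("pin pages\n");
	if (fail_pages) {
		err = -12;	/* -ENOMEM from the pages array */
		goto out_pin;	/* previously jumped to out_account: leak */
	}
	return 0;

out_pin:
	printf("unpin pages\n");
out_account:
	printf("unaccount pages\n");
	return err;
}

int main(void)
{
	return umem_reg(0, 1) ? 1 : 0;
}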
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8ca637a72697..ec94f5795ea4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3269,7 +3269,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3269 struct flowi4 *fl4 = &fl->u.ip4; 3269 struct flowi4 *fl4 = &fl->u.ip4;
3270 int oif = 0; 3270 int oif = 0;
3271 3271
3272 if (skb_dst(skb)) 3272 if (skb_dst(skb) && skb_dst(skb)->dev)
3273 oif = skb_dst(skb)->dev->ifindex; 3273 oif = skb_dst(skb)->dev->ifindex;
3274 3274
3275 memset(fl4, 0, sizeof(struct flowi4)); 3275 memset(fl4, 0, sizeof(struct flowi4));
@@ -3387,7 +3387,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3387 3387
3388 nexthdr = nh[nhoff]; 3388 nexthdr = nh[nhoff];
3389 3389
3390 if (skb_dst(skb)) 3390 if (skb_dst(skb) && skb_dst(skb)->dev)
3391 oif = skb_dst(skb)->dev->ifindex; 3391 oif = skb_dst(skb)->dev->ifindex;
3392 3392
3393 memset(fl6, 0, sizeof(struct flowi6)); 3393 memset(fl6, 0, sizeof(struct flowi6));
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 7325f382dbf4..957b9e3e1492 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,7 +595,7 @@ struct key *request_key_and_link(struct key_type *type,
595 595
596 key = check_cached_key(&ctx); 596 key = check_cached_key(&ctx);
597 if (key) 597 if (key)
598 return key; 598 goto error_free;
599 599
600 /* search all the process keyrings for a key */ 600 /* search all the process keyrings for a key */
601 rcu_read_lock(); 601 rcu_read_lock();
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 7737b2670064..6d9592f0ae1d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
1835 if (cptr->type == USER_CLIENT) { 1835 if (cptr->type == USER_CLIENT) {
1836 info->input_pool = cptr->data.user.fifo_pool_size; 1836 info->input_pool = cptr->data.user.fifo_pool_size;
1837 info->input_free = info->input_pool; 1837 info->input_free = info->input_pool;
1838 if (cptr->data.user.fifo) 1838 info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
1839 info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
1840 } else { 1839 } else {
1841 info->input_pool = 0; 1840 info->input_pool = 0;
1842 info->input_free = 0; 1841 info->input_free = 0;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index ea69261f269a..eaaa8b5830bb 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
263 263
264 return 0; 264 return 0;
265} 265}
266
267/* get the number of unused cells safely */
268int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
269{
270 unsigned long flags;
271 int cells;
272
273 if (!f)
274 return 0;
275
276 snd_use_lock_use(&f->use_lock);
277 spin_lock_irqsave(&f->lock, flags);
278 cells = snd_seq_unused_cells(f->pool);
279 spin_unlock_irqrestore(&f->lock, flags);
280 snd_use_lock_free(&f->use_lock);
281 return cells;
282}
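
snd_seq_fifo_unused_cells() wraps the raw pool query in the FIFO's use-lock and spinlock and tolerates a NULL FIFO, so callers such as the ioctl above no longer race against pool resizing. A user-space analogue of the pattern, with a hypothetical struct layout:

        #include <pthread.h>
        #include <stddef.h>

        struct fifo {
                pthread_mutex_t lock;
                int size;
                int used;
        };

        /* NULL-safe snapshot taken under the lock, so a concurrent
         * writer can never be observed mid-update. */
        static int fifo_unused_cells(struct fifo *f)
        {
                int cells;

                if (!f)
                        return 0;

                pthread_mutex_lock(&f->lock);
                cells = f->size - f->used;
                pthread_mutex_unlock(&f->lock);
                return cells;
        }
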
diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
index edc68743943d..b56a7b897c9c 100644
--- a/sound/core/seq/seq_fifo.h
+++ b/sound/core/seq/seq_fifo.h
@@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
53/* resize pool in fifo */ 53/* resize pool in fifo */
54int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize); 54int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
55 55
56/* get the number of unused cells safely */
57int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
56 58
57#endif 59#endif
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9ea39348cdf5..7c6d1c277d4d 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -248,7 +248,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
248 unsigned int channels = params_channels(hw_params); 248 unsigned int channels = params_channels(hw_params);
249 249
250 mutex_lock(&oxfw->mutex); 250 mutex_lock(&oxfw->mutex);
251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
252 rate, channels); 252 rate, channels);
253 if (err >= 0) 253 if (err >= 0)
254 ++oxfw->substreams_count; 254 ++oxfw->substreams_count;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0d51823d7270..6d1fb7c11f17 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), 1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), 1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), 1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ), 1179 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
1179 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), 1180 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
1180 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), 1181 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 14298ef45b21..968d3caab6ac 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -611,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
611 611
612/* update LED status via GPIO */ 612/* update LED status via GPIO */
613static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask, 613static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
614 bool enabled) 614 bool led_on)
615{ 615{
616 struct conexant_spec *spec = codec->spec; 616 struct conexant_spec *spec = codec->spec;
617 unsigned int oldval = spec->gpio_led; 617 unsigned int oldval = spec->gpio_led;
618 618
619 if (spec->mute_led_polarity) 619 if (spec->mute_led_polarity)
620 enabled = !enabled; 620 led_on = !led_on;
621 621
622 if (enabled) 622 if (led_on)
623 spec->gpio_led &= ~mask;
624 else
625 spec->gpio_led |= mask; 623 spec->gpio_led |= mask;
624 else
625 spec->gpio_led &= ~mask;
626 codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
627 mask, led_on, spec->gpio_led);
626 if (spec->gpio_led != oldval) 628 if (spec->gpio_led != oldval)
627 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 629 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
628 spec->gpio_led); 630 spec->gpio_led);
@@ -633,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
633{ 635{
634 struct hda_codec *codec = private_data; 636 struct hda_codec *codec = private_data;
635 struct conexant_spec *spec = codec->spec; 637 struct conexant_spec *spec = codec->spec;
636 638 /* muted -> LED on */
637 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); 639 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
638} 640}
639 641
640/* turn on/off mic-mute LED via GPIO per capture hook */ 642/* turn on/off mic-mute LED via GPIO per capture hook */
@@ -656,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
656 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 }, 658 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
657 {} 659 {}
658 }; 660 };
659 codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
660 661
661 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 662 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
662 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook; 663 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
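
The Conexant fix corrects the set/clear sense of the LED mask and inverts the mute state at the hook, since a muted output should light the LED. A small sketch of a mask update with a polarity quirk (the hardware write is stubbed; names are hypothetical):

        #include <stdbool.h>
        #include <stdio.h>

        static unsigned int gpio_shadow;

        static void update_led(unsigned int mask, bool led_on, bool inverted)
        {
                unsigned int old = gpio_shadow;

                if (inverted)
                        led_on = !led_on;

                if (led_on)
                        gpio_shadow |= mask;    /* bit set == LED lit */
                else
                        gpio_shadow &= ~mask;

                if (gpio_shadow != old)
                        printf("write GPIO data 0x%x\n", gpio_shadow); /* hw write stub */
        }
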
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 2c03e0f6bf72..f70211e6b174 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6,
550 line6pcm->volume_monitor = 255; 550 line6pcm->volume_monitor = 255;
551 line6pcm->line6 = line6; 551 line6pcm->line6 = line6;
552 552
553 spin_lock_init(&line6pcm->out.lock);
554 spin_lock_init(&line6pcm->in.lock);
555 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
556
557 line6->line6pcm = line6pcm;
558
559 pcm->private_data = line6pcm;
560 pcm->private_free = line6_cleanup_pcm;
561
553 line6pcm->max_packet_size_in = 562 line6pcm->max_packet_size_in =
554 usb_maxpacket(line6->usbdev, 563 usb_maxpacket(line6->usbdev,
555 usb_rcvisocpipe(line6->usbdev, ep_read), 0); 564 usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -562,15 +571,6 @@ int line6_init_pcm(struct usb_line6 *line6,
562 return -EINVAL; 571 return -EINVAL;
563 } 572 }
564 573
565 spin_lock_init(&line6pcm->out.lock);
566 spin_lock_init(&line6pcm->in.lock);
567 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
568
569 line6->line6pcm = line6pcm;
570
571 pcm->private_data = line6pcm;
572 pcm->private_free = line6_cleanup_pcm;
573
574 err = line6_create_audio_out_urbs(line6pcm); 574 err = line6_create_audio_out_urbs(line6pcm);
575 if (err < 0) 575 if (err < 0)
576 return err; 576 return err;
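
Moving the spinlock initialization and the private_free registration ahead of the first error return means an early failure leaves a fully tear-down-able object rather than one with uninitialized locks and no destructor. A compact sketch of that ordering rule (all names hypothetical):

        #include <errno.h>
        #include <stdlib.h>
        #include <string.h>

        struct pcm_priv {
                int lock_ready;                 /* stands in for spin_lock_init() */
                void (*private_free)(struct pcm_priv *);
        };

        static void priv_free(struct pcm_priv *p)
        {
                free(p);
        }

        static int pcm_init(struct pcm_priv *p, int probe_ok)
        {
                memset(p, 0, sizeof(*p));
                p->lock_ready = 1;              /* initialize state first ... */
                p->private_free = priv_free;    /* ... and register the destructor */

                if (!probe_ok)
                        return -EINVAL;         /* safe: destructor already hooked up */

                return 0;
        }
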
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index b5927c3d5bc0..eceab19766db 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -739,7 +739,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
739 struct uac_mixer_unit_descriptor *desc) 739 struct uac_mixer_unit_descriptor *desc)
740{ 740{
741 int mu_channels; 741 int mu_channels;
742 void *c;
743 742
744 if (desc->bLength < sizeof(*desc)) 743 if (desc->bLength < sizeof(*desc))
745 return -EINVAL; 744 return -EINVAL;
@@ -762,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
762 break; 761 break;
763 } 762 }
764 763
765 if (!mu_channels)
766 return 0;
767
768 c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
769 if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
770 return 0; /* no bmControls -> skip */
771
772 return mu_channels; 764 return mu_channels;
773} 765}
774 766
@@ -2009,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
2009 * Mixer Unit 2001 * Mixer Unit
2010 */ 2002 */
2011 2003
2004/* check whether the given in/out overflows bmMixerControls matrix */
2005static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
2006 int protocol, int num_ins, int num_outs)
2007{
2008 u8 *hdr = (u8 *)desc;
2009 u8 *c = uac_mixer_unit_bmControls(desc, protocol);
2010 size_t rest; /* remaining bytes after bmMixerControls */
2011
2012 switch (protocol) {
2013 case UAC_VERSION_1:
2014 default:
2015 rest = 1; /* iMixer */
2016 break;
2017 case UAC_VERSION_2:
2018 rest = 2; /* bmControls + iMixer */
2019 break;
2020 case UAC_VERSION_3:
2021 rest = 6; /* bmControls + wMixerDescrStr */
2022 break;
2023 }
2024
2025 /* overflow? */
2026 return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
2027}
2028
2012/* 2029/*
2013 * build a mixer unit control 2030 * build a mixer unit control
2014 * 2031 *
@@ -2137,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2137 if (err < 0) 2154 if (err < 0)
2138 return err; 2155 return err;
2139 num_ins += iterm.channels; 2156 num_ins += iterm.channels;
2157 if (mixer_bitmap_overflow(desc, state->mixer->protocol,
2158 num_ins, num_outs))
2159 break;
2140 for (; ich < num_ins; ich++) { 2160 for (; ich < num_ins; ich++) {
2141 int och, ich_has_controls = 0; 2161 int och, ich_has_controls = 0;
2142 2162
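
mixer_bitmap_overflow() stops the parser from walking past the end of a possibly malicious descriptor: the bmMixerControls matrix needs ceil(num_ins * num_outs / 8) bytes plus a protocol-dependent tail, and all of it must fit within bLength. A stand-alone sketch of the same length check (the offset parameter is an assumption of this sketch):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        /* desc[0] is the length-prefixed bLength field, as in USB descriptors. */
        static bool bitmap_fits(const uint8_t *desc, size_t bitmap_off,
                                int num_ins, int num_outs, size_t tail)
        {
                size_t bits = (size_t)num_ins * (size_t)num_outs;
                size_t need = bitmap_off + (bits + 7) / 8 + tail;

                return need <= desc[0];
        }
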
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 199fa157a411..27dcb3743690 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
1155{ 1155{
1156 struct usb_mixer_interface *mixer; 1156 struct usb_mixer_interface *mixer;
1157 struct usb_mixer_elem_info *cval; 1157 struct usb_mixer_elem_info *cval;
1158 int unitid = 12; /* SamleRate ExtensionUnit ID */ 1158 int unitid = 12; /* SampleRate ExtensionUnit ID */
1159 1159
1160 list_for_each_entry(mixer, &chip->mixer_list, list) { 1160 list_for_each_entry(mixer, &chip->mixer_list, list) {
1161 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); 1161 if (mixer->id_elems[unitid]) {
1162 if (cval) { 1162 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, 1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
1164 cval->control << 8, 1164 cval->control << 8,
1165 samplerate_id); 1165 samplerate_id);
1166 snd_usb_mixer_notify_id(mixer, unitid); 1166 snd_usb_mixer_notify_id(mixer, unitid);
1167 break;
1167 } 1168 }
1168 break;
1169 } 1169 }
1170} 1170}
1171 1171
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 75b96929f76c..e4bbf79de956 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
339 ep = 0x81; 339 ep = 0x81;
340 ifnum = 2; 340 ifnum = 2;
341 goto add_sync_ep_from_ifnum; 341 goto add_sync_ep_from_ifnum;
342 case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
342 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ 343 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
343 ep = 0x81; 344 ep = 0x81;
344 ifnum = 1; 345 ifnum = 1;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 5215e0870bcb..6a71324be628 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -204,7 +204,11 @@ int do_pin_fd(int fd, const char *name)
204 if (err) 204 if (err)
205 return err; 205 return err;
206 206
207 return bpf_obj_pin(fd, name); 207 err = bpf_obj_pin(fd, name);
208 if (err)
209 p_err("can't pin the object (%s): %s", name, strerror(errno));
210
211 return err;
208} 212}
209 213
210int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) 214int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
@@ -237,7 +241,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
237 241
238 fd = get_fd_by_id(id); 242 fd = get_fd_by_id(id);
239 if (fd < 0) { 243 if (fd < 0) {
240 p_err("can't get prog by id (%u): %s", id, strerror(errno)); 244 p_err("can't open object by id (%u): %s", id, strerror(errno));
241 return -1; 245 return -1;
242 } 246 }
243 247
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 66f04a4846a5..43fdbbfe41bb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
363 if (fd < 0) 363 if (fd < 0)
364 return -1; 364 return -1;
365 365
366 return show_prog(fd); 366 err = show_prog(fd);
367 close(fd);
368 return err;
367 } 369 }
368 370
369 if (argc) 371 if (argc)
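
Capturing show_prog()'s result before close() plugs a file-descriptor leak on the single-program path. The general shape, sketched with a stand-in for the real dump routine:

        #include <stdio.h>
        #include <unistd.h>

        static int show(int fd)
        {
                printf("would dump object on fd %d\n", fd);     /* stand-in */
                return 0;
        }

        static int show_one(int fd)
        {
                int err;

                err = show(fd); /* take the result first ... */
                close(fd);      /* ... so the fd is released on every return path */
                return err;
        }
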
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index f5597503c771..e9ef4ca6a655 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
809 int sn_offset = 0; 809 int sn_offset = 0;
810 int error = 0; 810 int error = 0;
811 char *buffer; 811 char *buffer;
812 struct hv_kvp_ipaddr_value *ip_buffer; 812 struct hv_kvp_ipaddr_value *ip_buffer = NULL;
813 char cidr_mask[5]; /* /xyz */ 813 char cidr_mask[5]; /* /xyz */
814 int weight; 814 int weight;
815 int i; 815 int i;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4e455018da65..a5aa7d3ac6a1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
1466 * If no cookie has been set yet, generate a new cookie. Once 1466 * If no cookie has been set yet, generate a new cookie. Once
1467 * generated, the socket cookie remains stable for the life of the 1467 * generated, the socket cookie remains stable for the life of the
1468 * socket. This helper can be useful for monitoring per socket 1468 * socket. This helper can be useful for monitoring per socket
1469 * networking traffic statistics as it provides a unique socket 1469 * networking traffic statistics as it provides a global socket
1470 * identifier per namespace. 1470 * identifier that can be assumed unique.
1471 * Return 1471 * Return
1472 * A 8-byte long non-decreasing number on success, or 0 if the 1472 * A 8-byte long non-decreasing number on success, or 0 if the
1473 * socket field is missing inside *skb*. 1473 * socket field is missing inside *skb*.
@@ -1571,8 +1571,11 @@ union bpf_attr {
1571 * but this is only implemented for native XDP (with driver 1571 * but this is only implemented for native XDP (with driver
1572 * support) as of this writing). 1572 * support) as of this writing).
1573 * 1573 *
1574 * All values for *flags* are reserved for future usage, and must 1574 * The lower two bits of *flags* are used as the return code if
1575 * be left at zero. 1575 * the map lookup fails. This is so that the return value can be
1576 * one of the XDP program return codes up to XDP_TX, as chosen by
1577 * the caller. Any higher bits in the *flags* argument must be
1578 * unset.
1576 * 1579 *
1577 * When used to redirect packets to net devices, this helper 1580 * When used to redirect packets to net devices, this helper
1578 * provides a high performance increase over **bpf_redirect**\ (). 1581 * provides a high performance increase over **bpf_redirect**\ ().
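
With this documentation change, the lower two bits of *flags* pick the value bpf_redirect_map() returns on a lookup miss, letting a program fall back to XDP_PASS rather than aborting. A hedged sketch of an XDP program using that; the BTF-style map definition and the key choice are assumptions of the sketch, not part of the patch:

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        struct {
                __uint(type, BPF_MAP_TYPE_DEVMAP);
                __uint(max_entries, 64);
                __type(key, __u32);
                __type(value, __u32);
        } tx_ports SEC(".maps");

        SEC("xdp")
        int redirect_or_pass(struct xdp_md *ctx)
        {
                /* Low flag bits = return code on map-lookup miss, so a
                 * missing entry yields XDP_PASS instead of a drop. */
                return bpf_redirect_map(&tx_ports, ctx->ingress_ifindex, XDP_PASS);
        }

        char _license[] SEC("license") = "GPL";
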
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2586b6cb8f34..2b57d7ea7836 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -182,7 +182,6 @@ struct bpf_program {
182 bpf_program_clear_priv_t clear_priv; 182 bpf_program_clear_priv_t clear_priv;
183 183
184 enum bpf_attach_type expected_attach_type; 184 enum bpf_attach_type expected_attach_type;
185 int btf_fd;
186 void *func_info; 185 void *func_info;
187 __u32 func_info_rec_size; 186 __u32 func_info_rec_size;
188 __u32 func_info_cnt; 187 __u32 func_info_cnt;
@@ -313,7 +312,6 @@ void bpf_program__unload(struct bpf_program *prog)
313 prog->instances.nr = -1; 312 prog->instances.nr = -1;
314 zfree(&prog->instances.fds); 313 zfree(&prog->instances.fds);
315 314
316 zclose(prog->btf_fd);
317 zfree(&prog->func_info); 315 zfree(&prog->func_info);
318 zfree(&prog->line_info); 316 zfree(&prog->line_info);
319} 317}
@@ -392,7 +390,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
392 prog->instances.fds = NULL; 390 prog->instances.fds = NULL;
393 prog->instances.nr = -1; 391 prog->instances.nr = -1;
394 prog->type = BPF_PROG_TYPE_UNSPEC; 392 prog->type = BPF_PROG_TYPE_UNSPEC;
395 prog->btf_fd = -1;
396 393
397 return 0; 394 return 0;
398errout: 395errout:
@@ -2288,9 +2285,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2288 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); 2285 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2289 } 2286 }
2290 2287
2291 if (!insn_offset)
2292 prog->btf_fd = btf__fd(obj->btf);
2293
2294 return 0; 2288 return 0;
2295} 2289}
2296 2290
@@ -2463,7 +2457,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2463 char *cp, errmsg[STRERR_BUFSIZE]; 2457 char *cp, errmsg[STRERR_BUFSIZE];
2464 int log_buf_size = BPF_LOG_BUF_SIZE; 2458 int log_buf_size = BPF_LOG_BUF_SIZE;
2465 char *log_buf; 2459 char *log_buf;
2466 int ret; 2460 int btf_fd, ret;
2467 2461
2468 if (!insns || !insns_cnt) 2462 if (!insns || !insns_cnt)
2469 return -EINVAL; 2463 return -EINVAL;
@@ -2478,7 +2472,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2478 load_attr.license = license; 2472 load_attr.license = license;
2479 load_attr.kern_version = kern_version; 2473 load_attr.kern_version = kern_version;
2480 load_attr.prog_ifindex = prog->prog_ifindex; 2474 load_attr.prog_ifindex = prog->prog_ifindex;
2481 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; 2475 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
2476 if (prog->obj->btf_ext)
2477 btf_fd = bpf_object__btf_fd(prog->obj);
2478 else
2479 btf_fd = -1;
2480 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
2482 load_attr.func_info = prog->func_info; 2481 load_attr.func_info = prog->func_info;
2483 load_attr.func_info_rec_size = prog->func_info_rec_size; 2482 load_attr.func_info_rec_size = prog->func_info_rec_size;
2484 load_attr.func_info_cnt = prog->func_info_cnt; 2483 load_attr.func_info_cnt = prog->func_info_cnt;
@@ -5000,13 +4999,15 @@ int libbpf_num_possible_cpus(void)
5000 static const char *fcpu = "/sys/devices/system/cpu/possible"; 4999 static const char *fcpu = "/sys/devices/system/cpu/possible";
5001 int len = 0, n = 0, il = 0, ir = 0; 5000 int len = 0, n = 0, il = 0, ir = 0;
5002 unsigned int start = 0, end = 0; 5001 unsigned int start = 0, end = 0;
5002 int tmp_cpus = 0;
5003 static int cpus; 5003 static int cpus;
5004 char buf[128]; 5004 char buf[128];
5005 int error = 0; 5005 int error = 0;
5006 int fd = -1; 5006 int fd = -1;
5007 5007
5008 if (cpus > 0) 5008 tmp_cpus = READ_ONCE(cpus);
5009 return cpus; 5009 if (tmp_cpus > 0)
5010 return tmp_cpus;
5010 5011
5011 fd = open(fcpu, O_RDONLY); 5012 fd = open(fcpu, O_RDONLY);
5012 if (fd < 0) { 5013 if (fd < 0) {
@@ -5029,7 +5030,7 @@ int libbpf_num_possible_cpus(void)
5029 } 5030 }
5030 buf[len] = '\0'; 5031 buf[len] = '\0';
5031 5032
5032 for (ir = 0, cpus = 0; ir <= len; ir++) { 5033 for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
5033 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ 5034 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
5034 if (buf[ir] == ',' || buf[ir] == '\0') { 5035 if (buf[ir] == ',' || buf[ir] == '\0') {
5035 buf[ir] = '\0'; 5036 buf[ir] = '\0';
@@ -5041,13 +5042,15 @@ int libbpf_num_possible_cpus(void)
5041 } else if (n == 1) { 5042 } else if (n == 1) {
5042 end = start; 5043 end = start;
5043 } 5044 }
5044 cpus += end - start + 1; 5045 tmp_cpus += end - start + 1;
5045 il = ir + 1; 5046 il = ir + 1;
5046 } 5047 }
5047 } 5048 }
5048 if (cpus <= 0) { 5049 if (tmp_cpus <= 0) {
5049 pr_warning("Invalid #CPUs %d from %s\n", cpus, fcpu); 5050 pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
5050 return -EINVAL; 5051 return -EINVAL;
5051 } 5052 }
5052 return cpus; 5053
5054 WRITE_ONCE(cpus, tmp_cpus);
5055 return tmp_cpus;
5053} 5056}
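
libbpf previously accumulated directly into the static cache, so a concurrent caller could read a half-built count; the fix sums into a local and publishes once via WRITE_ONCE. A user-space analogue with C11 atomics (the sysfs parse step is elided and the result is a placeholder):

        #include <stdatomic.h>

        static int num_possible_cpus(void)
        {
                static _Atomic int cached;
                int tmp = atomic_load_explicit(&cached, memory_order_relaxed);

                if (tmp > 0)
                        return tmp;

                /* ... parse /sys/devices/system/cpu/possible into tmp ... */
                tmp = 8;        /* placeholder result for the sketch */

                /* Publish the finished value in one shot; readers never see
                 * the intermediate sums a shared accumulator would expose. */
                atomic_store_explicit(&cached, tmp, memory_order_relaxed);
                return tmp;
        }
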
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 045f5f7d68ab..13f1e8b9ac52 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -9,9 +9,10 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11turbostat : turbostat.c 11turbostat : turbostat.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' 14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
15override CFLAGS += -D_FORTIFY_SOURCE=2
15 16
16%: %.c 17%: %.c
17 @mkdir -p $(BUILD_OUTPUT) 18 @mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 75fc4fb9901c..b2a86438f074 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -39,7 +39,6 @@ FILE *outf;
39int *fd_percpu; 39int *fd_percpu;
40struct timeval interval_tv = {5, 0}; 40struct timeval interval_tv = {5, 0};
41struct timespec interval_ts = {5, 0}; 41struct timespec interval_ts = {5, 0};
42struct timespec one_msec = {0, 1000000};
43unsigned int num_iterations; 42unsigned int num_iterations;
44unsigned int debug; 43unsigned int debug;
45unsigned int quiet; 44unsigned int quiet;
@@ -60,6 +59,7 @@ unsigned int do_irtl_hsw;
60unsigned int units = 1000000; /* MHz etc */ 59unsigned int units = 1000000; /* MHz etc */
61unsigned int genuine_intel; 60unsigned int genuine_intel;
62unsigned int authentic_amd; 61unsigned int authentic_amd;
62unsigned int hygon_genuine;
63unsigned int max_level, max_extended_level; 63unsigned int max_level, max_extended_level;
64unsigned int has_invariant_tsc; 64unsigned int has_invariant_tsc;
65unsigned int do_nhm_platform_info; 65unsigned int do_nhm_platform_info;
@@ -100,6 +100,7 @@ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ 100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
101unsigned int has_misc_feature_control; 101unsigned int has_misc_feature_control;
102unsigned int first_counter_read = 1; 102unsigned int first_counter_read = 1;
103int ignore_stdin;
103 104
104#define RAPL_PKG (1 << 0) 105#define RAPL_PKG (1 << 0)
105 /* 0x610 MSR_PKG_POWER_LIMIT */ 106 /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -166,6 +167,7 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
166struct thread_data { 167struct thread_data {
167 struct timeval tv_begin; 168 struct timeval tv_begin;
168 struct timeval tv_end; 169 struct timeval tv_end;
170 struct timeval tv_delta;
169 unsigned long long tsc; 171 unsigned long long tsc;
170 unsigned long long aperf; 172 unsigned long long aperf;
171 unsigned long long mperf; 173 unsigned long long mperf;
@@ -506,6 +508,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU
506unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; 508unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
507 509
508#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) 510#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
511#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
509#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) 512#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
510#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) 513#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
511#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) 514#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
@@ -849,7 +852,6 @@ int dump_counters(struct thread_data *t, struct core_data *c,
849 outp += sprintf(outp, "pc8: %016llX\n", p->pc8); 852 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
850 outp += sprintf(outp, "pc9: %016llX\n", p->pc9); 853 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
851 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 854 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
852 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
853 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); 855 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
854 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); 856 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
855 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); 857 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
@@ -911,7 +913,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
911 if (DO_BIC(BIC_TOD)) 913 if (DO_BIC(BIC_TOD))
912 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); 914 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
913 915
914 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 916 interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
915 917
916 tsc = t->tsc * tsc_tweak; 918 tsc = t->tsc * tsc_tweak;
917 919
@@ -1287,6 +1289,14 @@ delta_core(struct core_data *new, struct core_data *old)
1287 } 1289 }
1288} 1290}
1289 1291
1292int soft_c1_residency_display(int bic)
1293{
1294 if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
1295 return 0;
1296
1297 return DO_BIC_READ(bic);
1298}
1299
1290/* 1300/*
1291 * old = new - old 1301 * old = new - old
1292 */ 1302 */
@@ -1309,6 +1319,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1309 * over-write old w/ new so we can print end of interval values 1319 * over-write old w/ new so we can print end of interval values
1310 */ 1320 */
1311 1321
1322 timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
1312 old->tv_begin = new->tv_begin; 1323 old->tv_begin = new->tv_begin;
1313 old->tv_end = new->tv_end; 1324 old->tv_end = new->tv_end;
1314 1325
@@ -1322,7 +1333,8 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1322 1333
1323 old->c1 = new->c1 - old->c1; 1334 old->c1 = new->c1 - old->c1;
1324 1335
1325 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1336 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1337 soft_c1_residency_display(BIC_Avg_MHz)) {
1326 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 1338 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
1327 old->aperf = new->aperf - old->aperf; 1339 old->aperf = new->aperf - old->aperf;
1328 old->mperf = new->mperf - old->mperf; 1340 old->mperf = new->mperf - old->mperf;
@@ -1404,6 +1416,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
1404 t->tv_begin.tv_usec = 0; 1416 t->tv_begin.tv_usec = 0;
1405 t->tv_end.tv_sec = 0; 1417 t->tv_end.tv_sec = 0;
1406 t->tv_end.tv_usec = 0; 1418 t->tv_end.tv_usec = 0;
1419 t->tv_delta.tv_sec = 0;
1420 t->tv_delta.tv_usec = 0;
1407 1421
1408 t->tsc = 0; 1422 t->tsc = 0;
1409 t->aperf = 0; 1423 t->aperf = 0;
@@ -1573,6 +1587,9 @@ void compute_average(struct thread_data *t, struct core_data *c,
1573 1587
1574 for_all_cpus(sum_counters, t, c, p); 1588 for_all_cpus(sum_counters, t, c, p);
1575 1589
1590 /* Use the global time delta for the average. */
1591 average.threads.tv_delta = tv_delta;
1592
1576 average.threads.tsc /= topo.num_cpus; 1593 average.threads.tsc /= topo.num_cpus;
1577 average.threads.aperf /= topo.num_cpus; 1594 average.threads.aperf /= topo.num_cpus;
1578 average.threads.mperf /= topo.num_cpus; 1595 average.threads.mperf /= topo.num_cpus;
@@ -1714,7 +1731,7 @@ void get_apic_id(struct thread_data *t)
1714 if (!DO_BIC(BIC_X2APIC)) 1731 if (!DO_BIC(BIC_X2APIC))
1715 return; 1732 return;
1716 1733
1717 if (authentic_amd) { 1734 if (authentic_amd || hygon_genuine) {
1718 unsigned int topology_extensions; 1735 unsigned int topology_extensions;
1719 1736
1720 if (max_extended_level < 0x8000001e) 1737 if (max_extended_level < 0x8000001e)
@@ -1762,19 +1779,20 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1762 struct msr_counter *mp; 1779 struct msr_counter *mp;
1763 int i; 1780 int i;
1764 1781
1765 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1766
1767 if (cpu_migrate(cpu)) { 1782 if (cpu_migrate(cpu)) {
1768 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 1783 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
1769 return -1; 1784 return -1;
1770 } 1785 }
1771 1786
1787 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1788
1772 if (first_counter_read) 1789 if (first_counter_read)
1773 get_apic_id(t); 1790 get_apic_id(t);
1774retry: 1791retry:
1775 t->tsc = rdtsc(); /* we are running on local CPU of interest */ 1792 t->tsc = rdtsc(); /* we are running on local CPU of interest */
1776 1793
1777 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1794 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1795 soft_c1_residency_display(BIC_Avg_MHz)) {
1778 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; 1796 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
1779 1797
1780 /* 1798 /*
@@ -1851,20 +1869,20 @@ retry:
1851 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1869 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1852 goto done; 1870 goto done;
1853 1871
1854 if (DO_BIC(BIC_CPU_c3)) { 1872 if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
1855 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1873 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1856 return -6; 1874 return -6;
1857 } 1875 }
1858 1876
1859 if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { 1877 if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
1860 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1878 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1861 return -7; 1879 return -7;
1862 } else if (do_knl_cstates) { 1880 } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
1863 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1881 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1864 return -7; 1882 return -7;
1865 } 1883 }
1866 1884
1867 if (DO_BIC(BIC_CPU_c7)) 1885 if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
1868 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) 1886 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1869 return -8; 1887 return -8;
1870 1888
@@ -2912,6 +2930,7 @@ int snapshot_cpu_lpi_us(void)
2912 if (retval != 1) { 2930 if (retval != 1) {
2913 fprintf(stderr, "Disabling Low Power Idle CPU output\n"); 2931 fprintf(stderr, "Disabling Low Power Idle CPU output\n");
2914 BIC_NOT_PRESENT(BIC_CPU_LPI); 2932 BIC_NOT_PRESENT(BIC_CPU_LPI);
2933 fclose(fp);
2915 return -1; 2934 return -1;
2916 } 2935 }
2917 2936
@@ -2938,6 +2957,7 @@ int snapshot_sys_lpi_us(void)
2938 if (retval != 1) { 2957 if (retval != 1) {
2939 fprintf(stderr, "Disabling Low Power Idle System output\n"); 2958 fprintf(stderr, "Disabling Low Power Idle System output\n");
2940 BIC_NOT_PRESENT(BIC_SYS_LPI); 2959 BIC_NOT_PRESENT(BIC_SYS_LPI);
2960 fclose(fp);
2941 return -1; 2961 return -1;
2942 } 2962 }
2943 fclose(fp); 2963 fclose(fp);
@@ -2985,8 +3005,6 @@ static void signal_handler (int signal)
2985 fprintf(stderr, "SIGUSR1\n"); 3005 fprintf(stderr, "SIGUSR1\n");
2986 break; 3006 break;
2987 } 3007 }
2988 /* make sure this manually-invoked interval is at least 1ms long */
2989 nanosleep(&one_msec, NULL);
2990} 3008}
2991 3009
2992void setup_signal_handler(void) 3010void setup_signal_handler(void)
@@ -3005,29 +3023,38 @@ void setup_signal_handler(void)
3005 3023
3006void do_sleep(void) 3024void do_sleep(void)
3007{ 3025{
3008 struct timeval select_timeout; 3026 struct timeval tout;
3027 struct timespec rest;
3009 fd_set readfds; 3028 fd_set readfds;
3010 int retval; 3029 int retval;
3011 3030
3012 FD_ZERO(&readfds); 3031 FD_ZERO(&readfds);
3013 FD_SET(0, &readfds); 3032 FD_SET(0, &readfds);
3014 3033
3015 if (!isatty(fileno(stdin))) { 3034 if (ignore_stdin) {
3016 nanosleep(&interval_ts, NULL); 3035 nanosleep(&interval_ts, NULL);
3017 return; 3036 return;
3018 } 3037 }
3019 3038
3020 select_timeout = interval_tv; 3039 tout = interval_tv;
3021 retval = select(1, &readfds, NULL, NULL, &select_timeout); 3040 retval = select(1, &readfds, NULL, NULL, &tout);
3022 3041
3023 if (retval == 1) { 3042 if (retval == 1) {
3024 switch (getc(stdin)) { 3043 switch (getc(stdin)) {
3025 case 'q': 3044 case 'q':
3026 exit_requested = 1; 3045 exit_requested = 1;
3027 break; 3046 break;
3047 case EOF:
3048 /*
3049 * 'stdin' is a pipe closed on the other end. There
3050 * won't be any further input.
3051 */
3052 ignore_stdin = 1;
3053 /* Sleep the rest of the time */
3054 rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
3055 rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
3056 nanosleep(&rest, NULL);
3028 } 3057 }
3029 /* make sure this manually-invoked interval is at least 1ms long */
3030 nanosleep(&one_msec, NULL);
3031 } 3058 }
3032} 3059}
3033 3060
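
The rewritten do_sleep() treats EOF on stdin as "the other end of the pipe closed": it sleeps out the remainder of the interval and sets ignore_stdin so later intervals skip select() entirely. A reduced sketch of that pattern, relying on Linux select() updating the timeout to the time remaining:

        #include <stdio.h>
        #include <sys/select.h>
        #include <time.h>
        #include <unistd.h>

        static void wait_interval(struct timeval tv, int *ignore_stdin)
        {
                struct timespec rest;
                fd_set rfds;

                if (*ignore_stdin) {
                        rest.tv_sec = tv.tv_sec;
                        rest.tv_nsec = tv.tv_usec * 1000;
                        nanosleep(&rest, NULL);
                        return;
                }

                FD_ZERO(&rfds);
                FD_SET(STDIN_FILENO, &rfds);

                if (select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv) == 1 &&
                    getc(stdin) == EOF) {
                        *ignore_stdin = 1;              /* pipe closed; stop polling stdin */
                        rest.tv_sec = tv.tv_sec;        /* tv now holds the time left */
                        rest.tv_nsec = tv.tv_usec * 1000;
                        nanosleep(&rest, NULL);
                }
        }
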
@@ -3209,6 +3236,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
3209 break; 3236 break;
3210 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3237 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3211 case INTEL_FAM6_HASWELL_X: /* HSX */ 3238 case INTEL_FAM6_HASWELL_X: /* HSX */
3239 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3212 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3240 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3213 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3241 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3214 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3242 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3405,6 +3433,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
3405 case INTEL_FAM6_IVYBRIDGE: /* IVB */ 3433 case INTEL_FAM6_IVYBRIDGE: /* IVB */
3406 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3434 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3407 case INTEL_FAM6_HASWELL_X: /* HSX */ 3435 case INTEL_FAM6_HASWELL_X: /* HSX */
3436 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3408 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3437 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3409 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3438 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3410 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3439 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3803,6 +3832,7 @@ double get_tdp_amd(unsigned int family)
3803{ 3832{
3804 switch (family) { 3833 switch (family) {
3805 case 0x17: 3834 case 0x17:
3835 case 0x18:
3806 default: 3836 default:
3807 /* This is the max stock TDP of HEDT/Server Fam17h chips */ 3837 /* This is the max stock TDP of HEDT/Server Fam17h chips */
3808 return 250.0; 3838 return 250.0;
@@ -3841,6 +3871,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
3841 case INTEL_FAM6_SANDYBRIDGE: 3871 case INTEL_FAM6_SANDYBRIDGE:
3842 case INTEL_FAM6_IVYBRIDGE: 3872 case INTEL_FAM6_IVYBRIDGE:
3843 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3873 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3874 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3844 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3875 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3845 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3876 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3846 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3877 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3982,6 +4013,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
3982 4013
3983 switch (family) { 4014 switch (family) {
3984 case 0x17: /* Zen, Zen+ */ 4015 case 0x17: /* Zen, Zen+ */
4016 case 0x18: /* Hygon Dhyana */
3985 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; 4017 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
3986 if (rapl_joules) { 4018 if (rapl_joules) {
3987 BIC_PRESENT(BIC_Pkg_J); 4019 BIC_PRESENT(BIC_Pkg_J);
@@ -4002,7 +4034,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
4002 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); 4034 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
4003 rapl_power_units = ldexp(1.0, -(msr & 0xf)); 4035 rapl_power_units = ldexp(1.0, -(msr & 0xf));
4004 4036
4005 tdp = get_tdp_amd(model); 4037 tdp = get_tdp_amd(family);
4006 4038
4007 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 4039 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
4008 if (!quiet) 4040 if (!quiet)
@@ -4018,7 +4050,7 @@ void rapl_probe(unsigned int family, unsigned int model)
4018{ 4050{
4019 if (genuine_intel) 4051 if (genuine_intel)
4020 rapl_probe_intel(family, model); 4052 rapl_probe_intel(family, model);
4021 if (authentic_amd) 4053 if (authentic_amd || hygon_genuine)
4022 rapl_probe_amd(family, model); 4054 rapl_probe_amd(family, model);
4023} 4055}
4024 4056
@@ -4032,6 +4064,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
4032 4064
4033 switch (model) { 4065 switch (model) {
4034 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4066 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4067 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4035 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4068 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4036 do_gfx_perf_limit_reasons = 1; 4069 do_gfx_perf_limit_reasons = 1;
4037 case INTEL_FAM6_HASWELL_X: /* HSX */ 4070 case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -4251,6 +4284,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4251 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ 4284 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
4252 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4285 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4253 case INTEL_FAM6_HASWELL_X: /* HSW */ 4286 case INTEL_FAM6_HASWELL_X: /* HSW */
4287 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4254 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4288 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4255 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4289 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4256 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 4290 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -4267,7 +4301,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4267} 4301}
4268 4302
4269/* 4303/*
4270 * HSW adds support for additional MSRs: 4304 * HSW ULT added support for C8/C9/C10 MSRs:
4271 * 4305 *
4272 * MSR_PKG_C8_RESIDENCY 0x00000630 4306 * MSR_PKG_C8_RESIDENCY 0x00000630
4273 * MSR_PKG_C9_RESIDENCY 0x00000631 4307 * MSR_PKG_C9_RESIDENCY 0x00000631
@@ -4278,13 +4312,13 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4278 * MSR_PKGC10_IRTL 0x00000635 4312 * MSR_PKGC10_IRTL 0x00000635
4279 * 4313 *
4280 */ 4314 */
4281int has_hsw_msrs(unsigned int family, unsigned int model) 4315int has_c8910_msrs(unsigned int family, unsigned int model)
4282{ 4316{
4283 if (!genuine_intel) 4317 if (!genuine_intel)
4284 return 0; 4318 return 0;
4285 4319
4286 switch (model) { 4320 switch (model) {
4287 case INTEL_FAM6_HASWELL_CORE: 4321 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4288 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4322 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4289 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4323 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
4290 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4324 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
@@ -4568,9 +4602,6 @@ unsigned int intel_model_duplicates(unsigned int model)
4568 case INTEL_FAM6_XEON_PHI_KNM: 4602 case INTEL_FAM6_XEON_PHI_KNM:
4569 return INTEL_FAM6_XEON_PHI_KNL; 4603 return INTEL_FAM6_XEON_PHI_KNL;
4570 4604
4571 case INTEL_FAM6_HASWELL_ULT:
4572 return INTEL_FAM6_HASWELL_CORE;
4573
4574 case INTEL_FAM6_BROADWELL_X: 4605 case INTEL_FAM6_BROADWELL_X:
4575 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 4606 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
4576 return INTEL_FAM6_BROADWELL_X; 4607 return INTEL_FAM6_BROADWELL_X;
@@ -4582,7 +4613,11 @@ unsigned int intel_model_duplicates(unsigned int model)
4582 return INTEL_FAM6_SKYLAKE_MOBILE; 4613 return INTEL_FAM6_SKYLAKE_MOBILE;
4583 4614
4584 case INTEL_FAM6_ICELAKE_MOBILE: 4615 case INTEL_FAM6_ICELAKE_MOBILE:
4616 case INTEL_FAM6_ICELAKE_NNPI:
4585 return INTEL_FAM6_CANNONLAKE_MOBILE; 4617 return INTEL_FAM6_CANNONLAKE_MOBILE;
4618
4619 case INTEL_FAM6_ATOM_TREMONT_X:
4620 return INTEL_FAM6_ATOM_GOLDMONT_X;
4586 } 4621 }
4587 return model; 4622 return model;
4588} 4623}
@@ -4600,6 +4635,8 @@ void process_cpuid()
4600 genuine_intel = 1; 4635 genuine_intel = 1;
4601 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65) 4636 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
4602 authentic_amd = 1; 4637 authentic_amd = 1;
4638 else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e)
4639 hygon_genuine = 1;
4603 4640
4604 if (!quiet) 4641 if (!quiet)
4605 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", 4642 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
@@ -4820,12 +4857,12 @@ void process_cpuid()
4820 BIC_NOT_PRESENT(BIC_CPU_c7); 4857 BIC_NOT_PRESENT(BIC_CPU_c7);
4821 BIC_NOT_PRESENT(BIC_Pkgpc7); 4858 BIC_NOT_PRESENT(BIC_Pkgpc7);
4822 } 4859 }
4823 if (has_hsw_msrs(family, model)) { 4860 if (has_c8910_msrs(family, model)) {
4824 BIC_PRESENT(BIC_Pkgpc8); 4861 BIC_PRESENT(BIC_Pkgpc8);
4825 BIC_PRESENT(BIC_Pkgpc9); 4862 BIC_PRESENT(BIC_Pkgpc9);
4826 BIC_PRESENT(BIC_Pkgpc10); 4863 BIC_PRESENT(BIC_Pkgpc10);
4827 } 4864 }
4828 do_irtl_hsw = has_hsw_msrs(family, model); 4865 do_irtl_hsw = has_c8910_msrs(family, model);
4829 if (has_skl_msrs(family, model)) { 4866 if (has_skl_msrs(family, model)) {
4830 BIC_PRESENT(BIC_Totl_c0); 4867 BIC_PRESENT(BIC_Totl_c0);
4831 BIC_PRESENT(BIC_Any_c0); 4868 BIC_PRESENT(BIC_Any_c0);
@@ -5123,7 +5160,7 @@ int initialize_counters(int cpu_id)
5123 5160
5124void allocate_output_buffer() 5161void allocate_output_buffer()
5125{ 5162{
5126 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); 5163 output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
5127 outp = output_buffer; 5164 outp = output_buffer;
5128 if (outp == NULL) 5165 if (outp == NULL)
5129 err(-1, "calloc output buffer"); 5166 err(-1, "calloc output buffer");
@@ -5269,7 +5306,7 @@ int get_and_dump_counters(void)
5269} 5306}
5270 5307
5271void print_version() { 5308void print_version() {
5272 fprintf(outf, "turbostat version 19.03.20" 5309 fprintf(outf, "turbostat version 19.08.31"
5273 " - Len Brown <lenb@kernel.org>\n"); 5310 " - Len Brown <lenb@kernel.org>\n");
5274} 5311}
5275 5312
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index 1fdeef864e7c..666b325a62a2 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -9,8 +9,9 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11x86_energy_perf_policy : x86_energy_perf_policy.c 11x86_energy_perf_policy : x86_energy_perf_policy.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -D_FORTIFY_SOURCE=2
14 15
15%: %.c 16%: %.c
16 @mkdir -p $(BUILD_OUTPUT) 17 @mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 17db1c3af4d0..78c6361898b1 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -40,7 +40,7 @@ in the same processor package.
40Hardware P-States (HWP) are effectively an expansion of hardware 40Hardware P-States (HWP) are effectively an expansion of hardware
41P-state control from the opportunistic turbo-mode P-state range 41P-state control from the opportunistic turbo-mode P-state range
42to include the entire range of available P-states. 42to include the entire range of available P-states.
43On Broadwell Xeon, the initial HWP implementation, EBP influenced HWP. 43On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
44That influence was removed in subsequent generations, 44That influence was removed in subsequent generations,
45where it was moved to the 45where it was moved to the
46Energy_Performance_Preference (EPP) field in 46Energy_Performance_Preference (EPP) field in
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 34a796b303fe..3fe1eed900d4 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv)
545 545
546 progname = argv[0]; 546 progname = argv[0];
547 547
548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw", 548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
549 long_options, &option_index)) != -1) { 549 long_options, &option_index)) != -1) {
550 switch (opt) { 550 switch (opt) {
551 case 'a': 551 case 'a':
@@ -1259,6 +1259,15 @@ void probe_dev_msr(void)
1259 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 1259 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1261} 1261}
1262
1263static void get_cpuid_or_exit(unsigned int leaf,
1264 unsigned int *eax, unsigned int *ebx,
1265 unsigned int *ecx, unsigned int *edx)
1266{
1267 if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
1268 errx(1, "Processor not supported\n");
1269}
1270
1262/* 1271/*
1263 * early_cpuid() 1272 * early_cpuid()
1264 * initialize turbo_is_enabled, has_hwp, has_epb 1273 * initialize turbo_is_enabled, has_hwp, has_epb
@@ -1266,15 +1275,10 @@ void probe_dev_msr(void)
1266 */ 1275 */
1267void early_cpuid(void) 1276void early_cpuid(void)
1268{ 1277{
1269 unsigned int eax, ebx, ecx, edx, max_level; 1278 unsigned int eax, ebx, ecx, edx;
1270 unsigned int fms, family, model; 1279 unsigned int fms, family, model;
1271 1280
1272 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1281 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1273
1274 if (max_level < 6)
1275 errx(1, "Processor not supported\n");
1276
1277 __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1278 family = (fms >> 8) & 0xf; 1282 family = (fms >> 8) & 0xf;
1279 model = (fms >> 4) & 0xf; 1283 model = (fms >> 4) & 0xf;
1280 if (family == 6 || family == 0xf) 1284 if (family == 6 || family == 0xf)
@@ -1288,7 +1292,7 @@ void early_cpuid(void)
1288 bdx_highest_ratio = msr & 0xFF; 1292 bdx_highest_ratio = msr & 0xFF;
1289 } 1293 }
1290 1294
1291 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1295 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1292 turbo_is_enabled = (eax >> 1) & 1; 1296 turbo_is_enabled = (eax >> 1) & 1;
1293 has_hwp = (eax >> 7) & 1; 1297 has_hwp = (eax >> 7) & 1;
1294 has_epb = (ecx >> 3) & 1; 1298 has_epb = (ecx >> 3) & 1;
@@ -1306,7 +1310,7 @@ void parse_cpuid(void)
1306 1310
1307 eax = ebx = ecx = edx = 0; 1311 eax = ebx = ecx = edx = 0;
1308 1312
1309 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1313 get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
1310 1314
1311 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 1315 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
1312 genuine_intel = 1; 1316 genuine_intel = 1;
@@ -1315,7 +1319,7 @@ void parse_cpuid(void)
1315 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", 1319 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
1316 (char *)&ebx, (char *)&edx, (char *)&ecx); 1320 (char *)&ebx, (char *)&edx, (char *)&ecx);
1317 1321
1318 __get_cpuid(1, &fms, &ebx, &ecx, &edx); 1322 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1319 family = (fms >> 8) & 0xf; 1323 family = (fms >> 8) & 0xf;
1320 model = (fms >> 4) & 0xf; 1324 model = (fms >> 4) & 0xf;
1321 stepping = fms & 0xf; 1325 stepping = fms & 0xf;
@@ -1340,7 +1344,7 @@ void parse_cpuid(void)
1340 errx(1, "CPUID: no MSR"); 1344 errx(1, "CPUID: no MSR");
1341 1345
1342 1346
1343 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1347 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1344 /* turbo_is_enabled already set */ 1348 /* turbo_is_enabled already set */
1345 /* has_hwp already set */ 1349 /* has_hwp already set */
1346 has_hwp_notify = eax & (1 << 8); 1350 has_hwp_notify = eax & (1 << 8);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index c085964e1d05..96752ebd938f 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) 34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
35TEST_GEN_FILES = $(BPF_OBJ_FILES) 35TEST_GEN_FILES = $(BPF_OBJ_FILES)
36 36
37BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
38TEST_FILES = $(BTF_C_FILES)
39
37# Also test sub-register code-gen if LLVM has eBPF v3 processor support which 40# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
38# contains both ALU32 and JMP32 instructions. 41# contains both ALU32 and JMP32 instructions.
39SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \ 42SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
68TEST_PROGS_EXTENDED := with_addr.sh \ 71TEST_PROGS_EXTENDED := with_addr.sh \
69 with_tunnels.sh \ 72 with_tunnels.sh \
70 tcp_client.py \ 73 tcp_client.py \
71 tcp_server.py 74 tcp_server.py \
75 test_xdp_vlan.sh
72 76
73# Compile but not part of 'make run_tests' 77# Compile but not part of 'make run_tests'
74TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ 78TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index f7a0744db31e..5dc109f4c097 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
34CONFIG_MPLS_ROUTING=m 34CONFIG_MPLS_ROUTING=m
35CONFIG_MPLS_IPTUNNEL=m 35CONFIG_MPLS_IPTUNNEL=m
36CONFIG_IPV6_SIT=m 36CONFIG_IPV6_SIT=m
37CONFIG_BPF_JIT=y
diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c
index 8f850823d35f..6e75dd3cb14f 100644
--- a/tools/testing/selftests/bpf/test_btf_dump.c
+++ b/tools/testing/selftests/bpf/test_btf_dump.c
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
97 } 97 }
98 98
99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name); 99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
100 if (access(test_file, R_OK) == -1)
101 /*
102 * When the test is run with O=, kselftest copies TEST_FILES
103 * without preserving the directory structure.
104 */
105 snprintf(test_file, sizeof(test_file), "%s.c",
106 test_case->name);
100 /* 107 /*
101 * Diff test output and expected test output, contained between 108 * Diff test output and expected test output, contained between
102 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. 109 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 2fc4625c1a15..655729004391 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
22 BPF_FUNC_get_local_storage), 22 BPF_FUNC_get_local_storage),
23 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 23 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), 24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
25 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), 25 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
26 26
27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ 27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
30 BPF_FUNC_get_local_storage), 30 BPF_FUNC_get_local_storage),
31 BPF_MOV64_IMM(BPF_REG_1, 1), 31 BPF_MOV64_IMM(BPF_REG_1, 1),
32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 33 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), 34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
36 BPF_EXIT_INSN(), 36 BPF_EXIT_INSN(),
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fb679ac3d4b0..0e6652733462 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
13#include <bpf/bpf.h> 13#include <bpf/bpf.h>
14 14
15#include "cgroup_helpers.h" 15#include "cgroup_helpers.h"
16#include "bpf_endian.h"
16#include "bpf_rlimit.h" 17#include "bpf_rlimit.h"
17#include "bpf_util.h" 18#include "bpf_util.h"
18 19
@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
232 /* if (ip == expected && port == expected) */ 233 /* if (ip == expected && port == expected) */
233 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 234 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
234 offsetof(struct bpf_sock, src_ip6[3])), 235 offsetof(struct bpf_sock, src_ip6[3])),
235 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4), 236 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
237 __bpf_constant_ntohl(0x00000001), 4),
236 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 238 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
237 offsetof(struct bpf_sock, src_port)), 239 offsetof(struct bpf_sock, src_port)),
238 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2), 240 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
261 /* if (ip == expected && port == expected) */ 263 /* if (ip == expected && port == expected) */
262 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 264 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
263 offsetof(struct bpf_sock, src_ip4)), 265 offsetof(struct bpf_sock, src_ip4)),
264 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4), 266 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
267 __bpf_constant_ntohl(0x7F000001), 4),
265 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 268 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
266 offsetof(struct bpf_sock, src_port)), 269 offsetof(struct bpf_sock, src_port)),
267 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2), 270 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
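
The test previously compared src_ip4/src_ip6 words against host-order literals like 0x0100007F, which only matched on little-endian hosts; the fix builds the expected constants with __bpf_constant_ntohl(). A small host-side illustration of why the conversion matters:

        #include <arpa/inet.h>
        #include <inttypes.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t wire = htonl(0x7F000001);      /* 127.0.0.1 in network order */

                /* On a little-endian host this prints 0x100007f, which is why
                 * the old hard-coded constant happened to work there and
                 * nowhere else. */
                printf("raw word: 0x%" PRIx32 "\n", wire);
                return 0;
        }
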
diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c
index 5e980a5ab69d..1fc4e61e9f9f 100644
--- a/tools/testing/selftests/bpf/verifier/loops1.c
+++ b/tools/testing/selftests/bpf/verifier/loops1.c
@@ -159,3 +159,31 @@
159 .errstr = "loop detected", 159 .errstr = "loop detected",
160 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 160 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
161}, 161},
162{
163 "not-taken loop with back jump to 1st insn",
164 .insns = {
165 BPF_MOV64_IMM(BPF_REG_0, 123),
166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2),
167 BPF_EXIT_INSN(),
168 },
169 .result = ACCEPT,
170 .prog_type = BPF_PROG_TYPE_XDP,
171 .retval = 123,
172},
173{
174 "taken loop with back jump to 1st insn",
175 .insns = {
176 BPF_MOV64_IMM(BPF_REG_1, 10),
177 BPF_MOV64_IMM(BPF_REG_2, 0),
178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
179 BPF_EXIT_INSN(),
180 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
181 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
182 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3),
183 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
184 BPF_EXIT_INSN(),
185 },
186 .result = ACCEPT,
187 .prog_type = BPF_PROG_TYPE_XDP,
188 .retval = 55,
189},
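
These two cases pin down the verifier's bounded-loop support, new in this cycle: a back jump that is provably never taken, and one that iterates ten times and must yield 55. In C, the second case is roughly the following XDP program (a sketch; the raw instruction encoding above is the authoritative form):

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        SEC("xdp")
        int sum_one_to_ten(struct xdp_md *ctx)
        {
                int i, sum = 0;

                /* A bounded back edge the verifier can prove terminates,
                 * no manual unrolling required. */
                for (i = 1; i <= 10; i++)
                        sum += i;

                return sum == 55 ? XDP_PASS : XDP_DROP;
        }

        char _license[] SEC("license") = "GPL";
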
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
index 4059014d93ea..4912d23844bc 100644
--- a/tools/testing/selftests/kvm/include/evmcs.h
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs {
220struct hv_enlightened_vmcs *current_evmcs; 220struct hv_enlightened_vmcs *current_evmcs;
221struct hv_vp_assist_page *current_vp_assist; 221struct hv_vp_assist_page *current_vp_assist;
222 222
223int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
224
223static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) 225static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
224{ 226{
225 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | 227 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 6cb34a0fa200..0a5e487dbc50 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1060,9 +1060,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
 		    r);
 
-	r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
-	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
-		    r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -1103,9 +1105,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
 		    r);
 
-	r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
-	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
-		    r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
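kvm_check_cap() in the selftest library is a thin wrapper around KVM_CHECK_EXTENSION on /dev/kvm, so state save/restore now skips the XCR ioctls cleanly on hosts without that capability instead of tripping the assert. A minimal sketch of the underlying probe (hypothetical probe_cap() helper, error handling trimmed):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* returns nonzero when the capability is present */
static int probe_cap(long cap)
{
	int kvm_fd = open("/dev/kvm", O_RDONLY);
	int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);

	close(kvm_fd);
	return ret > 0;
}

/* usage: if (probe_cap(KVM_CAP_XCRS)) issue KVM_GET_XCRS / KVM_SET_XCRS */
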
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 204f847bd065..9cef0455b819 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -12,6 +12,26 @@
 
 bool enable_evmcs;
 
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
+{
+	uint16_t evmcs_ver;
+
+	struct kvm_enable_cap enable_evmcs_cap = {
+		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+		 .args[0] = (unsigned long)&evmcs_ver
+	};
+
+	vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+	/* KVM should return supported EVMCS version range */
+	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+		    (evmcs_ver & 0xff) > 0,
+		    "Incorrect EVMCS version range: %x:%x\n",
+		    evmcs_ver & 0xff, evmcs_ver >> 8);
+
+	return evmcs_ver;
+}
+
 /* Allocate memory regions for nested VMX tests.
  *
  * Input Args:
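The value KVM writes back through args[0] packs the supported eVMCS version range into 16 bits: low byte is the lowest version, high byte the highest, which is exactly what the TEST_ASSERT validates before the helper returns it. Decoding it in isolation (hypothetical function name):

#include <stdint.h>
#include <stdio.h>

static void print_evmcs_range(uint16_t evmcs_ver)
{
	uint8_t lo = evmcs_ver & 0xff;	/* lowest supported version */
	uint8_t hi = evmcs_ver >> 8;	/* highest supported version */

	/* the helper asserts lo > 0 && hi >= lo before trusting this */
	printf("eVMCS versions %u..%u supported\n", lo, hi);
}
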
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index f95c08343b48..92915e6408e7 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -79,11 +79,6 @@ int main(int argc, char *argv[])
 	struct kvm_x86_state *state;
 	struct ucall uc;
 	int stage;
-	uint16_t evmcs_ver;
-	struct kvm_enable_cap enable_evmcs_cap = {
-		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
-		 .args[0] = (unsigned long)&evmcs_ver
-	};
 
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, 0, guest_code);
@@ -96,13 +91,7 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
-	vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
-	/* KVM should return supported EVMCS version range */
-	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
-		    (evmcs_ver & 0xff) > 0,
-		    "Incorrect EVMCS version range: %x:%x\n",
-		    evmcs_ver & 0xff, evmcs_ver >> 8);
+	vcpu_enable_evmcs(vm, VCPU_ID);
 
 	run = vcpu_state(vm, VCPU_ID);
 
@@ -146,7 +135,7 @@ int main(int argc, char *argv[])
 	kvm_vm_restart(vm, O_RDWR);
 	vm_vcpu_add(vm, VCPU_ID);
 	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-	vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+	vcpu_enable_evmcs(vm, VCPU_ID);
 	vcpu_load_state(vm, VCPU_ID, state);
 	run = vcpu_state(vm, VCPU_ID);
 	free(state);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index f72b3043db0e..ee59831fbc98 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -18,6 +18,7 @@
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
+#include "vmx.h"
 
 #define VCPU_ID 0
 
@@ -106,12 +107,7 @@ int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;
 	int rv;
-	uint16_t evmcs_ver;
 	struct kvm_cpuid2 *hv_cpuid_entries;
-	struct kvm_enable_cap enable_evmcs_cap = {
-		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
-		 .args[0] = (unsigned long)&evmcs_ver
-	};
 
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
@@ -136,14 +132,14 @@ int main(int argc, char *argv[])
 
 	free(hv_cpuid_entries);
 
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
-	if (rv) {
+	if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
 		fprintf(stderr,
 			"Enlightened VMCS is unsupported, skip related test\n");
 		goto vm_free;
 	}
 
+	vcpu_enable_evmcs(vm, VCPU_ID);
+
 	hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
 	if (!hv_cpuid_entries)
 		return 1;
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 40050e44ec0a..f9334bd3cce9 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -99,8 +99,8 @@ int main(int argc, char *argv[])
 	msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
 		     msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
-	test_msr_platform_info_disabled(vm);
 	test_msr_platform_info_enabled(vm);
+	test_msr_platform_info_disabled(vm);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
 
 	kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index ed7218d166da..853e370e8a39 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -25,24 +25,17 @@
 #define VMCS12_REVISION 0x11e57ed0
 #define VCPU_ID 5
 
+bool have_evmcs;
+
 void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
 {
-	volatile struct kvm_run *run;
-
 	vcpu_nested_state_set(vm, VCPU_ID, state, false);
-	run = vcpu_state(vm, VCPU_ID);
-	vcpu_run(vm, VCPU_ID);
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
-		"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
-		run->exit_reason,
-		exit_reason_str(run->exit_reason));
 }
 
 void test_nested_state_expect_errno(struct kvm_vm *vm,
 				    struct kvm_nested_state *state,
 				    int expected_errno)
 {
-	volatile struct kvm_run *run;
 	int rv;
 
 	rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm,
 		"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
 		strerror(expected_errno), expected_errno, rv, strerror(errno),
 		errno);
-	run = vcpu_state(vm, VCPU_ID);
-	vcpu_run(vm, VCPU_ID);
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
-		"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
-		run->exit_reason,
-		exit_reason_str(run->exit_reason));
 }
 
 void test_nested_state_expect_einval(struct kvm_vm *vm,
@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
 {
 	memset(state, 0, size);
 	state->flags = KVM_STATE_NESTED_GUEST_MODE |
-			KVM_STATE_NESTED_RUN_PENDING |
-			KVM_STATE_NESTED_EVMCS;
+			KVM_STATE_NESTED_RUN_PENDING;
+	if (have_evmcs)
+		state->flags |= KVM_STATE_NESTED_EVMCS;
 	state->format = 0;
 	state->size = size;
 	state->hdr.vmx.vmxon_pa = 0x1000;
@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	/*
 	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
 	 * setting the nested state but flags other than eVMCS must be clear.
+	 * The eVMCS flag can be set if the enlightened VMCS capability has
+	 * been enabled.
 	 */
 	set_default_vmx_state(state, state_sz);
 	state->hdr.vmx.vmxon_pa = -1ull;
 	state->hdr.vmx.vmcs12_pa = -1ull;
 	test_nested_state_expect_einval(vm, state);
 
-	state->flags = KVM_STATE_NESTED_EVMCS;
+	state->flags &= KVM_STATE_NESTED_EVMCS;
+	if (have_evmcs) {
+		test_nested_state_expect_einval(vm, state);
+		vcpu_enable_evmcs(vm, VCPU_ID);
+	}
 	test_nested_state(vm, state);
 
 	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
@@ -232,6 +226,8 @@ int main(int argc, char *argv[])
 	struct kvm_nested_state state;
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
+	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
+
 	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
 		printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
 		exit(KSFT_SKIP);
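Two easy-to-miss details in this file: set_default_vmx_state() now sets KVM_STATE_NESTED_EVMCS only when the host advertises the capability, and the later `state->flags &= KVM_STATE_NESTED_EVMCS` masks the flags down to at most that single bit, which KVM still rejects until vcpu_enable_evmcs() has run. The flag algebra on its own (bit values illustrative, not the real KVM_STATE_* constants):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define GUEST_MODE	(1u << 0)	/* illustrative values */
#define RUN_PENDING	(1u << 1)
#define EVMCS		(1u << 2)

int main(void)
{
	bool have_evmcs = true;		/* from kvm_check_cap() in the test */
	uint32_t flags = GUEST_MODE | RUN_PENDING;

	if (have_evmcs)
		flags |= EVMCS;

	flags &= EVMCS;			/* keep at most the eVMCS bit */
	assert(flags == (have_evmcs ? EVMCS : 0));
	return 0;
}
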
diff --git a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
index 41476399e184..f6e65674b83c 100755
--- a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
+++ b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
@@ -30,7 +30,7 @@ do_test() {
 	ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1"
 	val=$(ip netns exec "${NETNS}" nstat -az | \
 		grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}')
-	if [ $val -ne 0 ]; then
+	if [ "$val" != 0 ]; then
 		echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero"
 		return 1
 	fi
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index fe52488a6f72..16571ac1dab4 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -321,4 +321,52 @@ else
 	ip netns exec nsr1 nft list ruleset
 fi
 
+KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
+KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
+SPI1=$RANDOM
+SPI2=$RANDOM
+
+if [ $SPI1 -eq $SPI2 ]; then
+	SPI2=$((SPI2+1))
+fi
+
+do_esp() {
+	local ns=$1
+	local me=$2
+	local remote=$3
+	local lnet=$4
+	local rnet=$5
+	local spi_out=$6
+	local spi_in=$7
+
+	ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
+	ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
+
+	# to encrypt packets as they go out (includes forwarded packets that need encapsulation)
+	ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 1 action allow
+	# to fwd decrypted packets after esp processing:
+	ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 1 action allow
+
+}
+
+do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
+
+do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
+
+ip netns exec nsr1 nft delete table ip nat
+
+# restore default routes
+ip -net ns2 route del 192.168.10.1 via 10.0.2.1
+ip -net ns2 route add default via 10.0.2.1
+ip -net ns2 route add default via dead:2::1
+
+test_tcp_forwarding ns1 ns2
+if [ $? -eq 0 ] ;then
+	echo "PASS: ipsec tunnel mode for ns1/ns2"
+else
+	echo "FAIL: ipsec tunnel mode for ns1/ns2"
+	ip netns exec nsr1 nft list ruleset 1>&2
+	ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2
+fi
+
 exit $ret
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
index affa7f2d9670..9539cffa9e5e 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
@@ -64,7 +64,7 @@ class SubPlugin(TdcPlugin):
             cmdlist.insert(0, self.args.NAMES['NS'])
             cmdlist.insert(0, 'exec')
             cmdlist.insert(0, 'netns')
-            cmdlist.insert(0, 'ip')
+            cmdlist.insert(0, self.args.NAMES['IP'])
         else:
             pass
 
@@ -78,16 +78,16 @@ class SubPlugin(TdcPlugin):
         return command
 
     def _ports_create(self):
-        cmd = 'ip link add $DEV0 type veth peer name $DEV1'
+        cmd = '$IP link add $DEV0 type veth peer name $DEV1'
         self._exec_cmd('pre', cmd)
-        cmd = 'ip link set $DEV0 up'
+        cmd = '$IP link set $DEV0 up'
         self._exec_cmd('pre', cmd)
         if not self.args.namespace:
-            cmd = 'ip link set $DEV1 up'
+            cmd = '$IP link set $DEV1 up'
             self._exec_cmd('pre', cmd)
 
     def _ports_destroy(self):
-        cmd = 'ip link del $DEV0'
+        cmd = '$IP link del $DEV0'
         self._exec_cmd('post', cmd)
 
     def _ns_create(self):
@@ -97,16 +97,16 @@ class SubPlugin(TdcPlugin):
         '''
         self._ports_create()
         if self.args.namespace:
-            cmd = 'ip netns add {}'.format(self.args.NAMES['NS'])
+            cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
             self._exec_cmd('pre', cmd)
-            cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
+            cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
             self._exec_cmd('pre', cmd)
-            cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
+            cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
             self._exec_cmd('pre', cmd)
             if self.args.device:
-                cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
+                cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
                 self._exec_cmd('pre', cmd)
-                cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
+                cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
                 self._exec_cmd('pre', cmd)
 
     def _ns_destroy(self):
@@ -115,7 +115,7 @@ class SubPlugin(TdcPlugin):
         devices as well)
         '''
         if self.args.namespace:
-            cmd = 'ip netns delete {}'.format(self.args.NAMES['NS'])
+            cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
             self._exec_cmd('post', cmd)
 
     def _exec_cmd(self, stage, command):
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index bf5ebf59c2d4..9cdd2e31ac2c 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -670,5 +670,52 @@
         "teardown": [
             "$TC actions flush action skbedit"
         ]
+    },
+    {
+        "id": "630c",
+        "name": "Add batch of 32 skbedit actions with all parameters and cookie",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "^[ \t]+index [0-9]+ ref",
+        "matchCount": "32",
+        "teardown": [
+            "$TC actions flush action skbedit"
+        ]
+    },
+    {
+        "id": "706d",
+        "name": "Delete batch of 32 skbedit actions with all parameters",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ],
+            "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
+        ],
+        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "^[ \t]+index [0-9]+ ref",
+        "matchCount": "0",
+        "teardown": []
     }
 ]
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index a8a6a0c883f1..6af5c91337f2 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	unsigned int len;
 	int mask;
 
+	/* Detect an already handled MMIO return */
+	if (unlikely(!vcpu->mmio_needed))
+		return 0;
+
+	vcpu->mmio_needed = 0;
+
 	if (!run->mmio.is_write) {
 		len = run->mmio.len;
 		if (len > sizeof(unsigned long))
@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->mmio.is_write = is_write;
 	run->mmio.phys_addr = fault_ipa;
 	run->mmio.len = len;
+	vcpu->mmio_needed = 1;
 
 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
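The two hunks form a one-shot handshake: io_mem_abort() arms mmio_needed before exiting to user space, and kvm_handle_mmio_return() consumes it, so a second KVM_RUN without a fresh MMIO exit can no longer replay a stale completion into guest registers. A toy model of the pattern (hypothetical struct; the real decode of run->mmio is elided):

#include <stdbool.h>

struct toy_vcpu {
	bool mmio_needed;	/* armed on exit, consumed on return */
};

/* fault path: an MMIO access is handed to user space for emulation */
static void start_mmio(struct toy_vcpu *vcpu)
{
	vcpu->mmio_needed = true;
}

/* return path: runs at every vcpu entry, so it must be idempotent */
static int complete_mmio(struct toy_vcpu *vcpu)
{
	if (!vcpu->mmio_needed)
		return 0;		/* nothing pending: skip the decode */
	vcpu->mmio_needed = false;
	/* ... write the emulated data back into the guest register ... */
	return 1;
}
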
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index bdbc297d06fb..e621b5d45b27 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -8,6 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include "vgic.h"
 
@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		switch (dist->vgic_model) {
+		case KVM_DEV_TYPE_ARM_VGIC_V2:
 			irq->targets = 0;
 			irq->group = 0;
-		} else {
+			break;
+		case KVM_DEV_TYPE_ARM_VGIC_V3:
 			irq->mpidr = 0;
 			irq->group = 1;
+			break;
+		default:
+			kfree(dist->spis);
+			return -EINVAL;
 		}
 	}
 	return 0;
@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
-		irq->targets = 1U << vcpu->vcpu_id;
 		kref_init(&irq->refcount);
 		if (vgic_irq_is_sgi(i)) {
 			/* SGIs */
@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 			/* PPIs */
 			irq->config = VGIC_CONFIG_LEVEL;
 		}
-
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-			irq->group = 1;
-		else
-			irq->group = 0;
 	}
 
 	if (!irqchip_in_kernel(vcpu->kvm))
@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm)
 
 		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
 			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+			switch (dist->vgic_model) {
+			case KVM_DEV_TYPE_ARM_VGIC_V3:
 				irq->group = 1;
-			else
+				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+				break;
+			case KVM_DEV_TYPE_ARM_VGIC_V2:
 				irq->group = 0;
+				irq->targets = 1U << idx;
+				break;
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
 		}
 	}
 
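Replacing if/else on the vGIC model with exhaustive switch statements changes the failure mode: an unrecognized model is now rejected with -EINVAL (unwinding any allocation) instead of being silently configured as whatever the `else` branch assumed. The defensive shape, reduced to essentials (hypothetical enum, not kernel code):

enum gic_model { GIC_V2, GIC_V3 };

static int init_group_for_model(enum gic_model model, int *group)
{
	switch (model) {
	case GIC_V2:
		*group = 0;
		break;
	case GIC_V3:
		*group = 1;
		break;
	default:	/* unknown model: fail loudly rather than guess */
		return -1;
	}
	return 0;
}
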
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 44efc2ff863f..0d090482720d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -211,6 +211,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	vgic_irq_set_phys_active(irq, true);
 }
 
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+	return (vgic_irq_is_sgi(irq->intid) &&
+		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val)
@@ -223,6 +229,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/* GICD_ISPENDR0 SGI bits are WI */
+		if (is_vgic_v2_sgi(vcpu, irq)) {
+			vgic_put_irq(vcpu->kvm, irq);
+			continue;
+		}
+
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
@@ -270,6 +282,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/* GICD_ICPENDR0 SGI bits are WI */
+		if (is_vgic_v2_sgi(vcpu, irq)) {
+			vgic_put_irq(vcpu->kvm, irq);
+			continue;
+		}
+
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (irq->hw)
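Note the reference discipline around the new early `continue`: vgic_get_irq() takes a reference at the top of each iteration, so every exit path from the loop body, including the write-ignored skip, must pair it with vgic_put_irq(). The invariant as a self-contained toy (hypothetical get/put helpers):

#include <stdbool.h>
#include <stddef.h>

struct ref_obj { int refs; bool skip; };

static struct ref_obj *obj_get(struct ref_obj *o) { o->refs++; return o; }
static void obj_put(struct ref_obj *o) { o->refs--; }

static void process_all(struct ref_obj *objs, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct ref_obj *o = obj_get(&objs[i]);

		if (o->skip) {		/* e.g. a write-ignored bit */
			obj_put(o);	/* drop the ref on the skip path too */
			continue;
		}
		/* ... real work under whatever locking applies ... */
		obj_put(o);
	}
}
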
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 96aab77d0471..b00aa304c260 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -184,7 +184,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 		if (vgic_irq_is_sgi(irq->intid)) {
 			u32 src = ffs(irq->source);
 
-			BUG_ON(!src);
+			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+					   irq->intid))
+				return;
+
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
 			if (irq->source) {
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 0c653a1e5215..a4ad431c92a9 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -167,7 +167,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
 			u32 src = ffs(irq->source);
 
-			BUG_ON(!src);
+			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+					   irq->intid))
+				return;
+
 			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 			irq->source &= ~(1 << (src - 1));
 			if (irq->source) {
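Both LR-population paths get the same hardening: an inconsistency that a guest can provoke used to BUG_ON() and bring down the host; it now emits a ratelimited warning (so a guest cannot flood the log either) and returns without populating the list register. The conversion, side by side as it appears in the two hunks above:

/* before: host panics on a guest-reachable condition */
BUG_ON(!src);

/* after: warn at most at the rate limit, then bail out of the LR setup */
if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n", irq->intid))
	return;
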
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 13d4b38a94ec..e7bde65ba67c 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -254,6 +254,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	bool penda, pendb;
 	int ret;
 
+	/*
+	 * list_sort may call this function with the same element when
+	 * the list is fairly long.
+	 */
+	if (unlikely(irqa == irqb))
+		return 0;
+
 	raw_spin_lock(&irqa->irq_lock);
 	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 
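The guard exists because the comparator locks both elements: if list_sort() hands the same node in as both a and b, taking irq_lock twice would self-deadlock, hence the compare-equal return before any locking. A user-space analogue (hypothetical node type; pthread mutexes standing in for the raw spinlocks):

#include <pthread.h>

struct node {
	pthread_mutex_t lock;
	int key;
};

/* sort callback that must lock both elements, like vgic_irq_cmp() */
static int cmp_nodes(struct node *a, struct node *b)
{
	int ret;

	if (a == b)	/* same element: locking a->lock twice would hang */
		return 0;

	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);	/* cf. raw_spin_lock_nested() */
	ret = (a->key > b->key) - (a->key < b->key);
	pthread_mutex_unlock(&b->lock);
	pthread_mutex_unlock(&a->lock);
	return ret;
}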