-rw-r--r--Documentation/RCU/Design/Data-Structures/Data-Structures.html118
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html22
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg123
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg16
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg56
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg237
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg12
-rw-r--r--Documentation/RCU/stallwarn.txt24
-rw-r--r--Documentation/RCU/whatisRCU.txt18
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.txt3
-rw-r--r--Documentation/networking/dpaa2/overview.rst1
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile2
-rw-r--r--arch/arc/Kconfig3
-rw-r--r--arch/arc/include/asm/cache.h4
-rw-r--r--arch/arc/include/asm/delay.h3
-rw-r--r--arch/arc/mm/cache.c17
-rw-r--r--arch/arc/mm/dma.c49
-rw-r--r--arch/arc/plat-eznps/include/plat/ctop.h10
-rw-r--r--arch/arc/plat-eznps/mtm.c6
-rw-r--r--arch/arm/Kconfig19
-rw-r--r--arch/arm/include/asm/efi.h3
-rw-r--r--arch/arm/include/asm/irq.h5
-rw-r--r--arch/arm/include/asm/mach/arch.h2
-rw-r--r--arch/arm/kernel/entry-armv.S10
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/irq.c10
-rw-r--r--arch/arm/kernel/process.c1
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/mach-rpc/ecard.c5
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S150
-rw-r--r--arch/arm64/crypto/ghash-ce-core.S76
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c8
-rw-r--r--arch/arm64/include/asm/efi.h3
-rw-r--r--arch/arm64/include/asm/irq.h2
-rw-r--r--arch/arm64/include/asm/tlb.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c4
-rw-r--r--arch/arm64/kernel/irq.c10
-rw-r--r--arch/arm64/mm/hugetlbpage.c7
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/ia64/include/asm/tlb.h7
-rw-r--r--arch/ia64/mm/init.c4
-rw-r--r--arch/m68k/Kconfig5
-rw-r--r--arch/m68k/apollo/config.c8
-rw-r--r--arch/m68k/atari/config.c5
-rw-r--r--arch/m68k/atari/time.c63
-rw-r--r--arch/m68k/bvme6000/config.c45
-rw-r--r--arch/m68k/configs/amiga_defconfig32
-rw-r--r--arch/m68k/configs/apollo_defconfig30
-rw-r--r--arch/m68k/configs/atari_defconfig29
-rw-r--r--arch/m68k/configs/bvme6000_defconfig30
-rw-r--r--arch/m68k/configs/hp300_defconfig30
-rw-r--r--arch/m68k/configs/mac_defconfig30
-rw-r--r--arch/m68k/configs/multi_defconfig32
-rw-r--r--arch/m68k/configs/mvme147_defconfig30
-rw-r--r--arch/m68k/configs/mvme16x_defconfig30
-rw-r--r--arch/m68k/configs/q40_defconfig30
-rw-r--r--arch/m68k/configs/sun3_defconfig28
-rw-r--r--arch/m68k/configs/sun3x_defconfig30
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/bitops.h8
-rw-r--r--arch/m68k/include/asm/dma-mapping.h12
-rw-r--r--arch/m68k/include/asm/io.h7
-rw-r--r--arch/m68k/include/asm/io_mm.h42
-rw-r--r--arch/m68k/include/asm/io_no.h12
-rw-r--r--arch/m68k/include/asm/kmap.h9
-rw-r--r--arch/m68k/include/asm/machdep.h1
-rw-r--r--arch/m68k/include/asm/macintosh.h1
-rw-r--r--arch/m68k/include/asm/page_no.h2
-rw-r--r--arch/m68k/kernel/dma.c68
-rw-r--r--arch/m68k/kernel/setup_mm.c15
-rw-r--r--arch/m68k/kernel/setup_no.c21
-rw-r--r--arch/m68k/mac/config.c21
-rw-r--r--arch/m68k/mac/misc.c80
-rw-r--r--arch/m68k/mm/init.c1
-rw-r--r--arch/m68k/mm/mcfmmu.c13
-rw-r--r--arch/m68k/mm/motorola.c35
-rw-r--r--arch/m68k/mvme147/config.c7
-rw-r--r--arch/m68k/mvme16x/config.c8
-rw-r--r--arch/m68k/q40/config.c30
-rw-r--r--arch/m68k/sun3/config.c4
-rw-r--r--arch/mips/ath79/common.c2
-rw-r--r--arch/mips/bcm47xx/setup.c6
-rw-r--r--arch/mips/include/asm/mipsregs.h3
-rw-r--r--arch/mips/pci/pci.c2
-rw-r--r--arch/openrisc/Kconfig5
-rw-r--r--arch/openrisc/include/asm/irq.h2
-rw-r--r--arch/openrisc/kernel/irq.c7
-rw-r--r--arch/parisc/Kconfig3
-rw-r--r--arch/parisc/include/asm/barrier.h32
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/pacache.S1
-rw-r--r--arch/parisc/kernel/syscall.S4
-rw-r--r--arch/powerpc/include/asm/mmu_context.h33
-rw-r--r--arch/powerpc/kernel/pci-common.c4
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c29
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c3
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/msi.h32
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/sparc/mm/srmmu.c20
-rw-r--r--arch/x86/boot/compressed/Makefile8
-rw-r--r--arch/x86/boot/compressed/eboot.c545
-rw-r--r--arch/x86/boot/compressed/eboot.h12
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c73
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c12
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c12
-rw-r--r--arch/x86/crypto/morus1280-avx2-glue.c10
-rw-r--r--arch/x86/crypto/morus1280-sse2-glue.c10
-rw-r--r--arch/x86/crypto/morus640-sse2-glue.c10
-rw-r--r--arch/x86/entry/entry_64.S18
-rw-r--r--arch/x86/events/amd/ibs.c6
-rw-r--r--arch/x86/events/intel/core.c3
-rw-r--r--arch/x86/events/intel/ds.c25
-rw-r--r--arch/x86/events/intel/uncore.h2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c10
-rw-r--r--arch/x86/include/asm/qspinlock_paravirt.h2
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c202
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/vmx.c22
-rw-r--r--arch/x86/net/bpf_jit_comp32.c8
-rw-r--r--arch/x86/platform/efi/efi_64.c103
-rw-r--r--arch/x86/platform/efi/quirks.c14
-rw-r--r--arch/x86/um/mem_32.c2
-rw-r--r--block/bio.c54
-rw-r--r--block/blk-core.c5
-rw-r--r--block/blk-mq-tag.c2
-rw-r--r--block/blk-mq.c4
-rw-r--r--drivers/acpi/acpi_lpss.c26
-rw-r--r--drivers/acpi/acpica/psloop.c31
-rw-r--r--drivers/base/dd.c8
-rw-r--r--drivers/block/nbd.c96
-rw-r--r--drivers/block/zram/zram_drv.c15
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/clk/clk-aspeed.c59
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/meson/clk-audio-divider.c2
-rw-r--r--drivers/clk/meson/gxbb.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c38
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c17
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c1
-rw-r--r--drivers/crypto/padlock-aes.c8
-rw-r--r--drivers/firmware/efi/Kconfig12
-rw-r--r--drivers/firmware/efi/cper.c19
-rw-r--r--drivers/firmware/efi/efi.c22
-rw-r--r--drivers/firmware/efi/esrt.c8
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c32
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c31
-rw-r--r--drivers/firmware/efi/libstub/efistub.h3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c202
-rw-r--r--drivers/gpio/gpio-uniphier.c6
-rw-r--r--drivers/gpio/gpiolib-acpi.c56
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c8
-rw-r--r--drivers/gpu/drm/drm_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_display.c21
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c3
-rw-r--r--drivers/i2c/busses/i2c-davinci.c8
-rw-r--r--drivers/i2c/busses/i2c-imx.c5
-rw-r--r--drivers/i2c/busses/i2c-rcar.c54
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c41
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/i2c/i2c-mux.c4
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c59
-rw-r--r--drivers/input/keyboard/hilkbd.c4
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/irqchip/Kconfig16
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c16
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c243
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-ingenic.c1
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c4
-rw-r--r--drivers/media/rc/bpf-lirc.c1
-rw-r--r--drivers/media/rc/rc-ir-raw.c8
-rw-r--r--drivers/media/rc/rc-main.c12
-rw-r--r--drivers/mmc/host/mxcmmc.c3
-rw-r--r--drivers/net/bonding/bond_main.c14
-rw-r--r--drivers/net/bonding/bond_options.c23
-rw-r--r--drivers/net/can/m_can/m_can.c18
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c5
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c19
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/xilinx_can.c392
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c25
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c20
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c1
-rw-r--r--drivers/net/ethernet/amd/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c13
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c80
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c51
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h7
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c40
-rw-r--r--drivers/net/ethernet/ti/cpsw.c25
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c1
-rw-r--r--drivers/net/netdevsim/devlink.c1
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/usb/lan78xx.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/virtio_net.c41
-rw-r--r--drivers/net/vxlan.c126
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c22
-rw-r--r--drivers/net/xen-netfront.c14
-rw-r--r--drivers/nubus/bus.c3
-rw-r--r--drivers/nvme/host/fabrics.c10
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c2
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/target/configfs.c9
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvme/target/fc.c44
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/pci/bus.c6
-rw-r--r--drivers/pci/controller/pcie-mobiveil.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/pci.h11
-rw-r--r--drivers/pci/pcie/err.c2
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/remove.c5
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c4
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c4
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c6
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/libiscsi.c12
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c16
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c53
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c51
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c3
-rw-r--r--drivers/scsi/scsi_error.c14
-rw-r--r--drivers/scsi/sg.c15
-rw-r--r--drivers/scsi/sr.c29
-rw-r--r--drivers/scsi/vmw_pvscsi.c11
-rw-r--r--drivers/staging/android/ashmem.c2
-rw-r--r--drivers/staging/ks7010/ks_hostif.c12
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c3
-rw-r--r--drivers/staging/rtl8188eu/Kconfig1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c161
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c92
-rw-r--r--drivers/staging/speakup/speakup_soft.c6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c16
-rw-r--r--drivers/usb/chipidea/Kconfig9
-rw-r--r--drivers/usb/chipidea/Makefile3
-rw-r--r--drivers/usb/chipidea/ci.h8
-rw-r--r--drivers/usb/chipidea/ulpi.c3
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/gadget.c6
-rw-r--r--drivers/usb/dwc2/hcd.c54
-rw-r--r--drivers/usb/dwc2/hcd_intr.c9
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/gadget/composite.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c24
-rw-r--r--drivers/usb/gadget/function/u_audio.c88
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c11
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c14
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h33
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/host/xhci.c1
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c4
-rw-r--r--drivers/usb/typec/tcpm.c2
-rw-r--r--drivers/vhost/vhost.c9
-rw-r--r--drivers/video/fbdev/efifb.c51
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/cachefiles/bind.c3
-rw-r--r--fs/cachefiles/namei.c3
-rw-r--r--fs/cachefiles/rdwr.c17
-rw-r--r--fs/dcache.c13
-rw-r--r--fs/efivarfs/inode.c4
-rw-r--r--fs/exec.c1
-rw-r--r--fs/ext4/balloc.c3
-rw-r--r--fs/ext4/ialloc.c8
-rw-r--r--fs/ext4/inline.c19
-rw-r--r--fs/ext4/inode.c16
-rw-r--r--fs/ext4/mmp.c7
-rw-r--r--fs/ext4/super.c15
-rw-r--r--fs/fscache/cache.c2
-rw-r--r--fs/fscache/cookie.c7
-rw-r--r--fs/fscache/object.c1
-rw-r--r--fs/fscache/operation.c6
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/iomap.c2
-rw-r--r--fs/jfs/jfs_dinode.h7
-rw-r--r--fs/jfs/jfs_incore.h1
-rw-r--r--fs/jfs/super.c3
-rw-r--r--fs/namespace.c28
-rw-r--r--fs/nfs/nfs4proc.c26
-rw-r--r--fs/squashfs/block.c2
-rw-r--r--fs/squashfs/cache.c3
-rw-r--r--fs/squashfs/file.c58
-rw-r--r--fs/squashfs/file_cache.c4
-rw-r--r--fs/squashfs/file_direct.c24
-rw-r--r--fs/squashfs/fragment.c17
-rw-r--r--fs/squashfs/squashfs.h3
-rw-r--r--fs/squashfs/squashfs_fs.h6
-rw-r--r--fs/squashfs/squashfs_fs_sb.h1
-rw-r--r--fs/squashfs/super.c5
-rw-r--r--fs/userfaultfd.c4
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c5
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c6
-rw-r--r--include/linux/blk-mq.h14
-rw-r--r--include/linux/bpfilter.h6
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/delayacct.h2
-rw-r--r--include/linux/efi.h15
-rw-r--r--include/linux/eventfd.h1
-rw-r--r--include/linux/irqchip/arm-gic-v3.h3
-rw-r--r--include/linux/mlx5/driver.h18
-rw-r--r--include/linux/mm.h17
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/rculist.h19
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rtmutex.h7
-rw-r--r--include/linux/srcu.h17
-rw-r--r--include/linux/torture.h4
-rw-r--r--include/net/af_vsock.h4
-rw-r--r--include/net/cfg80211.h12
-rw-r--r--include/net/ip6_fib.h5
-rw-r--r--include/net/llc.h5
-rw-r--r--include/net/netfilter/nf_tables.h5
-rw-r--r--include/net/tcp.h7
-rw-r--r--include/trace/events/rcu.h112
-rw-r--r--include/uapi/linux/btf.h2
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--init/main.c2
-rw-r--r--ipc/sem.c2
-rw-r--r--ipc/shm.c12
-rw-r--r--kernel/auditsc.c13
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/btf.c30
-rw-r--r--kernel/bpf/cpumap.c15
-rw-r--r--kernel/bpf/devmap.c14
-rw-r--r--kernel/bpf/sockmap.c9
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/events/core.c10
-rw-r--r--kernel/fork.c6
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/irq/irqdesc.c13
-rw-r--r--kernel/irq/manage.c56
-rw-r--r--kernel/irq/proc.c22
-rw-r--r--kernel/kthread.c8
-rw-r--r--kernel/locking/locktorture.c5
-rw-r--r--kernel/locking/rtmutex.c29
-rw-r--r--kernel/memremap.c22
-rw-r--r--kernel/rcu/rcu.h104
-rw-r--r--kernel/rcu/rcuperf.c57
-rw-r--r--kernel/rcu/rcutorture.c462
-rw-r--r--kernel/rcu/srcutree.c39
-rw-r--r--kernel/rcu/tiny.c4
-rw-r--r--kernel/rcu/tree.c1019
-rw-r--r--kernel/rcu/tree.h71
-rw-r--r--kernel/rcu/tree_exp.h14
-rw-r--r--kernel/rcu/tree_plugin.h176
-rw-r--r--kernel/rcu/update.c45
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/torture.c15
-rw-r--r--kernel/trace/ring_buffer.c16
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--kernel/trace/trace_events_trigger.c18
-rw-r--r--kernel/trace/trace_kprobe.c15
-rw-r--r--lib/Kconfig.kasan2
-rw-r--r--lib/Kconfig.ubsan11
-rw-r--r--lib/debugobjects.c10
-rw-r--r--mm/hugetlb.c7
-rw-r--r--mm/memcontrol.c15
-rw-r--r--mm/memory.c9
-rw-r--r--mm/mempolicy.c1
-rw-r--r--mm/mmap.c3
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/shmem.c1
-rw-r--r--mm/zswap.c9
-rw-r--r--net/caif/caif_dev.c4
-rw-r--r--net/core/dev.c17
-rw-r--r--net/core/filter.c12
-rw-r--r--net/core/lwt_bpf.c2
-rw-r--r--net/core/page_pool.c2
-rw-r--r--net/core/rtnetlink.c9
-rw-r--r--net/core/skbuff.c10
-rw-r--r--net/core/sock.c6
-rw-r--r--net/core/xdp.c3
-rw-r--r--net/dccp/ccids/ccid2.c6
-rw-r--r--net/dsa/slave.c10
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/igmp.c6
-rw-r--r--net/ipv4/inet_fragment.c6
-rw-r--r--net/ipv4/ip_fragment.c5
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c7
-rw-r--r--net/ipv4/tcp_bbr.c4
-rw-r--r--net/ipv4/tcp_dctcp.c52
-rw-r--r--net/ipv4/tcp_input.c74
-rw-r--r--net/ipv4/tcp_output.c32
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/datagram.c7
-rw-r--r--net/ipv6/esp6.c4
-rw-r--r--net/ipv6/icmp.c5
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/ip6_vti.c11
-rw-r--r--net/ipv6/mcast.c3
-rw-r--r--net/ipv6/route.c45
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/l2tp/l2tp_ppp.c13
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/mac80211/rx.c5
-rw-r--r--net/mac80211/util.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c8
-rw-r--r--net/netfilter/nf_tables_api.c304
-rw-r--r--net/netfilter/nft_immediate.c3
-rw-r--r--net/netfilter/nft_lookup.c13
-rw-r--r--net/netfilter/nft_set_hash.c1
-rw-r--r--net/netfilter/nft_set_rbtree.c7
-rw-r--r--net/netlink/af_netlink.c7
-rw-r--r--net/openvswitch/meter.c10
-rw-r--r--net/packet/af_packet.c10
-rw-r--r--net/rds/ib_frmr.c5
-rw-r--r--net/rds/ib_mr.h3
-rw-r--r--net/rds/ib_rdma.c21
-rw-r--r--net/rds/rdma.c13
-rw-r--r--net/rds/rds.h5
-rw-r--r--net/rds/send.c12
-rw-r--r--net/rxrpc/ar-internal.h8
-rw-r--r--net/rxrpc/call_accept.c4
-rw-r--r--net/rxrpc/conn_event.c4
-rw-r--r--net/rxrpc/net_ns.c6
-rw-r--r--net/rxrpc/output.c12
-rw-r--r--net/rxrpc/peer_event.c156
-rw-r--r--net/rxrpc/peer_object.c8
-rw-r--r--net/rxrpc/rxkad.c4
-rw-r--r--net/smc/af_smc.c15
-rw-r--r--net/smc/smc_cdc.c3
-rw-r--r--net/socket.c5
-rw-r--r--net/tipc/net.c4
-rw-r--r--net/tls/tls_sw.c3
-rw-r--r--net/vmw_vsock/af_vsock.c15
-rw-r--r--net/vmw_vsock/vmci_transport.c3
-rw-r--r--net/wireless/nl80211.c25
-rw-r--r--net/wireless/reg.c28
-rw-r--r--net/wireless/trace.h18
-rw-r--r--net/xdp/xsk.c4
-rw-r--r--net/xdp/xsk_queue.h2
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--net/xfrm/xfrm_user.c18
-rw-r--r--samples/bpf/xdp_redirect_cpu_kern.c2
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c4
-rw-r--r--scripts/Makefile.ubsan4
-rw-r--r--tools/arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/x86/include/asm/mcsafe_test.h13
-rw-r--r--tools/arch/x86/lib/memcpy_64.S112
-rw-r--r--tools/bpf/bpftool/common.c11
-rw-r--r--tools/bpf/bpftool/map.c14
-rw-r--r--tools/include/uapi/linux/bpf.h28
-rw-r--r--tools/include/uapi/linux/btf.h2
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/lib/bpf/btf.c41
-rw-r--r--tools/lib/bpf/btf.h12
-rw-r--r--tools/lib/bpf/libbpf.c87
-rw-r--r--tools/lib/bpf/libbpf.h4
-rw-r--r--tools/perf/arch/x86/util/pmu.c1
-rw-r--r--tools/perf/arch/x86/util/tsc.c1
-rw-r--r--tools/perf/bench/Build1
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-asm.S1
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-lib.c24
-rw-r--r--tools/perf/perf.h1
-rw-r--r--tools/perf/util/header.h1
-rw-r--r--tools/perf/util/namespaces.h1
-rw-r--r--tools/power/x86/turbostat/turbostat.84
-rw-r--r--tools/power/x86/turbostat/turbostat.c120
-rw-r--r--tools/testing/selftests/bpf/Makefile2
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h9
-rw-r--r--tools/testing/selftests/bpf/test_btf.c114
-rw-r--r--tools/testing/selftests/bpf/test_btf_haskv.c7
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_seg6local.sh6
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c2
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c40
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc28
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh26
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh11
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh1
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh7
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh2
-rw-r--r--tools/usb/ffs-test.c19
-rw-r--r--tools/virtio/asm/barrier.h4
-rw-r--r--tools/virtio/linux/kernel.h5
562 files changed, 7161 insertions, 4744 deletions
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 6c06e10bd04b..f5120a00f511 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -380,31 +380,26 @@ and therefore need no protection.
 as follows:
 
 <pre>
-  1 unsigned long gpnum;
-  2 unsigned long completed;
+  1 unsigned long gp_seq;
 </pre>
 
 <p>RCU grace periods are numbered, and
-the <tt>-&gt;gpnum</tt> field contains the number of the grace
-period that started most recently.
-The <tt>-&gt;completed</tt> field contains the number of the
-grace period that completed most recently.
-If the two fields are equal, the RCU grace period that most recently
-started has already completed, and therefore the corresponding
-flavor of RCU is idle.
-If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
-then <tt>-&gt;gpnum</tt> gives the number of the current RCU
-grace period, which has not yet completed.
-Any other combination of values indicates that something is broken.
-These two fields are protected by the root <tt>rcu_node</tt>'s
+the <tt>-&gt;gp_seq</tt> field contains the current grace-period
+sequence number.
+The bottom two bits are the state of the current grace period,
+which can be zero for not yet started or one for in progress.
+In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
+zero, the corresponding flavor of RCU is idle.
+Any other value in the bottom two bits indicates that something is broken.
+This field is protected by the root <tt>rcu_node</tt> structure's
 <tt>-&gt;lock</tt> field.
 
-</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+</p><p>There are <tt>-&gt;gp_seq</tt> fields
 in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
 as well.
 The fields in the <tt>rcu_state</tt> structure represent the
-most current values, and those of the other structures are compared
-in order to detect the start of a new grace period in a distributed
+most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a distributed
 fashion.
 The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
 (down the tree from the root to the leaves) to <tt>rcu_data</tt>.
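
Aside: the state bits described in the new text are manipulated by small
helpers in kernel/rcu/rcu.h (also part of this series; see the diffstat
above). A minimal sketch of the encoding, with the real helpers' memory
barriers and WARN_ON_ONCE() sanity checks omitted:

    /* Sketch: the grace-period counter proper lives in the upper bits   */
    /* of ->gp_seq; the bottom two bits hold the grace-period state.     */
    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)

    static inline unsigned long rcu_seq_state(unsigned long s)
    {
            return s & RCU_SEQ_STATE_MASK;  /* 0: idle, 1: in progress. */
    }

    static inline void rcu_seq_start(unsigned long *sp)
    {
            *sp = *sp + 1;          /* State goes 0 -> 1: GP has started. */
    }

    static inline void rcu_seq_end(unsigned long *sp)
    {
            /* State back to 0, counter portion advanced by one. */
            *sp = (*sp | RCU_SEQ_STATE_MASK) + 1;
    }
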
@@ -512,27 +507,47 @@ than to be heisenbugged out of existence.
 as follows:
 
 <pre>
-  1 unsigned long gpnum;
-  2 unsigned long completed;
+  1 unsigned long gp_seq;
+  2 unsigned long gp_seq_needed;
 </pre>
 
-<p>These fields are the counterparts of the fields of the same name in
-the <tt>rcu_state</tt> structure.
-They each may lag up to one behind their <tt>rcu_state</tt>
-counterparts.
-If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_node</tt>
+<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
+the counterparts of the field of the same name in the <tt>rcu_state</tt>
+structure.
+They each may lag up to one step behind their <tt>rcu_state</tt>
+counterpart.
+If the bottom two bits of a given <tt>rcu_node</tt> structure's
+<tt>-&gt;gp_seq</tt> field is zero, then this <tt>rcu_node</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_node</tt> believes
-is still being waited for.
+</p><p>The <tt>&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning and the end
+of each grace period.
+
+<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
+furthest-in-the-future grace period request seen by the corresponding
+<tt>rcu_node</tt> structure.  The request is considered fulfilled when
+the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
+the <tt>-&gt;gp_seq_needed</tt> field.
 
-</p><p>The <tt>&gt;gpnum</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning
-of each grace period, and the <tt>-&gt;completed</tt> fields are
-updated at the end of each grace period.
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+	Suppose that this <tt>rcu_node</tt> structure doesn't see
+	a request for a very long time.
+	Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
+	problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+	No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
+	<tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
+	will be updated at the end of the grace period.
+	Modulo-arithmetic comparisons therefore will always get the
+	correct answer, even with wrapping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h5>Quiescent-State Tracking</h5>
 
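
Aside: the modulo-arithmetic comparisons mentioned in the answer are done
with wrap-safe helpers along the lines of the kernel's ULONG_CMP_GE() and
ULONG_CMP_LT() macros; rcu_seq_done_sketch() below is a hypothetical
condensation of the ->gp_seq_needed fulfillment test described above:

    /* a >= b in modular arithmetic: true as long as the two values are  */
    /* within half the counter space of each other, even across a wrap.  */
    #define ULONG_CMP_GE(a, b)  (ULONG_MAX / 2 >= (a) - (b))
    #define ULONG_CMP_LT(a, b)  (ULONG_MAX / 2 < (a) - (b))

    /* Sketch: a recorded grace-period request is fulfilled once the     */
    /* current sequence number equals or exceeds the requested one.      */
    static inline bool rcu_seq_done_sketch(unsigned long gp_seq,
                                           unsigned long gp_seq_needed)
    {
            return ULONG_CMP_GE(gp_seq, gp_seq_needed);
    }
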
@@ -626,9 +641,8 @@ normal and expedited grace periods, respectively.
 	</ol>
 
 	<p><font color="ffffff">So the locking is absolutely required in
-	order to coordinate
-	clearing of the bits with the grace-period numbers in
-	<tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+	order to coordinate clearing of the bits with updating of the
+	grace-period sequence number in <tt>-&gt;gp_seq</tt>.
 </font></td></tr>
 <tr><td>&nbsp;</td></tr>
 </table>
@@ -1038,15 +1052,15 @@ out any <tt>rcu_data</tt> structure for which this flag is not set.
 as follows:
 
 <pre>
-  1 unsigned long completed;
-  2 unsigned long gpnum;
+  1 unsigned long gp_seq;
+  2 unsigned long gp_seq_needed;
   3 bool cpu_no_qs;
   4 bool core_needs_qs;
   5 bool gpwrap;
   6 unsigned long rcu_qs_ctr_snap;
 </pre>
 
-<p>The <tt>completed</tt> and <tt>gpnum</tt>
+<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
 fields are the counterparts of the fields of the same name
 in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
 They may each lag up to one behind their <tt>rcu_node</tt>
@@ -1054,15 +1068,9 @@ counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
 <tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
 arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
 will catch up upon exit from dyntick-idle mode).
-If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_data</tt>
+If the lower two bits of a given <tt>rcu_data</tt> structure's
+<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
-structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_data</tt> believes
-is still being waited for.
 
 <table>
 <tr><th>&nbsp;</th></tr>
@@ -1070,13 +1078,13 @@ is still being waited for.
 <tr><td>
 	All this replication of the grace period numbers can only cause
 	massive confusion.
-	Why not just keep a global pair of counters and be done with it???
+	Why not just keep a global sequence number and be done with it???
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
 <tr><td bgcolor="#ffffff"><font color="ffffff">
-	Because if there was only a single global pair of grace-period
+	Because if there was only a single global sequence
 	numbers, there would need to be a single global lock to allow
-	safely accessing and updating them.
+	safely accessing and updating it.
 	And if we are not going to have a single global lock, we need
 	to carefully manage the numbers on a per-node basis.
 	Recall from the answer to a previous Quick Quiz that the consequences
@@ -1091,8 +1099,8 @@ CPU has not yet passed through a quiescent state,
 while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
 RCU core needs a quiescent state from the corresponding CPU.
 The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the <tt>completed</tt>
-and <tt>gpnum</tt> counters are in danger of overflow, which
+CPU has remained idle for so long that the
+<tt>gp_seq</tt> counter is in danger of overflow, which
 will cause the CPU to disregard the values of its counters on
 its next exit from idle.
 Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
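
Aside: a hedged sketch of how the ->gpwrap flag might be set (the actual
check lives in kernel/rcu/tree.c): if a CPU's snapshot has fallen far
behind its leaf rcu_node structure's ->gp_seq value, the snapshot can no
longer be compared safely and must be discarded:

    /* Sketch: mark this CPU's snapshot unusable when wrap threatens.    */
    /* The quarter-of-counter-space threshold is an assumption here.     */
    static void rcu_gpnum_ovf_sketch(struct rcu_node *rnp, struct rcu_data *rdp)
    {
            if (ULONG_CMP_LT(rdp->gp_seq + ULONG_MAX / 4, rnp->gp_seq))
                    rdp->gpwrap = true;  /* Resync on next exit from idle. */
    }
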
@@ -1130,10 +1138,10 @@ The CPU advances the callbacks in its <tt>rcu_data</tt> structure
 whenever it notices that another RCU grace period has completed.
 The CPU detects the completion of an RCU grace period by noticing
 that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>-&gt;gp_seq</tt> field differs from that of its leaf
 <tt>rcu_node</tt> structure.
 Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field is updated at the end of each
+<tt>-&gt;gp_seq</tt> field is updated at the beginnings and ends of each
 grace period.
 
 <p>
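
Aside: in code terms, the detection described above reduces to a snapshot
comparison; rcu_gp_activity_sketch() is a hypothetical condensation:

    /* Sketch: a CPU notices grace-period starts and/or ends by finding  */
    /* that its own snapshot no longer matches its leaf rcu_node.        */
    static bool rcu_gp_activity_sketch(struct rcu_node *rnp, struct rcu_data *rdp)
    {
            return rdp->gp_seq != rnp->gp_seq;  /* Time to advance callbacks. */
    }
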
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8651b0b4fd79..a346ce0116eb 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -357,7 +357,7 @@ parts, starting in this section with the various phases of
 grace-period initialization.
 
 <p>The first ordering-related grace-period initialization action is to
-increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
 grace-period-number counter, as shown below:
 
 </p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -388,7 +388,7 @@ its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
 
 <p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
 tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
 <tt>rcu_state</tt> structure, as shown in the following diagram.
 
 </p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -398,9 +398,9 @@ tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
 to notice that a new grace period has started, as described in the next
 section.
 But because the grace-period kthread started the grace period at the
-root (with the increment of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+root (with the advancing of the <tt>rcu_state</tt> structure's
+<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
 the start of the grace period will happen after the actual start
 of the grace period.
 
@@ -466,7 +466,7 @@ section that the grace period must wait on.
 <tr><td>
 	But a RCU read-side critical section might have started
 	after the beginning of the grace period
-	(the <tt>-&gt;gpnum++</tt> from earlier), so why should
+	(the advancing of <tt>-&gt;gp_seq</tt> from earlier), so why should
 	the grace period wait on such a critical section?
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
@@ -609,10 +609,8 @@ states outstanding from other CPUs.
 <h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
 
 <p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first setting all the <tt>-&gt;completed</tt> fields equal
-to the number of the newly completed grace period, then it sets
-the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
-again to the number of the newly completed grace period.
+breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
+advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
 The ordering effects are shown below:
 
 </p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
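
Aside: in outline, and matching the rcu_seq_end() labels in the updated
diagram below, the cleanup pass looks something like this sketch
(simplified from rcu_gp_cleanup(); the ordering annotations are omitted):

    /* Sketch: advance every rcu_node's ->gp_seq breadth-first, root     */
    /* towards the leaves, then advance the rcu_state's ->gp_seq.        */
    rcu_for_each_node_breadth_first(rsp, rnp) {
            raw_spin_lock_irq_rcu_node(rnp);
            rcu_seq_end(&rnp->gp_seq);
            raw_spin_unlock_irq_rcu_node(rnp);
    }
    rcu_seq_end(&rsp->gp_seq);
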
@@ -634,7 +632,7 @@ grace-period cleanup is complete, the next grace period can begin.
 	CPU has reported its quiescent state, but it may be some
 	milliseconds before RCU becomes aware of this.
 	The latest reasonable candidate is once the <tt>rcu_state</tt>
-	structure's <tt>-&gt;completed</tt> field has been updated,
+	structure's <tt>-&gt;gp_seq</tt> field has been updated,
 	but it is quite possible that some CPUs have already completed
 	phase two of their updates by that time.
 	In short, if you are going to work with RCU, you need to
@@ -647,7 +645,7 @@ grace-period cleanup is complete, the next grace period can begin.
 <h4><a name="Callback Invocation">Callback Invocation</a></h4>
 
 <p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
 invoking its RCU callbacks that were waiting for this grace period
 to end.
 These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
index 754f426b297a..bf84fbab27ee 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
@@ -384,11 +384,11 @@
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89017"
-     inkscape:cy="542.52419"
-     inkscape:window-x="86"
-     inkscape:window-y="28"
+     inkscape:zoom="0.78716603"
+     inkscape:cx="513.06403"
+     inkscape:cy="623.1214"
+     inkscape:window-x="102"
+     inkscape:window-y="38"
      inkscape:window-maximized="0"
      inkscape:current-layer="g3188-3"
      fit-margin-top="5"
@@ -417,13 +417,15 @@
        id="g3188">
     <text
        xml:space="preserve"
-       x="3199.1516"
+       x="3145.9592"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3143">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
@@ -502,13 +504,15 @@
     </g>
     <text
        xml:space="preserve"
-       x="5324.5371"
-       y="15414.598"
+       x="5264.4731"
+       y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-753"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-7"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
       style="fill:none;stroke-width:0.025in"
@@ -547,15 +551,6 @@
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-6-0">Leaf</tspan></text>
-    <text
-       xml:space="preserve"
-       x="7479.5796"
-       y="17699.943"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-9"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <path
        sodipodi:nodetypes="cc"
        inkscape:connector-curvature="0"
@@ -566,15 +561,6 @@
      style="fill:none;stroke-width:0.025in"
      transform="translate(-737.93887,7732.6672)"
      id="g3188-3">
-    <text
-       xml:space="preserve"
-       x="3225.7478"
-       y="13175.802"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
     <g
        id="g3107-62"
        transform="translate(947.90548,11584.029)">
@@ -607,15 +593,6 @@
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-7">Root</tspan></text>
-    <text
-       xml:space="preserve"
-       x="3225.7478"
-       y="13390.038"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60-3"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
     <flowRoot
        xml:space="preserve"
        id="flowRoot3356"
@@ -627,7 +604,18 @@
          height="63.63961"
          x="332.34018"
          y="681.87292" /></flowRegion><flowPara
-       id="flowPara3362" /></flowRoot> </g>
+       id="flowPara3362" /></flowRoot> <text
+       xml:space="preserve"
+       x="3156.6121"
+       y="13317.754"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-6"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+    </g>
     <g
       style="fill:none;stroke-width:0.025in"
       transform="translate(-858.40227,7769.0342)"
@@ -859,6 +847,17 @@
        id="path3414-8-3-6-6"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       x="7418.769"
+       y="17646.104"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-70"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-93">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-1642.5377,-11611.245)"
@@ -887,13 +886,15 @@
     </g>
     <text
        xml:space="preserve"
-       x="5327.3057"
+       x="5274.1133"
        y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-36"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
   <g
      transform="translate(-151.71746,-11647.612)"
@@ -972,13 +973,15 @@
          id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7486.4907"
-       y="17670.119"
+       x="7408.5918"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-6"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-2"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
   <g
      transform="translate(-6817.1997,-11647.612)"
@@ -1019,13 +1022,15 @@
          id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7474.1382"
-       y="17688.926"
+       x="7416.8003"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-5"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-3"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-56">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <path
       style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -1059,15 +1064,6 @@
        id="path3414-8-3-6"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       x="7318.9653"
-       y="6031.6353"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-2"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <g
        style="fill:none;stroke-width:0.025in"
        id="g4504-3-9"
@@ -1123,4 +1119,15 @@
      id="path3134-9-0-3-5"
      d="m 6875.6003,15833.906 1595.7755,0"
      style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+  <text
+     xml:space="preserve"
+     x="7275.2612"
+     y="5971.8916"
+     font-style="normal"
+     font-weight="bold"
+     font-size="192"
+     id="text202-36-1"
+     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+       style="font-size:172.87567139px"
+       id="tspan3166-2">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
 </svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
index 0161262904ec..8c207550818f 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
@@ -272,13 +272,13 @@
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89019"
-     inkscape:cy="636.57143"
-     inkscape:window-x="697"
+     inkscape:zoom="2.6330492"
+     inkscape:cx="524.82797"
+     inkscape:cy="519.31194"
+     inkscape:window-x="79"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3188"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
@@ -305,13 +305,15 @@
        id="g3188">
     <text
        xml:space="preserve"
-       x="3305.5364"
+       x="3119.363"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
index de6ecc51b00e..d24d7d555dbc 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
@@ -19,7 +19,7 @@
    id="svg2"
    version="1.1"
    inkscape:version="0.48.4 r9939"
-   sodipodi:docname="TreeRCU-gp-init-2.svg">
+   sodipodi:docname="TreeRCU-gp-init-3.svg">
   <metadata
      id="metadata212">
     <rdf:RDF>
@@ -257,18 +257,22 @@
      inkscape:window-width="1087"
      inkscape:window-height="1144"
      id="namedview208"
-     showgrid="false"
-     inkscape:zoom="0.70710678"
+     showgrid="true"
+     inkscape:zoom="0.68224756"
      inkscape:cx="617.89019"
      inkscape:cy="625.84293"
-     inkscape:window-x="697"
+     inkscape:window-x="54"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3153"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
-     fit-margin-bottom="5" />
+     fit-margin-bottom="5">
+    <inkscape:grid
+       type="xygrid"
+       id="grid3090" />
+  </sodipodi:namedview>
   <path
      sodipodi:nodetypes="cccccccccccccccccccccccc"
      inkscape:connector-curvature="0"
@@ -281,13 +285,13 @@
281 id="g3188"> 285 id="g3188">
282 <text 286 <text
283 xml:space="preserve" 287 xml:space="preserve"
284 x="3305.5364" 288 x="3145.9592"
285 y="13255.592" 289 y="13255.592"
286 font-style="normal" 290 font-style="normal"
287 font-weight="bold" 291 font-weight="bold"
288 font-size="192" 292 font-size="192"
289 id="text202" 293 id="text202"
290 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 294 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
291 <g 295 <g
292 id="g3107" 296 id="g3107"
293 transform="translate(947.90548,11584.029)"> 297 transform="translate(947.90548,11584.029)">
@@ -366,13 +370,13 @@
366 </g> 370 </g>
367 <text 371 <text
368 xml:space="preserve" 372 xml:space="preserve"
369 x="5392.3345" 373 x="5253.6904"
370 y="15407.104" 374 y="15407.032"
371 font-style="normal" 375 font-style="normal"
372 font-weight="bold" 376 font-weight="bold"
373 font-size="192" 377 font-size="192"
374 id="text202-6" 378 id="text202-6"
375 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 379 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
376 </g> 380 </g>
377 <g 381 <g
378 style="fill:none;stroke-width:0.025in" 382 style="fill:none;stroke-width:0.025in"
@@ -413,13 +417,13 @@
413 id="tspan3104-6-5-6-0">Leaf</tspan></text> 417 id="tspan3104-6-5-6-0">Leaf</tspan></text>
414 <text 418 <text
415 xml:space="preserve" 419 xml:space="preserve"
416 x="7536.4883" 420 x="7415.4365"
417 y="17640.934" 421 y="17670.572"
418 font-style="normal" 422 font-style="normal"
419 font-weight="bold" 423 font-weight="bold"
420 font-size="192" 424 font-size="192"
421 id="text202-9" 425 id="text202-9"
422 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 426 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
423 </g> 427 </g>
424 <g 428 <g
425 transform="translate(-1642.5375,-11610.962)" 429 transform="translate(-1642.5375,-11610.962)"
@@ -448,13 +452,13 @@
448 </g> 452 </g>
449 <text 453 <text
450 xml:space="preserve" 454 xml:space="preserve"
451 x="5378.4146" 455 x="5258.0688"
452 y="15436.927" 456 y="15412.313"
453 font-style="normal" 457 font-style="normal"
454 font-weight="bold" 458 font-weight="bold"
455 font-size="192" 459 font-size="192"
456 id="text202-3" 460 id="text202-3"
457 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 461 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
458 </g> 462 </g>
459 <g 463 <g
460 transform="translate(-151.71726,-11647.329)" 464 transform="translate(-151.71726,-11647.329)"
@@ -533,13 +537,13 @@
533 id="tspan3104-6-5-6-0-92">Leaf</tspan></text> 537 id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
534 <text 538 <text
535 xml:space="preserve" 539 xml:space="preserve"
536 x="7520.1294" 540 x="7405.2607"
537 y="17673.639" 541 y="17670.572"
538 font-style="normal" 542 font-style="normal"
539 font-weight="bold" 543 font-weight="bold"
540 font-size="192" 544 font-size="192"
541 id="text202-35" 545 id="text202-35"
542 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 546 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
543 </g> 547 </g>
544 <g 548 <g
545 transform="translate(-6817.1998,-11647.329)" 549 transform="translate(-6817.1998,-11647.329)"
@@ -580,13 +584,13 @@
580 id="tspan3104-6-5-6-0-1">Leaf</tspan></text> 584 id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
581 <text 585 <text
582 xml:space="preserve" 586 xml:space="preserve"
583 x="7521.4663" 587 x="7413.4688"
584 y="17666.062" 588 y="17670.566"
585 font-style="normal" 589 font-style="normal"
586 font-weight="bold" 590 font-weight="bold"
587 font-size="192" 591 font-size="192"
588 id="text202-75" 592 id="text202-75"
589 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 593 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
590 </g> 594 </g>
591 <path 595 <path
592 style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)" 596 style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -622,11 +626,11 @@
622 sodipodi:nodetypes="cc" /> 626 sodipodi:nodetypes="cc" />
623 <text 627 <text
624 xml:space="preserve" 628 xml:space="preserve"
625 x="7370.856" 629 x="7271.9297"
626 y="5997.5972" 630 y="6023.2412"
627 font-style="normal" 631 font-style="normal"
628 font-weight="bold" 632 font-weight="bold"
629 font-size="192" 633 font-size="192"
630 id="text202-62" 634 id="text202-62"
631 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 635 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
632</svg> 636</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index b13b7b01bb3a..acd73c7ad0f4 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
@@ -1070,13 +1070,13 @@
1070 inkscape:window-height="1144" 1070 inkscape:window-height="1144"
1071 id="namedview208" 1071 id="namedview208"
1072 showgrid="true" 1072 showgrid="true"
1073 inkscape:zoom="0.6004608" 1073 inkscape:zoom="0.81932583"
1074 inkscape:cx="826.65969" 1074 inkscape:cx="840.45848"
1075 inkscape:cy="483.3047" 1075 inkscape:cy="5052.4242"
1076 inkscape:window-x="66" 1076 inkscape:window-x="787"
1077 inkscape:window-y="28" 1077 inkscape:window-y="24"
1078 inkscape:window-maximized="0" 1078 inkscape:window-maximized="0"
1079 inkscape:current-layer="svg2" 1079 inkscape:current-layer="g4"
1080 fit-margin-top="5" 1080 fit-margin-top="5"
1081 fit-margin-right="5" 1081 fit-margin-right="5"
1082 fit-margin-left="5" 1082 fit-margin-left="5"
@@ -1543,15 +1543,6 @@
1543 style="fill:none;stroke-width:0.025in" 1543 style="fill:none;stroke-width:0.025in"
1544 transform="translate(1749.0282,658.72243)" 1544 transform="translate(1749.0282,658.72243)"
1545 id="g3188"> 1545 id="g3188">
1546 <text
1547 xml:space="preserve"
1548 x="3305.5364"
1549 y="13255.592"
1550 font-style="normal"
1551 font-weight="bold"
1552 font-size="192"
1553 id="text202-5"
1554 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
1555 <g 1546 <g
1556 id="g3107-62" 1547 id="g3107-62"
1557 transform="translate(947.90548,11584.029)"> 1548 transform="translate(947.90548,11584.029)">
@@ -1584,6 +1575,17 @@
1584 sodipodi:linespacing="125%"><tspan 1575 sodipodi:linespacing="125%"><tspan
1585 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans" 1576 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
1586 id="tspan3104-6-5-7">Root</tspan></text> 1577 id="tspan3104-6-5-7">Root</tspan></text>
1578 <text
1579 xml:space="preserve"
1580 x="3137.9988"
1581 y="13271.316"
1582 font-style="normal"
1583 font-weight="bold"
1584 font-size="192"
1585 id="text202-626"
1586 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
1587 style="font-size:172.87567139px"
1588 id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
1587 </g> 1589 </g>
1588 <rect 1590 <rect
1589 ry="0" 1591 ry="0"
@@ -2318,15 +2320,6 @@
2318 style="fill:none;stroke-width:0.025in" 2320 style="fill:none;stroke-width:0.025in"
2319 transform="translate(1739.0986,17188.625)" 2321 transform="translate(1739.0986,17188.625)"
2320 id="g3188-6"> 2322 id="g3188-6">
2321 <text
2322 xml:space="preserve"
2323 x="3305.5364"
2324 y="13255.592"
2325 font-style="normal"
2326 font-weight="bold"
2327 font-size="192"
2328 id="text202-1"
2329 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
2330 <g 2323 <g
2331 id="g3107-5" 2324 id="g3107-5"
2332 transform="translate(947.90548,11584.029)"> 2325 transform="translate(947.90548,11584.029)">
@@ -2359,6 +2352,15 @@
2359 sodipodi:linespacing="125%"><tspan 2352 sodipodi:linespacing="125%"><tspan
2360 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans" 2353 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
2361 id="tspan3104-6-5-1">Root</tspan></text> 2354 id="tspan3104-6-5-1">Root</tspan></text>
2355 <text
2356 xml:space="preserve"
2357 x="3147.9268"
2358 y="13240.524"
2359 font-style="normal"
2360 font-weight="bold"
2361 font-size="192"
2362 id="text202-1"
2363 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
2362 </g> 2364 </g>
2363 <g 2365 <g
2364 style="fill:none;stroke-width:0.025in" 2366 style="fill:none;stroke-width:0.025in"
@@ -2387,13 +2389,13 @@
2387 </g> 2389 </g>
2388 <text 2390 <text
2389 xml:space="preserve" 2391 xml:space="preserve"
2390 x="5392.3345" 2392 x="5263.1094"
2391 y="15407.104" 2393 y="15411.646"
2392 font-style="normal" 2394 font-style="normal"
2393 font-weight="bold" 2395 font-weight="bold"
2394 font-size="192" 2396 font-size="192"
2395 id="text202-6-7" 2397 id="text202-92"
2396 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 2398 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
2397 </g> 2399 </g>
2398 <g 2400 <g
2399 style="fill:none;stroke-width:0.025in" 2401 style="fill:none;stroke-width:0.025in"
@@ -2434,13 +2436,13 @@
2434 id="tspan3104-6-5-6-0-94">Leaf</tspan></text> 2436 id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
2435 <text 2437 <text
2436 xml:space="preserve" 2438 xml:space="preserve"
2437 x="7536.4883" 2439 x="7417.4053"
2438 y="17640.934" 2440 y="17655.502"
2439 font-style="normal" 2441 font-style="normal"
2440 font-weight="bold" 2442 font-weight="bold"
2441 font-size="192" 2443 font-size="192"
2442 id="text202-9" 2444 id="text202-759"
2443 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 2445 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
2444 </g> 2446 </g>
2445 <g 2447 <g
2446 transform="translate(-2353.8462,17224.992)" 2448 transform="translate(-2353.8462,17224.992)"
@@ -2469,13 +2471,13 @@
2469 </g> 2471 </g>
2470 <text 2472 <text
2471 xml:space="preserve" 2473 xml:space="preserve"
2472 x="5378.4146" 2474 x="5246.1548"
2473 y="15436.927" 2475 y="15411.648"
2474 font-style="normal" 2476 font-style="normal"
2475 font-weight="bold" 2477 font-weight="bold"
2476 font-size="192" 2478 font-size="192"
2477 id="text202-3" 2479 id="text202-87"
2478 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 2480 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
2479 </g> 2481 </g>
2480 <g 2482 <g
2481 transform="translate(-863.02613,17188.625)" 2483 transform="translate(-863.02613,17188.625)"
@@ -2554,13 +2556,13 @@
2554 id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text> 2556 id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
2555 <text 2557 <text
2556 xml:space="preserve" 2558 xml:space="preserve"
2557 x="7520.1294" 2559 x="7433.8257"
2558 y="17673.639" 2560 y="17682.098"
2559 font-style="normal" 2561 font-style="normal"
2560 font-weight="bold" 2562 font-weight="bold"
2561 font-size="192" 2563 font-size="192"
2562 id="text202-35" 2564 id="text202-2"
2563 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 2565 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
2564 </g> 2566 </g>
2565 <g 2567 <g
2566 transform="translate(-7528.5085,17188.625)" 2568 transform="translate(-7528.5085,17188.625)"
@@ -2601,13 +2603,13 @@
2601 id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text> 2603 id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
2602 <text 2604 <text
2603 xml:space="preserve" 2605 xml:space="preserve"
2604 x="7521.4663" 2606 x="7415.4404"
2605 y="17666.062" 2607 y="17682.098"
2606 font-style="normal" 2608 font-style="normal"
2607 font-weight="bold" 2609 font-weight="bold"
2608 font-size="192" 2610 font-size="192"
2609 id="text202-75-1" 2611 id="text202-0"
2610 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text> 2612 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
2611 </g> 2613 </g>
2612 <path 2614 <path
2613 style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)" 2615 style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -2641,15 +2643,6 @@
2641 id="path3414-8-3-6-4" 2643 id="path3414-8-3-6-4"
2642 inkscape:connector-curvature="0" 2644 inkscape:connector-curvature="0"
2643 sodipodi:nodetypes="cc" /> 2645 sodipodi:nodetypes="cc" />
2644 <text
2645 xml:space="preserve"
2646 x="6659.5469"
2647 y="34833.551"
2648 font-style="normal"
2649 font-weight="bold"
2650 font-size="192"
2651 id="text202-62"
2652 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
2653 <path 2646 <path
2654 sodipodi:nodetypes="ccc" 2647 sodipodi:nodetypes="ccc"
2655 inkscape:connector-curvature="0" 2648 inkscape:connector-curvature="0"
@@ -3844,7 +3837,7 @@
3844 font-weight="bold" 3837 font-weight="bold"
3845 font-size="192" 3838 font-size="192"
3846 id="text202-6-6-5" 3839 id="text202-6-6-5"
3847 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text> 3840 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
3848 <text 3841 <text
3849 xml:space="preserve" 3842 xml:space="preserve"
3850 x="5035.4155" 3843 x="5035.4155"
@@ -4284,15 +4277,6 @@
4284 style="fill:none;stroke-width:0.025in" 4277 style="fill:none;stroke-width:0.025in"
4285 transform="translate(1874.038,53203.538)" 4278 transform="translate(1874.038,53203.538)"
4286 id="g3188-7"> 4279 id="g3188-7">
4287 <text
4288 xml:space="preserve"
4289 x="3199.1516"
4290 y="13255.592"
4291 font-style="normal"
4292 font-weight="bold"
4293 font-size="192"
4294 id="text202-82"
4295 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
4296 <g 4280 <g
4297 id="g3107-53" 4281 id="g3107-53"
4298 transform="translate(947.90548,11584.029)"> 4282 transform="translate(947.90548,11584.029)">
@@ -4325,6 +4309,17 @@
4325 sodipodi:linespacing="125%"><tspan 4309 sodipodi:linespacing="125%"><tspan
4326 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans" 4310 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
4327 id="tspan3104-6-5-19">Root</tspan></text> 4311 id="tspan3104-6-5-19">Root</tspan></text>
4312 <text
4313 xml:space="preserve"
4314 x="3175.896"
4315 y="13240.11"
4316 font-style="normal"
4317 font-weight="bold"
4318 font-size="192"
4319 id="text202-36-3"
4320 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
4321 style="font-size:172.87567139px"
4322 id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
4328 </g> 4323 </g>
4329 <rect 4324 <rect
4330 ry="0" 4325 ry="0"
@@ -4371,13 +4366,15 @@
4371 </g> 4366 </g>
4372 <text 4367 <text
4373 xml:space="preserve" 4368 xml:space="preserve"
4374 x="5324.5371" 4369 x="5264.4829"
4375 y="15414.598" 4370 y="15411.231"
4376 font-style="normal" 4371 font-style="normal"
4377 font-weight="bold" 4372 font-weight="bold"
4378 font-size="192" 4373 font-size="192"
4379 id="text202-753" 4374 id="text202-36-7"
4380 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text> 4375 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
4376 style="font-size:172.87567139px"
4377 id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
4381 </g> 4378 </g>
4382 <g 4379 <g
4383 style="fill:none;stroke-width:0.025in" 4380 style="fill:none;stroke-width:0.025in"
@@ -4412,30 +4409,12 @@
4412 sodipodi:linespacing="125%"><tspan 4409 sodipodi:linespacing="125%"><tspan
4413 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans" 4410 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
4414 id="tspan3104-6-5-6-0-4">Leaf</tspan></text> 4411 id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
4415 <text
4416 xml:space="preserve"
4417 x="10084.225"
4418 y="70903.312"
4419 font-style="normal"
4420 font-weight="bold"
4421 font-size="192"
4422 id="text202-9-0"
4423 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
4424 <path 4412 <path
4425 sodipodi:nodetypes="ccc" 4413 sodipodi:nodetypes="ccc"
4426 inkscape:connector-curvature="0" 4414 inkscape:connector-curvature="0"
4427 id="path3134-9-0-3-9" 4415 id="path3134-9-0-3-9"
4428 d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0" 4416 d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
4429 style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" /> 4417 style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
4430 <text
4431 xml:space="preserve"
4432 x="5092.4683"
4433 y="74111.672"
4434 font-style="normal"
4435 font-weight="bold"
4436 font-size="192"
4437 id="text202-60"
4438 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
4439 <g 4418 <g
4440 style="fill:none;stroke-width:0.025in" 4419 style="fill:none;stroke-width:0.025in"
4441 id="g3107-62-6" 4420 id="g3107-62-6"
@@ -4469,15 +4448,6 @@
4469 sodipodi:linespacing="125%"><tspan 4448 sodipodi:linespacing="125%"><tspan
4470 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans" 4449 style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
4471 id="tspan3104-6-5-7-7">Root</tspan></text> 4450 id="tspan3104-6-5-7-7">Root</tspan></text>
4472 <text
4473 xml:space="preserve"
4474 x="5092.4683"
4475 y="74325.906"
4476 font-style="normal"
4477 font-weight="bold"
4478 font-size="192"
4479 id="text202-60-3"
4480 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"> rnp-&gt;completed</text>
4481 <g 4451 <g
4482 style="fill:none;stroke-width:0.025in" 4452 style="fill:none;stroke-width:0.025in"
4483 transform="translate(1746.2528,60972.572)" 4453 transform="translate(1746.2528,60972.572)"
@@ -4736,13 +4706,15 @@
4736 </g> 4706 </g>
4737 <text 4707 <text
4738 xml:space="preserve" 4708 xml:space="preserve"
4739 x="5327.3057" 4709 x="5274.1216"
4740 y="15428.84" 4710 y="15411.231"
4741 font-style="normal" 4711 font-style="normal"
4742 font-weight="bold" 4712 font-weight="bold"
4743 font-size="192" 4713 font-size="192"
4744 id="text202-36" 4714 id="text202-36"
4745 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text> 4715 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
4716 style="font-size:172.87567139px"
4717 id="tspan3166-6">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
4746 </g> 4718 </g>
4747 <g 4719 <g
4748 transform="translate(-728.08545,53203.538)" 4720 transform="translate(-728.08545,53203.538)"
@@ -4821,13 +4793,15 @@
4821 id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text> 4793 id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
4822 <text 4794 <text
4823 xml:space="preserve" 4795 xml:space="preserve"
4824 x="7486.4907" 4796 x="7435.1987"
4825 y="17670.119" 4797 y="17708.281"
4826 font-style="normal" 4798 font-style="normal"
4827 font-weight="bold" 4799 font-weight="bold"
4828 font-size="192" 4800 font-size="192"
4829 id="text202-6-2" 4801 id="text202-36-9"
4830 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text> 4802 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
4803 style="font-size:172.87567139px"
4804 id="tspan3166-1">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
4831 </g> 4805 </g>
4832 <g 4806 <g
4833 transform="translate(-7393.5687,53203.538)" 4807 transform="translate(-7393.5687,53203.538)"
@@ -4868,13 +4842,15 @@
4868 id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text> 4842 id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
4869 <text 4843 <text
4870 xml:space="preserve" 4844 xml:space="preserve"
4871 x="7474.1382" 4845 x="7416.8125"
4872 y="17688.926" 4846 y="17708.281"
4873 font-style="normal" 4847 font-style="normal"
4874 font-weight="bold" 4848 font-weight="bold"
4875 font-size="192" 4849 font-size="192"
4876 id="text202-5-1" 4850 id="text202-36-35"
4877 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text> 4851 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
4852 style="font-size:172.87567139px"
4853 id="tspan3166-62">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
4878 </g> 4854 </g>
4879 <path 4855 <path
4880 style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)" 4856 style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
@@ -4908,15 +4884,6 @@
4908 id="path3414-8-3-6-67" 4884 id="path3414-8-3-6-67"
4909 inkscape:connector-curvature="0" 4885 inkscape:connector-curvature="0"
4910 sodipodi:nodetypes="cc" /> 4886 sodipodi:nodetypes="cc" />
4911 <text
4912 xml:space="preserve"
4913 x="6742.6001"
4914 y="70882.617"
4915 font-style="normal"
4916 font-weight="bold"
4917 font-size="192"
4918 id="text202-2"
4919 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
4920 <g 4887 <g
4921 style="fill:none;stroke-width:0.025in" 4888 style="fill:none;stroke-width:0.025in"
4922 id="g4504-3-9-6" 4889 id="g4504-3-9-6"
@@ -5131,5 +5098,47 @@
5131 font-size="192" 5098 font-size="192"
5132 id="text202-7-9-6-6-7" 5099 id="text202-7-9-6-6-7"
5133 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text> 5100 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
5101 <text
5102 xml:space="preserve"
5103 x="6698.9019"
5104 y="70885.211"
5105 font-style="normal"
5106 font-weight="bold"
5107 font-size="192"
5108 id="text202-36-2"
5109 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
5110 style="font-size:172.87567139px"
5111 id="tspan3166-7">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
5112 <text
5113 xml:space="preserve"
5114 x="10023.457"
5115 y="70885.234"
5116 font-style="normal"
5117 font-weight="bold"
5118 font-size="192"
5119 id="text202-36-0"
5120 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
5121 style="font-size:172.87567139px"
5122 id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
5123 <text
5124 xml:space="preserve"
5125 x="5023.3389"
5126 y="74209.773"
5127 font-style="normal"
5128 font-weight="bold"
5129 font-size="192"
5130 id="text202-36-36"
5131 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
5132 style="font-size:172.87567139px"
5133 id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
5134 <text
5135 xml:space="preserve"
5136 x="6562.5884"
5137 y="34870.727"
5138 font-style="normal"
5139 font-weight="bold"
5140 font-size="192"
5141 id="text202-3"
5142 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
5134 </g> 5143 </g>
5135</svg> 5144</svg>
diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index de3992f4cbe1..149bec2a4493 100644
--- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
+++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
@@ -300,13 +300,13 @@
300 inkscape:window-height="1144" 300 inkscape:window-height="1144"
301 id="namedview208" 301 id="namedview208"
302 showgrid="true" 302 showgrid="true"
303 inkscape:zoom="0.70710678" 303 inkscape:zoom="0.96484375"
304 inkscape:cx="616.47598" 304 inkscape:cx="507.0191"
305 inkscape:cy="595.41964" 305 inkscape:cy="885.62207"
306 inkscape:window-x="813" 306 inkscape:window-x="47"
307 inkscape:window-y="28" 307 inkscape:window-y="28"
308 inkscape:window-maximized="0" 308 inkscape:window-maximized="0"
309 inkscape:current-layer="g4405" 309 inkscape:current-layer="g3115"
310 fit-margin-top="5" 310 fit-margin-top="5"
311 fit-margin-right="5" 311 fit-margin-right="5"
312 fit-margin-left="5" 312 fit-margin-left="5"
@@ -710,7 +710,7 @@
710 font-weight="bold" 710 font-weight="bold"
711 font-size="192" 711 font-size="192"
712 id="text202-6-6" 712 id="text202-6-6"
713 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text> 713 style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
714 <text 714 <text
715 xml:space="preserve" 715 xml:space="preserve"
716 x="5035.4155" 716 x="5035.4155"
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 4259f95c3261..f99cf11b314b 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -172,7 +172,7 @@ it will print a message similar to the following:
172 INFO: rcu_sched detected stalls on CPUs/tasks: 172 INFO: rcu_sched detected stalls on CPUs/tasks:
173 2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0 173 2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
174 16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0 174 16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
175 (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625) 175 (detected by 32, t=2603 jiffies, g=7075, q=625)
176 176
177This message indicates that CPU 32 detected that CPUs 2 and 16 were both 177This message indicates that CPU 32 detected that CPUs 2 and 16 were both
178causing stalls, and that the stall was affecting RCU-sched. This message 178causing stalls, and that the stall was affecting RCU-sched. This message
@@ -215,11 +215,10 @@ CPU since the last time that this CPU noted the beginning of a grace
215period. 215period.
216 216
217The "detected by" line indicates which CPU detected the stall (in this 217The "detected by" line indicates which CPU detected the stall (in this
218case, CPU 32), how many jiffies have elapsed since the start of the 218case, CPU 32), how many jiffies have elapsed since the start of the grace
219grace period (in this case 2603), the number of the last grace period 219period (in this case 2603), the grace-period sequence number (7075), and
220to start and to complete (7073 and 7072, respectively), and an estimate 220an estimate of the total number of RCU callbacks queued across all CPUs
221of the total number of RCU callbacks queued across all CPUs (625 in 221(625 in this case).
222this case).
223 222
224In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed 223In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
225for each CPU: 224for each CPU:
@@ -266,15 +265,16 @@ If the relevant grace-period kthread has been unable to run prior to
266the stall warning, as was the case in the "All QSes seen" line above, 265the stall warning, as was the case in the "All QSes seen" line above,
267the following additional line is printed: 266the following additional line is printed:
268 267
269 kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 268 kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
270 269
271Starving the grace-period kthreads of CPU time can of course result 270Starving the grace-period kthreads of CPU time can of course result
272in RCU CPU stall warnings even when all CPUs and tasks have passed 271in RCU CPU stall warnings even when all CPUs and tasks have passed
273through the required quiescent states. The "g" and "c" numbers flag the 272through the required quiescent states. The "g" number shows the current
274number of the last grace period started and completed, respectively, 273grace-period sequence number, the "f" precedes the ->gp_flags command
275the "f" precedes the ->gp_flags command to the grace-period kthread, 274to the grace-period kthread, the "RCU_GP_WAIT_FQS" indicates that the
276the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short 275kthread is waiting for a short timeout, the "state" precedes value of the
277timeout, and the "state" precedes value of the task_struct ->state field. 276task_struct ->state field, and the "cpu" indicates that the grace-period
277kthread last ran on CPU 5.
278 278
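
The low two bits of a gp_seq value hold the phase (nonzero while a grace
period is in flight) and the remaining bits count grace periods, as in
kernel/rcu/rcu.h. The "g=" field can therefore be decoded by hand; a
hypothetical stand-alone decoder, using the sample value from the message
above:

	#include <stdio.h>

	int main(void)
	{
		unsigned long g = 7075;	/* the "g=" field from the warning */

		/* Low two bits: phase; everything above them: GP count. */
		printf("grace periods: %lu, phase: %lu\n", g >> 2, g & 0x3);
		return 0;
	}
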
279 279
280Multiple Warnings From One Stall 280Multiple Warnings From One Stall
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 65eb856526b7..c2a7facf7ff9 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -588,6 +588,7 @@ It is extremely simple:
588 void synchronize_rcu(void) 588 void synchronize_rcu(void)
589 { 589 {
590 write_lock(&rcu_gp_mutex); 590 write_lock(&rcu_gp_mutex);
591 smp_mb__after_spinlock();
591 write_unlock(&rcu_gp_mutex); 592 write_unlock(&rcu_gp_mutex);
592 } 593 }
593 594
@@ -609,12 +610,15 @@ don't forget about them when submitting patches making use of RCU!]
609 610
610The rcu_read_lock() and rcu_read_unlock() primitive read-acquire 611The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
611and release a global reader-writer lock. The synchronize_rcu() 612and release a global reader-writer lock. The synchronize_rcu()
612primitive write-acquires this same lock, then immediately releases 613primitive write-acquires this same lock, then releases it. This means
613it. This means that once synchronize_rcu() exits, all RCU read-side 614that once synchronize_rcu() exits, all RCU read-side critical sections
614critical sections that were in progress before synchronize_rcu() was 615that were in progress before synchronize_rcu() was called are guaranteed
615called are guaranteed to have completed -- there is no way that 616to have completed -- there is no way that synchronize_rcu() would have
616synchronize_rcu() would have been able to write-acquire the lock 617been able to write-acquire the lock otherwise. The smp_mb__after_spinlock()
617otherwise. 618promotes synchronize_rcu() to a full memory barrier in compliance with
619the "Memory-Barrier Guarantees" listed in:
620
621 Documentation/RCU/Design/Requirements/Requirements.html.
618 622
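
For reference, the complete toy implementation that this hunk amends looks
roughly like the following, reader side included (a sketch in the document's
own terms; production RCU uses no reader-writer lock):

	static DEFINE_RWLOCK(rcu_gp_mutex);

	void rcu_read_lock(void)
	{
		read_lock(&rcu_gp_mutex);
	}

	void rcu_read_unlock(void)
	{
		read_unlock(&rcu_gp_mutex);
	}

	void synchronize_rcu(void)
	{
		write_lock(&rcu_gp_mutex);
		smp_mb__after_spinlock();	/* the full barrier added above */
		write_unlock(&rcu_gp_mutex);
	}

Without that barrier, the lock round trip would order only what happens inside
the write-side critical section; the added line is what entitles callers to
full memory-barrier semantics on both sides of synchronize_rcu().
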
619It is possible to nest rcu_read_lock(), since reader-writer locks may 623It is possible to nest rcu_read_lock(), since reader-writer locks may
620be recursively acquired. Note also that rcu_read_lock() is immune 624be recursively acquired. Note also that rcu_read_lock() is immune
@@ -816,11 +820,13 @@ RCU list traversal:
816 list_next_rcu 820 list_next_rcu
817 list_for_each_entry_rcu 821 list_for_each_entry_rcu
818 list_for_each_entry_continue_rcu 822 list_for_each_entry_continue_rcu
823 list_for_each_entry_from_rcu
819 hlist_first_rcu 824 hlist_first_rcu
820 hlist_next_rcu 825 hlist_next_rcu
821 hlist_pprev_rcu 826 hlist_pprev_rcu
822 hlist_for_each_entry_rcu 827 hlist_for_each_entry_rcu
823 hlist_for_each_entry_rcu_bh 828 hlist_for_each_entry_rcu_bh
829 hlist_for_each_entry_from_rcu
824 hlist_for_each_entry_continue_rcu 830 hlist_for_each_entry_continue_rcu
825 hlist_for_each_entry_continue_rcu_bh 831 hlist_for_each_entry_continue_rcu_bh
826 hlist_nulls_first_rcu 832 hlist_nulls_first_rcu
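
The two _from_ iterators added to this list resume a traversal from an
existing cursor rather than from the list head. A hypothetical use, in which
struct foo, its ->list member, head, limit, and the previously located pos are
all illustrative:

	struct foo {
		struct list_head list;
		int val;
	};

	/* Continue scanning from a 'pos' found during an earlier pass. */
	rcu_read_lock();
	list_for_each_entry_from_rcu(pos, &head, list) {
		if (pos->val > limit)
			break;
	}
	rcu_read_unlock();
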
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 533ff5c68970..c370f5f0eb38 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3632,8 +3632,8 @@
3632 Set time (s) after boot for CPU-hotplug testing. 3632 Set time (s) after boot for CPU-hotplug testing.
3633 3633
3634 rcutorture.onoff_interval= [KNL] 3634 rcutorture.onoff_interval= [KNL]
3635 Set time (s) between CPU-hotplug operations, or 3635 Set time (jiffies) between CPU-hotplug operations,
3636 zero to disable CPU-hotplug testing. 3636 or zero to disable CPU-hotplug testing.
3637 3637
3638 rcutorture.shuffle_interval= [KNL] 3638 rcutorture.shuffle_interval= [KNL]
3639 Set task-shuffle interval (s). Shuffling tasks 3639 Set task-shuffle interval (s). Shuffling tasks
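
After this correction, a command line such as the following (values
hypothetical) requests one hotplug operation every 200 jiffies -- two seconds
at HZ=100 -- rather than every 200 seconds:

	rcutorture.onoff_interval=200
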
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
index 5f89fb635a1b..f97fd8ab5e45 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
@@ -4,6 +4,7 @@ Required properties:
4 4
5- compatible : should be "ingenic,<socname>-intc". Valid strings are: 5- compatible : should be "ingenic,<socname>-intc". Valid strings are:
6 ingenic,jz4740-intc 6 ingenic,jz4740-intc
7 ingenic,jz4725b-intc
7 ingenic,jz4770-intc 8 ingenic,jz4770-intc
8 ingenic,jz4775-intc 9 ingenic,jz4775-intc
9 ingenic,jz4780-intc 10 ingenic,jz4780-intc
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index 20f121daa910..697ca2f26d1b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -7,6 +7,7 @@ Required properties:
7 - "renesas,irqc-r8a73a4" (R-Mobile APE6) 7 - "renesas,irqc-r8a73a4" (R-Mobile APE6)
8 - "renesas,irqc-r8a7743" (RZ/G1M) 8 - "renesas,irqc-r8a7743" (RZ/G1M)
9 - "renesas,irqc-r8a7745" (RZ/G1E) 9 - "renesas,irqc-r8a7745" (RZ/G1E)
10 - "renesas,irqc-r8a77470" (RZ/G1C)
10 - "renesas,irqc-r8a7790" (R-Car H2) 11 - "renesas,irqc-r8a7790" (R-Car H2)
11 - "renesas,irqc-r8a7791" (R-Car M2-W) 12 - "renesas,irqc-r8a7791" (R-Car M2-W)
12 - "renesas,irqc-r8a7792" (R-Car V2H) 13 - "renesas,irqc-r8a7792" (R-Car V2H)
@@ -16,6 +17,7 @@ Required properties:
16 - "renesas,intc-ex-r8a7796" (R-Car M3-W) 17 - "renesas,intc-ex-r8a7796" (R-Car M3-W)
17 - "renesas,intc-ex-r8a77965" (R-Car M3-N) 18 - "renesas,intc-ex-r8a77965" (R-Car M3-N)
18 - "renesas,intc-ex-r8a77970" (R-Car V3M) 19 - "renesas,intc-ex-r8a77970" (R-Car V3M)
20 - "renesas,intc-ex-r8a77980" (R-Car V3H)
19 - "renesas,intc-ex-r8a77995" (R-Car D3) 21 - "renesas,intc-ex-r8a77995" (R-Car D3)
20- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in 22- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
21 interrupts.txt in this directory 23 interrupts.txt in this directory
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
index 252a05c5d976..c8c4b00ecb94 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
16the node is not important. The content of the node is defined in dwc3.txt. 16the node is not important. The content of the node is defined in dwc3.txt.
17 17
18Phy documentation is provided in the following places: 18Phy documentation is provided in the following places:
19Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt 19Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
20Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
20 21
21Example device nodes: 22Example device nodes:
22 23
diff --git a/Documentation/networking/dpaa2/overview.rst b/Documentation/networking/dpaa2/overview.rst
index 79fede4447d6..d638b5a8aadd 100644
--- a/Documentation/networking/dpaa2/overview.rst
+++ b/Documentation/networking/dpaa2/overview.rst
@@ -1,5 +1,6 @@
1.. include:: <isonum.txt> 1.. include:: <isonum.txt>
2 2
3=========================================================
3DPAA2 (Data Path Acceleration Architecture Gen2) Overview 4DPAA2 (Data Path Acceleration Architecture Gen2) Overview
4========================================================= 5=========================================================
5 6
diff --git a/MAINTAINERS b/MAINTAINERS
index 0fe4228f78cb..d0d729016d65 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5444,6 +5444,7 @@ F: drivers/iommu/exynos-iommu.c
5444 5444
5445EZchip NPS platform support 5445EZchip NPS platform support
5446M: Vineet Gupta <vgupta@synopsys.com> 5446M: Vineet Gupta <vgupta@synopsys.com>
5447M: Ofer Levi <oferle@mellanox.com>
5447S: Supported 5448S: Supported
5448F: arch/arc/plat-eznps 5449F: arch/arc/plat-eznps
5449F: arch/arc/boot/dts/eznps.dts 5450F: arch/arc/boot/dts/eznps.dts
@@ -5929,7 +5930,7 @@ F: Documentation/dev-tools/gcov.rst
5929 5930
5930GDB KERNEL DEBUGGING HELPER SCRIPTS 5931GDB KERNEL DEBUGGING HELPER SCRIPTS
5931M: Jan Kiszka <jan.kiszka@siemens.com> 5932M: Jan Kiszka <jan.kiszka@siemens.com>
5932M: Kieran Bingham <kieran@bingham.xyz> 5933M: Kieran Bingham <kbingham@kernel.org>
5933S: Supported 5934S: Supported
5934F: scripts/gdb/ 5935F: scripts/gdb/
5935 5936
@@ -7095,6 +7096,7 @@ F: include/uapi/linux/input.h
7095F: include/uapi/linux/input-event-codes.h 7096F: include/uapi/linux/input-event-codes.h
7096F: include/linux/input/ 7097F: include/linux/input/
7097F: Documentation/devicetree/bindings/input/ 7098F: Documentation/devicetree/bindings/input/
7099F: Documentation/devicetree/bindings/serio/
7098F: Documentation/input/ 7100F: Documentation/input/
7099 7101
7100INPUT MULTITOUCH (MT) PROTOCOL 7102INPUT MULTITOUCH (MT) PROTOCOL
@@ -7984,7 +7986,7 @@ F: lib/test_kmod.c
7984F: tools/testing/selftests/kmod/ 7986F: tools/testing/selftests/kmod/
7985 7987
7986KPROBES 7988KPROBES
7987M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com> 7989M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
7988M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 7990M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7989M: "David S. Miller" <davem@davemloft.net> 7991M: "David S. Miller" <davem@davemloft.net>
7990M: Masami Hiramatsu <mhiramat@kernel.org> 7992M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -12037,9 +12039,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
12037F: Documentation/RCU/ 12039F: Documentation/RCU/
12038X: Documentation/RCU/torture.txt 12040X: Documentation/RCU/torture.txt
12039F: include/linux/rcu* 12041F: include/linux/rcu*
12040X: include/linux/srcu.h 12042X: include/linux/srcu*.h
12041F: kernel/rcu/ 12043F: kernel/rcu/
12042X: kernel/torture.c 12044X: kernel/rcu/srcu*.c
12043 12045
12044REAL TIME CLOCK (RTC) SUBSYSTEM 12046REAL TIME CLOCK (RTC) SUBSYSTEM
12045M: Alessandro Zummo <a.zummo@towertech.it> 12047M: Alessandro Zummo <a.zummo@towertech.it>
@@ -13076,8 +13078,8 @@ L: linux-kernel@vger.kernel.org
13076W: http://www.rdrop.com/users/paulmck/RCU/ 13078W: http://www.rdrop.com/users/paulmck/RCU/
13077S: Supported 13079S: Supported
13078T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git 13080T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
13079F: include/linux/srcu.h 13081F: include/linux/srcu*.h
13080F: kernel/rcu/srcu.c 13082F: kernel/rcu/srcu*.c
13081 13083
13082SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus) 13084SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
13083M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org> 13085M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -14436,6 +14438,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
14436F: Documentation/RCU/torture.txt 14438F: Documentation/RCU/torture.txt
14437F: kernel/torture.c 14439F: kernel/torture.c
14438F: kernel/rcu/rcutorture.c 14440F: kernel/rcu/rcutorture.c
14441F: kernel/rcu/rcuperf.c
14439F: kernel/locking/locktorture.c 14442F: kernel/locking/locktorture.c
14440 14443
14441TOSHIBA ACPI EXTRAS DRIVER 14444TOSHIBA ACPI EXTRAS DRIVER
diff --git a/Makefile b/Makefile
index 67d9d20f8564..863f58503bee 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 18 3PATCHLEVEL = 18
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION =
6NAME = Merciless Moray 6NAME = Merciless Moray
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 9cf59fc60eab..5151d81476a1 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -50,6 +50,9 @@ config ARC
50 select HAVE_KERNEL_LZMA 50 select HAVE_KERNEL_LZMA
51 select ARCH_HAS_PTE_SPECIAL 51 select ARCH_HAS_PTE_SPECIAL
52 52
53config ARCH_HAS_CACHE_LINE_SIZE
54 def_bool y
55
53config MIGHT_HAVE_PCI 56config MIGHT_HAVE_PCI
54 bool 57 bool
55 58
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 8486f328cc5d..ff7d3232764a 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -48,7 +48,9 @@
48}) 48})
49 49
50/* Largest line length for either L1 or L2 is 128 bytes */ 50/* Largest line length for either L1 or L2 is 128 bytes */
51#define ARCH_DMA_MINALIGN 128 51#define SMP_CACHE_BYTES 128
52#define cache_line_size() SMP_CACHE_BYTES
53#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
52 54
53extern void arc_cache_init(void); 55extern void arc_cache_init(void);
54extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); 56extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
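
Pinning all three symbols to the same 128-byte figure lets code pad
DMA-visible structures with any of them interchangeably; a hypothetical
descriptor (the name and fields are illustrative):

	/* Device-written fields never share a cache line with CPU-only data. */
	struct rx_desc {
		u32	status;
		u32	len;
	} __aligned(ARCH_DMA_MINALIGN);
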
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index d5da2115d78a..03d6bb0f4e13 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
17#ifndef __ASM_ARC_UDELAY_H 17#ifndef __ASM_ARC_UDELAY_H
18#define __ASM_ARC_UDELAY_H 18#define __ASM_ARC_UDELAY_H
19 19
20#include <asm-generic/types.h>
20#include <asm/param.h> /* HZ */ 21#include <asm/param.h> /* HZ */
21 22
23extern unsigned long loops_per_jiffy;
24
22static inline void __delay(unsigned long loops) 25static inline void __delay(unsigned long loops)
23{ 26{
24 __asm__ __volatile__( 27 __asm__ __volatile__(
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9dbe645ee127..25c631942500 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1038,7 +1038,7 @@ void flush_cache_mm(struct mm_struct *mm)
1038void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, 1038void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
1039 unsigned long pfn) 1039 unsigned long pfn)
1040{ 1040{
1041 unsigned int paddr = pfn << PAGE_SHIFT; 1041 phys_addr_t paddr = pfn << PAGE_SHIFT;
1042 1042
1043 u_vaddr &= PAGE_MASK; 1043 u_vaddr &= PAGE_MASK;
1044 1044
@@ -1058,8 +1058,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
1058 unsigned long u_vaddr) 1058 unsigned long u_vaddr)
1059{ 1059{
1060 /* TBD: do we really need to clear the kernel mapping */ 1060 /* TBD: do we really need to clear the kernel mapping */
1061 __flush_dcache_page(page_address(page), u_vaddr); 1061 __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
1062 __flush_dcache_page(page_address(page), page_address(page)); 1062 __flush_dcache_page((phys_addr_t)page_address(page),
1063 (phys_addr_t)page_address(page));
1063 1064
1064} 1065}
1065 1066
@@ -1246,6 +1247,16 @@ void __init arc_cache_init_master(void)
1246 } 1247 }
1247 } 1248 }
1248 1249
1250 /*
1251 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
1252 * or equal to any cache line length.
1253 */
1254 BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
1255 "SMP_CACHE_BYTES must be >= any cache line length");
1256 if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
1257 panic("L2 Cache line [%d] > kernel Config [%d]\n",
1258 l2_line_sz, SMP_CACHE_BYTES);
1259
1249 /* Note that SLC disable not formally supported till HS 3.0 */ 1260 /* Note that SLC disable not formally supported till HS 3.0 */
1250 if (is_isa_arcv2() && l2_line_sz && !slc_enable) 1261 if (is_isa_arcv2() && l2_line_sz && !slc_enable)
1251 arc_slc_disable(); 1262 arc_slc_disable();
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 8c1071840979..ec47e6079f5d 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
129 return ret; 129 return ret;
130} 130}
131 131
132/*
133 * Cache operations depending on function and direction argument, inspired by
134 * https://lkml.org/lkml/2018/5/18/979
135 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
136 * dma-mapping: provide a generic dma-noncoherent implementation)"
137 *
138 * | map == for_device | unmap == for_cpu
139 * |----------------------------------------------------------------
140 * TO_DEV | writeback writeback | none none
141 * FROM_DEV | invalidate invalidate | invalidate* invalidate*
142 * BIDIR | writeback+inv writeback+inv | invalidate invalidate
143 *
144 * [*] needed for CPU speculative prefetches
145 *
146 * NOTE: we don't check the validity of direction argument as it is done in
147 * upper layer functions (in include/linux/dma-mapping.h)
148 */
149
132void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 150void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
133 size_t size, enum dma_data_direction dir) 151 size_t size, enum dma_data_direction dir)
134{ 152{
135 dma_cache_wback(paddr, size); 153 switch (dir) {
154 case DMA_TO_DEVICE:
155 dma_cache_wback(paddr, size);
156 break;
157
158 case DMA_FROM_DEVICE:
159 dma_cache_inv(paddr, size);
160 break;
161
162 case DMA_BIDIRECTIONAL:
163 dma_cache_wback_inv(paddr, size);
164 break;
165
166 default:
167 break;
168 }
136} 169}
137 170
138void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, 171void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
139 size_t size, enum dma_data_direction dir) 172 size_t size, enum dma_data_direction dir)
140{ 173{
141 dma_cache_inv(paddr, size); 174 switch (dir) {
175 case DMA_TO_DEVICE:
176 break;
177
178 /* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
179 case DMA_FROM_DEVICE:
180 case DMA_BIDIRECTIONAL:
181 dma_cache_inv(paddr, size);
182 break;
183
184 default:
185 break;
186 }
142} 187}
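
To see when the two hooks run, consider a hypothetical receive path (rx_one(),
LEN, and the buffer are illustrative): dma_map_single() lands in
arch_sync_dma_for_device(), the "map == for_device" column of the table above,
and dma_unmap_single() in arch_sync_dma_for_cpu():

	#include <linux/dma-mapping.h>

	#define LEN 2048

	static int rx_one(struct device *dev, void *rx_buf)
	{
		dma_addr_t handle;

		/* FROM_DEVICE map: invalidate so stale lines cannot mask DMA data. */
		handle = dma_map_single(dev, rx_buf, LEN, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... device fills the buffer, completion arrives ... */

		/* FROM_DEVICE unmap: invalidate again against speculative prefetch. */
		dma_unmap_single(dev, handle, LEN, DMA_FROM_DEVICE);
		return 0;
	}
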
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index 0c7d11022d0f..4f6a1673b3a6 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -21,6 +21,7 @@
21#error "Incorrect ctop.h include" 21#error "Incorrect ctop.h include"
22#endif 22#endif
23 23
24#include <linux/types.h>
24#include <soc/nps/common.h> 25#include <soc/nps/common.h>
25 26
26/* core auxiliary registers */ 27/* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
143}; 144};
144 145
145/* AUX registers definition */ 146/* AUX registers definition */
147struct nps_host_reg_aux_dpc {
148 union {
149 struct {
150 u32 ien:1, men:1, hen:1, reserved:29;
151 };
152 u32 value;
153 };
154};
155
146struct nps_host_reg_aux_udmc { 156struct nps_host_reg_aux_udmc {
147 union { 157 union {
148 struct { 158 struct {
diff --git a/arch/arc/plat-eznps/mtm.c b/arch/arc/plat-eznps/mtm.c
index 2388de3d09ef..ed0077ef666e 100644
--- a/arch/arc/plat-eznps/mtm.c
+++ b/arch/arc/plat-eznps/mtm.c
@@ -15,6 +15,8 @@
15 */ 15 */
16 16
17#include <linux/smp.h> 17#include <linux/smp.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
18#include <linux/io.h> 20#include <linux/io.h>
19#include <linux/log2.h> 21#include <linux/log2.h>
20#include <asm/arcregs.h> 22#include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
157/* Verify and set the value of the mtm hs counter */ 159/* Verify and set the value of the mtm hs counter */
158static int __init set_mtm_hs_ctr(char *ctr_str) 160static int __init set_mtm_hs_ctr(char *ctr_str)
159{ 161{
160 long hs_ctr; 162 int hs_ctr;
161 int ret; 163 int ret;
162 164
163 ret = kstrtol(ctr_str, 0, &hs_ctr); 165 ret = kstrtoint(ctr_str, 0, &hs_ctr);
164 166
165 if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) { 167 if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
166 pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n", 168 pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 843edfd000be..d7a81284c272 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -337,8 +337,8 @@ config ARCH_MULTIPLATFORM
337 select TIMER_OF 337 select TIMER_OF
338 select COMMON_CLK 338 select COMMON_CLK
339 select GENERIC_CLOCKEVENTS 339 select GENERIC_CLOCKEVENTS
340 select GENERIC_IRQ_MULTI_HANDLER
340 select MIGHT_HAVE_PCI 341 select MIGHT_HAVE_PCI
341 select MULTI_IRQ_HANDLER
342 select PCI_DOMAINS if PCI 342 select PCI_DOMAINS if PCI
343 select SPARSE_IRQ 343 select SPARSE_IRQ
344 select USE_OF 344 select USE_OF
@@ -465,9 +465,9 @@ config ARCH_DOVE
465 bool "Marvell Dove" 465 bool "Marvell Dove"
466 select CPU_PJ4 466 select CPU_PJ4
467 select GENERIC_CLOCKEVENTS 467 select GENERIC_CLOCKEVENTS
468 select GENERIC_IRQ_MULTI_HANDLER
468 select GPIOLIB 469 select GPIOLIB
469 select MIGHT_HAVE_PCI 470 select MIGHT_HAVE_PCI
470 select MULTI_IRQ_HANDLER
471 select MVEBU_MBUS 471 select MVEBU_MBUS
472 select PINCTRL 472 select PINCTRL
473 select PINCTRL_DOVE 473 select PINCTRL_DOVE
@@ -512,8 +512,8 @@ config ARCH_LPC32XX
512 select COMMON_CLK 512 select COMMON_CLK
513 select CPU_ARM926T 513 select CPU_ARM926T
514 select GENERIC_CLOCKEVENTS 514 select GENERIC_CLOCKEVENTS
515 select GENERIC_IRQ_MULTI_HANDLER
515 select GPIOLIB 516 select GPIOLIB
516 select MULTI_IRQ_HANDLER
517 select SPARSE_IRQ 517 select SPARSE_IRQ
518 select USE_OF 518 select USE_OF
519 help 519 help
@@ -532,11 +532,11 @@ config ARCH_PXA
532 select TIMER_OF 532 select TIMER_OF
533 select CPU_XSCALE if !CPU_XSC3 533 select CPU_XSCALE if !CPU_XSC3
534 select GENERIC_CLOCKEVENTS 534 select GENERIC_CLOCKEVENTS
535 select GENERIC_IRQ_MULTI_HANDLER
535 select GPIO_PXA 536 select GPIO_PXA
536 select GPIOLIB 537 select GPIOLIB
537 select HAVE_IDE 538 select HAVE_IDE
538 select IRQ_DOMAIN 539 select IRQ_DOMAIN
539 select MULTI_IRQ_HANDLER
540 select PLAT_PXA 540 select PLAT_PXA
541 select SPARSE_IRQ 541 select SPARSE_IRQ
542 help 542 help
@@ -572,11 +572,11 @@ config ARCH_SA1100
572 select CPU_FREQ 572 select CPU_FREQ
573 select CPU_SA1100 573 select CPU_SA1100
574 select GENERIC_CLOCKEVENTS 574 select GENERIC_CLOCKEVENTS
575 select GENERIC_IRQ_MULTI_HANDLER
575 select GPIOLIB 576 select GPIOLIB
576 select HAVE_IDE 577 select HAVE_IDE
577 select IRQ_DOMAIN 578 select IRQ_DOMAIN
578 select ISA 579 select ISA
579 select MULTI_IRQ_HANDLER
580 select NEED_MACH_MEMORY_H 580 select NEED_MACH_MEMORY_H
581 select SPARSE_IRQ 581 select SPARSE_IRQ
582 help 582 help
@@ -590,10 +590,10 @@ config ARCH_S3C24XX
590 select GENERIC_CLOCKEVENTS 590 select GENERIC_CLOCKEVENTS
591 select GPIO_SAMSUNG 591 select GPIO_SAMSUNG
592 select GPIOLIB 592 select GPIOLIB
593 select GENERIC_IRQ_MULTI_HANDLER
593 select HAVE_S3C2410_I2C if I2C 594 select HAVE_S3C2410_I2C if I2C
594 select HAVE_S3C2410_WATCHDOG if WATCHDOG 595 select HAVE_S3C2410_WATCHDOG if WATCHDOG
595 select HAVE_S3C_RTC if RTC_CLASS 596 select HAVE_S3C_RTC if RTC_CLASS
596 select MULTI_IRQ_HANDLER
597 select NEED_MACH_IO_H 597 select NEED_MACH_IO_H
598 select SAMSUNG_ATAGS 598 select SAMSUNG_ATAGS
599 select USE_OF 599 select USE_OF
@@ -627,10 +627,10 @@ config ARCH_OMAP1
627 select CLKSRC_MMIO 627 select CLKSRC_MMIO
628 select GENERIC_CLOCKEVENTS 628 select GENERIC_CLOCKEVENTS
629 select GENERIC_IRQ_CHIP 629 select GENERIC_IRQ_CHIP
630 select GENERIC_IRQ_MULTI_HANDLER
630 select GPIOLIB 631 select GPIOLIB
631 select HAVE_IDE 632 select HAVE_IDE
632 select IRQ_DOMAIN 633 select IRQ_DOMAIN
633 select MULTI_IRQ_HANDLER
634 select NEED_MACH_IO_H if PCCARD 634 select NEED_MACH_IO_H if PCCARD
635 select NEED_MACH_MEMORY_H 635 select NEED_MACH_MEMORY_H
636 select SPARSE_IRQ 636 select SPARSE_IRQ
@@ -921,11 +921,6 @@ config IWMMXT
921 Enable support for iWMMXt context switching at run time if 921 Enable support for iWMMXt context switching at run time if
922 running on a CPU that supports it. 922 running on a CPU that supports it.
923 923
924config MULTI_IRQ_HANDLER
925 bool
926 help
 927	 Allow each machine to specify its own IRQ handler at run time.
928
929if !MMU 924if !MMU
930source "arch/arm/Kconfig-nommu" 925source "arch/arm/Kconfig-nommu"
931endif 926endif
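
The Kconfig hunks above replace the arm-local MULTI_IRQ_HANDLER option with the generic GENERIC_IRQ_MULTI_HANDLER, so the per-arch handle_arch_irq plumbing (removed below from entry-armv.S, arm/kernel/irq.c, and the arm64 equivalents) can live in core IRQ code instead. A sketch of the shared helper the arch copies fold into, assuming it keeps the first-registration-wins semantics of the deleted versions:

	/* kernel/irq-side sketch; exact placement and signature per the
	 * generic series, semantics mirror the removed arch copies. */
	void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;

	int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
	{
		if (handle_arch_irq)
			return -EBUSY;

		handle_arch_irq = handle_irq;
		return 0;
	}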
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 17f1f1a814ff..38badaae8d9d 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -58,6 +58,9 @@ void efi_virtmap_unload(void);
58#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) 58#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
59#define efi_is_64bit() (false) 59#define efi_is_64bit() (false)
60 60
61#define efi_table_attr(table, attr, instance) \
62 ((table##_t *)instance)->attr
63
61#define efi_call_proto(protocol, f, instance, ...) \ 64#define efi_call_proto(protocol, f, instance, ...) \
62 ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) 65 ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
63 66
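
The new efi_table_attr() macro gives the shared EFI stub a way to read a member of a firmware table through a typed pointer, complementing the existing efi_call_proto(), which invokes a member. A hedged usage sketch (this call site is illustrative, not from the patch):

	/* Expands to ((efi_system_table_t *)sys_table)->boottime,
	 * i.e. a plain field read, never a method call. */
	void *boottime = efi_table_attr(efi_system_table, boottime,
					sys_table);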
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index b6f319606e30..c883fcbe93b6 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -31,11 +31,6 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
31void handle_IRQ(unsigned int, struct pt_regs *); 31void handle_IRQ(unsigned int, struct pt_regs *);
32void init_IRQ(void); 32void init_IRQ(void);
33 33
34#ifdef CONFIG_MULTI_IRQ_HANDLER
35extern void (*handle_arch_irq)(struct pt_regs *);
36extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
37#endif
38
39#ifdef CONFIG_SMP 34#ifdef CONFIG_SMP
40extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, 35extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
41 bool exclude_self); 36 bool exclude_self);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 5c1ad11aa392..bb8851208e17 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -59,7 +59,7 @@ struct machine_desc {
59 void (*init_time)(void); 59 void (*init_time)(void);
60 void (*init_machine)(void); 60 void (*init_machine)(void);
61 void (*init_late)(void); 61 void (*init_late)(void);
62#ifdef CONFIG_MULTI_IRQ_HANDLER 62#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
63 void (*handle_irq)(struct pt_regs *); 63 void (*handle_irq)(struct pt_regs *);
64#endif 64#endif
65 void (*restart)(enum reboot_mode, const char *); 65 void (*restart)(enum reboot_mode, const char *);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 179a9f6bd1e3..e85a3af9ddeb 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -22,7 +22,7 @@
22#include <asm/glue-df.h> 22#include <asm/glue-df.h>
23#include <asm/glue-pf.h> 23#include <asm/glue-pf.h>
24#include <asm/vfpmacros.h> 24#include <asm/vfpmacros.h>
25#ifndef CONFIG_MULTI_IRQ_HANDLER 25#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
26#include <mach/entry-macro.S> 26#include <mach/entry-macro.S>
27#endif 27#endif
28#include <asm/thread_notify.h> 28#include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
39 * Interrupt handling. 39 * Interrupt handling.
40 */ 40 */
41 .macro irq_handler 41 .macro irq_handler
42#ifdef CONFIG_MULTI_IRQ_HANDLER 42#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
43 ldr r1, =handle_arch_irq 43 ldr r1, =handle_arch_irq
44 mov r0, sp 44 mov r0, sp
45 badr lr, 9997f 45 badr lr, 9997f
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
1226 .globl cr_alignment 1226 .globl cr_alignment
1227cr_alignment: 1227cr_alignment:
1228 .space 4 1228 .space 4
1229
1230#ifdef CONFIG_MULTI_IRQ_HANDLER
1231 .globl handle_arch_irq
1232handle_arch_irq:
1233 .space 4
1234#endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 106a1466518d..746565a876dc 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,7 @@ saved_pc .req lr
48 * from those features make this path too inefficient. 48 * from those features make this path too inefficient.
49 */ 49 */
50ret_fast_syscall: 50ret_fast_syscall:
51__ret_fast_syscall:
51 UNWIND(.fnstart ) 52 UNWIND(.fnstart )
52 UNWIND(.cantunwind ) 53 UNWIND(.cantunwind )
53 disable_irq_notrace @ disable interrupts 54 disable_irq_notrace @ disable interrupts
@@ -78,6 +79,7 @@ fast_work_pending:
78 * call. 79 * call.
79 */ 80 */
80ret_fast_syscall: 81ret_fast_syscall:
82__ret_fast_syscall:
81 UNWIND(.fnstart ) 83 UNWIND(.fnstart )
82 UNWIND(.cantunwind ) 84 UNWIND(.cantunwind )
83 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 85 str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
@@ -255,7 +257,7 @@ local_restart:
255 tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? 257 tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
256 bne __sys_trace 258 bne __sys_trace
257 259
258 invoke_syscall tbl, scno, r10, ret_fast_syscall 260 invoke_syscall tbl, scno, r10, __ret_fast_syscall
259 261
260 add r1, sp, #S_OFF 262 add r1, sp, #S_OFF
2612: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) 2632: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..9908dacf9229 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
102 uniphier_cache_init(); 102 uniphier_cache_init();
103} 103}
104 104
105#ifdef CONFIG_MULTI_IRQ_HANDLER
106void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
107{
108 if (handle_arch_irq)
109 return;
110
111 handle_arch_irq = handle_irq;
112}
113#endif
114
115#ifdef CONFIG_SPARSE_IRQ 105#ifdef CONFIG_SPARSE_IRQ
116int __init arch_probe_nr_irqs(void) 106int __init arch_probe_nr_irqs(void)
117{ 107{
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 225d1c58d2de..d9c299133111 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
338 338
339static int __init gate_vma_init(void) 339static int __init gate_vma_init(void)
340{ 340{
341 vma_init(&gate_vma, NULL);
341 gate_vma.vm_page_prot = PAGE_READONLY_EXEC; 342 gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
342 return 0; 343 return 0;
343} 344}
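
gate_vma_init() now calls vma_init() instead of relying on the static VMA's zeroed fields. A trimmed sketch of the helper this series introduces in include/linux/mm.h:

	static inline void vma_init(struct vm_area_struct *vma,
				    struct mm_struct *mm)
	{
		static const struct vm_operations_struct dummy_vm_ops = {};

		vma->vm_mm = mm;
		/* A non-NULL vm_ops marks the VMA as not anonymous;
		 * callers that want an anonymous VMA follow up with
		 * vma_set_anonymous(), as the ia64 hunks below do. */
		vma->vm_ops = &dummy_vm_ops;
		INIT_LIST_HEAD(&vma->anon_vma_chain);
	}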
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 35ca494c028c..4c249cb261f3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1145,7 +1145,7 @@ void __init setup_arch(char **cmdline_p)
1145 1145
1146 reserve_crashkernel(); 1146 reserve_crashkernel();
1147 1147
1148#ifdef CONFIG_MULTI_IRQ_HANDLER 1148#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1149 handle_arch_irq = mdesc->handle_irq; 1149 handle_arch_irq = mdesc->handle_irq;
1150#endif 1150#endif
1151 1151
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 39aef4876ed4..04b2f22c2739 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex);
212 */ 212 */
213static void ecard_init_pgtables(struct mm_struct *mm) 213static void ecard_init_pgtables(struct mm_struct *mm)
214{ 214{
215 struct vm_area_struct vma; 215 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);
216 216
217 /* We want to set up the page tables for the following mapping: 217 /* We want to set up the page tables for the following mapping:
218 * Virtual Physical 218 * Virtual Physical
@@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)
237 237
238 memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE)); 238 memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
239 239
240 vma.vm_flags = VM_EXEC;
241 vma.vm_mm = mm;
242
243 flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE); 240 flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
244 flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE); 241 flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
245} 242}
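
This hunk and the later arm64/ia64 ones replace open-coded on-stack VMAs with TLB_FLUSH_VMA(), which initializes only the fields the range-flush paths actually read. Its definition, per this series:

	/* include/asm-generic/tlb.h: a minimal stack VMA carrying just
	 * the mm and the flags some flush_tlb_range() variants need. */
	#define TLB_FLUSH_VMA(mm, flags) \
		{ .vm_mm = (mm), .vm_flags = (flags) }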
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 42c090cf0292..3d1011957823 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -74,6 +74,7 @@ config ARM64
74 select GENERIC_CPU_AUTOPROBE 74 select GENERIC_CPU_AUTOPROBE
75 select GENERIC_EARLY_IOREMAP 75 select GENERIC_EARLY_IOREMAP
76 select GENERIC_IDLE_POLL_SETUP 76 select GENERIC_IDLE_POLL_SETUP
77 select GENERIC_IRQ_MULTI_HANDLER
77 select GENERIC_IRQ_PROBE 78 select GENERIC_IRQ_PROBE
78 select GENERIC_IRQ_SHOW 79 select GENERIC_IRQ_SHOW
79 select GENERIC_IRQ_SHOW_LEVEL 80 select GENERIC_IRQ_SHOW_LEVEL
@@ -264,9 +265,6 @@ config ARCH_SUPPORTS_UPROBES
264config ARCH_PROC_KCORE_TEXT 265config ARCH_PROC_KCORE_TEXT
265 def_bool y 266 def_bool y
266 267
267config MULTI_IRQ_HANDLER
268 def_bool y
269
270source "init/Kconfig" 268source "init/Kconfig"
271 269
272source "kernel/Kconfig.freezer" 270source "kernel/Kconfig.freezer"
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 88f5aef7934c..e3a375c4cb83 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -19,33 +19,24 @@
19 * u32 *macp, u8 const rk[], u32 rounds); 19 * u32 *macp, u8 const rk[], u32 rounds);
20 */ 20 */
21ENTRY(ce_aes_ccm_auth_data) 21ENTRY(ce_aes_ccm_auth_data)
22 frame_push 7 22 ldr w8, [x3] /* leftover from prev round? */
23
24 mov x19, x0
25 mov x20, x1
26 mov x21, x2
27 mov x22, x3
28 mov x23, x4
29 mov x24, x5
30
31 ldr w25, [x22] /* leftover from prev round? */
32 ld1 {v0.16b}, [x0] /* load mac */ 23 ld1 {v0.16b}, [x0] /* load mac */
33 cbz w25, 1f 24 cbz w8, 1f
34 sub w25, w25, #16 25 sub w8, w8, #16
35 eor v1.16b, v1.16b, v1.16b 26 eor v1.16b, v1.16b, v1.16b
360: ldrb w7, [x20], #1 /* get 1 byte of input */ 270: ldrb w7, [x1], #1 /* get 1 byte of input */
37 subs w21, w21, #1 28 subs w2, w2, #1
38 add w25, w25, #1 29 add w8, w8, #1
39 ins v1.b[0], w7 30 ins v1.b[0], w7
40 ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */ 31 ext v1.16b, v1.16b, v1.16b, #1 /* rotate in the input bytes */
41 beq 8f /* out of input? */ 32 beq 8f /* out of input? */
42 cbnz w25, 0b 33 cbnz w8, 0b
43 eor v0.16b, v0.16b, v1.16b 34 eor v0.16b, v0.16b, v1.16b
441: ld1 {v3.4s}, [x23] /* load first round key */ 351: ld1 {v3.4s}, [x4] /* load first round key */
45 prfm pldl1strm, [x20] 36 prfm pldl1strm, [x1]
46 cmp w24, #12 /* which key size? */ 37 cmp w5, #12 /* which key size? */
47 add x6, x23, #16 38 add x6, x4, #16
48 sub w7, w24, #2 /* modified # of rounds */ 39 sub w7, w5, #2 /* modified # of rounds */
49 bmi 2f 40 bmi 2f
50 bne 5f 41 bne 5f
51 mov v5.16b, v3.16b 42 mov v5.16b, v3.16b
@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
64 ld1 {v5.4s}, [x6], #16 /* load next round key */ 55 ld1 {v5.4s}, [x6], #16 /* load next round key */
65 bpl 3b 56 bpl 3b
66 aese v0.16b, v4.16b 57 aese v0.16b, v4.16b
67 subs w21, w21, #16 /* last data? */ 58 subs w2, w2, #16 /* last data? */
68 eor v0.16b, v0.16b, v5.16b /* final round */ 59 eor v0.16b, v0.16b, v5.16b /* final round */
69 bmi 6f 60 bmi 6f
70 ld1 {v1.16b}, [x20], #16 /* load next input block */ 61 ld1 {v1.16b}, [x1], #16 /* load next input block */
71 eor v0.16b, v0.16b, v1.16b /* xor with mac */ 62 eor v0.16b, v0.16b, v1.16b /* xor with mac */
72 beq 6f 63 bne 1b
73 646: st1 {v0.16b}, [x0] /* store mac */
74 if_will_cond_yield_neon
75 st1 {v0.16b}, [x19] /* store mac */
76 do_cond_yield_neon
77 ld1 {v0.16b}, [x19] /* reload mac */
78 endif_yield_neon
79
80 b 1b
816: st1 {v0.16b}, [x19] /* store mac */
82 beq 10f 65 beq 10f
83 adds w21, w21, #16 66 adds w2, w2, #16
84 beq 10f 67 beq 10f
85 mov w25, w21 68 mov w8, w2
867: ldrb w7, [x20], #1 697: ldrb w7, [x1], #1
87 umov w6, v0.b[0] 70 umov w6, v0.b[0]
88 eor w6, w6, w7 71 eor w6, w6, w7
89 strb w6, [x19], #1 72 strb w6, [x0], #1
90 subs w21, w21, #1 73 subs w2, w2, #1
91 beq 10f 74 beq 10f
92 ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ 75 ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
93 b 7b 76 b 7b
948: mov w7, w25 778: mov w7, w8
95 add w25, w25, #16 78 add w8, w8, #16
969: ext v1.16b, v1.16b, v1.16b, #1 799: ext v1.16b, v1.16b, v1.16b, #1
97 adds w7, w7, #1 80 adds w7, w7, #1
98 bne 9b 81 bne 9b
99 eor v0.16b, v0.16b, v1.16b 82 eor v0.16b, v0.16b, v1.16b
100 st1 {v0.16b}, [x19] 83 st1 {v0.16b}, [x0]
10110: str w25, [x22] 8410: str w8, [x3]
102
103 frame_pop
104 ret 85 ret
105ENDPROC(ce_aes_ccm_auth_data) 86ENDPROC(ce_aes_ccm_auth_data)
106 87
@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
145ENDPROC(ce_aes_ccm_final) 126ENDPROC(ce_aes_ccm_final)
146 127
147 .macro aes_ccm_do_crypt,enc 128 .macro aes_ccm_do_crypt,enc
148 frame_push 8 129 ldr x8, [x6, #8] /* load lower ctr */
149 130 ld1 {v0.16b}, [x5] /* load mac */
150 mov x19, x0 131CPU_LE( rev x8, x8 ) /* keep swabbed ctr in reg */
151 mov x20, x1
152 mov x21, x2
153 mov x22, x3
154 mov x23, x4
155 mov x24, x5
156 mov x25, x6
157
158 ldr x26, [x25, #8] /* load lower ctr */
159 ld1 {v0.16b}, [x24] /* load mac */
160CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
1610: /* outer loop */ 1320: /* outer loop */
162 ld1 {v1.8b}, [x25] /* load upper ctr */ 133 ld1 {v1.8b}, [x6] /* load upper ctr */
163 prfm pldl1strm, [x20] 134 prfm pldl1strm, [x1]
164 add x26, x26, #1 135 add x8, x8, #1
165 rev x9, x26 136 rev x9, x8
166 cmp w23, #12 /* which key size? */ 137 cmp w4, #12 /* which key size? */
167 sub w7, w23, #2 /* get modified # of rounds */ 138 sub w7, w4, #2 /* get modified # of rounds */
168 ins v1.d[1], x9 /* no carry in lower ctr */ 139 ins v1.d[1], x9 /* no carry in lower ctr */
169 ld1 {v3.4s}, [x22] /* load first round key */ 140 ld1 {v3.4s}, [x3] /* load first round key */
170 add x10, x22, #16 141 add x10, x3, #16
171 bmi 1f 142 bmi 1f
172 bne 4f 143 bne 4f
173 mov v5.16b, v3.16b 144 mov v5.16b, v3.16b
@@ -194,9 +165,9 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
194 bpl 2b 165 bpl 2b
195 aese v0.16b, v4.16b 166 aese v0.16b, v4.16b
196 aese v1.16b, v4.16b 167 aese v1.16b, v4.16b
197 subs w21, w21, #16 168 subs w2, w2, #16
198 bmi 7f /* partial block? */ 169 bmi 6f /* partial block? */
199 ld1 {v2.16b}, [x20], #16 /* load next input block */ 170 ld1 {v2.16b}, [x1], #16 /* load next input block */
200 .if \enc == 1 171 .if \enc == 1
201 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */ 172 eor v2.16b, v2.16b, v5.16b /* final round enc+mac */
202 eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */ 173 eor v1.16b, v1.16b, v2.16b /* xor with crypted ctr */
@@ -205,29 +176,18 @@ CPU_LE( rev x26, x26 ) /* keep swabbed ctr in reg */
205 eor v1.16b, v2.16b, v5.16b /* final round enc */ 176 eor v1.16b, v2.16b, v5.16b /* final round enc */
206 .endif 177 .endif
207 eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */ 178 eor v0.16b, v0.16b, v2.16b /* xor mac with pt ^ rk[last] */
208 st1 {v1.16b}, [x19], #16 /* write output block */ 179 st1 {v1.16b}, [x0], #16 /* write output block */
209 beq 5f 180 bne 0b
210 181CPU_LE( rev x8, x8 )
211 if_will_cond_yield_neon 182 st1 {v0.16b}, [x5] /* store mac */
212 st1 {v0.16b}, [x24] /* store mac */ 183 str x8, [x6, #8] /* store lsb end of ctr (BE) */
213 do_cond_yield_neon 1845: ret
214 ld1 {v0.16b}, [x24] /* reload mac */ 185
215 endif_yield_neon 1866: eor v0.16b, v0.16b, v5.16b /* final round mac */
216
217 b 0b
2185:
219CPU_LE( rev x26, x26 )
220 st1 {v0.16b}, [x24] /* store mac */
221 str x26, [x25, #8] /* store lsb end of ctr (BE) */
222
2236: frame_pop
224 ret
225
2267: eor v0.16b, v0.16b, v5.16b /* final round mac */
227 eor v1.16b, v1.16b, v5.16b /* final round enc */ 187 eor v1.16b, v1.16b, v5.16b /* final round enc */
228 st1 {v0.16b}, [x24] /* store mac */ 188 st1 {v0.16b}, [x5] /* store mac */
229 add w21, w21, #16 /* process partial tail block */ 189 add w2, w2, #16 /* process partial tail block */
2308: ldrb w9, [x20], #1 /* get 1 byte of input */ 1907: ldrb w9, [x1], #1 /* get 1 byte of input */
231 umov w6, v1.b[0] /* get top crypted ctr byte */ 191 umov w6, v1.b[0] /* get top crypted ctr byte */
232 umov w7, v0.b[0] /* get top mac byte */ 192 umov w7, v0.b[0] /* get top mac byte */
233 .if \enc == 1 193 .if \enc == 1
@@ -237,13 +197,13 @@ CPU_LE( rev x26, x26 )
237 eor w9, w9, w6 197 eor w9, w9, w6
238 eor w7, w7, w9 198 eor w7, w7, w9
239 .endif 199 .endif
240 strb w9, [x19], #1 /* store out byte */ 200 strb w9, [x0], #1 /* store out byte */
241 strb w7, [x24], #1 /* store mac byte */ 201 strb w7, [x5], #1 /* store mac byte */
242 subs w21, w21, #1 202 subs w2, w2, #1
243 beq 6b 203 beq 5b
244 ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */ 204 ext v0.16b, v0.16b, v0.16b, #1 /* shift out mac byte */
245 ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */ 205 ext v1.16b, v1.16b, v1.16b, #1 /* shift out ctr byte */
246 b 8b 206 b 7b
247 .endm 207 .endm
248 208
249 /* 209 /*
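
With frame_push/frame_pop and the if_will_cond_yield_neon blocks removed, ce_aes_ccm_auth_data() and the aes_ccm_do_crypt macro run start-to-finish with no internal yield points, which is why they can now work directly in the argument registers x0-x8 instead of staging everything in callee-saved x19-x28. The C-side contract is correspondingly a single NEON section; an illustrative call site (argument names follow the prototype comment above):

	kernel_neon_begin();
	ce_aes_ccm_auth_data(mac, in, abytes, &macp, key, rounds);
	kernel_neon_end();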
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index dcffb9e77589..c723647b37db 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8)
322 .endm 322 .endm
323 323
324 .macro pmull_gcm_do_crypt, enc 324 .macro pmull_gcm_do_crypt, enc
325 frame_push 10 325 ld1 {SHASH.2d}, [x4]
326 ld1 {XL.2d}, [x1]
327 ldr x8, [x5, #8] // load lower counter
326 328
327 mov x19, x0 329 load_round_keys w7, x6
328 mov x20, x1
329 mov x21, x2
330 mov x22, x3
331 mov x23, x4
332 mov x24, x5
333 mov x25, x6
334 mov x26, x7
335 .if \enc == 1
336 ldr x27, [sp, #96] // first stacked arg
337 .endif
338
339 ldr x28, [x24, #8] // load lower counter
340CPU_LE( rev x28, x28 )
341
3420: mov x0, x25
343 load_round_keys w26, x0
344 ld1 {SHASH.2d}, [x23]
345 ld1 {XL.2d}, [x20]
346 330
347 movi MASK.16b, #0xe1 331 movi MASK.16b, #0xe1
348 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8 332 ext SHASH2.16b, SHASH.16b, SHASH.16b, #8
333CPU_LE( rev x8, x8 )
349 shl MASK.2d, MASK.2d, #57 334 shl MASK.2d, MASK.2d, #57
350 eor SHASH2.16b, SHASH2.16b, SHASH.16b 335 eor SHASH2.16b, SHASH2.16b, SHASH.16b
351 336
352 .if \enc == 1 337 .if \enc == 1
353 ld1 {KS.16b}, [x27] 338 ldr x10, [sp]
339 ld1 {KS.16b}, [x10]
354 .endif 340 .endif
355 341
3561: ld1 {CTR.8b}, [x24] // load upper counter 3420: ld1 {CTR.8b}, [x5] // load upper counter
357 ld1 {INP.16b}, [x22], #16 343 ld1 {INP.16b}, [x3], #16
358 rev x9, x28 344 rev x9, x8
359 add x28, x28, #1 345 add x8, x8, #1
360 sub w19, w19, #1 346 sub w0, w0, #1
361 ins CTR.d[1], x9 // set lower counter 347 ins CTR.d[1], x9 // set lower counter
362 348
363 .if \enc == 1 349 .if \enc == 1
364 eor INP.16b, INP.16b, KS.16b // encrypt input 350 eor INP.16b, INP.16b, KS.16b // encrypt input
365 st1 {INP.16b}, [x21], #16 351 st1 {INP.16b}, [x2], #16
366 .endif 352 .endif
367 353
368 rev64 T1.16b, INP.16b 354 rev64 T1.16b, INP.16b
369 355
370 cmp w26, #12 356 cmp w7, #12
371 b.ge 4f // AES-192/256? 357 b.ge 2f // AES-192/256?
372 358
3732: enc_round CTR, v21 3591: enc_round CTR, v21
374 360
375 ext T2.16b, XL.16b, XL.16b, #8 361 ext T2.16b, XL.16b, XL.16b, #8
376 ext IN1.16b, T1.16b, T1.16b, #8 362 ext IN1.16b, T1.16b, T1.16b, #8
@@ -425,39 +411,27 @@ CPU_LE( rev x28, x28 )
425 411
426 .if \enc == 0 412 .if \enc == 0
427 eor INP.16b, INP.16b, KS.16b 413 eor INP.16b, INP.16b, KS.16b
428 st1 {INP.16b}, [x21], #16 414 st1 {INP.16b}, [x2], #16
429 .endif 415 .endif
430 416
431 cbz w19, 3f 417 cbnz w0, 0b
432 418
433 if_will_cond_yield_neon 419CPU_LE( rev x8, x8 )
434 st1 {XL.2d}, [x20] 420 st1 {XL.2d}, [x1]
435 .if \enc == 1 421 str x8, [x5, #8] // store lower counter
436 st1 {KS.16b}, [x27]
437 .endif
438 do_cond_yield_neon
439 b 0b
440 endif_yield_neon
441 422
442 b 1b
443
4443: st1 {XL.2d}, [x20]
445 .if \enc == 1 423 .if \enc == 1
446 st1 {KS.16b}, [x27] 424 st1 {KS.16b}, [x10]
447 .endif 425 .endif
448 426
449CPU_LE( rev x28, x28 )
450 str x28, [x24, #8] // store lower counter
451
452 frame_pop
453 ret 427 ret
454 428
4554: b.eq 5f // AES-192? 4292: b.eq 3f // AES-192?
456 enc_round CTR, v17 430 enc_round CTR, v17
457 enc_round CTR, v18 431 enc_round CTR, v18
4585: enc_round CTR, v19 4323: enc_round CTR, v19
459 enc_round CTR, v20 433 enc_round CTR, v20
460 b 2b 434 b 1b
461 .endm 435 .endm
462 436
463 /* 437 /*
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 7cf0b1aa6ea8..8a10f1d7199a 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -488,9 +488,13 @@ static int gcm_decrypt(struct aead_request *req)
488 err = skcipher_walk_done(&walk, 488 err = skcipher_walk_done(&walk,
489 walk.nbytes % AES_BLOCK_SIZE); 489 walk.nbytes % AES_BLOCK_SIZE);
490 } 490 }
491 if (walk.nbytes) 491 if (walk.nbytes) {
492 pmull_gcm_encrypt_block(iv, iv, NULL, 492 kernel_neon_begin();
493 pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
493 num_rounds(&ctx->aes_key)); 494 num_rounds(&ctx->aes_key));
495 kernel_neon_end();
496 }
497
494 } else { 498 } else {
495 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, 499 __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv,
496 num_rounds(&ctx->aes_key)); 500 num_rounds(&ctx->aes_key));
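
The gcm_decrypt() fix pairs with the core rewrite above: passing NULL as the key schedule told the asm the round keys were still live in NEON registers, which is no longer guaranteed once the main kernel_neon section has ended, so encrypting the final counter block must re-enter a NEON section and pass the keys explicitly. A hedged sketch of the resulting pattern (the helper name is illustrative; pmull_gcm_encrypt_block and gcm_aes_ctx are from this file):

	static void encrypt_block(u8 dst[], u8 const src[],
				  struct gcm_aes_ctx *ctx)
	{
		kernel_neon_begin();
		pmull_gcm_encrypt_block(dst, src, ctx->aes_key.key_enc,
					num_rounds(&ctx->aes_key));
		kernel_neon_end();
	}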
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 192d791f1103..7ed320895d1f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
87#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__) 87#define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
88#define efi_is_64bit() (true) 88#define efi_is_64bit() (true)
89 89
90#define efi_table_attr(table, attr, instance) \
91 ((table##_t *)instance)->attr
92
90#define efi_call_proto(protocol, f, instance, ...) \ 93#define efi_call_proto(protocol, f, instance, ...) \
91 ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__) 94 ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
92 95
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a0fee6985e6a..b2b0c6405eb0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,8 +8,6 @@
8 8
9struct pt_regs; 9struct pt_regs;
10 10
11extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
12
13static inline int nr_legacy_irqs(void) 11static inline int nr_legacy_irqs(void)
14{ 12{
15 return 0; 13 return 0;
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..0ad1cf233470 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table)
37 37
38static inline void tlb_flush(struct mmu_gather *tlb) 38static inline void tlb_flush(struct mmu_gather *tlb)
39{ 39{
40 struct vm_area_struct vma = { .vm_mm = tlb->mm, }; 40 struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
41 41
42 /* 42 /*
43 * The ASID allocator will either invalidate the ASID or mark 43 * The ASID allocator will either invalidate the ASID or mark
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f24892a40d2c..c6d80743f4ed 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1351 1351
1352static void update_cpu_capabilities(u16 scope_mask) 1352static void update_cpu_capabilities(u16 scope_mask)
1353{ 1353{
1354 __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1355 __update_cpu_capabilities(arm64_errata, scope_mask, 1354 __update_cpu_capabilities(arm64_errata, scope_mask,
1356 "enabling workaround for"); 1355 "enabling workaround for");
1356 __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1357} 1357}
1358 1358
1359static int __enable_cpu_capability(void *arg) 1359static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1408 1408
1409static void __init enable_cpu_capabilities(u16 scope_mask) 1409static void __init enable_cpu_capabilities(u16 scope_mask)
1410{ 1410{
1411 __enable_cpu_capabilities(arm64_features, scope_mask);
1412 __enable_cpu_capabilities(arm64_errata, scope_mask); 1411 __enable_cpu_capabilities(arm64_errata, scope_mask);
1412 __enable_cpu_capabilities(arm64_features, scope_mask);
1413} 1413}
1414 1414
1415/* 1415/*
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 60e5fc661f74..780a12f59a8f 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
42 return 0; 42 return 0;
43} 43}
44 44
45void (*handle_arch_irq)(struct pt_regs *) = NULL;
46
47void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
48{
49 if (handle_arch_irq)
50 return;
51
52 handle_arch_irq = handle_irq;
53}
54
55#ifdef CONFIG_VMAP_STACK 45#ifdef CONFIG_VMAP_STACK
56static void init_irq_stacks(void) 46static void init_irq_stacks(void)
57{ 47{
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index ecc6818191df..192b3ba07075 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -108,7 +108,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
108 unsigned long pgsize, 108 unsigned long pgsize,
109 unsigned long ncontig) 109 unsigned long ncontig)
110{ 110{
111 struct vm_area_struct vma = { .vm_mm = mm };
112 pte_t orig_pte = huge_ptep_get(ptep); 111 pte_t orig_pte = huge_ptep_get(ptep);
113 bool valid = pte_valid(orig_pte); 112 bool valid = pte_valid(orig_pte);
114 unsigned long i, saddr = addr; 113 unsigned long i, saddr = addr;
@@ -125,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
125 orig_pte = pte_mkdirty(orig_pte); 124 orig_pte = pte_mkdirty(orig_pte);
126 } 125 }
127 126
128 if (valid) 127 if (valid) {
128 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
129 flush_tlb_range(&vma, saddr, addr); 129 flush_tlb_range(&vma, saddr, addr);
130 }
130 return orig_pte; 131 return orig_pte;
131} 132}
132 133
@@ -145,7 +146,7 @@ static void clear_flush(struct mm_struct *mm,
145 unsigned long pgsize, 146 unsigned long pgsize,
146 unsigned long ncontig) 147 unsigned long ncontig)
147{ 148{
148 struct vm_area_struct vma = { .vm_mm = mm }; 149 struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
149 unsigned long i, saddr = addr; 150 unsigned long i, saddr = addr;
150 151
151 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) 152 for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 325cfb3b858a..9abf8a1e7b25 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -611,11 +611,13 @@ void __init mem_init(void)
611 BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); 611 BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
612#endif 612#endif
613 613
614#ifdef CONFIG_SPARSEMEM_VMEMMAP
614 /* 615 /*
615 * Make sure we chose the upper bound of sizeof(struct page) 616 * Make sure we chose the upper bound of sizeof(struct page)
616 * correctly. 617 * correctly when sizing the VMEMMAP array.
617 */ 618 */
618 BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); 619 BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
620#endif
619 621
620 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { 622 if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
621 extern int sysctl_overcommit_memory; 623 extern int sysctl_overcommit_memory;
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 44f0ac0df308..516355a774bf 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
115 flush_tlb_all(); 115 flush_tlb_all();
116 } else { 116 } else {
117 /* 117 /*
118 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a 118 * flush_tlb_range() takes a vma instead of a mm pointer because
119 * vma pointer. 119 * some architectures want the vm_flags for ITLB/DTLB flush.
120 */ 120 */
121 struct vm_area_struct vma; 121 struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
122 122
123 vma.vm_mm = tlb->mm;
124 /* flush the address range from the tlb: */ 123 /* flush the address range from the tlb: */
125 flush_tlb_range(&vma, start, end); 124 flush_tlb_range(&vma, start, end);
126 /* now flush the virt. page-table area mapping the address range: */ 125 /* now flush the virt. page-table area mapping the address range: */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index bdb14a369137..3b85c3ecac38 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -116,6 +116,7 @@ ia64_init_addr_space (void)
116 */ 116 */
117 vma = vm_area_alloc(current->mm); 117 vma = vm_area_alloc(current->mm);
118 if (vma) { 118 if (vma) {
119 vma_set_anonymous(vma);
119 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; 120 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
120 vma->vm_end = vma->vm_start + PAGE_SIZE; 121 vma->vm_end = vma->vm_start + PAGE_SIZE;
121 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; 122 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -133,6 +134,7 @@ ia64_init_addr_space (void)
133 if (!(current->personality & MMAP_PAGE_ZERO)) { 134 if (!(current->personality & MMAP_PAGE_ZERO)) {
134 vma = vm_area_alloc(current->mm); 135 vma = vm_area_alloc(current->mm);
135 if (vma) { 136 if (vma) {
137 vma_set_anonymous(vma);
136 vma->vm_end = PAGE_SIZE; 138 vma->vm_end = PAGE_SIZE;
137 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); 139 vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
138 vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | 140 vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -273,7 +275,7 @@ static struct vm_area_struct gate_vma;
273 275
274static int __init gate_vma_init(void) 276static int __init gate_vma_init(void)
275{ 277{
276 gate_vma.vm_mm = NULL; 278 vma_init(&gate_vma, NULL);
277 gate_vma.vm_start = FIXADDR_USER_START; 279 gate_vma.vm_start = FIXADDR_USER_START;
278 gate_vma.vm_end = FIXADDR_USER_END; 280 gate_vma.vm_end = FIXADDR_USER_END;
279 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 281 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 785612b576f7..b29f93774d95 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -2,6 +2,7 @@
2config M68K 2config M68K
3 bool 3 bool
4 default y 4 default y
5 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
5 select ARCH_MIGHT_HAVE_PC_PARPORT if ISA 6 select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
6 select ARCH_NO_COHERENT_DMA_MMAP if !MMU 7 select ARCH_NO_COHERENT_DMA_MMAP if !MMU
7 select HAVE_IDE 8 select HAVE_IDE
@@ -24,6 +25,10 @@ config M68K
24 select MODULES_USE_ELF_RELA 25 select MODULES_USE_ELF_RELA
25 select OLD_SIGSUSPEND3 26 select OLD_SIGSUSPEND3
26 select OLD_SIGACTION 27 select OLD_SIGACTION
28 select DMA_NONCOHERENT_OPS if HAS_DMA
29 select HAVE_MEMBLOCK
30 select ARCH_DISCARD_MEMBLOCK
31 select NO_BOOTMEM
27 32
28config CPU_BIG_ENDIAN 33config CPU_BIG_ENDIAN
29 def_bool y 34 def_bool y
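
The m68k Kconfig now selects the generic noncoherent DMA ops plus memblock/no-bootmem. The arch-side contract DMA_NONCOHERENT_OPS expects is a pair of sync hooks; an abridged sketch of the device-direction one, as this series implements it in arch/m68k/kernel/dma.c (that file is not shown in this hunk):

	void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
				      size_t size, enum dma_data_direction dir)
	{
		switch (dir) {
		case DMA_BIDIRECTIONAL:
		case DMA_TO_DEVICE:
			cache_push(handle, size);	/* write back + invalidate */
			break;
		case DMA_FROM_DEVICE:
			cache_clear(handle, size);	/* invalidate only */
			break;
		default:
			break;
		}
	}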
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index b2a6bc63f8cd..aef8d42e078d 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -31,7 +31,6 @@ extern void dn_sched_init(irq_handler_t handler);
31extern void dn_init_IRQ(void); 31extern void dn_init_IRQ(void);
32extern u32 dn_gettimeoffset(void); 32extern u32 dn_gettimeoffset(void);
33extern int dn_dummy_hwclk(int, struct rtc_time *); 33extern int dn_dummy_hwclk(int, struct rtc_time *);
34extern int dn_dummy_set_clock_mmss(unsigned long);
35extern void dn_dummy_reset(void); 34extern void dn_dummy_reset(void);
36#ifdef CONFIG_HEARTBEAT 35#ifdef CONFIG_HEARTBEAT
37static void dn_heartbeat(int on); 36static void dn_heartbeat(int on);
@@ -156,7 +155,6 @@ void __init config_apollo(void)
156 arch_gettimeoffset = dn_gettimeoffset; 155 arch_gettimeoffset = dn_gettimeoffset;
157 mach_max_dma_address = 0xffffffff; 156 mach_max_dma_address = 0xffffffff;
158 mach_hwclk = dn_dummy_hwclk; /* */ 157 mach_hwclk = dn_dummy_hwclk; /* */
159 mach_set_clock_mmss = dn_dummy_set_clock_mmss; /* */
160 mach_reset = dn_dummy_reset; /* */ 158 mach_reset = dn_dummy_reset; /* */
161#ifdef CONFIG_HEARTBEAT 159#ifdef CONFIG_HEARTBEAT
162 mach_heartbeat = dn_heartbeat; 160 mach_heartbeat = dn_heartbeat;
@@ -240,12 +238,6 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) {
240 238
241} 239}
242 240
243int dn_dummy_set_clock_mmss(unsigned long nowtime)
244{
245 pr_info("set_clock_mmss\n");
246 return 0;
247}
248
249void dn_dummy_reset(void) { 241void dn_dummy_reset(void) {
250 242
251 dn_serial_print("The end !\n"); 243 dn_serial_print("The end !\n");
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 565c6f06ab0b..bd96702a1ad0 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -81,9 +81,6 @@ extern void atari_sched_init(irq_handler_t);
81extern u32 atari_gettimeoffset(void); 81extern u32 atari_gettimeoffset(void);
82extern int atari_mste_hwclk (int, struct rtc_time *); 82extern int atari_mste_hwclk (int, struct rtc_time *);
83extern int atari_tt_hwclk (int, struct rtc_time *); 83extern int atari_tt_hwclk (int, struct rtc_time *);
84extern int atari_mste_set_clock_mmss (unsigned long);
85extern int atari_tt_set_clock_mmss (unsigned long);
86
87 84
88/* ++roman: This is a more elaborate test for an SCC chip, since the plain 85/* ++roman: This is a more elaborate test for an SCC chip, since the plain
89 * Medusa board generates DTACK at the SCC's standard addresses, but a SCC 86 * Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -362,13 +359,11 @@ void __init config_atari(void)
362 ATARIHW_SET(TT_CLK); 359 ATARIHW_SET(TT_CLK);
363 pr_cont(" TT_CLK"); 360 pr_cont(" TT_CLK");
364 mach_hwclk = atari_tt_hwclk; 361 mach_hwclk = atari_tt_hwclk;
365 mach_set_clock_mmss = atari_tt_set_clock_mmss;
366 } 362 }
367 if (hwreg_present(&mste_rtc.sec_ones)) { 363 if (hwreg_present(&mste_rtc.sec_ones)) {
368 ATARIHW_SET(MSTE_CLK); 364 ATARIHW_SET(MSTE_CLK);
369 pr_cont(" MSTE_CLK"); 365 pr_cont(" MSTE_CLK");
370 mach_hwclk = atari_mste_hwclk; 366 mach_hwclk = atari_mste_hwclk;
371 mach_set_clock_mmss = atari_mste_set_clock_mmss;
372 } 367 }
373 if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) && 368 if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
374 hwreg_write(&dma_wd.fdc_speed, 0)) { 369 hwreg_write(&dma_wd.fdc_speed, 0)) {
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index c549b48174ec..9cca64286464 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -285,69 +285,6 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
285 return( 0 ); 285 return( 0 );
286} 286}
287 287
288
289int atari_mste_set_clock_mmss (unsigned long nowtime)
290{
291 short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
292 struct MSTE_RTC val;
293 unsigned char rtc_minutes;
294
295 mste_read(&val);
296 rtc_minutes= val.min_ones + val.min_tens * 10;
297 if ((rtc_minutes < real_minutes
298 ? real_minutes - rtc_minutes
299 : rtc_minutes - real_minutes) < 30)
300 {
301 val.sec_ones = real_seconds % 10;
302 val.sec_tens = real_seconds / 10;
303 val.min_ones = real_minutes % 10;
304 val.min_tens = real_minutes / 10;
305 mste_write(&val);
306 }
307 else
308 return -1;
309 return 0;
310}
311
312int atari_tt_set_clock_mmss (unsigned long nowtime)
313{
314 int retval = 0;
315 short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
316 unsigned char save_control, save_freq_select, rtc_minutes;
317
318 save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
319 RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
320
321 save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
322 RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
323
324 rtc_minutes = RTC_READ (RTC_MINUTES);
325 if (!(save_control & RTC_DM_BINARY))
326 rtc_minutes = bcd2bin(rtc_minutes);
327
328 /* Since we're only adjusting minutes and seconds, don't interfere
329 with hour overflow. This avoids messing with unknown time zones
330 but requires your RTC not to be off by more than 30 minutes. */
331 if ((rtc_minutes < real_minutes
332 ? real_minutes - rtc_minutes
333 : rtc_minutes - real_minutes) < 30)
334 {
335 if (!(save_control & RTC_DM_BINARY))
336 {
337 real_seconds = bin2bcd(real_seconds);
338 real_minutes = bin2bcd(real_minutes);
339 }
340 RTC_WRITE (RTC_SECONDS, real_seconds);
341 RTC_WRITE (RTC_MINUTES, real_minutes);
342 }
343 else
344 retval = -1;
345
346 RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
347 RTC_WRITE (RTC_CONTROL, save_control);
348 return retval;
349}
350
351/* 288/*
352 * Local variables: 289 * Local variables:
353 * c-indent-level: 4 290 * c-indent-level: 4
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 2cfff4765040..143ee9fa3893 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -41,7 +41,6 @@ static void bvme6000_get_model(char *model);
41extern void bvme6000_sched_init(irq_handler_t handler); 41extern void bvme6000_sched_init(irq_handler_t handler);
42extern u32 bvme6000_gettimeoffset(void); 42extern u32 bvme6000_gettimeoffset(void);
43extern int bvme6000_hwclk (int, struct rtc_time *); 43extern int bvme6000_hwclk (int, struct rtc_time *);
44extern int bvme6000_set_clock_mmss (unsigned long);
45extern void bvme6000_reset (void); 44extern void bvme6000_reset (void);
46void bvme6000_set_vectors (void); 45void bvme6000_set_vectors (void);
47 46
@@ -113,7 +112,6 @@ void __init config_bvme6000(void)
113 mach_init_IRQ = bvme6000_init_IRQ; 112 mach_init_IRQ = bvme6000_init_IRQ;
114 arch_gettimeoffset = bvme6000_gettimeoffset; 113 arch_gettimeoffset = bvme6000_gettimeoffset;
115 mach_hwclk = bvme6000_hwclk; 114 mach_hwclk = bvme6000_hwclk;
116 mach_set_clock_mmss = bvme6000_set_clock_mmss;
117 mach_reset = bvme6000_reset; 115 mach_reset = bvme6000_reset;
118 mach_get_model = bvme6000_get_model; 116 mach_get_model = bvme6000_get_model;
119 117
@@ -305,46 +303,3 @@ int bvme6000_hwclk(int op, struct rtc_time *t)
305 303
306 return 0; 304 return 0;
307} 305}
308
309/*
310 * Set the minutes and seconds from seconds value 'nowtime'. Fail if
311 * clock is out by > 30 minutes. Logic lifted from atari code.
312 * Algorithm is to wait for the 10ms register to change, and then to
313 * wait a short while, and then set it.
314 */
315
316int bvme6000_set_clock_mmss (unsigned long nowtime)
317{
318 int retval = 0;
319 short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
320 unsigned char rtc_minutes, rtc_tenms;
321 volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
322 unsigned char msr = rtc->msr & 0xc0;
323 unsigned long flags;
324 volatile int i;
325
326 rtc->msr = 0; /* Ensure clock accessible */
327 rtc_minutes = bcd2bin (rtc->bcd_min);
328
329 if ((rtc_minutes < real_minutes
330 ? real_minutes - rtc_minutes
331 : rtc_minutes - real_minutes) < 30)
332 {
333 local_irq_save(flags);
334 rtc_tenms = rtc->bcd_tenms;
335 while (rtc_tenms == rtc->bcd_tenms)
336 ;
337 for (i = 0; i < 1000; i++)
338 ;
339 rtc->bcd_min = bin2bcd(real_minutes);
340 rtc->bcd_sec = bin2bcd(real_seconds);
341 local_irq_restore(flags);
342 }
343 else
344 retval = -1;
345
346 rtc->msr = msr;
347
348 return retval;
349}
350
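
The apollo, atari, and bvme6000 hunks all drop their mach_set_clock_mmss implementations; with CONFIG_RTC_CLASS enabled (as in the defconfigs below), system-to-RTC time sync goes through the RTC class framework rather than this m68k-specific hook. The removed helpers shared one guard worth keeping in mind, sketched here as a standalone check (logic lifted from the deleted code):

	/* Only minutes and seconds are written back, so refuse the
	 * update when the RTC is more than 30 minutes adrift -- this
	 * avoids corrupting a clock kept in an unknown time zone. */
	static bool rtc_mmss_in_range(int rtc_minutes, int real_minutes)
	{
		int delta = rtc_minutes < real_minutes ?
			    real_minutes - rtc_minutes :
			    rtc_minutes - real_minutes;

		return delta < 30;
	}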
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index a874e54404d1..1d5483f6e457 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -52,6 +52,7 @@ CONFIG_UNIX_DIAG=m
52CONFIG_TLS=m 52CONFIG_TLS=m
53CONFIG_XFRM_MIGRATE=y 53CONFIG_XFRM_MIGRATE=y
54CONFIG_NET_KEY=y 54CONFIG_NET_KEY=y
55CONFIG_XDP_SOCKETS=y
55CONFIG_INET=y 56CONFIG_INET=y
56CONFIG_IP_PNP=y 57CONFIG_IP_PNP=y
57CONFIG_IP_PNP_DHCP=y 58CONFIG_IP_PNP_DHCP=y
@@ -98,18 +99,14 @@ CONFIG_NF_CONNTRACK_SANE=m
98CONFIG_NF_CONNTRACK_SIP=m 99CONFIG_NF_CONNTRACK_SIP=m
99CONFIG_NF_CONNTRACK_TFTP=m 100CONFIG_NF_CONNTRACK_TFTP=m
100CONFIG_NF_TABLES=m 101CONFIG_NF_TABLES=m
102CONFIG_NF_TABLES_SET=m
101CONFIG_NF_TABLES_INET=y 103CONFIG_NF_TABLES_INET=y
102CONFIG_NF_TABLES_NETDEV=y 104CONFIG_NF_TABLES_NETDEV=y
103CONFIG_NFT_EXTHDR=m
104CONFIG_NFT_META=m
105CONFIG_NFT_RT=m
106CONFIG_NFT_NUMGEN=m 105CONFIG_NFT_NUMGEN=m
107CONFIG_NFT_CT=m 106CONFIG_NFT_CT=m
108CONFIG_NFT_FLOW_OFFLOAD=m 107CONFIG_NFT_FLOW_OFFLOAD=m
109CONFIG_NFT_SET_RBTREE=m
110CONFIG_NFT_SET_HASH=m
111CONFIG_NFT_SET_BITMAP=m
112CONFIG_NFT_COUNTER=m 108CONFIG_NFT_COUNTER=m
109CONFIG_NFT_CONNLIMIT=m
113CONFIG_NFT_LOG=m 110CONFIG_NFT_LOG=m
114CONFIG_NFT_LIMIT=m 111CONFIG_NFT_LIMIT=m
115CONFIG_NFT_MASQ=m 112CONFIG_NFT_MASQ=m
@@ -122,6 +119,7 @@ CONFIG_NFT_REJECT=m
122CONFIG_NFT_COMPAT=m 119CONFIG_NFT_COMPAT=m
123CONFIG_NFT_HASH=m 120CONFIG_NFT_HASH=m
124CONFIG_NFT_FIB_INET=m 121CONFIG_NFT_FIB_INET=m
122CONFIG_NFT_SOCKET=m
125CONFIG_NFT_DUP_NETDEV=m 123CONFIG_NFT_DUP_NETDEV=m
126CONFIG_NFT_FWD_NETDEV=m 124CONFIG_NFT_FWD_NETDEV=m
127CONFIG_NFT_FIB_NETDEV=m 125CONFIG_NFT_FIB_NETDEV=m
@@ -200,7 +198,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
200CONFIG_IP_SET_HASH_NETIFACE=m 198CONFIG_IP_SET_HASH_NETIFACE=m
201CONFIG_IP_SET_LIST_SET=m 199CONFIG_IP_SET_LIST_SET=m
202CONFIG_NF_CONNTRACK_IPV4=m 200CONFIG_NF_CONNTRACK_IPV4=m
203CONFIG_NF_SOCKET_IPV4=m
204CONFIG_NFT_CHAIN_ROUTE_IPV4=m 201CONFIG_NFT_CHAIN_ROUTE_IPV4=m
205CONFIG_NFT_DUP_IPV4=m 202CONFIG_NFT_DUP_IPV4=m
206CONFIG_NFT_FIB_IPV4=m 203CONFIG_NFT_FIB_IPV4=m
@@ -231,7 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
231CONFIG_IP_NF_ARPFILTER=m 228CONFIG_IP_NF_ARPFILTER=m
232CONFIG_IP_NF_ARP_MANGLE=m 229CONFIG_IP_NF_ARP_MANGLE=m
233CONFIG_NF_CONNTRACK_IPV6=m 230CONFIG_NF_CONNTRACK_IPV6=m
234CONFIG_NF_SOCKET_IPV6=m
235CONFIG_NFT_CHAIN_ROUTE_IPV6=m 231CONFIG_NFT_CHAIN_ROUTE_IPV6=m
236CONFIG_NFT_CHAIN_NAT_IPV6=m 232CONFIG_NFT_CHAIN_NAT_IPV6=m
237CONFIG_NFT_MASQ_IPV6=m 233CONFIG_NFT_MASQ_IPV6=m
@@ -260,7 +256,6 @@ CONFIG_IP6_NF_NAT=m
260CONFIG_IP6_NF_TARGET_MASQUERADE=m 256CONFIG_IP6_NF_TARGET_MASQUERADE=m
261CONFIG_IP6_NF_TARGET_NPT=m 257CONFIG_IP6_NF_TARGET_NPT=m
262CONFIG_NF_TABLES_BRIDGE=y 258CONFIG_NF_TABLES_BRIDGE=y
263CONFIG_NFT_BRIDGE_META=m
264CONFIG_NFT_BRIDGE_REJECT=m 259CONFIG_NFT_BRIDGE_REJECT=m
265CONFIG_NF_LOG_BRIDGE=m 260CONFIG_NF_LOG_BRIDGE=m
266CONFIG_BRIDGE_NF_EBTABLES=m 261CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
301CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m 296CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
302CONFIG_DNS_RESOLVER=y 297CONFIG_DNS_RESOLVER=y
303CONFIG_BATMAN_ADV=m 298CONFIG_BATMAN_ADV=m
299# CONFIG_BATMAN_ADV_BATMAN_V is not set
304CONFIG_BATMAN_ADV_DAT=y 300CONFIG_BATMAN_ADV_DAT=y
305CONFIG_BATMAN_ADV_NC=y 301CONFIG_BATMAN_ADV_NC=y
306CONFIG_BATMAN_ADV_MCAST=y 302CONFIG_BATMAN_ADV_MCAST=y
@@ -356,6 +352,7 @@ CONFIG_A2091_SCSI=y
356CONFIG_GVP11_SCSI=y 352CONFIG_GVP11_SCSI=y
357CONFIG_SCSI_A4000T=y 353CONFIG_SCSI_A4000T=y
358CONFIG_SCSI_ZORRO7XX=y 354CONFIG_SCSI_ZORRO7XX=y
355CONFIG_SCSI_ZORRO_ESP=y
359CONFIG_MD=y 356CONFIG_MD=y
360CONFIG_MD_LINEAR=m 357CONFIG_MD_LINEAR=m
361CONFIG_BLK_DEV_DM=m 358CONFIG_BLK_DEV_DM=m
@@ -363,6 +360,7 @@ CONFIG_DM_UNSTRIPED=m
363CONFIG_DM_CRYPT=m 360CONFIG_DM_CRYPT=m
364CONFIG_DM_SNAPSHOT=m 361CONFIG_DM_SNAPSHOT=m
365CONFIG_DM_THIN_PROVISIONING=m 362CONFIG_DM_THIN_PROVISIONING=m
363CONFIG_DM_WRITECACHE=m
366CONFIG_DM_ERA=m 364CONFIG_DM_ERA=m
367CONFIG_DM_MIRROR=m 365CONFIG_DM_MIRROR=m
368CONFIG_DM_RAID=m 366CONFIG_DM_RAID=m
@@ -402,8 +400,8 @@ CONFIG_A2065=y
402CONFIG_ARIADNE=y 400CONFIG_ARIADNE=y
403# CONFIG_NET_VENDOR_AQUANTIA is not set 401# CONFIG_NET_VENDOR_AQUANTIA is not set
404# CONFIG_NET_VENDOR_ARC is not set 402# CONFIG_NET_VENDOR_ARC is not set
405# CONFIG_NET_CADENCE is not set
406# CONFIG_NET_VENDOR_BROADCOM is not set 403# CONFIG_NET_VENDOR_BROADCOM is not set
404# CONFIG_NET_CADENCE is not set
407# CONFIG_NET_VENDOR_CIRRUS is not set 405# CONFIG_NET_VENDOR_CIRRUS is not set
408# CONFIG_NET_VENDOR_CORTINA is not set 406# CONFIG_NET_VENDOR_CORTINA is not set
409# CONFIG_NET_VENDOR_EZCHIP is not set 407# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -412,8 +410,10 @@ CONFIG_ARIADNE=y
412# CONFIG_NET_VENDOR_INTEL is not set 410# CONFIG_NET_VENDOR_INTEL is not set
413# CONFIG_NET_VENDOR_MARVELL is not set 411# CONFIG_NET_VENDOR_MARVELL is not set
414# CONFIG_NET_VENDOR_MICREL is not set 412# CONFIG_NET_VENDOR_MICREL is not set
413# CONFIG_NET_VENDOR_MICROSEMI is not set
415# CONFIG_NET_VENDOR_NETRONOME is not set 414# CONFIG_NET_VENDOR_NETRONOME is not set
416# CONFIG_NET_VENDOR_NI is not set 415# CONFIG_NET_VENDOR_NI is not set
416CONFIG_XSURF100=y
417CONFIG_HYDRA=y 417CONFIG_HYDRA=y
418CONFIG_APNE=y 418CONFIG_APNE=y
419CONFIG_ZORRO8390=y 419CONFIG_ZORRO8390=y
@@ -426,9 +426,9 @@ CONFIG_ZORRO8390=y
426# CONFIG_NET_VENDOR_SMSC is not set 426# CONFIG_NET_VENDOR_SMSC is not set
427# CONFIG_NET_VENDOR_SOCIONEXT is not set 427# CONFIG_NET_VENDOR_SOCIONEXT is not set
428# CONFIG_NET_VENDOR_STMICRO is not set 428# CONFIG_NET_VENDOR_STMICRO is not set
429# CONFIG_NET_VENDOR_SYNOPSYS is not set
429# CONFIG_NET_VENDOR_VIA is not set 430# CONFIG_NET_VENDOR_VIA is not set
430# CONFIG_NET_VENDOR_WIZNET is not set 431# CONFIG_NET_VENDOR_WIZNET is not set
431# CONFIG_NET_VENDOR_SYNOPSYS is not set
432CONFIG_PPP=m 432CONFIG_PPP=m
433CONFIG_PPP_BSDCOMP=m 433CONFIG_PPP_BSDCOMP=m
434CONFIG_PPP_DEFLATE=m 434CONFIG_PPP_DEFLATE=m
@@ -478,6 +478,7 @@ CONFIG_HIDRAW=y
478CONFIG_UHID=m 478CONFIG_UHID=m
479# CONFIG_HID_GENERIC is not set 479# CONFIG_HID_GENERIC is not set
480# CONFIG_HID_ITE is not set 480# CONFIG_HID_ITE is not set
481# CONFIG_HID_REDRAGON is not set
481# CONFIG_USB_SUPPORT is not set 482# CONFIG_USB_SUPPORT is not set
482CONFIG_RTC_CLASS=y 483CONFIG_RTC_CLASS=y
483# CONFIG_RTC_NVMEM is not set 484# CONFIG_RTC_NVMEM is not set
@@ -499,7 +500,7 @@ CONFIG_FS_ENCRYPTION=m
499CONFIG_FANOTIFY=y 500CONFIG_FANOTIFY=y
500CONFIG_QUOTA_NETLINK_INTERFACE=y 501CONFIG_QUOTA_NETLINK_INTERFACE=y
501# CONFIG_PRINT_QUOTA_WARNING is not set 502# CONFIG_PRINT_QUOTA_WARNING is not set
502CONFIG_AUTOFS4_FS=m 503CONFIG_AUTOFS_FS=m
503CONFIG_FUSE_FS=m 504CONFIG_FUSE_FS=m
504CONFIG_CUSE=m 505CONFIG_CUSE=m
505CONFIG_OVERLAY_FS=m 506CONFIG_OVERLAY_FS=m
@@ -600,6 +601,7 @@ CONFIG_TEST_KSTRTOX=m
600CONFIG_TEST_PRINTF=m 601CONFIG_TEST_PRINTF=m
601CONFIG_TEST_BITMAP=m 602CONFIG_TEST_BITMAP=m
602CONFIG_TEST_UUID=m 603CONFIG_TEST_UUID=m
604CONFIG_TEST_OVERFLOW=m
603CONFIG_TEST_RHASHTABLE=m 605CONFIG_TEST_RHASHTABLE=m
604CONFIG_TEST_HASH=m 606CONFIG_TEST_HASH=m
605CONFIG_TEST_USER_COPY=m 607CONFIG_TEST_USER_COPY=m
@@ -622,6 +624,11 @@ CONFIG_CRYPTO_CRYPTD=m
622CONFIG_CRYPTO_MCRYPTD=m 624CONFIG_CRYPTO_MCRYPTD=m
623CONFIG_CRYPTO_TEST=m 625CONFIG_CRYPTO_TEST=m
624CONFIG_CRYPTO_CHACHA20POLY1305=m 626CONFIG_CRYPTO_CHACHA20POLY1305=m
627CONFIG_CRYPTO_AEGIS128=m
628CONFIG_CRYPTO_AEGIS128L=m
629CONFIG_CRYPTO_AEGIS256=m
630CONFIG_CRYPTO_MORUS640=m
631CONFIG_CRYPTO_MORUS1280=m
625CONFIG_CRYPTO_CFB=m 632CONFIG_CRYPTO_CFB=m
626CONFIG_CRYPTO_LRW=m 633CONFIG_CRYPTO_LRW=m
627CONFIG_CRYPTO_PCBC=m 634CONFIG_CRYPTO_PCBC=m
@@ -657,6 +664,7 @@ CONFIG_CRYPTO_LZO=m
657CONFIG_CRYPTO_842=m 664CONFIG_CRYPTO_842=m
658CONFIG_CRYPTO_LZ4=m 665CONFIG_CRYPTO_LZ4=m
659CONFIG_CRYPTO_LZ4HC=m 666CONFIG_CRYPTO_LZ4HC=m
667CONFIG_CRYPTO_ZSTD=m
660CONFIG_CRYPTO_ANSI_CPRNG=m 668CONFIG_CRYPTO_ANSI_CPRNG=m
661CONFIG_CRYPTO_DRBG_HASH=y 669CONFIG_CRYPTO_DRBG_HASH=y
662CONFIG_CRYPTO_DRBG_CTR=y 670CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 8ce39e23aa42..52a0af127951 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
50CONFIG_TLS=m 50CONFIG_TLS=m
51CONFIG_XFRM_MIGRATE=y 51CONFIG_XFRM_MIGRATE=y
52CONFIG_NET_KEY=y 52CONFIG_NET_KEY=y
53CONFIG_XDP_SOCKETS=y
53CONFIG_INET=y 54CONFIG_INET=y
54CONFIG_IP_PNP=y 55CONFIG_IP_PNP=y
55CONFIG_IP_PNP_DHCP=y 56CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
96CONFIG_NF_CONNTRACK_SIP=m 97CONFIG_NF_CONNTRACK_SIP=m
97CONFIG_NF_CONNTRACK_TFTP=m 98CONFIG_NF_CONNTRACK_TFTP=m
98CONFIG_NF_TABLES=m 99CONFIG_NF_TABLES=m
100CONFIG_NF_TABLES_SET=m
99CONFIG_NF_TABLES_INET=y 101CONFIG_NF_TABLES_INET=y
100CONFIG_NF_TABLES_NETDEV=y 102CONFIG_NF_TABLES_NETDEV=y
101CONFIG_NFT_EXTHDR=m
102CONFIG_NFT_META=m
103CONFIG_NFT_RT=m
104CONFIG_NFT_NUMGEN=m 103CONFIG_NFT_NUMGEN=m
105CONFIG_NFT_CT=m 104CONFIG_NFT_CT=m
106CONFIG_NFT_FLOW_OFFLOAD=m 105CONFIG_NFT_FLOW_OFFLOAD=m
107CONFIG_NFT_SET_RBTREE=m
108CONFIG_NFT_SET_HASH=m
109CONFIG_NFT_SET_BITMAP=m
110CONFIG_NFT_COUNTER=m 106CONFIG_NFT_COUNTER=m
107CONFIG_NFT_CONNLIMIT=m
111CONFIG_NFT_LOG=m 108CONFIG_NFT_LOG=m
112CONFIG_NFT_LIMIT=m 109CONFIG_NFT_LIMIT=m
113CONFIG_NFT_MASQ=m 110CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
120CONFIG_NFT_COMPAT=m 117CONFIG_NFT_COMPAT=m
121CONFIG_NFT_HASH=m 118CONFIG_NFT_HASH=m
122CONFIG_NFT_FIB_INET=m 119CONFIG_NFT_FIB_INET=m
120CONFIG_NFT_SOCKET=m
123CONFIG_NFT_DUP_NETDEV=m 121CONFIG_NFT_DUP_NETDEV=m
124CONFIG_NFT_FWD_NETDEV=m 122CONFIG_NFT_FWD_NETDEV=m
125CONFIG_NFT_FIB_NETDEV=m 123CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
198CONFIG_IP_SET_HASH_NETIFACE=m 196CONFIG_IP_SET_HASH_NETIFACE=m
199CONFIG_IP_SET_LIST_SET=m 197CONFIG_IP_SET_LIST_SET=m
200CONFIG_NF_CONNTRACK_IPV4=m 198CONFIG_NF_CONNTRACK_IPV4=m
201CONFIG_NF_SOCKET_IPV4=m
202CONFIG_NFT_CHAIN_ROUTE_IPV4=m 199CONFIG_NFT_CHAIN_ROUTE_IPV4=m
203CONFIG_NFT_DUP_IPV4=m 200CONFIG_NFT_DUP_IPV4=m
204CONFIG_NFT_FIB_IPV4=m 201CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
229CONFIG_IP_NF_ARPFILTER=m 226CONFIG_IP_NF_ARPFILTER=m
230CONFIG_IP_NF_ARP_MANGLE=m 227CONFIG_IP_NF_ARP_MANGLE=m
231CONFIG_NF_CONNTRACK_IPV6=m 228CONFIG_NF_CONNTRACK_IPV6=m
232CONFIG_NF_SOCKET_IPV6=m
233CONFIG_NFT_CHAIN_ROUTE_IPV6=m 229CONFIG_NFT_CHAIN_ROUTE_IPV6=m
234CONFIG_NFT_CHAIN_NAT_IPV6=m 230CONFIG_NFT_CHAIN_NAT_IPV6=m
235CONFIG_NFT_MASQ_IPV6=m 231CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
258CONFIG_IP6_NF_TARGET_MASQUERADE=m 254CONFIG_IP6_NF_TARGET_MASQUERADE=m
259CONFIG_IP6_NF_TARGET_NPT=m 255CONFIG_IP6_NF_TARGET_NPT=m
260CONFIG_NF_TABLES_BRIDGE=y 256CONFIG_NF_TABLES_BRIDGE=y
261CONFIG_NFT_BRIDGE_META=m
262CONFIG_NFT_BRIDGE_REJECT=m 257CONFIG_NFT_BRIDGE_REJECT=m
263CONFIG_NF_LOG_BRIDGE=m 258CONFIG_NF_LOG_BRIDGE=m
264CONFIG_BRIDGE_NF_EBTABLES=m 259CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
299CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m 294CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
300CONFIG_DNS_RESOLVER=y 295CONFIG_DNS_RESOLVER=y
301CONFIG_BATMAN_ADV=m 296CONFIG_BATMAN_ADV=m
297# CONFIG_BATMAN_ADV_BATMAN_V is not set
302CONFIG_BATMAN_ADV_DAT=y 298CONFIG_BATMAN_ADV_DAT=y
303CONFIG_BATMAN_ADV_NC=y 299CONFIG_BATMAN_ADV_NC=y
304CONFIG_BATMAN_ADV_MCAST=y 300CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
345CONFIG_DM_CRYPT=m 341CONFIG_DM_CRYPT=m
346CONFIG_DM_SNAPSHOT=m 342CONFIG_DM_SNAPSHOT=m
347CONFIG_DM_THIN_PROVISIONING=m 343CONFIG_DM_THIN_PROVISIONING=m
344CONFIG_DM_WRITECACHE=m
348CONFIG_DM_ERA=m 345CONFIG_DM_ERA=m
349CONFIG_DM_MIRROR=m 346CONFIG_DM_MIRROR=m
350CONFIG_DM_RAID=m 347CONFIG_DM_RAID=m
@@ -381,14 +378,15 @@ CONFIG_VETH=m
381# CONFIG_NET_VENDOR_AMAZON is not set 378# CONFIG_NET_VENDOR_AMAZON is not set
382# CONFIG_NET_VENDOR_AQUANTIA is not set 379# CONFIG_NET_VENDOR_AQUANTIA is not set
383# CONFIG_NET_VENDOR_ARC is not set 380# CONFIG_NET_VENDOR_ARC is not set
384# CONFIG_NET_CADENCE is not set
385# CONFIG_NET_VENDOR_BROADCOM is not set 381# CONFIG_NET_VENDOR_BROADCOM is not set
382# CONFIG_NET_CADENCE is not set
386# CONFIG_NET_VENDOR_CORTINA is not set 383# CONFIG_NET_VENDOR_CORTINA is not set
387# CONFIG_NET_VENDOR_EZCHIP is not set 384# CONFIG_NET_VENDOR_EZCHIP is not set
388# CONFIG_NET_VENDOR_HUAWEI is not set 385# CONFIG_NET_VENDOR_HUAWEI is not set
389# CONFIG_NET_VENDOR_INTEL is not set 386# CONFIG_NET_VENDOR_INTEL is not set
390# CONFIG_NET_VENDOR_MARVELL is not set 387# CONFIG_NET_VENDOR_MARVELL is not set
391# CONFIG_NET_VENDOR_MICREL is not set 388# CONFIG_NET_VENDOR_MICREL is not set
389# CONFIG_NET_VENDOR_MICROSEMI is not set
392# CONFIG_NET_VENDOR_NATSEMI is not set 390# CONFIG_NET_VENDOR_NATSEMI is not set
393# CONFIG_NET_VENDOR_NETRONOME is not set 391# CONFIG_NET_VENDOR_NETRONOME is not set
394# CONFIG_NET_VENDOR_NI is not set 392# CONFIG_NET_VENDOR_NI is not set
@@ -400,9 +398,9 @@ CONFIG_VETH=m
400# CONFIG_NET_VENDOR_SOLARFLARE is not set 398# CONFIG_NET_VENDOR_SOLARFLARE is not set
401# CONFIG_NET_VENDOR_SOCIONEXT is not set 399# CONFIG_NET_VENDOR_SOCIONEXT is not set
402# CONFIG_NET_VENDOR_STMICRO is not set 400# CONFIG_NET_VENDOR_STMICRO is not set
401# CONFIG_NET_VENDOR_SYNOPSYS is not set
403# CONFIG_NET_VENDOR_VIA is not set 402# CONFIG_NET_VENDOR_VIA is not set
404# CONFIG_NET_VENDOR_WIZNET is not set 403# CONFIG_NET_VENDOR_WIZNET is not set
405# CONFIG_NET_VENDOR_SYNOPSYS is not set
406CONFIG_PPP=m 404CONFIG_PPP=m
407CONFIG_PPP_BSDCOMP=m 405CONFIG_PPP_BSDCOMP=m
408CONFIG_PPP_DEFLATE=m 406CONFIG_PPP_DEFLATE=m
@@ -440,6 +438,7 @@ CONFIG_HIDRAW=y
440CONFIG_UHID=m 438CONFIG_UHID=m
441# CONFIG_HID_GENERIC is not set 439# CONFIG_HID_GENERIC is not set
442# CONFIG_HID_ITE is not set 440# CONFIG_HID_ITE is not set
441# CONFIG_HID_REDRAGON is not set
443# CONFIG_USB_SUPPORT is not set 442# CONFIG_USB_SUPPORT is not set
444CONFIG_RTC_CLASS=y 443CONFIG_RTC_CLASS=y
445# CONFIG_RTC_NVMEM is not set 444# CONFIG_RTC_NVMEM is not set
@@ -458,7 +457,7 @@ CONFIG_FS_ENCRYPTION=m
458CONFIG_FANOTIFY=y 457CONFIG_FANOTIFY=y
459CONFIG_QUOTA_NETLINK_INTERFACE=y 458CONFIG_QUOTA_NETLINK_INTERFACE=y
460# CONFIG_PRINT_QUOTA_WARNING is not set 459# CONFIG_PRINT_QUOTA_WARNING is not set
461CONFIG_AUTOFS4_FS=m 460CONFIG_AUTOFS_FS=m
462CONFIG_FUSE_FS=m 461CONFIG_FUSE_FS=m
463CONFIG_CUSE=m 462CONFIG_CUSE=m
464CONFIG_OVERLAY_FS=m 463CONFIG_OVERLAY_FS=m
@@ -559,6 +558,7 @@ CONFIG_TEST_KSTRTOX=m
559CONFIG_TEST_PRINTF=m 558CONFIG_TEST_PRINTF=m
560CONFIG_TEST_BITMAP=m 559CONFIG_TEST_BITMAP=m
561CONFIG_TEST_UUID=m 560CONFIG_TEST_UUID=m
561CONFIG_TEST_OVERFLOW=m
562CONFIG_TEST_RHASHTABLE=m 562CONFIG_TEST_RHASHTABLE=m
563CONFIG_TEST_HASH=m 563CONFIG_TEST_HASH=m
564CONFIG_TEST_USER_COPY=m 564CONFIG_TEST_USER_COPY=m
@@ -581,6 +581,11 @@ CONFIG_CRYPTO_CRYPTD=m
581CONFIG_CRYPTO_MCRYPTD=m 581CONFIG_CRYPTO_MCRYPTD=m
582CONFIG_CRYPTO_TEST=m 582CONFIG_CRYPTO_TEST=m
583CONFIG_CRYPTO_CHACHA20POLY1305=m 583CONFIG_CRYPTO_CHACHA20POLY1305=m
584CONFIG_CRYPTO_AEGIS128=m
585CONFIG_CRYPTO_AEGIS128L=m
586CONFIG_CRYPTO_AEGIS256=m
587CONFIG_CRYPTO_MORUS640=m
588CONFIG_CRYPTO_MORUS1280=m
584CONFIG_CRYPTO_CFB=m 589CONFIG_CRYPTO_CFB=m
585CONFIG_CRYPTO_LRW=m 590CONFIG_CRYPTO_LRW=m
586CONFIG_CRYPTO_PCBC=m 591CONFIG_CRYPTO_PCBC=m
@@ -616,6 +621,7 @@ CONFIG_CRYPTO_LZO=m
616CONFIG_CRYPTO_842=m 621CONFIG_CRYPTO_842=m
617CONFIG_CRYPTO_LZ4=m 622CONFIG_CRYPTO_LZ4=m
618CONFIG_CRYPTO_LZ4HC=m 623CONFIG_CRYPTO_LZ4HC=m
624CONFIG_CRYPTO_ZSTD=m
619CONFIG_CRYPTO_ANSI_CPRNG=m 625CONFIG_CRYPTO_ANSI_CPRNG=m
620CONFIG_CRYPTO_DRBG_HASH=y 626CONFIG_CRYPTO_DRBG_HASH=y
621CONFIG_CRYPTO_DRBG_CTR=y 627CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 346c4e75edf8..b3103e51268a 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -391,14 +388,15 @@ CONFIG_VETH=m
 CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 CONFIG_NE2000=y
@@ -411,9 +409,9 @@ CONFIG_NE2000=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -480,7 +478,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -581,6 +579,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -603,6 +602,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -638,6 +642,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fca9c7aa71a3..fb7d651a4cab 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index f9eab174915c..6b37f5537c39 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -382,14 +379,15 @@ CONFIG_VETH=m
 CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -401,9 +399,9 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -443,6 +441,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -460,7 +459,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -561,6 +560,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -583,6 +583,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -618,6 +623,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b52e597899eb..930cc2965a11 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -49,6 +49,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -95,18 +96,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -119,6 +116,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +195,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -228,7 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -257,7 +253,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -398,8 +395,8 @@ CONFIG_VETH=m
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -407,6 +404,7 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -420,9 +418,9 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -465,6 +463,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -482,7 +481,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -583,6 +582,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -605,6 +605,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -640,6 +645,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 2a84eeec5b02..e7dd25300127 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -59,6 +59,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -105,18 +106,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -129,6 +126,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -207,7 +205,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -238,7 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -267,7 +263,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -311,6 +306,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -373,6 +369,7 @@ CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
 CONFIG_ATARI_SCSI=y
 CONFIG_MAC_SCSI=y
 CONFIG_SCSI_MAC_ESP=y
@@ -387,6 +384,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -438,8 +436,8 @@ CONFIG_SUN3LANCE=y
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -449,9 +447,11 @@ CONFIG_BVME6000_NET=y
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
 CONFIG_NE2000=y
@@ -466,9 +466,9 @@ CONFIG_ZORRO8390=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -533,6 +533,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -562,7 +563,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -663,6 +664,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -685,6 +687,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -720,6 +727,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 476e69994340..b383327fd77a 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -47,6 +47,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -93,18 +94,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -117,6 +114,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +193,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -226,7 +223,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -255,7 +251,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -296,6 +291,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -343,6 +339,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 1477cda9146e..9783d3deb9e9 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index b3a543dc48a0..a35d10ee10cb 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -350,6 +346,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -388,8 +385,8 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -398,6 +395,7 @@ CONFIG_VETH=m
398# CONFIG_NET_VENDOR_INTEL is not set 395# CONFIG_NET_VENDOR_INTEL is not set
399# CONFIG_NET_VENDOR_MARVELL is not set 396# CONFIG_NET_VENDOR_MARVELL is not set
400# CONFIG_NET_VENDOR_MICREL is not set 397# CONFIG_NET_VENDOR_MICREL is not set
398# CONFIG_NET_VENDOR_MICROSEMI is not set
401# CONFIG_NET_VENDOR_NETRONOME is not set 399# CONFIG_NET_VENDOR_NETRONOME is not set
402# CONFIG_NET_VENDOR_NI is not set 400# CONFIG_NET_VENDOR_NI is not set
403CONFIG_NE2000=y 401CONFIG_NE2000=y
@@ -410,9 +408,9 @@ CONFIG_NE2000=y
410# CONFIG_NET_VENDOR_SMSC is not set 408# CONFIG_NET_VENDOR_SMSC is not set
411# CONFIG_NET_VENDOR_SOCIONEXT is not set 409# CONFIG_NET_VENDOR_SOCIONEXT is not set
412# CONFIG_NET_VENDOR_STMICRO is not set 410# CONFIG_NET_VENDOR_STMICRO is not set
411# CONFIG_NET_VENDOR_SYNOPSYS is not set
413# CONFIG_NET_VENDOR_VIA is not set 412# CONFIG_NET_VENDOR_VIA is not set
414# CONFIG_NET_VENDOR_WIZNET is not set 413# CONFIG_NET_VENDOR_WIZNET is not set
415# CONFIG_NET_VENDOR_SYNOPSYS is not set
416CONFIG_PLIP=m 414CONFIG_PLIP=m
417CONFIG_PPP=m 415CONFIG_PPP=m
418CONFIG_PPP_BSDCOMP=m 416CONFIG_PPP_BSDCOMP=m
@@ -455,6 +453,7 @@ CONFIG_HIDRAW=y
455CONFIG_UHID=m 453CONFIG_UHID=m
456# CONFIG_HID_GENERIC is not set 454# CONFIG_HID_GENERIC is not set
457# CONFIG_HID_ITE is not set 455# CONFIG_HID_ITE is not set
456# CONFIG_HID_REDRAGON is not set
458# CONFIG_USB_SUPPORT is not set 457# CONFIG_USB_SUPPORT is not set
459CONFIG_RTC_CLASS=y 458CONFIG_RTC_CLASS=y
460# CONFIG_RTC_NVMEM is not set 459# CONFIG_RTC_NVMEM is not set
@@ -473,7 +472,7 @@ CONFIG_FS_ENCRYPTION=m
473CONFIG_FANOTIFY=y 472CONFIG_FANOTIFY=y
474CONFIG_QUOTA_NETLINK_INTERFACE=y 473CONFIG_QUOTA_NETLINK_INTERFACE=y
475# CONFIG_PRINT_QUOTA_WARNING is not set 474# CONFIG_PRINT_QUOTA_WARNING is not set
476CONFIG_AUTOFS4_FS=m 475CONFIG_AUTOFS_FS=m
477CONFIG_FUSE_FS=m 476CONFIG_FUSE_FS=m
478CONFIG_CUSE=m 477CONFIG_CUSE=m
479CONFIG_OVERLAY_FS=m 478CONFIG_OVERLAY_FS=m
@@ -574,6 +573,7 @@ CONFIG_TEST_KSTRTOX=m
574CONFIG_TEST_PRINTF=m 573CONFIG_TEST_PRINTF=m
575CONFIG_TEST_BITMAP=m 574CONFIG_TEST_BITMAP=m
576CONFIG_TEST_UUID=m 575CONFIG_TEST_UUID=m
576CONFIG_TEST_OVERFLOW=m
577CONFIG_TEST_RHASHTABLE=m 577CONFIG_TEST_RHASHTABLE=m
578CONFIG_TEST_HASH=m 578CONFIG_TEST_HASH=m
579CONFIG_TEST_USER_COPY=m 579CONFIG_TEST_USER_COPY=m
@@ -596,6 +596,11 @@ CONFIG_CRYPTO_CRYPTD=m
596CONFIG_CRYPTO_MCRYPTD=m 596CONFIG_CRYPTO_MCRYPTD=m
597CONFIG_CRYPTO_TEST=m 597CONFIG_CRYPTO_TEST=m
598CONFIG_CRYPTO_CHACHA20POLY1305=m 598CONFIG_CRYPTO_CHACHA20POLY1305=m
599CONFIG_CRYPTO_AEGIS128=m
600CONFIG_CRYPTO_AEGIS128L=m
601CONFIG_CRYPTO_AEGIS256=m
602CONFIG_CRYPTO_MORUS640=m
603CONFIG_CRYPTO_MORUS1280=m
599CONFIG_CRYPTO_CFB=m 604CONFIG_CRYPTO_CFB=m
600CONFIG_CRYPTO_LRW=m 605CONFIG_CRYPTO_LRW=m
601CONFIG_CRYPTO_PCBC=m 606CONFIG_CRYPTO_PCBC=m
@@ -631,6 +636,7 @@ CONFIG_CRYPTO_LZO=m
631CONFIG_CRYPTO_842=m 636CONFIG_CRYPTO_842=m
632CONFIG_CRYPTO_LZ4=m 637CONFIG_CRYPTO_LZ4=m
633CONFIG_CRYPTO_LZ4HC=m 638CONFIG_CRYPTO_LZ4HC=m
639CONFIG_CRYPTO_ZSTD=m
634CONFIG_CRYPTO_ANSI_CPRNG=m 640CONFIG_CRYPTO_ANSI_CPRNG=m
635CONFIG_CRYPTO_DRBG_HASH=y 641CONFIG_CRYPTO_DRBG_HASH=y
636CONFIG_CRYPTO_DRBG_CTR=y 642CONFIG_CRYPTO_DRBG_CTR=y
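
The same reshuffle repeats across every m68k defconfig below. These are minimal defconfigs, so lines appear, vanish, or move whenever upstream Kconfig defaults, dependencies, or sort order change; a sketch of the usual refresh workflow (not part of this patch) is:

    make ARCH=m68k q40_defconfig    # expand the minimal config
    make ARCH=m68k olddefconfig     # resolve any new symbols to their defaults
    make ARCH=m68k savedefconfig    # write the minimal defconfig back out
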
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index d543ed5dfa96..573bf922d448 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -385,6 +382,7 @@ CONFIG_SUN3LANCE=y
 CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -574,6 +574,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -609,6 +614,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a67e54246023..efb27a7fcc55 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -378,14 +375,15 @@ CONFIG_VETH=m
 CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -575,6 +575,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -610,6 +615,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 4d8d68c4e3dd..a4b8d3331a9e 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,6 +1,7 @@
 generic-y += barrier.h
 generic-y += compat.h
 generic-y += device.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += extable.h
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 93b47b1f6fb4..54009ea710b3 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -454,7 +454,7 @@ static inline unsigned long ffz(unsigned long word)
  */
 #if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
 	!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
-static inline int __ffs(int x)
+static inline unsigned long __ffs(unsigned long x)
 {
 	__asm__ __volatile__ ("bitrev %0; ff1 %0"
 		: "=d" (x)
@@ -493,7 +493,11 @@ static inline int ffs(int x)
 		: "dm" (x & -x));
 	return 32 - cnt;
 }
-#define __ffs(x) (ffs(x) - 1)
+
+static inline unsigned long __ffs(unsigned long x)
+{
+	return ffs(x) - 1;
+}
 
 /*
  * fls: find last bit set.
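
The bitops change above only affects types, not semantics: ffs() stays 1-based and returns 0 when no bit is set, while __ffs() is 0-based and undefined for a zero argument, which is why it can simply be ffs(x) - 1. A quick illustration (not from the patch itself):

	unsigned long word = 0x08;
	int first_1based = ffs(word);             /* 4; ffs(0) == 0 */
	unsigned long first_0based = __ffs(word); /* 3; __ffs(0) is undefined */
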
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
deleted file mode 100644
index e3722ed04fbb..000000000000
--- a/arch/m68k/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_DMA_MAPPING_H
-#define _M68K_DMA_MAPPING_H
-
-extern const struct dma_map_ops m68k_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &m68k_dma_ops;
-}
-
-#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/include/asm/io.h b/arch/m68k/include/asm/io.h
index ca2849afb087..aabe6420ead2 100644
--- a/arch/m68k/include/asm/io.h
+++ b/arch/m68k/include/asm/io.h
@@ -1,6 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _M68K_IO_H
+#define _M68K_IO_H
+
 #if defined(__uClinux__) || defined(CONFIG_COLDFIRE)
 #include <asm/io_no.h>
 #else
 #include <asm/io_mm.h>
 #endif
+
+#include <asm-generic/io.h>
+
+#endif /* _M68K_IO_H */
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index fe485f4f5fac..782b78f8a048 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -16,13 +16,11 @@
  * isa_readX(),isa_writeX() are for ISA memory
  */
 
-#ifndef _IO_H
-#define _IO_H
+#ifndef _M68K_IO_MM_H
+#define _M68K_IO_MM_H
 
 #ifdef __KERNEL__
 
-#define ARCH_HAS_IOREMAP_WT
-
 #include <linux/compiler.h>
 #include <asm/raw_io.h>
 #include <asm/virtconvert.h>
@@ -369,40 +367,6 @@ static inline void isa_delay(void)
 #define writew(val, addr) out_le16((addr), (val))
 #endif /* CONFIG_ATARI_ROM_ISA */
 
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
-/*
- * We need to define dummy functions for GENERIC_IOMAP support.
- */
-#define inb(port) 0xff
-#define inb_p(port) 0xff
-#define outb(val,port) ((void)0)
-#define outb_p(val,port) ((void)0)
-#define inw(port) 0xffff
-#define inw_p(port) 0xffff
-#define outw(val,port) ((void)0)
-#define outw_p(val,port) ((void)0)
-#define inl(port) 0xffffffffUL
-#define inl_p(port) 0xffffffffUL
-#define outl(val,port) ((void)0)
-#define outl_p(val,port) ((void)0)
-
-#define insb(port,buf,nr) ((void)0)
-#define outsb(port,buf,nr) ((void)0)
-#define insw(port,buf,nr) ((void)0)
-#define outsw(port,buf,nr) ((void)0)
-#define insl(port,buf,nr) ((void)0)
-#define outsl(port,buf,nr) ((void)0)
-
-/*
- * These should be valid on any ioremap()ed region
- */
-#define readb(addr) in_8(addr)
-#define writeb(val,addr) out_8((addr),(val))
-#define readw(addr) in_le16(addr)
-#define writew(val,addr) out_le16((addr),(val))
-
-#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
-
 #define readl(addr) in_le32(addr)
 #define writel(val,addr) out_le32((addr),(val))
 
@@ -444,4 +408,4 @@ static inline void isa_delay(void)
 #define writew_relaxed(b, addr) writew(b, addr)
 #define writel_relaxed(b, addr) writel(b, addr)
 
-#endif /* _IO_H */
+#endif /* _M68K_IO_MM_H */
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index 83a0a6d449f4..0498192e1d98 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -131,19 +131,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
 #define PCI_SPACE_LIMIT PCI_IO_MASK
 #endif /* CONFIG_PCI */
 
-/*
- * These are defined in kmap.h as static inline functions. To maintain
- * previous behavior we put these define guards here so io_mm.h doesn't
- * see them.
- */
-#ifdef CONFIG_MMU
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
-#endif
-
 #include <asm/kmap.h>
 #include <asm/virtconvert.h>
-#include <asm-generic/io.h>
 
 #endif /* _M68KNOMMU_IO_H */
diff --git a/arch/m68k/include/asm/kmap.h b/arch/m68k/include/asm/kmap.h
index 84b8333db8ad..aac7f045f7f0 100644
--- a/arch/m68k/include/asm/kmap.h
+++ b/arch/m68k/include/asm/kmap.h
@@ -4,6 +4,8 @@
 
 #ifdef CONFIG_MMU
 
+#define ARCH_HAS_IOREMAP_WT
+
 /* Values for nocacheflag and cmode */
 #define IOMAP_FULL_CACHING 0
 #define IOMAP_NOCACHE_SER 1
@@ -16,6 +18,7 @@
  */
 extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
 			       int cacheflag);
+#define iounmap iounmap
 extern void iounmap(void __iomem *addr);
 extern void __iounmap(void *addr, unsigned long size);
 
@@ -33,31 +36,35 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr,
 }
 
 #define ioremap_uc ioremap_nocache
+#define ioremap_wt ioremap_wt
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
 				       unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
 
-#define ioremap_fillcache ioremap_fullcache
+#define ioremap_fullcache ioremap_fullcache
 static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
 					      unsigned long size)
 {
 	return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
 
+#define memset_io memset_io
 static inline void memset_io(volatile void __iomem *addr, unsigned char val,
 			     int count)
 {
 	__builtin_memset((void __force *) addr, val, count);
 }
 
+#define memcpy_fromio memcpy_fromio
 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
 				 int count)
 {
 	__builtin_memcpy(dst, (void __force *) src, count);
 }
 
+#define memcpy_toio memcpy_toio
 static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
 			       int count)
 {
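
The "#define foo foo" lines added above follow the <asm-generic/io.h> convention: now that asm/io.h includes the generic header, each self-referential define tells it that the architecture already provides that helper. A simplified sketch of the guard on the generic side (assumed shape, not the exact kernel code):

	/* asm-generic/io.h, roughly: */
	#ifndef memset_io
	#define memset_io memset_io
	static inline void memset_io(volatile void __iomem *addr, int value,
				     size_t size)
	{
		memset(__io_virt(addr), value, size);
	}
	#endif
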
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 1605da48ebf2..49bd3266b4b1 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -22,7 +22,6 @@ extern int (*mach_hwclk)(int, struct rtc_time*);
 extern unsigned int (*mach_get_ss)(void);
 extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-extern int (*mach_set_clock_mmss)(unsigned long);
 extern void (*mach_reset)( void );
 extern void (*mach_halt)( void );
 extern void (*mach_power_off)( void );
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index 9b840c03ebb7..08cee11180e6 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -57,7 +57,6 @@ struct mac_model
 #define MAC_SCSI_IIFX 5
 #define MAC_SCSI_DUO 6
 #define MAC_SCSI_LC 7
-#define MAC_SCSI_LATE 8
 
 #define MAC_IDE_NONE 0
 #define MAC_IDE_QUADRA 1
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index e644c4daf540..6bbe52025de3 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -18,7 +18,7 @@ extern unsigned long memory_end;
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 #define __pa(vaddr) ((unsigned long)(vaddr))
-#define __va(paddr) ((void *)(paddr))
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
 
 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 463572c4943f..e99993c57d6b 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -6,7 +6,7 @@
 
 #undef DEBUG
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
@@ -19,7 +19,7 @@
 
 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 
-static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		gfp_t flag, unsigned long attrs)
 {
 	struct page *page, **map;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	return addr;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+void arch_dma_free(struct device *dev, size_t size, void *addr,
 		dma_addr_t handle, unsigned long attrs)
 {
 	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
@@ -73,8 +73,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
 
 #include <asm/cacheflush.h>
 
-static void *m68k_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	void *ret;
 
@@ -89,7 +89,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
 	return ret;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -97,8 +97,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-static void m68k_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+		size_t size, enum dma_data_direction dir)
 {
 	switch (dir) {
 	case DMA_BIDIRECTIONAL:
@@ -115,58 +115,6 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
 	}
 }
 
-static void m68k_dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sglist, int nents, enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nents, i) {
-		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-			dir);
-	}
-}
-
-static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_sync_single_for_device(dev, handle, size, dir);
-
-	return handle;
-}
-
-static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nents, i) {
-		sg->dma_address = sg_phys(sg);
-
-		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-			continue;
-
-		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-			dir);
-	}
-	return nents;
-}
-
-const struct dma_map_ops m68k_dma_ops = {
-	.alloc = m68k_dma_alloc,
-	.free = m68k_dma_free,
-	.map_page = m68k_dma_map_page,
-	.map_sg = m68k_dma_map_sg,
-	.sync_single_for_device = m68k_dma_sync_single_for_device,
-	.sync_sg_for_device = m68k_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(m68k_dma_ops);
-
 void arch_setup_pdev_archdata(struct platform_device *pdev)
 {
 	if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
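
With the generic dma-noncoherent code, all of the map_page/map_sg boilerplate deleted above lives in common code and the architecture only supplies arch_dma_alloc(), arch_dma_free() and arch_sync_dma_for_device(). Roughly, the shared implementation does what the removed m68k_dma_map_page() did; a simplified sketch of that generic path (assumed shape, not verbatim kernel code):

	static dma_addr_t dma_noncoherent_map_page(struct device *dev,
			struct page *page, unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		dma_addr_t addr = page_to_phys(page) + offset;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(dev, addr, size, dir);
		return addr;
	}
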
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index f35e3ebd6331..5d3596c180f9 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/module.h>
@@ -88,7 +89,6 @@ void (*mach_get_hardware_list) (struct seq_file *m);
 /* machine dependent timer functions */
 int (*mach_hwclk) (int, struct rtc_time*);
 EXPORT_SYMBOL(mach_hwclk);
-int (*mach_set_clock_mmss) (unsigned long);
 unsigned int (*mach_get_ss)(void);
 int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 int (*mach_set_rtc_pll)(struct rtc_pll_info *);
@@ -165,6 +165,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
 				be32_to_cpu(m->addr);
 			m68k_memory[m68k_num_memory].size =
 				be32_to_cpu(m->size);
+			memblock_add(m68k_memory[m68k_num_memory].addr,
+				     m68k_memory[m68k_num_memory].size);
 			m68k_num_memory++;
 		} else
 			pr_warn("%s: too many memory chunks\n",
@@ -224,10 +226,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
 
 void __init setup_arch(char **cmdline_p)
 {
-#ifndef CONFIG_SUN3
-	int i;
-#endif
-
 	/* The bootinfo is located right after the kernel */
 	if (!CPU_IS_COLDFIRE)
 		m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -356,14 +354,9 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifndef CONFIG_SUN3
-	for (i = 1; i < m68k_num_memory; i++)
-		free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
-				  m68k_memory[i].size);
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (m68k_ramdisk.size) {
-		reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
-				     m68k_ramdisk.addr, m68k_ramdisk.size,
-				     BOOTMEM_DEFAULT);
+		memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
 		initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
 		initrd_end = initrd_start + m68k_ramdisk.size;
 		pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index a98af1018201..cfd5475bfc31 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -28,6 +28,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/initrd.h>
@@ -51,7 +52,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
 
 /* machine dependent timer functions */
 void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
-int (*mach_set_clock_mmss)(unsigned long);
 int (*mach_hwclk) (int, struct rtc_time*);
 
 /* machine dependent reboot functions */
@@ -86,8 +86,6 @@ void (*mach_power_off)(void);
 
 void __init setup_arch(char **cmdline_p)
 {
-	int bootmap_size;
-
 	memory_start = PAGE_ALIGN(_ramstart);
 	memory_end = _ramend;
 
@@ -142,6 +140,8 @@ void __init setup_arch(char **cmdline_p)
 	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
 		__bss_stop, memory_start, memory_start, memory_end);
 
+	memblock_add(memory_start, memory_end - memory_start);
+
 	/* Keep a copy of command line */
 	*cmdline_p = &command_line[0];
 	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
@@ -158,23 +158,10 @@ void __init setup_arch(char **cmdline_p)
 	min_low_pfn = PFN_DOWN(memory_start);
 	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 
-	bootmap_size = init_bootmem_node(
-			NODE_DATA(0),
-			min_low_pfn,		/* map goes here */
-			PFN_DOWN(PAGE_OFFSET),
-			max_pfn);
-	/*
-	 * Free the usable memory, we have to make sure we do not free
-	 * the bootmem bitmap so we then reserve it after freeing it :-)
-	 */
-	free_bootmem(memory_start, memory_end - memory_start);
-	reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-
 #if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
 	if ((initrd_start > 0) && (initrd_start < initrd_end) &&
 		(initrd_end < memory_end))
-		reserve_bootmem(initrd_start, initrd_end - initrd_start,
-				BOOTMEM_DEFAULT);
+		memblock_reserve(initrd_start, initrd_end - initrd_start);
 #endif /* if defined(CONFIG_BLK_DEV_INITRD) */
 
 	/*
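
Both setup paths now follow the standard bootmem-to-memblock conversion pattern: register all usable RAM first, then carve out what is already occupied, with no bitmap to size, free and re-reserve. In miniature (sketch only):

	memblock_add(memory_start, memory_end - memory_start);     /* usable RAM */
	memblock_reserve(initrd_start, initrd_end - initrd_start); /* already in use */
	/* memblock allocations and the page allocator take over from here */
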
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index e522307db47c..b02d7254b73a 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -57,7 +57,6 @@ static unsigned long mac_orig_videoaddr;
 /* Mac specific timer functions */
 extern u32 mac_gettimeoffset(void);
 extern int mac_hwclk(int, struct rtc_time *);
-extern int mac_set_clock_mmss(unsigned long);
 extern void iop_preinit(void);
 extern void iop_init(void);
 extern void via_init(void);
@@ -158,7 +157,6 @@ void __init config_mac(void)
 	mach_get_model = mac_get_model;
 	arch_gettimeoffset = mac_gettimeoffset;
 	mach_hwclk = mac_hwclk;
-	mach_set_clock_mmss = mac_set_clock_mmss;
 	mach_reset = mac_reset;
 	mach_halt = mac_poweroff;
 	mach_power_off = mac_poweroff;
@@ -709,7 +707,7 @@ static struct mac_model mac_data_table[] = {
 		.name = "PowerBook 520",
 		.adb_type = MAC_ADB_PB2,
 		.via_type = MAC_VIA_QUADRA,
-		.scsi_type = MAC_SCSI_LATE,
+		.scsi_type = MAC_SCSI_OLD,
 		.scc_type = MAC_SCC_QUADRA,
 		.ether_type = MAC_ETHER_SONIC,
 		.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
@@ -943,18 +941,6 @@ static const struct resource mac_scsi_old_rsrc[] __initconst = {
 	},
 };
 
-static const struct resource mac_scsi_late_rsrc[] __initconst = {
-	{
-		.flags = IORESOURCE_IRQ,
-		.start = IRQ_MAC_SCSI,
-		.end = IRQ_MAC_SCSI,
-	}, {
-		.flags = IORESOURCE_MEM,
-		.start = 0x50010000,
-		.end = 0x50011FFF,
-	},
-};
-
 static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
 	{
 		.flags = IORESOURCE_IRQ,
@@ -1064,11 +1050,6 @@ int __init mac_platform_init(void)
 		platform_device_register_simple("mac_scsi", 0,
 			mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
 		break;
-	case MAC_SCSI_LATE:
-		/* XXX PDMA support for PowerBook 500 series needs testing */
-		platform_device_register_simple("mac_scsi", 0,
-			mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
-		break;
 	case MAC_SCSI_LC:
 		/* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
 		 * Also from the Developer Notes for Classic II, LC III,
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index c68054361615..19e9d8eef1f2 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -26,33 +26,38 @@
 
 #include <asm/machdep.h>
 
-/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 
 #define RTC_OFFSET 2082844800
 
 static void (*rom_reset)(void);
 
 #ifdef CONFIG_ADB_CUDA
-static long cuda_read_time(void)
+static time64_t cuda_read_time(void)
 {
 	struct adb_request req;
-	long time;
+	time64_t time;
 
 	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
 		return 0;
 	while (!req.complete)
 		cuda_poll();
 
-	time = (req.reply[3] << 24) | (req.reply[4] << 16) |
-	       (req.reply[5] << 8) | req.reply[6];
+	time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
+		     (req.reply[5] << 8) | req.reply[6]);
+
 	return time - RTC_OFFSET;
 }
 
-static void cuda_write_time(long data)
+static void cuda_write_time(time64_t time)
 {
 	struct adb_request req;
+	u32 data = lower_32_bits(time + RTC_OFFSET);
 
-	data += RTC_OFFSET;
 	if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
 			(data >> 24) & 0xFF, (data >> 16) & 0xFF,
 			(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -86,26 +91,27 @@ static void cuda_write_pram(int offset, __u8 data)
 #endif /* CONFIG_ADB_CUDA */
 
 #ifdef CONFIG_ADB_PMU68K
-static long pmu_read_time(void)
+static time64_t pmu_read_time(void)
 {
 	struct adb_request req;
-	long time;
+	time64_t time;
 
 	if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
 		return 0;
 	while (!req.complete)
 		pmu_poll();
 
-	time = (req.reply[1] << 24) | (req.reply[2] << 16) |
-	       (req.reply[3] << 8) | req.reply[4];
+	time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
+		     (req.reply[3] << 8) | req.reply[4]);
+
 	return time - RTC_OFFSET;
 }
 
-static void pmu_write_time(long data)
+static void pmu_write_time(time64_t time)
 {
 	struct adb_request req;
+	u32 data = lower_32_bits(time + RTC_OFFSET);
 
-	data += RTC_OFFSET;
 	if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
 			(data >> 24) & 0xFF, (data >> 16) & 0xFF,
 			(data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -245,11 +251,11 @@ static void via_write_pram(int offset, __u8 data)
  * is basically any machine with Mac II-style ADB.
  */
 
-static long via_read_time(void)
+static time64_t via_read_time(void)
 {
 	union {
 		__u8 cdata[4];
-		long idata;
+		__u32 idata;
 	} result, last_result;
 	int count = 1;
 
@@ -270,7 +276,7 @@ static long via_read_time(void)
 	via_pram_command(0x8D, &result.cdata[0]);
 
 	if (result.idata == last_result.idata)
-		return result.idata - RTC_OFFSET;
+		return (time64_t)result.idata - RTC_OFFSET;
 
 	if (++count > 10)
 		break;
@@ -278,8 +284,8 @@ static long via_read_time(void)
 		last_result.idata = result.idata;
 	}
 
-	pr_err("via_read_time: failed to read a stable value; got 0x%08lx then 0x%08lx\n",
-	       last_result.idata, result.idata);
+	pr_err("%s: failed to read a stable value; got 0x%08x then 0x%08x\n",
+	       __func__, last_result.idata, result.idata);
 
 	return 0;
 }
@@ -291,11 +297,11 @@ static long via_read_time(void)
  * is basically any machine with Mac II-style ADB.
  */
 
-static void via_write_time(long time)
+static void via_write_time(time64_t time)
 {
 	union {
 		__u8 cdata[4];
-		long idata;
+		__u32 idata;
 	} data;
 	__u8 temp;
 
@@ -304,7 +310,7 @@ static void via_write_time(long time)
 	temp = 0x55;
 	via_pram_command(0x35, &temp);
 
-	data.idata = time + RTC_OFFSET;
+	data.idata = lower_32_bits(time + RTC_OFFSET);
 	via_pram_command(0x01, &data.cdata[3]);
 	via_pram_command(0x05, &data.cdata[2]);
 	via_pram_command(0x09, &data.cdata[1]);
@@ -585,12 +591,15 @@ void mac_reset(void)
  * This function translates seconds since 1970 into a proper date.
  *
  * Algorithm cribbed from glibc2.1, __offtime().
+ *
+ * This is roughly same as rtc_time64_to_tm(), which we should probably
+ * use here, but it's only available when CONFIG_RTC_LIB is enabled.
  */
 #define SECS_PER_MINUTE (60)
 #define SECS_PER_HOUR (SECS_PER_MINUTE * 60)
 #define SECS_PER_DAY (SECS_PER_HOUR * 24)
 
-static void unmktime(unsigned long time, long offset,
+static void unmktime(time64_t time, long offset,
 		     int *yearp, int *monp, int *dayp,
 		     int *hourp, int *minp, int *secp)
 {
@@ -602,11 +611,10 @@ static void unmktime(unsigned long time, long offset,
 		/* Leap years. */
 		{ 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
 	};
-	long int days, rem, y, wday, yday;
+	int days, rem, y, wday, yday;
 	const unsigned short int *ip;
 
-	days = time / SECS_PER_DAY;
-	rem = time % SECS_PER_DAY;
+	days = div_u64_rem(time, SECS_PER_DAY, &rem);
 	rem += offset;
 	while (rem < 0) {
 		rem += SECS_PER_DAY;
@@ -657,7 +665,7 @@ static void unmktime(unsigned long time, long offset,
 
 int mac_hwclk(int op, struct rtc_time *t)
 {
-	unsigned long now;
+	time64_t now;
 
 	if (!op) {	/* read */
 		switch (macintosh_config->adb_type) {
@@ -693,8 +701,8 @@ int mac_hwclk(int op, struct rtc_time *t)
 		__func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
 		t->tm_hour, t->tm_min, t->tm_sec);
 
-	now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-		     t->tm_hour, t->tm_min, t->tm_sec);
+	now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+		       t->tm_hour, t->tm_min, t->tm_sec);
 
 	switch (macintosh_config->adb_type) {
 	case MAC_ADB_IOP:
@@ -719,19 +727,3 @@ int mac_hwclk(int op, struct rtc_time *t)
 	}
 	return 0;
 }
-
-/*
- * Set minutes/seconds in the hardware clock
- */
-
-int mac_set_clock_mmss (unsigned long nowtime)
-{
-	struct rtc_time now;
-
-	mac_hwclk(0, &now);
-	now.tm_sec = nowtime % 60;
-	now.tm_min = (nowtime / 60) % 60;
-	mac_hwclk(1, &now);
-
-	return 0;
-}
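
The RTC_OFFSET constant above checks out: 1904-01-01 to 1970-01-01 spans 66 years including 17 leap days (1904, 1908, ..., 1968), so 66 * 365 + 17 = 24107 days, and 24107 * 86400 = 2082844800 seconds. Expressed as a derivation (illustrative, not in the patch):

	#define MAC_UNIX_EPOCH_DAYS	(66 * 365 + 17)			/* 24107 */
	#define MAC_UNIX_EPOCH_SECS	(MAC_UNIX_EPOCH_DAYS * 86400ULL)	/* == RTC_OFFSET */
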
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8827b7f91402..38e2b272c220 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -71,7 +71,6 @@ void __init m68k_setup_node(int node)
 		pg_data_table[i] = pg_data_map + node;
 	}
 #endif
-	pg_data_map[node].bdata = bootmem_node_data + node;
 	node_set_online(node);
 }
 
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 2925d795d71a..70dde040779b 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -153,31 +154,31 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 
 void __init cf_bootmem_alloc(void)
 {
-	unsigned long start_pfn;
 	unsigned long memstart;
 
 	/* _rambase and _ramend will be naturally page aligned */
 	m68k_memory[0].addr = _rambase;
 	m68k_memory[0].size = _ramend - _rambase;
 
+	memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+
 	/* compute total pages in system */
 	num_pages = PFN_DOWN(_ramend - _rambase);
 
 	/* page numbers */
 	memstart = PAGE_ALIGN(_ramstart);
 	min_low_pfn = PFN_DOWN(_rambase);
-	start_pfn = PFN_DOWN(memstart);
 	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
 	high_memory = (void *)_ramend;
 
+	/* Reserve kernel text/data/bss */
+	memblock_reserve(memstart, memstart - _rambase);
+
 	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
 	module_fixup(NULL, __start_fixup, __stop_fixup);
 
-	/* setup bootmem data */
+	/* setup node data */
 	m68k_setup_node(0);
-	memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
-				      min_low_pfn, max_low_pfn);
-	free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
 }
 
 /*
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index e490ecc7842c..4e17ecb5928a 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -19,6 +19,7 @@
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/bootmem.h> 21#include <linux/bootmem.h>
22#include <linux/memblock.h>
22#include <linux/gfp.h> 23#include <linux/gfp.h>
23 24
24#include <asm/setup.h> 25#include <asm/setup.h>
@@ -208,7 +209,7 @@ void __init paging_init(void)
208{ 209{
209 unsigned long zones_size[MAX_NR_ZONES] = { 0, }; 210 unsigned long zones_size[MAX_NR_ZONES] = { 0, };
210 unsigned long min_addr, max_addr; 211 unsigned long min_addr, max_addr;
211 unsigned long addr, size, end; 212 unsigned long addr;
212 int i; 213 int i;
213 214
214#ifdef DEBUG 215#ifdef DEBUG
@@ -253,34 +254,20 @@ void __init paging_init(void)
253 min_low_pfn = availmem >> PAGE_SHIFT; 254 min_low_pfn = availmem >> PAGE_SHIFT;
254 max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT; 255 max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
255 256
256 for (i = 0; i < m68k_num_memory; i++) { 257 /* Reserve kernel text/data/bss and the memory allocated in head.S */
257 addr = m68k_memory[i].addr; 258 memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
258 end = addr + m68k_memory[i].size;
259 m68k_setup_node(i);
260 availmem = PAGE_ALIGN(availmem);
261 availmem += init_bootmem_node(NODE_DATA(i),
262 availmem >> PAGE_SHIFT,
263 addr >> PAGE_SHIFT,
264 end >> PAGE_SHIFT);
265 }
266 259
267 /* 260 /*
268 * Map the physical memory available into the kernel virtual 261 * Map the physical memory available into the kernel virtual
269 * address space. First initialize the bootmem allocator with 262 * address space. Make sure memblock will not try to allocate
270 * the memory we already mapped, so map_node() has something 263 * pages beyond the memory we already mapped in head.S
271 * to allocate.
272 */ 264 */
273 addr = m68k_memory[0].addr; 265 memblock_set_bottom_up(true);
274 size = m68k_memory[0].size; 266
275 free_bootmem_node(NODE_DATA(0), availmem, 267 for (i = 0; i < m68k_num_memory; i++) {
276 min(m68k_init_mapped_size, size) - (availmem - addr)); 268 m68k_setup_node(i);
277 map_node(0);
278 if (size > m68k_init_mapped_size)
279 free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
280 size - m68k_init_mapped_size);
281
282 for (i = 1; i < m68k_num_memory; i++)
283 map_node(i); 269 map_node(i);
270 }
284 271
285 flush_tlb_all(); 272 flush_tlb_all();
286 273
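The memblock_set_bottom_up(true) call above matters because, before map_node() extends the page tables, only the low part of the first bank (mapped in head.S) is addressable; any early memblock allocation must therefore come from the lowest free addresses. A toy first-fit allocator contrasting the two directions (not the kernel's memblock):

    #include <stdio.h>

    /* Bottom-up allocation hands out the lowest free addresses first,
     * which here are the only ones already mapped by head.S; top-down
     * would return high addresses that may not be mapped yet. */
    static unsigned long bottom = 0x1000000, top = 0x2000000;

    static unsigned long toy_alloc(unsigned long size, int bottom_up)
    {
        unsigned long p;

        if (bottom_up) {
            p = bottom;
            bottom += size;
        } else {
            top -= size;
            p = top;
        }
        return p;
    }

    int main(void)
    {
        printf("bottom-up: %#lx\n", toy_alloc(0x1000, 1)); /* low, mapped */
        printf("top-down:  %#lx\n", toy_alloc(0x1000, 0)); /* high, maybe not */
        return 0;
    }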
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index f8a710fd84cd..adea549d240e 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -40,7 +40,6 @@ static void mvme147_get_model(char *model);
40extern void mvme147_sched_init(irq_handler_t handler); 40extern void mvme147_sched_init(irq_handler_t handler);
41extern u32 mvme147_gettimeoffset(void); 41extern u32 mvme147_gettimeoffset(void);
42extern int mvme147_hwclk (int, struct rtc_time *); 42extern int mvme147_hwclk (int, struct rtc_time *);
43extern int mvme147_set_clock_mmss (unsigned long);
44extern void mvme147_reset (void); 43extern void mvme147_reset (void);
45 44
46 45
@@ -92,7 +91,6 @@ void __init config_mvme147(void)
92 mach_init_IRQ = mvme147_init_IRQ; 91 mach_init_IRQ = mvme147_init_IRQ;
93 arch_gettimeoffset = mvme147_gettimeoffset; 92 arch_gettimeoffset = mvme147_gettimeoffset;
94 mach_hwclk = mvme147_hwclk; 93 mach_hwclk = mvme147_hwclk;
95 mach_set_clock_mmss = mvme147_set_clock_mmss;
96 mach_reset = mvme147_reset; 94 mach_reset = mvme147_reset;
97 mach_get_model = mvme147_get_model; 95 mach_get_model = mvme147_get_model;
98 96
@@ -164,8 +162,3 @@ int mvme147_hwclk(int op, struct rtc_time *t)
164 } 162 }
165 return 0; 163 return 0;
166} 164}
167
168int mvme147_set_clock_mmss (unsigned long nowtime)
169{
170 return 0;
171}
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 4ffd9ef98de4..6ee36a5b528d 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -46,7 +46,6 @@ static void mvme16x_get_model(char *model);
46extern void mvme16x_sched_init(irq_handler_t handler); 46extern void mvme16x_sched_init(irq_handler_t handler);
47extern u32 mvme16x_gettimeoffset(void); 47extern u32 mvme16x_gettimeoffset(void);
48extern int mvme16x_hwclk (int, struct rtc_time *); 48extern int mvme16x_hwclk (int, struct rtc_time *);
49extern int mvme16x_set_clock_mmss (unsigned long);
50extern void mvme16x_reset (void); 49extern void mvme16x_reset (void);
51 50
52int bcd2int (unsigned char b); 51int bcd2int (unsigned char b);
@@ -280,7 +279,6 @@ void __init config_mvme16x(void)
280 mach_init_IRQ = mvme16x_init_IRQ; 279 mach_init_IRQ = mvme16x_init_IRQ;
281 arch_gettimeoffset = mvme16x_gettimeoffset; 280 arch_gettimeoffset = mvme16x_gettimeoffset;
282 mach_hwclk = mvme16x_hwclk; 281 mach_hwclk = mvme16x_hwclk;
283 mach_set_clock_mmss = mvme16x_set_clock_mmss;
284 mach_reset = mvme16x_reset; 282 mach_reset = mvme16x_reset;
285 mach_get_model = mvme16x_get_model; 283 mach_get_model = mvme16x_get_model;
286 mach_get_hardware_list = mvme16x_get_hardware_list; 284 mach_get_hardware_list = mvme16x_get_hardware_list;
@@ -411,9 +409,3 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
411 } 409 }
412 return 0; 410 return 0;
413} 411}
414
415int mvme16x_set_clock_mmss (unsigned long nowtime)
416{
417 return 0;
418}
419
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 71c0867ecf20..96810d91da2b 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -43,7 +43,6 @@ extern void q40_sched_init(irq_handler_t handler);
43static u32 q40_gettimeoffset(void); 43static u32 q40_gettimeoffset(void);
44static int q40_hwclk(int, struct rtc_time *); 44static int q40_hwclk(int, struct rtc_time *);
45static unsigned int q40_get_ss(void); 45static unsigned int q40_get_ss(void);
46static int q40_set_clock_mmss(unsigned long);
47static int q40_get_rtc_pll(struct rtc_pll_info *pll); 46static int q40_get_rtc_pll(struct rtc_pll_info *pll);
48static int q40_set_rtc_pll(struct rtc_pll_info *pll); 47static int q40_set_rtc_pll(struct rtc_pll_info *pll);
49 48
@@ -175,7 +174,6 @@ void __init config_q40(void)
175 mach_get_ss = q40_get_ss; 174 mach_get_ss = q40_get_ss;
176 mach_get_rtc_pll = q40_get_rtc_pll; 175 mach_get_rtc_pll = q40_get_rtc_pll;
177 mach_set_rtc_pll = q40_set_rtc_pll; 176 mach_set_rtc_pll = q40_set_rtc_pll;
178 mach_set_clock_mmss = q40_set_clock_mmss;
179 177
180 mach_reset = q40_reset; 178 mach_reset = q40_reset;
181 mach_get_model = q40_get_model; 179 mach_get_model = q40_get_model;
@@ -267,34 +265,6 @@ static unsigned int q40_get_ss(void)
267 return bcd2bin(Q40_RTC_SECS); 265 return bcd2bin(Q40_RTC_SECS);
268} 266}
269 267
270/*
271 * Set the minutes and seconds from seconds value 'nowtime'. Fail if
272 * clock is out by > 30 minutes. Logic lifted from atari code.
273 */
274
275static int q40_set_clock_mmss(unsigned long nowtime)
276{
277 int retval = 0;
278 short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
279
280 int rtc_minutes;
281
282 rtc_minutes = bcd2bin(Q40_RTC_MINS);
283
284 if ((rtc_minutes < real_minutes ?
285 real_minutes - rtc_minutes :
286 rtc_minutes - real_minutes) < 30) {
287 Q40_RTC_CTRL |= Q40_RTC_WRITE;
288 Q40_RTC_MINS = bin2bcd(real_minutes);
289 Q40_RTC_SECS = bin2bcd(real_seconds);
290 Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
291 } else
292 retval = -1;
293
294 return retval;
295}
296
297
298/* get and set PLL calibration of RTC clock */ 268/* get and set PLL calibration of RTC clock */
299#define Q40_RTC_PLL_MASK ((1<<5)-1) 269#define Q40_RTC_PLL_MASK ((1<<5)-1)
300#define Q40_RTC_PLL_SIGN (1<<5) 270#define Q40_RTC_PLL_SIGN (1<<5)
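The mach_set_clock_mmss() hook removed here (and from the MVME boards above) only wrote the RTC's minutes and seconds, and refused to do so when the clock was off by 30 minutes or more. With RTC_CLASS selected, the full time is presumably written back through mach_hwclk by the generic RTC driver, leaving this partial-update path dead. A small sketch of the arithmetic all the removed helpers shared:

    #include <stdio.h>

    /* The removed helpers computed a minutes/seconds split like this and
     * refused to touch the RTC when it drifted by 30 minutes or more. */
    int main(void)
    {
        unsigned long nowtime = 1534567890; /* example epoch seconds */
        int real_seconds = nowtime % 60;
        int real_minutes = (nowtime / 60) % 60;
        int rtc_minutes = 12; /* pretend value read back from the RTC */
        int delta = rtc_minutes < real_minutes ? real_minutes - rtc_minutes
                                               : rtc_minutes - real_minutes;

        printf("mm:ss = %02d:%02d, delta=%d -> %s\n", real_minutes,
               real_seconds, delta, delta < 30 ? "write RTC" : "refuse");
        return 0;
    }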
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 1d28d380e8cc..79a2bb857906 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -123,10 +123,6 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
123 availmem = memory_start; 123 availmem = memory_start;
124 124
125 m68k_setup_node(0); 125 m68k_setup_node(0);
126 availmem += init_bootmem(start_page, num_pages);
127 availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
128
129 free_bootmem(__pa(availmem), memory_end - (availmem));
130} 126}
131 127
132 128
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 10a405d593df..c782b10ddf50 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
58 58
59void ath79_ddr_wb_flush(u32 reg) 59void ath79_ddr_wb_flush(u32 reg)
60{ 60{
61 void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg; 61 void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
62 62
63 /* Flush the DDR write buffer. */ 63 /* Flush the DDR write buffer. */
64 __raw_writel(0x1, flush_reg); 64 __raw_writel(0x1, flush_reg);
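The fix above treats 'reg' as an index into an array of 32-bit DDR controller registers rather than a byte offset: each register is 4 bytes wide, so the MMIO address is base + reg * 4. An illustration of the scaling with plain pointers (no __iomem annotation here):

    #include <stdint.h>
    #include <stdio.h>

    /* Pointer arithmetic on a u32 pointer already scales by 4; adding a
     * register index to a byte-addressed base does not, hence the
     * explicit (reg * 4) in the fix above. */
    int main(void)
    {
        uint32_t regs[8] = { 0 };
        volatile uint32_t *base = regs;
        unsigned int reg = 3;

        *(base + reg) = 0x1; /* lands at byte offset reg * 4 */
        printf("wrote reg %u at byte offset %u\n", reg,
               (unsigned int)(reg * sizeof(uint32_t)));
        return 0;
    }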
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 8c9cbf13d32a..6054d49e608e 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
212 */ 212 */
213 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706) 213 if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
214 cpu_wait = NULL; 214 cpu_wait = NULL;
215
216 /*
217 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
218 * Enable ExternalSync for sync instruction to take effect
219 */
220 set_c0_config7(MIPS_CONF7_ES);
221 break; 215 break;
222#endif 216#endif
223 } 217 }
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 0bc270806ec5..ae461d91cd1f 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -681,8 +681,6 @@
681#define MIPS_CONF7_WII (_ULCAST_(1) << 31) 681#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
682 682
683#define MIPS_CONF7_RPS (_ULCAST_(1) << 2) 683#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
684/* ExternalSync */
685#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
686 684
687#define MIPS_CONF7_IAR (_ULCAST_(1) << 10) 685#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
688#define MIPS_CONF7_AR (_ULCAST_(1) << 16) 686#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@@ -2767,7 +2765,6 @@ __BUILD_SET_C0(status)
2767__BUILD_SET_C0(cause) 2765__BUILD_SET_C0(cause)
2768__BUILD_SET_C0(config) 2766__BUILD_SET_C0(config)
2769__BUILD_SET_C0(config5) 2767__BUILD_SET_C0(config5)
2770__BUILD_SET_C0(config7)
2771__BUILD_SET_C0(intcontrol) 2768__BUILD_SET_C0(intcontrol)
2772__BUILD_SET_C0(intctl) 2769__BUILD_SET_C0(intctl)
2773__BUILD_SET_C0(srsmap) 2770__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 9632436d74d7..c2e94cf5ecda 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
54 phys_addr_t size = resource_size(rsrc); 54 phys_addr_t size = resource_size(rsrc);
55 55
56 *start = fixup_bigphys_addr(rsrc->start, size); 56 *start = fixup_bigphys_addr(rsrc->start, size);
57 *end = rsrc->start + size; 57 *end = rsrc->start + size - 1;
58} 58}
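Resource ranges are inclusive on both ends, so a region of 'size' bytes starting at 'start' ends at start + size - 1; the old code reported the first byte past the BAR. The off-by-one, worked through:

    #include <stdio.h>

    /* A 0x1000-byte BAR at 0x10000000 spans [0x10000000, 0x10000fff],
     * so the inclusive end is start + size - 1. */
    int main(void)
    {
        unsigned long start = 0x10000000, size = 0x1000;
        unsigned long end_wrong = start + size;     /* first byte PAST the BAR */
        unsigned long end_right = start + size - 1; /* last byte OF the BAR */

        printf("wrong: %#lx  right: %#lx\n", end_wrong, end_right);
        return 0;
    }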
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 9ecad05bfc73..dfb6a79ba7ff 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -27,7 +27,6 @@ config OPENRISC
27 select GENERIC_STRNLEN_USER 27 select GENERIC_STRNLEN_USER
28 select GENERIC_SMP_IDLE_THREAD 28 select GENERIC_SMP_IDLE_THREAD
29 select MODULES_USE_ELF_RELA 29 select MODULES_USE_ELF_RELA
30 select MULTI_IRQ_HANDLER
31 select HAVE_DEBUG_STACKOVERFLOW 30 select HAVE_DEBUG_STACKOVERFLOW
32 select OR1K_PIC 31 select OR1K_PIC
33 select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1 32 select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
@@ -36,6 +35,7 @@ config OPENRISC
36 select ARCH_USE_QUEUED_RWLOCKS 35 select ARCH_USE_QUEUED_RWLOCKS
37 select OMPIC if SMP 36 select OMPIC if SMP
38 select ARCH_WANT_FRAME_POINTERS 37 select ARCH_WANT_FRAME_POINTERS
38 select GENERIC_IRQ_MULTI_HANDLER
39 39
40config CPU_BIG_ENDIAN 40config CPU_BIG_ENDIAN
41 def_bool y 41 def_bool y
@@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT
69config LOCKDEP_SUPPORT 69config LOCKDEP_SUPPORT
70 def_bool y 70 def_bool y
71 71
72config MULTI_IRQ_HANDLER
73 def_bool y
74
75source "init/Kconfig" 72source "init/Kconfig"
76 73
77source "kernel/Kconfig.freezer" 74source "kernel/Kconfig.freezer"
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
index d9eee0a2b7b4..eb612b1865d2 100644
--- a/arch/openrisc/include/asm/irq.h
+++ b/arch/openrisc/include/asm/irq.h
@@ -24,6 +24,4 @@
24 24
25#define NO_IRQ (-1) 25#define NO_IRQ (-1)
26 26
27extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
28
29#endif /* __ASM_OPENRISC_IRQ_H__ */ 27#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/kernel/irq.c b/arch/openrisc/kernel/irq.c
index 35e478a93116..5f9445effaf8 100644
--- a/arch/openrisc/kernel/irq.c
+++ b/arch/openrisc/kernel/irq.c
@@ -41,13 +41,6 @@ void __init init_IRQ(void)
41 irqchip_init(); 41 irqchip_init();
42} 42}
43 43
44static void (*handle_arch_irq)(struct pt_regs *);
45
46void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
47{
48 handle_arch_irq = handle_irq;
49}
50
51void __irq_entry do_IRQ(struct pt_regs *regs) 44void __irq_entry do_IRQ(struct pt_regs *regs)
52{ 45{
53 handle_arch_irq(regs); 46 handle_arch_irq(regs);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 17526bebcbd2..e7705dde953f 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,7 +11,6 @@ config PARISC
11 select ARCH_HAS_ELF_RANDOMIZE 11 select ARCH_HAS_ELF_RANDOMIZE
12 select ARCH_HAS_STRICT_KERNEL_RWX 12 select ARCH_HAS_STRICT_KERNEL_RWX
13 select ARCH_HAS_UBSAN_SANITIZE_ALL 13 select ARCH_HAS_UBSAN_SANITIZE_ALL
14 select ARCH_WANTS_UBSAN_NO_NULL
15 select ARCH_SUPPORTS_MEMORY_FAILURE 14 select ARCH_SUPPORTS_MEMORY_FAILURE
16 select RTC_CLASS 15 select RTC_CLASS
17 select RTC_DRV_GENERIC 16 select RTC_DRV_GENERIC
@@ -195,7 +194,7 @@ config PREFETCH
195 194
196config MLONGCALLS 195config MLONGCALLS
197 bool "Enable the -mlong-calls compiler option for big kernels" 196 bool "Enable the -mlong-calls compiler option for big kernels"
198 def_bool y if (!MODULES) 197 default y
199 depends on PA8X00 198 depends on PA8X00
200 help 199 help
201 If you configure the kernel to include many drivers built-in instead 200 If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 000000000000..dbaaca84f27f
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_BARRIER_H
3#define __ASM_BARRIER_H
4
5#ifndef __ASSEMBLY__
6
7/* The synchronize caches instruction executes as a nop on systems in
8 which all memory references are performed in order. */
9#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
10
11#if defined(CONFIG_SMP)
12#define mb() do { synchronize_caches(); } while (0)
13#define rmb() mb()
14#define wmb() mb()
15#define dma_rmb() mb()
16#define dma_wmb() mb()
17#else
18#define mb() barrier()
19#define rmb() barrier()
20#define wmb() barrier()
21#define dma_rmb() barrier()
22#define dma_wmb() barrier()
23#endif
24
25#define __smp_mb() mb()
26#define __smp_rmb() mb()
27#define __smp_wmb() mb()
28
29#include <asm-generic/barrier.h>
30
31#endif /* !__ASSEMBLY__ */
32#endif /* __ASM_BARRIER_H */
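The new header maps the SMP memory barriers onto the PA-RISC 'sync' instruction, which drains outstanding memory accesses, while UP builds only need a compiler barrier; anything not defined here falls through to asm-generic/barrier.h. A portable analogue of that SMP/UP split using GCC builtins (a sketch, not the kernel's definitions):

    #include <stdio.h>

    /* On SMP a real hardware fence is needed; on UP it suffices to keep
     * the compiler from reordering across the barrier. */
    #ifdef CONFIG_SMP
    #define toy_mb()    __sync_synchronize()                  /* hw fence */
    #else
    #define toy_mb()    __asm__ __volatile__("" ::: "memory") /* compiler only */
    #endif

    int shared_data;
    int shared_flag;

    void publish(int v)
    {
        shared_data = v;
        toy_mb();        /* order the data write before the flag write */
        shared_flag = 1;
    }

    int main(void)
    {
        publish(42);
        printf("%d %d\n", shared_flag, shared_data);
        return 0;
    }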
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index e95207c0565e..1b4732e20137 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -482,6 +482,8 @@
482 .macro tlb_unlock0 spc,tmp 482 .macro tlb_unlock0 spc,tmp
483#ifdef CONFIG_SMP 483#ifdef CONFIG_SMP
484 or,COND(=) %r0,\spc,%r0 484 or,COND(=) %r0,\spc,%r0
485 sync
486 or,COND(=) %r0,\spc,%r0
485 stw \spc,0(\tmp) 487 stw \spc,0(\tmp)
486#endif 488#endif
487 .endm 489 .endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 22e6374ece44..97451e67d35b 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -353,6 +353,7 @@ ENDPROC_CFI(flush_data_cache_local)
353 .macro tlb_unlock la,flags,tmp 353 .macro tlb_unlock la,flags,tmp
354#ifdef CONFIG_SMP 354#ifdef CONFIG_SMP
355 ldi 1,\tmp 355 ldi 1,\tmp
356 sync
356 stw \tmp,0(\la) 357 stw \tmp,0(\la)
357 mtsm \flags 358 mtsm \flags
358#endif 359#endif
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index e775f80ae28c..4886a6db42e9 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -633,6 +633,7 @@ cas_action:
633 sub,<> %r28, %r25, %r0 633 sub,<> %r28, %r25, %r0
6342: stw,ma %r24, 0(%r26) 6342: stw,ma %r24, 0(%r26)
635 /* Free lock */ 635 /* Free lock */
636 sync
636 stw,ma %r20, 0(%sr2,%r20) 637 stw,ma %r20, 0(%sr2,%r20)
637#if ENABLE_LWS_DEBUG 638#if ENABLE_LWS_DEBUG
638 /* Clear thread register indicator */ 639 /* Clear thread register indicator */
@@ -647,6 +648,7 @@ cas_action:
6473: 6483:
648 /* Error occurred on load or store */ 649 /* Error occurred on load or store */
649 /* Free lock */ 650 /* Free lock */
651 sync
650 stw %r20, 0(%sr2,%r20) 652 stw %r20, 0(%sr2,%r20)
651#if ENABLE_LWS_DEBUG 653#if ENABLE_LWS_DEBUG
652 stw %r0, 4(%sr2,%r20) 654 stw %r0, 4(%sr2,%r20)
@@ -848,6 +850,7 @@ cas2_action:
848 850
849cas2_end: 851cas2_end:
850 /* Free lock */ 852 /* Free lock */
853 sync
851 stw,ma %r20, 0(%sr2,%r20) 854 stw,ma %r20, 0(%sr2,%r20)
852 /* Enable interrupts */ 855 /* Enable interrupts */
853 ssm PSW_SM_I, %r0 856 ssm PSW_SM_I, %r0
@@ -858,6 +861,7 @@ cas2_end:
85822: 86122:
859 /* Error occurred on load or store */ 862 /* Error occurred on load or store */
860 /* Free lock */ 863 /* Free lock */
864 sync
861 stw %r20, 0(%sr2,%r20) 865 stw %r20, 0(%sr2,%r20)
862 ssm PSW_SM_I, %r0 866 ssm PSW_SM_I, %r0
863 ldo 1(%r0),%r28 867 ldo 1(%r0),%r28
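Each 'sync' added above (and in entry.S and pacache.S) sits immediately before the store that frees a lock word, giving that store release semantics: the updates made while holding the lock become globally visible before another CPU can observe the lock as free. The same pairing expressed with C11 atomics (a sketch of the ordering, not the LWS code itself; the 1-means-free convention mirrors ldcw-style locks):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int lock = 1; /* 1 = free, 0 = held */
    static int protected_value;

    void unlock_after_update(int v)
    {
        protected_value = v;
        /* the release store plays the role of 'sync' + stw above */
        atomic_store_explicit(&lock, 1, memory_order_release);
    }

    int try_lock_and_read(void)
    {
        /* acquire pairs with the release above */
        if (atomic_exchange_explicit(&lock, 0, memory_order_acquire))
            return protected_value; /* guaranteed to see the new value */
        return -1;
    }

    int main(void)
    {
        unlock_after_update(7);
        printf("%d\n", try_lock_and_read());
        return 0;
    }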
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 79d570cbf332..b2f89b621b15 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -143,24 +143,33 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
143{ 143{
144 int c; 144 int c;
145 145
146 c = atomic_dec_if_positive(&mm->context.copros);
147
148 /* Detect imbalance between add and remove */
149 WARN_ON(c < 0);
150
151 /* 146 /*
152 * Need to broadcast a global flush of the full mm before 147 * When removing the last copro, we need to broadcast a global
153 * decrementing active_cpus count, as the next TLBI may be 148 * flush of the full mm, as the next TLBI may be local and the
154 * local and the nMMU and/or PSL need to be cleaned up. 149 * nMMU and/or PSL need to be cleaned up.
155 * Should be rare enough so that it's acceptable. 150 *
151 * Both the 'copros' and 'active_cpus' counts are looked at in
152 * flush_all_mm() to determine the scope (local/global) of the
153 * TLBIs, so we need to flush first before decrementing
154 * 'copros'. If this API is used by several callers for the
155 * same context, it can lead to over-flushing. It's hopefully
156 * not common enough to be a problem.
156 * 157 *
157 * Skip on hash, as we don't know how to do the proper flush 158 * Skip on hash, as we don't know how to do the proper flush
158 * for the time being. Invalidations will remain global if 159 * for the time being. Invalidations will remain global if
159 * used on hash. 160 * used on hash. Note that we can't drop 'copros' either, as
161 * it could make some invalidations local with no flush
162 * in-between.
160 */ 163 */
161 if (c == 0 && radix_enabled()) { 164 if (radix_enabled()) {
162 flush_all_mm(mm); 165 flush_all_mm(mm);
163 dec_mm_active_cpus(mm); 166
167 c = atomic_dec_if_positive(&mm->context.copros);
168 /* Detect imbalance between add and remove */
169 WARN_ON(c < 0);
170
171 if (c == 0)
172 dec_mm_active_cpus(mm);
164 } 173 }
165} 174}
166#else 175#else
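The reordering above follows from flush_all_mm() deriving TLBI scope from both counters: the global flush must run while 'copros' is still elevated, otherwise a concurrent flush could go local before the nMMU/PSL state is cleaned. A toy model of why the flush now precedes the decrement (illustrative names, not the powerpc code):

    #include <stdio.h>

    /* The scope of a flush is derived from the counters at the moment
     * it runs, so decrementing first would let it go local too early. */
    static int copros = 1, active_cpus = 1;

    static const char *flush_scope(void)
    {
        /* a global TLBI is needed while a coprocessor may hold entries */
        return (copros || active_cpus > 1) ? "global" : "local";
    }

    int main(void)
    {
        /* new order: flush while 'copros' is still elevated ... */
        printf("flush is %s\n", flush_scope()); /* global: nMMU cleaned */
        copros--;                               /* ... then drop the count */
        if (copros == 0)
            active_cpus--;
        printf("later flushes may be %s\n", flush_scope());
        return 0;
    }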
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index fe9733ffffaa..471aac313b89 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -42,6 +42,8 @@
42#include <asm/ppc-pci.h> 42#include <asm/ppc-pci.h>
43#include <asm/eeh.h> 43#include <asm/eeh.h>
44 44
45#include "../../../drivers/pci/pci.h"
46
45/* hose_spinlock protects accesses to the phb_bitmap. */ 47/* hose_spinlock protects accesses to the phb_bitmap. */

46static DEFINE_SPINLOCK(hose_spinlock); 48static DEFINE_SPINLOCK(hose_spinlock);
47LIST_HEAD(hose_list); 49LIST_HEAD(hose_list);
@@ -1014,7 +1016,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
1014 /* Cardbus can call us to add new devices to a bus, so ignore 1016 /* Cardbus can call us to add new devices to a bus, so ignore
1015 * those that are already fully discovered 1017 * those that are already fully discovered
1016 */ 1018 */
1017 if (dev->is_added) 1019 if (pci_dev_is_added(dev))
1018 continue; 1020 continue;
1019 1021
1020 pcibios_setup_device(dev); 1022 pcibios_setup_device(dev);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 380cbf9a40d9..c0a9bcd28356 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
286 u64 imm64; 286 u64 imm64;
287 u8 *func; 287 u8 *func;
288 u32 true_cond; 288 u32 true_cond;
289 u32 tmp_idx;
289 290
290 /* 291 /*
291 * addrs[] maps a BPF bytecode address into a real offset from 292 * addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ emit_clear:
637 case BPF_STX | BPF_XADD | BPF_W: 638 case BPF_STX | BPF_XADD | BPF_W:
638 /* Get EA into TMP_REG_1 */ 639 /* Get EA into TMP_REG_1 */
639 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); 640 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
640 /* error if EA is not word-aligned */ 641 tmp_idx = ctx->idx * 4;
641 PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
642 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
643 PPC_LI(b2p[BPF_REG_0], 0);
644 PPC_JMP(exit_addr);
645 /* load value from memory into TMP_REG_2 */ 642 /* load value from memory into TMP_REG_2 */
646 PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); 643 PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
647 /* add value from src_reg into this */ 644 /* add value from src_reg into this */
@@ -649,32 +646,16 @@ emit_clear:
649 /* store result back */ 646 /* store result back */
650 PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); 647 PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
651 /* we're done if this succeeded */ 648 /* we're done if this succeeded */
652 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); 649 PPC_BCC_SHORT(COND_NE, tmp_idx);
653 /* otherwise, let's try once more */
654 PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
655 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
656 PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
657 /* exit if the store was not successful */
658 PPC_LI(b2p[BPF_REG_0], 0);
659 PPC_BCC(COND_NE, exit_addr);
660 break; 650 break;
661 /* *(u64 *)(dst + off) += src */ 651 /* *(u64 *)(dst + off) += src */
662 case BPF_STX | BPF_XADD | BPF_DW: 652 case BPF_STX | BPF_XADD | BPF_DW:
663 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); 653 PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
664 /* error if EA is not doubleword-aligned */ 654 tmp_idx = ctx->idx * 4;
665 PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
666 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
667 PPC_LI(b2p[BPF_REG_0], 0);
668 PPC_JMP(exit_addr);
669 PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
670 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
671 PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
672 PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
673 PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); 655 PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
674 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); 656 PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
675 PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); 657 PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
676 PPC_LI(b2p[BPF_REG_0], 0); 658 PPC_BCC_SHORT(COND_NE, tmp_idx);
677 PPC_BCC(COND_NE, exit_addr);
678 break; 659 break;
679 660
680 /* 661 /*
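The JIT change above records tmp_idx, the byte offset of the load-reserve instruction, and on a failed stwcx./stdcx. branches back to it, retrying until the reservation holds; the old sequence bailed out through the exit path with a fabricated r0 = 0 after a single retry. The same retry shape in C11, where a weak compare-exchange can fail spuriously exactly like a lost reservation:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Load, add, attempt the conditional store, and loop on failure --
     * never falling out early with a made-up return value. */
    static void xadd32(atomic_uint *p, unsigned int v)
    {
        unsigned int old = atomic_load_explicit(p, memory_order_relaxed);

        while (!atomic_compare_exchange_weak_explicit(p, &old, old + v,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
            ; /* retry from the reload, as the JIT branches to tmp_idx */
    }

    int main(void)
    {
        atomic_uint counter = 40;

        xadd32(&counter, 2);
        printf("%u\n", atomic_load(&counter));
        return 0;
    }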
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5bd0eb6681bc..70b2e1e0f23c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -46,6 +46,7 @@
46 46
47#include "powernv.h" 47#include "powernv.h"
48#include "pci.h" 48#include "pci.h"
49#include "../../../../drivers/pci/pci.h"
49 50
50#define PNV_IODA1_M64_NUM 16 /* Number of M64 BARs */ 51#define PNV_IODA1_M64_NUM 16 /* Number of M64 BARs */
51#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */ 52#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */
@@ -3138,7 +3139,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
3138 struct pci_dn *pdn; 3139 struct pci_dn *pdn;
3139 int mul, total_vfs; 3140 int mul, total_vfs;
3140 3141
3141 if (!pdev->is_physfn || pdev->is_added) 3142 if (!pdev->is_physfn || pci_dev_is_added(pdev))
3142 return; 3143 return;
3143 3144
3144 pdn = pci_get_pdn(pdev); 3145 pdn = pci_get_pdn(pdev);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 139f0af6c3d9..8a4868a3964b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -71,6 +71,7 @@
71#include <asm/security_features.h> 71#include <asm/security_features.h>
72 72
73#include "pseries.h" 73#include "pseries.h"
74#include "../../../../drivers/pci/pci.h"
74 75
75int CMO_PrPSP = -1; 76int CMO_PrPSP = -1;
76int CMO_SecPSP = -1; 77int CMO_SecPSP = -1;
@@ -664,7 +665,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
664 const int *indexes; 665 const int *indexes;
665 struct device_node *dn = pci_device_to_OF_node(pdev); 666 struct device_node *dn = pci_device_to_OF_node(pdev);
666 667
667 if (!pdev->is_physfn || pdev->is_added) 668 if (!pdev->is_physfn || pci_dev_is_added(pdev))
668 return; 669 return;
669 /* Firmware must support open sriov, otherwise don't configure */ 670 /* Firmware must support open sriov, otherwise don't configure */
670 indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL); 671 indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e44bb2b2873e..4fe5b2affa23 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -106,7 +106,6 @@ config S390
106 select ARCH_USE_BUILTIN_BSWAP 106 select ARCH_USE_BUILTIN_BSWAP
107 select ARCH_USE_CMPXCHG_LOCKREF 107 select ARCH_USE_CMPXCHG_LOCKREF
108 select ARCH_WANTS_DYNAMIC_TASK_STRUCT 108 select ARCH_WANTS_DYNAMIC_TASK_STRUCT
109 select ARCH_WANTS_UBSAN_NO_NULL
110 select ARCH_WANT_IPC_PARSE_VERSION 109 select ARCH_WANT_IPC_PARSE_VERSION
111 select BUILDTIME_EXTABLE_SORT 110 select BUILDTIME_EXTABLE_SORT
112 select CLONE_BACKWARDS2 111 select CLONE_BACKWARDS2
@@ -140,7 +139,7 @@ config S390
140 select HAVE_FUNCTION_GRAPH_TRACER 139 select HAVE_FUNCTION_GRAPH_TRACER
141 select HAVE_FUNCTION_TRACER 140 select HAVE_FUNCTION_TRACER
142 select HAVE_FUTEX_CMPXCHG if FUTEX 141 select HAVE_FUTEX_CMPXCHG if FUTEX
143 select HAVE_GCC_PLUGINS 142 select HAVE_GCC_PLUGINS if BROKEN
144 select HAVE_KERNEL_BZIP2 143 select HAVE_KERNEL_BZIP2
145 select HAVE_KERNEL_GZIP 144 select HAVE_KERNEL_GZIP
146 select HAVE_KERNEL_LZ4 145 select HAVE_KERNEL_LZ4
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index ac67828da201..410b263ef5c8 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += local64.h
13generic-y += mcs_spinlock.h 13generic-y += mcs_spinlock.h
14generic-y += mm-arch-hooks.h 14generic-y += mm-arch-hooks.h
15generic-y += module.h 15generic-y += module.h
16generic-y += msi.h
16generic-y += preempt.h 17generic-y += preempt.h
17generic-y += rwsem.h 18generic-y += rwsem.h
18generic-y += serial.h 19generic-y += serial.h
diff --git a/arch/sparc/include/asm/msi.h b/arch/sparc/include/asm/msi.h
deleted file mode 100644
index 3c17c1074431..000000000000
--- a/arch/sparc/include/asm/msi.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * msi.h: Defines specific to the MBus - Sbus - Interface.
4 *
5 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 */
8
9#ifndef _SPARC_MSI_H
10#define _SPARC_MSI_H
11
12/*
13 * Locations of MSI Registers.
14 */
15#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
16
17/*
18 * Useful bits in the MSI Registers.
19 */
20#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
21
22
23static inline void msi_set_sync(void)
24{
25 __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
26 "andn %%g3, %2, %%g3\n\t"
27 "sta %%g3, [%0] %1\n\t" : :
28 "r" (MSI_MBUS_ARBEN),
29 "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
30}
31
32#endif /* !(_SPARC_MSI_H) */
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2ef8cfa9677e..f0eba72aa1ad 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
814 } 814 }
815} 815}
816 816
817static void init_tick_ops(struct sparc64_tick_ops *ops) 817static void __init init_tick_ops(struct sparc64_tick_ops *ops)
818{ 818{
819 unsigned long freq, quotient, tick; 819 unsigned long freq, quotient, tick;
820 820
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 1d70c3f6d986..be9cb0065179 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -37,7 +37,6 @@
37#include <asm/mbus.h> 37#include <asm/mbus.h>
38#include <asm/page.h> 38#include <asm/page.h>
39#include <asm/asi.h> 39#include <asm/asi.h>
40#include <asm/msi.h>
41#include <asm/smp.h> 40#include <asm/smp.h>
42#include <asm/io.h> 41#include <asm/io.h>
43 42
@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
116 set_pte((pte_t *)ctxp, pte); 115 set_pte((pte_t *)ctxp, pte);
117} 116}
118 117
118/*
119 * Locations of MSI Registers.
120 */
121#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
122
123/*
124 * Useful bits in the MSI Registers.
125 */
126#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
127
128static void msi_set_sync(void)
129{
130 __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
131 "andn %%g3, %2, %%g3\n\t"
132 "sta %%g3, [%0] %1\n\t" : :
133 "r" (MSI_MBUS_ARBEN),
134 "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
135}
136
119void pmd_set(pmd_t *pmdp, pte_t *ptep) 137void pmd_set(pmd_t *pmdp, pte_t *ptep)
120{ 138{
121 unsigned long ptp; /* Physical address, shifted right by 4 */ 139 unsigned long ptp; /* Physical address, shifted right by 4 */
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index fa42f895fdde..169c2feda14a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -106,9 +106,13 @@ define cmd_check_data_rel
106 done 106 done
107endef 107endef
108 108
109# We need to run two commands under "if_changed", so merge them into a
110# single invocation.
111quiet_cmd_check-and-link-vmlinux = LD $@
112 cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
113
109$(obj)/vmlinux: $(vmlinux-objs-y) FORCE 114$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
110 $(call if_changed,check_data_rel) 115 $(call if_changed,check-and-link-vmlinux)
111 $(call if_changed,ld)
112 116
113OBJCOPYFLAGS_vmlinux.bin := -R .comment -S 117OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
114$(obj)/vmlinux.bin: vmlinux FORCE 118$(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e98522ea6f09..1458b1700fc7 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c) \
34 \ 34 \
35 table = (typeof(table))sys_table; \ 35 table = (typeof(table))sys_table; \
36 \ 36 \
37 c->runtime_services = table->runtime; \ 37 c->runtime_services = table->runtime; \
38 c->boot_services = table->boottime; \ 38 c->boot_services = table->boottime; \
39 c->text_output = table->con_out; \ 39 c->text_output = table->con_out; \
40} 40}
41BOOT_SERVICES(32); 41BOOT_SERVICES(32);
42BOOT_SERVICES(64); 42BOOT_SERVICES(64);
43 43
44static inline efi_status_t __open_volume32(void *__image, void **__fh)
45{
46 efi_file_io_interface_t *io;
47 efi_loaded_image_32_t *image = __image;
48 efi_file_handle_32_t *fh;
49 efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
50 efi_status_t status;
51 void *handle = (void *)(unsigned long)image->device_handle;
52 unsigned long func;
53
54 status = efi_call_early(handle_protocol, handle,
55 &fs_proto, (void **)&io);
56 if (status != EFI_SUCCESS) {
57 efi_printk(sys_table, "Failed to handle fs_proto\n");
58 return status;
59 }
60
61 func = (unsigned long)io->open_volume;
62 status = efi_early->call(func, io, &fh);
63 if (status != EFI_SUCCESS)
64 efi_printk(sys_table, "Failed to open volume\n");
65
66 *__fh = fh;
67 return status;
68}
69
70static inline efi_status_t __open_volume64(void *__image, void **__fh)
71{
72 efi_file_io_interface_t *io;
73 efi_loaded_image_64_t *image = __image;
74 efi_file_handle_64_t *fh;
75 efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
76 efi_status_t status;
77 void *handle = (void *)(unsigned long)image->device_handle;
78 unsigned long func;
79
80 status = efi_call_early(handle_protocol, handle,
81 &fs_proto, (void **)&io);
82 if (status != EFI_SUCCESS) {
83 efi_printk(sys_table, "Failed to handle fs_proto\n");
84 return status;
85 }
86
87 func = (unsigned long)io->open_volume;
88 status = efi_early->call(func, io, &fh);
89 if (status != EFI_SUCCESS)
90 efi_printk(sys_table, "Failed to open volume\n");
91
92 *__fh = fh;
93 return status;
94}
95
96efi_status_t
97efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
98{
99 if (efi_early->is64)
100 return __open_volume64(__image, __fh);
101
102 return __open_volume32(__image, __fh);
103}
104
105void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) 44void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
106{ 45{
107 efi_call_proto(efi_simple_text_output_protocol, output_string, 46 efi_call_proto(efi_simple_text_output_protocol, output_string,
@@ -109,7 +48,7 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
109} 48}
110 49
111static efi_status_t 50static efi_status_t
112__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom) 51preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
113{ 52{
114 struct pci_setup_rom *rom = NULL; 53 struct pci_setup_rom *rom = NULL;
115 efi_status_t status; 54 efi_status_t status;
@@ -134,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
134 73
135 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); 74 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
136 if (status != EFI_SUCCESS) { 75 if (status != EFI_SUCCESS) {
137 efi_printk(sys_table, "Failed to alloc mem for rom\n"); 76 efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
138 return status; 77 return status;
139 } 78 }
140 79
141 memset(rom, 0, sizeof(*rom)); 80 memset(rom, 0, sizeof(*rom));
142 81
143 rom->data.type = SETUP_PCI; 82 rom->data.type = SETUP_PCI;
144 rom->data.len = size - sizeof(struct setup_data); 83 rom->data.len = size - sizeof(struct setup_data);
145 rom->data.next = 0; 84 rom->data.next = 0;
146 rom->pcilen = pci->romsize; 85 rom->pcilen = pci->romsize;
147 *__rom = rom; 86 *__rom = rom;
148 87
149 status = efi_call_proto(efi_pci_io_protocol, pci.read, pci, 88 status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
@@ -179,96 +118,6 @@ free_struct:
179 return status; 118 return status;
180} 119}
181 120
182static void
183setup_efi_pci32(struct boot_params *params, void **pci_handle,
184 unsigned long size)
185{
186 efi_pci_io_protocol_t *pci = NULL;
187 efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
188 u32 *handles = (u32 *)(unsigned long)pci_handle;
189 efi_status_t status;
190 unsigned long nr_pci;
191 struct setup_data *data;
192 int i;
193
194 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
195
196 while (data && data->next)
197 data = (struct setup_data *)(unsigned long)data->next;
198
199 nr_pci = size / sizeof(u32);
200 for (i = 0; i < nr_pci; i++) {
201 struct pci_setup_rom *rom = NULL;
202 u32 h = handles[i];
203
204 status = efi_call_early(handle_protocol, h,
205 &pci_proto, (void **)&pci);
206
207 if (status != EFI_SUCCESS)
208 continue;
209
210 if (!pci)
211 continue;
212
213 status = __setup_efi_pci(pci, &rom);
214 if (status != EFI_SUCCESS)
215 continue;
216
217 if (data)
218 data->next = (unsigned long)rom;
219 else
220 params->hdr.setup_data = (unsigned long)rom;
221
222 data = (struct setup_data *)rom;
223
224 }
225}
226
227static void
228setup_efi_pci64(struct boot_params *params, void **pci_handle,
229 unsigned long size)
230{
231 efi_pci_io_protocol_t *pci = NULL;
232 efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
233 u64 *handles = (u64 *)(unsigned long)pci_handle;
234 efi_status_t status;
235 unsigned long nr_pci;
236 struct setup_data *data;
237 int i;
238
239 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
240
241 while (data && data->next)
242 data = (struct setup_data *)(unsigned long)data->next;
243
244 nr_pci = size / sizeof(u64);
245 for (i = 0; i < nr_pci; i++) {
246 struct pci_setup_rom *rom = NULL;
247 u64 h = handles[i];
248
249 status = efi_call_early(handle_protocol, h,
250 &pci_proto, (void **)&pci);
251
252 if (status != EFI_SUCCESS)
253 continue;
254
255 if (!pci)
256 continue;
257
258 status = __setup_efi_pci(pci, &rom);
259 if (status != EFI_SUCCESS)
260 continue;
261
262 if (data)
263 data->next = (unsigned long)rom;
264 else
265 params->hdr.setup_data = (unsigned long)rom;
266
267 data = (struct setup_data *)rom;
268
269 }
270}
271
272/* 121/*
273 * There's no way to return an informative status from this function, 122 * There's no way to return an informative status from this function,
274 * because any analysis (and printing of error messages) needs to be 123 * because any analysis (and printing of error messages) needs to be
@@ -284,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params)
284 void **pci_handle = NULL; 133 void **pci_handle = NULL;
285 efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID; 134 efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
286 unsigned long size = 0; 135 unsigned long size = 0;
136 unsigned long nr_pci;
137 struct setup_data *data;
138 int i;
287 139
288 status = efi_call_early(locate_handle, 140 status = efi_call_early(locate_handle,
289 EFI_LOCATE_BY_PROTOCOL, 141 EFI_LOCATE_BY_PROTOCOL,
@@ -295,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params)
295 size, (void **)&pci_handle); 147 size, (void **)&pci_handle);
296 148
297 if (status != EFI_SUCCESS) { 149 if (status != EFI_SUCCESS) {
298 efi_printk(sys_table, "Failed to alloc mem for pci_handle\n"); 150 efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
299 return; 151 return;
300 } 152 }
301 153
@@ -307,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params)
307 if (status != EFI_SUCCESS) 159 if (status != EFI_SUCCESS)
308 goto free_handle; 160 goto free_handle;
309 161
310 if (efi_early->is64) 162 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
311 setup_efi_pci64(params, pci_handle, size); 163
312 else 164 while (data && data->next)
313 setup_efi_pci32(params, pci_handle, size); 165 data = (struct setup_data *)(unsigned long)data->next;
166
167 nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
168 for (i = 0; i < nr_pci; i++) {
169 efi_pci_io_protocol_t *pci = NULL;
170 struct pci_setup_rom *rom;
171
172 status = efi_call_early(handle_protocol,
173 efi_is_64bit() ? ((u64 *)pci_handle)[i]
174 : ((u32 *)pci_handle)[i],
175 &pci_proto, (void **)&pci);
176 if (status != EFI_SUCCESS || !pci)
177 continue;
178
179 status = preserve_pci_rom_image(pci, &rom);
180 if (status != EFI_SUCCESS)
181 continue;
182
183 if (data)
184 data->next = (unsigned long)rom;
185 else
186 params->hdr.setup_data = (unsigned long)rom;
187
188 data = (struct setup_data *)rom;
189 }
314 190
315free_handle: 191free_handle:
316 efi_call_early(free_pool, pci_handle); 192 efi_call_early(free_pool, pci_handle);
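With the 32- and 64-bit variants folded together above, the only word-size-dependent step left in the loop is pulling the i-th handle out of the buffer, decided per access by efi_is_64bit(). That extraction, isolated into a standalone sketch (handle_at() is a hypothetical stand-in, not an EFI stub helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Read the i-th handle from a buffer holding u32 or u64 entries,
     * depending on the firmware word size. */
    static unsigned long handle_at(void *buf, unsigned long i, int is64)
    {
        return is64 ? (unsigned long)((uint64_t *)buf)[i]
                    : (unsigned long)((uint32_t *)buf)[i];
    }

    int main(void)
    {
        uint32_t h32[] = { 0x1000, 0x2000 };
        uint64_t h64[] = { 0x100000000ULL, 0x200000000ULL };

        printf("%#lx %#lx\n", handle_at(h32, 1, 0), handle_at(h64, 1, 1));
        return 0;
    }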
@@ -341,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
341 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 217 status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
342 size + sizeof(struct setup_data), &new); 218 size + sizeof(struct setup_data), &new);
343 if (status != EFI_SUCCESS) { 219 if (status != EFI_SUCCESS) {
344 efi_printk(sys_table, 220 efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
345 "Failed to alloc mem for properties\n");
346 return; 221 return;
347 } 222 }
348 223
@@ -358,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
358 new->next = 0; 233 new->next = 0;
359 234
360 data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data; 235 data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
361 if (!data) 236 if (!data) {
362 boot_params->hdr.setup_data = (unsigned long)new; 237 boot_params->hdr.setup_data = (unsigned long)new;
363 else { 238 } else {
364 while (data->next) 239 while (data->next)
365 data = (struct setup_data *)(unsigned long)data->next; 240 data = (struct setup_data *)(unsigned long)data->next;
366 data->next = (unsigned long)new; 241 data->next = (unsigned long)new;
@@ -380,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params)
380 } 255 }
381} 256}
382 257
258/*
259 * See if we have Universal Graphics Adapter (UGA) protocol
260 */
383static efi_status_t 261static efi_status_t
384setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height) 262setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
385{ 263{
386 struct efi_uga_draw_protocol *uga = NULL, *first_uga; 264 efi_status_t status;
387 efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; 265 u32 width, height;
266 void **uga_handle = NULL;
267 efi_uga_draw_protocol_t *uga = NULL, *first_uga;
388 unsigned long nr_ugas; 268 unsigned long nr_ugas;
389 u32 *handles = (u32 *)uga_handle;
390 efi_status_t status = EFI_INVALID_PARAMETER;
391 int i; 269 int i;
392 270
393 first_uga = NULL; 271 status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
394 nr_ugas = size / sizeof(u32); 272 size, (void **)&uga_handle);
395 for (i = 0; i < nr_ugas; i++) { 273 if (status != EFI_SUCCESS)
396 efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; 274 return status;
397 u32 w, h, depth, refresh;
398 void *pciio;
399 u32 handle = handles[i];
400
401 status = efi_call_early(handle_protocol, handle,
402 &uga_proto, (void **)&uga);
403 if (status != EFI_SUCCESS)
404 continue;
405
406 efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
407
408 status = efi_early->call((unsigned long)uga->get_mode, uga,
409 &w, &h, &depth, &refresh);
410 if (status == EFI_SUCCESS && (!first_uga || pciio)) {
411 *width = w;
412 *height = h;
413
414 /*
415 * Once we've found a UGA supporting PCIIO,
416 * don't bother looking any further.
417 */
418 if (pciio)
419 break;
420
421 first_uga = uga;
422 }
423 }
424 275
425 return status; 276 status = efi_call_early(locate_handle,
426} 277 EFI_LOCATE_BY_PROTOCOL,
278 uga_proto, NULL, &size, uga_handle);
279 if (status != EFI_SUCCESS)
280 goto free_handle;
427 281
428static efi_status_t 282 height = 0;
429setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height) 283 width = 0;
430{
431 struct efi_uga_draw_protocol *uga = NULL, *first_uga;
432 efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
433 unsigned long nr_ugas;
434 u64 *handles = (u64 *)uga_handle;
435 efi_status_t status = EFI_INVALID_PARAMETER;
436 int i;
437 284
438 first_uga = NULL; 285 first_uga = NULL;
439 nr_ugas = size / sizeof(u64); 286 nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
440 for (i = 0; i < nr_ugas; i++) { 287 for (i = 0; i < nr_ugas; i++) {
441 efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; 288 efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
442 u32 w, h, depth, refresh; 289 u32 w, h, depth, refresh;
443 void *pciio; 290 void *pciio;
444 u64 handle = handles[i]; 291 unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
292 : ((u32 *)uga_handle)[i];
445 293
446 status = efi_call_early(handle_protocol, handle, 294 status = efi_call_early(handle_protocol, handle,
447 &uga_proto, (void **)&uga); 295 uga_proto, (void **)&uga);
448 if (status != EFI_SUCCESS) 296 if (status != EFI_SUCCESS)
449 continue; 297 continue;
450 298
299 pciio = NULL;
451 efi_call_early(handle_protocol, handle, &pciio_proto, &pciio); 300 efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
452 301
453 status = efi_early->call((unsigned long)uga->get_mode, uga, 302 status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
454 &w, &h, &depth, &refresh); 303 &w, &h, &depth, &refresh);
455 if (status == EFI_SUCCESS && (!first_uga || pciio)) { 304 if (status == EFI_SUCCESS && (!first_uga || pciio)) {
456 *width = w; 305 width = w;
457 *height = h; 306 height = h;
458 307
459 /* 308 /*
460 * Once we've found a UGA supporting PCIIO, 309 * Once we've found a UGA supporting PCIIO,
@@ -467,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
467 } 316 }
468 } 317 }
469 318
470 return status;
471}
472
473/*
474 * See if we have Universal Graphics Adapter (UGA) protocol
475 */
476static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
477 unsigned long size)
478{
479 efi_status_t status;
480 u32 width, height;
481 void **uga_handle = NULL;
482
483 status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
484 size, (void **)&uga_handle);
485 if (status != EFI_SUCCESS)
486 return status;
487
488 status = efi_call_early(locate_handle,
489 EFI_LOCATE_BY_PROTOCOL,
490 uga_proto, NULL, &size, uga_handle);
491 if (status != EFI_SUCCESS)
492 goto free_handle;
493
494 height = 0;
495 width = 0;
496
497 if (efi_early->is64)
498 status = setup_uga64(uga_handle, size, &width, &height);
499 else
500 status = setup_uga32(uga_handle, size, &width, &height);
501
502 if (!width && !height) 319 if (!width && !height)
503 goto free_handle; 320 goto free_handle;
504 321
505 /* EFI framebuffer */ 322 /* EFI framebuffer */
506 si->orig_video_isVGA = VIDEO_TYPE_EFI; 323 si->orig_video_isVGA = VIDEO_TYPE_EFI;
507 324
508 si->lfb_depth = 32; 325 si->lfb_depth = 32;
509 si->lfb_width = width; 326 si->lfb_width = width;
510 si->lfb_height = height; 327 si->lfb_height = height;
511 328
512 si->red_size = 8; 329 si->red_size = 8;
513 si->red_pos = 16; 330 si->red_pos = 16;
514 si->green_size = 8; 331 si->green_size = 8;
515 si->green_pos = 8; 332 si->green_pos = 8;
516 si->blue_size = 8; 333 si->blue_size = 8;
517 si->blue_pos = 0; 334 si->blue_pos = 0;
518 si->rsvd_size = 8; 335 si->rsvd_size = 8;
519 si->rsvd_pos = 24; 336 si->rsvd_pos = 24;
520 337
521free_handle: 338free_handle:
522 efi_call_early(free_pool, uga_handle); 339 efi_call_early(free_pool, uga_handle);
340
523 return status; 341 return status;
524} 342}
525 343
@@ -586,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
586 if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 404 if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
587 return NULL; 405 return NULL;
588 406
589 if (efi_early->is64) 407 if (efi_is_64bit())
590 setup_boot_services64(efi_early); 408 setup_boot_services64(efi_early);
591 else 409 else
592 setup_boot_services32(efi_early); 410 setup_boot_services32(efi_early);
@@ -601,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
601 status = efi_low_alloc(sys_table, 0x4000, 1, 419 status = efi_low_alloc(sys_table, 0x4000, 1,
602 (unsigned long *)&boot_params); 420 (unsigned long *)&boot_params);
603 if (status != EFI_SUCCESS) { 421 if (status != EFI_SUCCESS) {
604 efi_printk(sys_table, "Failed to alloc lowmem for boot params\n"); 422 efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
605 return NULL; 423 return NULL;
606 } 424 }
607 425
@@ -617,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c)
617 * Fill out some of the header fields ourselves because the 435 * Fill out some of the header fields ourselves because the
618 * EFI firmware loader doesn't load the first sector. 436 * EFI firmware loader doesn't load the first sector.
619 */ 437 */
620 hdr->root_flags = 1; 438 hdr->root_flags = 1;
621 hdr->vid_mode = 0xffff; 439 hdr->vid_mode = 0xffff;
622 hdr->boot_flag = 0xAA55; 440 hdr->boot_flag = 0xAA55;
623 441
624 hdr->type_of_loader = 0x21; 442 hdr->type_of_loader = 0x21;
625 443
@@ -627,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
627 cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size); 445 cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
628 if (!cmdline_ptr) 446 if (!cmdline_ptr)
629 goto fail; 447 goto fail;
448
630 hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; 449 hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
631 /* Fill in upper bits of command line address, NOP on 32 bit */ 450 /* Fill in upper bits of command line address, NOP on 32 bit */
632 boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32; 451 boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
@@ -663,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c)
663 boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32; 482 boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32;
664 483
665 return boot_params; 484 return boot_params;
485
666fail2: 486fail2:
667 efi_free(sys_table, options_size, hdr->cmd_line_ptr); 487 efi_free(sys_table, options_size, hdr->cmd_line_ptr);
668fail: 488fail:
669 efi_free(sys_table, 0x4000, (unsigned long)boot_params); 489 efi_free(sys_table, 0x4000, (unsigned long)boot_params);
490
670 return NULL; 491 return NULL;
671} 492}
672 493
@@ -678,7 +499,7 @@ static void add_e820ext(struct boot_params *params,
678 unsigned long size; 499 unsigned long size;
679 500
680 e820ext->type = SETUP_E820_EXT; 501 e820ext->type = SETUP_E820_EXT;
681 e820ext->len = nr_entries * sizeof(struct boot_e820_entry); 502 e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
682 e820ext->next = 0; 503 e820ext->next = 0;
683 504
684 data = (struct setup_data *)(unsigned long)params->hdr.setup_data; 505 data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -692,8 +513,8 @@ static void add_e820ext(struct boot_params *params,
692 params->hdr.setup_data = (unsigned long)e820ext; 513 params->hdr.setup_data = (unsigned long)e820ext;
693} 514}
694 515
695static efi_status_t setup_e820(struct boot_params *params, 516static efi_status_t
696 struct setup_data *e820ext, u32 e820ext_size) 517setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
697{ 518{
698 struct boot_e820_entry *entry = params->e820_table; 519 struct boot_e820_entry *entry = params->e820_table;
699 struct efi_info *efi = &params->efi_info; 520 struct efi_info *efi = &params->efi_info;
@@ -814,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
814} 635}
815 636
816struct exit_boot_struct { 637struct exit_boot_struct {
817 struct boot_params *boot_params; 638 struct boot_params *boot_params;
818 struct efi_info *efi; 639 struct efi_info *efi;
819 struct setup_data *e820ext; 640 struct setup_data *e820ext;
820 __u32 e820ext_size; 641 __u32 e820ext_size;
821 bool is64;
822}; 642};
823 643
824static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, 644static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -845,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
845 first = false; 665 first = false;
846 } 666 }
847 667
848 signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE; 668 signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
669 : EFI32_LOADER_SIGNATURE;
849 memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32)); 670 memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
850 671
851 p->efi->efi_systab = (unsigned long)sys_table_arg; 672 p->efi->efi_systab = (unsigned long)sys_table_arg;
852 p->efi->efi_memdesc_size = *map->desc_size; 673 p->efi->efi_memdesc_size = *map->desc_size;
853 p->efi->efi_memdesc_version = *map->desc_ver; 674 p->efi->efi_memdesc_version = *map->desc_ver;
854 p->efi->efi_memmap = (unsigned long)*map->map; 675 p->efi->efi_memmap = (unsigned long)*map->map;
855 p->efi->efi_memmap_size = *map->map_size; 676 p->efi->efi_memmap_size = *map->map_size;
856 677
857#ifdef CONFIG_X86_64 678#ifdef CONFIG_X86_64
858 p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32; 679 p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
859 p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32; 680 p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
860#endif 681#endif
861 682
862 return EFI_SUCCESS; 683 return EFI_SUCCESS;
863} 684}
864 685
865static efi_status_t exit_boot(struct boot_params *boot_params, 686static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
866 void *handle, bool is64)
867{ 687{
868 unsigned long map_sz, key, desc_size, buff_size; 688 unsigned long map_sz, key, desc_size, buff_size;
869 efi_memory_desc_t *mem_map; 689 efi_memory_desc_t *mem_map;
@@ -874,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
874 struct efi_boot_memmap map; 694 struct efi_boot_memmap map;
875 struct exit_boot_struct priv; 695 struct exit_boot_struct priv;
876 696
877 map.map = &mem_map; 697 map.map = &mem_map;
878 map.map_size = &map_sz; 698 map.map_size = &map_sz;
879 map.desc_size = &desc_size; 699 map.desc_size = &desc_size;
880 map.desc_ver = &desc_version; 700 map.desc_ver = &desc_version;
881 map.key_ptr = &key; 701 map.key_ptr = &key;
882 map.buff_size = &buff_size; 702 map.buff_size = &buff_size;
883 priv.boot_params = boot_params; 703 priv.boot_params = boot_params;
884 priv.efi = &boot_params->efi_info; 704 priv.efi = &boot_params->efi_info;
885 priv.e820ext = NULL; 705 priv.e820ext = NULL;
886 priv.e820ext_size = 0; 706 priv.e820ext_size = 0;
887 priv.is64 = is64;
888 707
889 /* Might as well exit boot services now */ 708 /* Might as well exit boot services now */
890 status = efi_exit_boot_services(sys_table, handle, &map, &priv, 709 status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -892,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
892 if (status != EFI_SUCCESS) 711 if (status != EFI_SUCCESS)
893 return status; 712 return status;
894 713
895 e820ext = priv.e820ext; 714 e820ext = priv.e820ext;
896 e820ext_size = priv.e820ext_size; 715 e820ext_size = priv.e820ext_size;
716
897 /* Historic? */ 717 /* Historic? */
898 boot_params->alt_mem_k = 32 * 1024; 718 boot_params->alt_mem_k = 32 * 1024;
899 719
900 status = setup_e820(boot_params, e820ext, e820ext_size); 720 status = setup_e820(boot_params, e820ext, e820ext_size);
901 if (status != EFI_SUCCESS) 721 if (status != EFI_SUCCESS)
@@ -908,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
908 * On success we return a pointer to a boot_params structure, and NULL 728 * On success we return a pointer to a boot_params structure, and NULL
909 * on failure. 729 * on failure.
910 */ 730 */
911struct boot_params *efi_main(struct efi_config *c, 731struct boot_params *
912 struct boot_params *boot_params) 732efi_main(struct efi_config *c, struct boot_params *boot_params)
913{ 733{
914 struct desc_ptr *gdt = NULL; 734 struct desc_ptr *gdt = NULL;
915 efi_loaded_image_t *image; 735 efi_loaded_image_t *image;
@@ -918,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c,
918 struct desc_struct *desc; 738 struct desc_struct *desc;
919 void *handle; 739 void *handle;
920 efi_system_table_t *_table; 740 efi_system_table_t *_table;
921 bool is64;
922 741
923 efi_early = c; 742 efi_early = c;
924 743
925 _table = (efi_system_table_t *)(unsigned long)efi_early->table; 744 _table = (efi_system_table_t *)(unsigned long)efi_early->table;
926 handle = (void *)(unsigned long)efi_early->image_handle; 745 handle = (void *)(unsigned long)efi_early->image_handle;
927 is64 = efi_early->is64;
928 746
929 sys_table = _table; 747 sys_table = _table;
930 748
@@ -932,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c,
932 if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) 750 if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
933 goto fail; 751 goto fail;
934 752
935 if (is64) 753 if (efi_is_64bit())
936 setup_boot_services64(efi_early); 754 setup_boot_services64(efi_early);
937 else 755 else
938 setup_boot_services32(efi_early); 756 setup_boot_services32(efi_early);
@@ -957,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c,
957 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, 775 status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
958 sizeof(*gdt), (void **)&gdt); 776 sizeof(*gdt), (void **)&gdt);
959 if (status != EFI_SUCCESS) { 777 if (status != EFI_SUCCESS) {
960 efi_printk(sys_table, "Failed to alloc mem for gdt structure\n"); 778 efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
961 goto fail; 779 goto fail;
962 } 780 }
963 781
@@ -965,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c,
965 status = efi_low_alloc(sys_table, gdt->size, 8, 783 status = efi_low_alloc(sys_table, gdt->size, 8,
966 (unsigned long *)&gdt->address); 784 (unsigned long *)&gdt->address);
967 if (status != EFI_SUCCESS) { 785 if (status != EFI_SUCCESS) {
968 efi_printk(sys_table, "Failed to alloc mem for gdt\n"); 786 efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
969 goto fail; 787 goto fail;
970 } 788 }
971 789
@@ -988,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c,
988 hdr->code32_start = bzimage_addr; 806 hdr->code32_start = bzimage_addr;
989 } 807 }
990 808
991 status = exit_boot(boot_params, handle, is64); 809 status = exit_boot(boot_params, handle);
992 if (status != EFI_SUCCESS) { 810 if (status != EFI_SUCCESS) {
993 efi_printk(sys_table, "exit_boot() failed!\n"); 811 efi_printk(sys_table, "exit_boot() failed!\n");
994 goto fail; 812 goto fail;
@@ -1002,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c,
1002 820
1003 if (IS_ENABLED(CONFIG_X86_64)) { 821 if (IS_ENABLED(CONFIG_X86_64)) {
1004 /* __KERNEL32_CS */ 822 /* __KERNEL32_CS */
1005 desc->limit0 = 0xffff; 823 desc->limit0 = 0xffff;
1006 desc->base0 = 0x0000; 824 desc->base0 = 0x0000;
1007 desc->base1 = 0x0000; 825 desc->base1 = 0x0000;
1008 desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ; 826 desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
1009 desc->s = DESC_TYPE_CODE_DATA; 827 desc->s = DESC_TYPE_CODE_DATA;
1010 desc->dpl = 0; 828 desc->dpl = 0;
1011 desc->p = 1; 829 desc->p = 1;
1012 desc->limit1 = 0xf; 830 desc->limit1 = 0xf;
1013 desc->avl = 0; 831 desc->avl = 0;
1014 desc->l = 0; 832 desc->l = 0;
1015 desc->d = SEG_OP_SIZE_32BIT; 833 desc->d = SEG_OP_SIZE_32BIT;
1016 desc->g = SEG_GRANULARITY_4KB; 834 desc->g = SEG_GRANULARITY_4KB;
1017 desc->base2 = 0x00; 835 desc->base2 = 0x00;
836
1018 desc++; 837 desc++;
1019 } else { 838 } else {
1020 /* Second entry is unused on 32-bit */ 839 /* Second entry is unused on 32-bit */
@@ -1022,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c,
1022 } 841 }
1023 842
1024 /* __KERNEL_CS */ 843 /* __KERNEL_CS */
1025 desc->limit0 = 0xffff; 844 desc->limit0 = 0xffff;
1026 desc->base0 = 0x0000; 845 desc->base0 = 0x0000;
1027 desc->base1 = 0x0000; 846 desc->base1 = 0x0000;
1028 desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ; 847 desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
1029 desc->s = DESC_TYPE_CODE_DATA; 848 desc->s = DESC_TYPE_CODE_DATA;
1030 desc->dpl = 0; 849 desc->dpl = 0;
1031 desc->p = 1; 850 desc->p = 1;
1032 desc->limit1 = 0xf; 851 desc->limit1 = 0xf;
1033 desc->avl = 0; 852 desc->avl = 0;
853
1034 if (IS_ENABLED(CONFIG_X86_64)) { 854 if (IS_ENABLED(CONFIG_X86_64)) {
1035 desc->l = 1; 855 desc->l = 1;
1036 desc->d = 0; 856 desc->d = 0;
@@ -1038,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c,
1038 desc->l = 0; 858 desc->l = 0;
1039 desc->d = SEG_OP_SIZE_32BIT; 859 desc->d = SEG_OP_SIZE_32BIT;
1040 } 860 }
1041 desc->g = SEG_GRANULARITY_4KB; 861 desc->g = SEG_GRANULARITY_4KB;
1042 desc->base2 = 0x00; 862 desc->base2 = 0x00;
1043 desc++; 863 desc++;
1044 864
1045 /* __KERNEL_DS */ 865 /* __KERNEL_DS */
1046 desc->limit0 = 0xffff; 866 desc->limit0 = 0xffff;
1047 desc->base0 = 0x0000; 867 desc->base0 = 0x0000;
1048 desc->base1 = 0x0000; 868 desc->base1 = 0x0000;
1049 desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE; 869 desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
1050 desc->s = DESC_TYPE_CODE_DATA; 870 desc->s = DESC_TYPE_CODE_DATA;
1051 desc->dpl = 0; 871 desc->dpl = 0;
1052 desc->p = 1; 872 desc->p = 1;
1053 desc->limit1 = 0xf; 873 desc->limit1 = 0xf;
1054 desc->avl = 0; 874 desc->avl = 0;
1055 desc->l = 0; 875 desc->l = 0;
1056 desc->d = SEG_OP_SIZE_32BIT; 876 desc->d = SEG_OP_SIZE_32BIT;
1057 desc->g = SEG_GRANULARITY_4KB; 877 desc->g = SEG_GRANULARITY_4KB;
1058 desc->base2 = 0x00; 878 desc->base2 = 0x00;
1059 desc++; 879 desc++;
1060 880
1061 if (IS_ENABLED(CONFIG_X86_64)) { 881 if (IS_ENABLED(CONFIG_X86_64)) {
1062 /* Task segment value */ 882 /* Task segment value */
1063 desc->limit0 = 0x0000; 883 desc->limit0 = 0x0000;
1064 desc->base0 = 0x0000; 884 desc->base0 = 0x0000;
1065 desc->base1 = 0x0000; 885 desc->base1 = 0x0000;
1066 desc->type = SEG_TYPE_TSS; 886 desc->type = SEG_TYPE_TSS;
1067 desc->s = 0; 887 desc->s = 0;
1068 desc->dpl = 0; 888 desc->dpl = 0;
1069 desc->p = 1; 889 desc->p = 1;
1070 desc->limit1 = 0x0; 890 desc->limit1 = 0x0;
1071 desc->avl = 0; 891 desc->avl = 0;
1072 desc->l = 0; 892 desc->l = 0;
1073 desc->d = 0; 893 desc->d = 0;
1074 desc->g = SEG_GRANULARITY_4KB; 894 desc->g = SEG_GRANULARITY_4KB;
1075 desc->base2 = 0x00; 895 desc->base2 = 0x00;
1076 desc++; 896 desc++;
1077 } 897 }
1078 898
@@ -1082,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c,
1082 return boot_params; 902 return boot_params;
1083fail: 903fail:
1084 efi_printk(sys_table, "efi_main() failed!\n"); 904 efi_printk(sys_table, "efi_main() failed!\n");
905
1085 return NULL; 906 return NULL;
1086} 907}
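Note on the eboot.c hunks above: the cached `is64` local is dropped in favor of calling a helper each time. As a minimal sketch (assumed shape, not quoted from the patch), the helper simply reads the same flag from the saved efi_config that the deleted local used to cache:

static inline bool efi_is_64bit(void)
{
	return efi_early->is64;	/* same flag the removed 'is64' local held */
}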
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index e799dc5c6448..8297387c4676 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -12,22 +12,22 @@
12 12
13#define DESC_TYPE_CODE_DATA (1 << 0) 13#define DESC_TYPE_CODE_DATA (1 << 0)
14 14
15struct efi_uga_draw_protocol_32 { 15typedef struct {
16 u32 get_mode; 16 u32 get_mode;
17 u32 set_mode; 17 u32 set_mode;
18 u32 blt; 18 u32 blt;
19}; 19} efi_uga_draw_protocol_32_t;
20 20
21struct efi_uga_draw_protocol_64 { 21typedef struct {
22 u64 get_mode; 22 u64 get_mode;
23 u64 set_mode; 23 u64 set_mode;
24 u64 blt; 24 u64 blt;
25}; 25} efi_uga_draw_protocol_64_t;
26 26
27struct efi_uga_draw_protocol { 27typedef struct {
28 void *get_mode; 28 void *get_mode;
29 void *set_mode; 29 void *set_mode;
30 void *blt; 30 void *blt;
31}; 31} efi_uga_draw_protocol_t;
32 32
33#endif /* BOOT_COMPRESSED_EBOOT_H */ 33#endif /* BOOT_COMPRESSED_EBOOT_H */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 8c5107545251..9e2157371491 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,3 +1,4 @@
1#include <asm/e820/types.h>
1#include <asm/processor.h> 2#include <asm/processor.h>
2#include "pgtable.h" 3#include "pgtable.h"
3#include "../string.h" 4#include "../string.h"
@@ -34,10 +35,62 @@ unsigned long *trampoline_32bit __section(.data);
34extern struct boot_params *boot_params; 35extern struct boot_params *boot_params;
35int cmdline_find_option_bool(const char *option); 36int cmdline_find_option_bool(const char *option);
36 37
38static unsigned long find_trampoline_placement(void)
39{
40 unsigned long bios_start, ebda_start;
41 unsigned long trampoline_start;
42 struct boot_e820_entry *entry;
43 int i;
44
45 /*
46 * Find a suitable spot for the trampoline.
47 * This code is based on reserve_bios_regions().
48 */
49
50 ebda_start = *(unsigned short *)0x40e << 4;
51 bios_start = *(unsigned short *)0x413 << 10;
52
53 if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
54 bios_start = BIOS_START_MAX;
55
56 if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
57 bios_start = ebda_start;
58
59 bios_start = round_down(bios_start, PAGE_SIZE);
60
61 /* Find the first usable memory region under bios_start. */
62 for (i = boot_params->e820_entries - 1; i >= 0; i--) {
63 entry = &boot_params->e820_table[i];
64
65 /* Skip all entries above bios_start. */
66 if (bios_start <= entry->addr)
67 continue;
68
69 /* Skip non-RAM entries. */
70 if (entry->type != E820_TYPE_RAM)
71 continue;
72
73 /* Adjust bios_start to the end of the entry if needed. */
74 if (bios_start > entry->addr + entry->size)
75 bios_start = entry->addr + entry->size;
76
77 /* Keep bios_start page-aligned. */
78 bios_start = round_down(bios_start, PAGE_SIZE);
79
80 /* Skip the entry if it's too small. */
81 if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
82 continue;
83
84 break;
85 }
86
87 /* Place the trampoline just below the end of low memory */
88 return bios_start - TRAMPOLINE_32BIT_SIZE;
89}
90
37struct paging_config paging_prepare(void *rmode) 91struct paging_config paging_prepare(void *rmode)
38{ 92{
39 struct paging_config paging_config = {}; 93 struct paging_config paging_config = {};
40 unsigned long bios_start, ebda_start;
41 94
42 /* Initialize boot_params. Required for cmdline_find_option_bool(). */ 95 /* Initialize boot_params. Required for cmdline_find_option_bool(). */
43 boot_params = rmode; 96 boot_params = rmode;
@@ -61,23 +114,7 @@ struct paging_config paging_prepare(void *rmode)
61 paging_config.l5_required = 1; 114 paging_config.l5_required = 1;
62 } 115 }
63 116
64 /* 117 paging_config.trampoline_start = find_trampoline_placement();
65 * Find a suitable spot for the trampoline.
66 * This code is based on reserve_bios_regions().
67 */
68
69 ebda_start = *(unsigned short *)0x40e << 4;
70 bios_start = *(unsigned short *)0x413 << 10;
71
72 if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
73 bios_start = BIOS_START_MAX;
74
75 if (ebda_start > BIOS_START_MIN && ebda_start < bios_start)
76 bios_start = ebda_start;
77
78 /* Place the trampoline just below the end of low memory, aligned to 4k */
79 paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE;
80 paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE);
81 118
82 trampoline_32bit = (unsigned long *)paging_config.trampoline_start; 119 trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
83 120
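A note on the two magic reads in find_trampoline_placement() above: word 0x40e in the BIOS Data Area holds the EBDA start as a real-mode segment, while word 0x413 holds the base memory size in KiB, which is why the two shifts differ. A standalone sketch of the conversions (illustrative only; the variable names here are made up):

	unsigned short ebda_seg = *(unsigned short *)0x40e;	/* real-mode segment */
	unsigned short base_kib = *(unsigned short *)0x413;	/* base memory, in KiB */

	unsigned long ebda_start = (unsigned long)ebda_seg << 4;	/* segment * 16 -> bytes */
	unsigned long bios_start = (unsigned long)base_kib << 10;	/* KiB * 1024 -> bytes */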
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 5de7c0d46edf..acd11b3bf639 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
375 } 375 }
376}; 376};
377 377
378static const struct x86_cpu_id aesni_cpu_id[] = {
379 X86_FEATURE_MATCH(X86_FEATURE_AES),
380 X86_FEATURE_MATCH(X86_FEATURE_XMM2),
381 {}
382};
383MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
384
385static int __init crypto_aegis128_aesni_module_init(void) 378static int __init crypto_aegis128_aesni_module_init(void)
386{ 379{
387 if (!x86_match_cpu(aesni_cpu_id)) 380 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
381 !boot_cpu_has(X86_FEATURE_AES) ||
382 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
383 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
388 return -ENODEV; 384 return -ENODEV;
389 385
390 return crypto_register_aeads(crypto_aegis128_aesni_alg, 386 return crypto_register_aeads(crypto_aegis128_aesni_alg,
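The same conversion is applied to the aegis128l, aegis256, morus1280 and morus640 glue code below: instead of a module device table matched via x86_match_cpu(), each init routine now tests CPU features directly. A condensed sketch of the pattern (the module name is hypothetical); the OSXSAVE and cpu_has_xfeatures() tests are the new part, verifying that the OS actually saves the SSE register state, not just that the instructions exist:

static int __init example_simd_aead_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||		/* SSE2 present */
	    !boot_cpu_has(X86_FEATURE_AES) ||		/* AES-NI present */
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||	/* OS enabled XSAVE */
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;		/* refuse to load on unfit CPUs */

	return 0;			/* the real drivers register their AEADs here */
}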
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 876e4866e633..2071c3d1ae07 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
375 } 375 }
376}; 376};
377 377
378static const struct x86_cpu_id aesni_cpu_id[] = {
379 X86_FEATURE_MATCH(X86_FEATURE_AES),
380 X86_FEATURE_MATCH(X86_FEATURE_XMM2),
381 {}
382};
383MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
384
385static int __init crypto_aegis128l_aesni_module_init(void) 378static int __init crypto_aegis128l_aesni_module_init(void)
386{ 379{
387 if (!x86_match_cpu(aesni_cpu_id)) 380 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
381 !boot_cpu_has(X86_FEATURE_AES) ||
382 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
383 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
388 return -ENODEV; 384 return -ENODEV;
389 385
390 return crypto_register_aeads(crypto_aegis128l_aesni_alg, 386 return crypto_register_aeads(crypto_aegis128l_aesni_alg,
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 2b5dd3af8f4d..b5f2a8fd5a71 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
375 } 375 }
376}; 376};
377 377
378static const struct x86_cpu_id aesni_cpu_id[] = {
379 X86_FEATURE_MATCH(X86_FEATURE_AES),
380 X86_FEATURE_MATCH(X86_FEATURE_XMM2),
381 {}
382};
383MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
384
385static int __init crypto_aegis256_aesni_module_init(void) 378static int __init crypto_aegis256_aesni_module_init(void)
386{ 379{
387 if (!x86_match_cpu(aesni_cpu_id)) 380 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
381 !boot_cpu_has(X86_FEATURE_AES) ||
382 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
383 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
388 return -ENODEV; 384 return -ENODEV;
389 385
390 return crypto_register_aeads(crypto_aegis256_aesni_alg, 386 return crypto_register_aeads(crypto_aegis256_aesni_alg,
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
index f111f36d26dc..6634907d6ccd 100644
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ b/arch/x86/crypto/morus1280-avx2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
37 37
38MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400); 38MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
39 39
40static const struct x86_cpu_id avx2_cpu_id[] = {
41 X86_FEATURE_MATCH(X86_FEATURE_AVX2),
42 {}
43};
44MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id);
45
46static int __init crypto_morus1280_avx2_module_init(void) 40static int __init crypto_morus1280_avx2_module_init(void)
47{ 41{
48 if (!x86_match_cpu(avx2_cpu_id)) 42 if (!boot_cpu_has(X86_FEATURE_AVX2) ||
43 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
44 !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
49 return -ENODEV; 45 return -ENODEV;
50 46
51 return crypto_register_aeads(crypto_morus1280_avx2_algs, 47 return crypto_register_aeads(crypto_morus1280_avx2_algs,
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 839270aa713c..95cf857d2cbb 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
37 37
38MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350); 38MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
39 39
40static const struct x86_cpu_id sse2_cpu_id[] = {
41 X86_FEATURE_MATCH(X86_FEATURE_XMM2),
42 {}
43};
44MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
45
46static int __init crypto_morus1280_sse2_module_init(void) 40static int __init crypto_morus1280_sse2_module_init(void)
47{ 41{
48 if (!x86_match_cpu(sse2_cpu_id)) 42 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
43 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
44 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
49 return -ENODEV; 45 return -ENODEV;
50 46
51 return crypto_register_aeads(crypto_morus1280_sse2_algs, 47 return crypto_register_aeads(crypto_morus1280_sse2_algs,
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 26b47e2db8d2..615fb7bc9a32 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
37 37
38MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400); 38MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
39 39
40static const struct x86_cpu_id sse2_cpu_id[] = {
41 X86_FEATURE_MATCH(X86_FEATURE_XMM2),
42 {}
43};
44MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
45
46static int __init crypto_morus640_sse2_module_init(void) 40static int __init crypto_morus640_sse2_module_init(void)
47{ 41{
48 if (!x86_match_cpu(sse2_cpu_id)) 42 if (!boot_cpu_has(X86_FEATURE_XMM2) ||
43 !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
44 !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
49 return -ENODEV; 45 return -ENODEV;
50 46
51 return crypto_register_aeads(crypto_morus640_sse2_algs, 47 return crypto_register_aeads(crypto_morus640_sse2_algs,
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 73a522d53b53..8ae7ffda8f98 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -981,7 +981,7 @@ ENTRY(\sym)
981 981
982 call \do_sym 982 call \do_sym
983 983
984 jmp error_exit /* %ebx: no swapgs flag */ 984 jmp error_exit
985 .endif 985 .endif
986END(\sym) 986END(\sym)
987.endm 987.endm
@@ -1222,7 +1222,6 @@ END(paranoid_exit)
1222 1222
1223/* 1223/*
1224 * Save all registers in pt_regs, and switch GS if needed. 1224 * Save all registers in pt_regs, and switch GS if needed.
1225 * Return: EBX=0: came from user mode; EBX=1: otherwise
1226 */ 1225 */
1227ENTRY(error_entry) 1226ENTRY(error_entry)
1228 UNWIND_HINT_FUNC 1227 UNWIND_HINT_FUNC
@@ -1269,7 +1268,6 @@ ENTRY(error_entry)
1269 * for these here too. 1268 * for these here too.
1270 */ 1269 */
1271.Lerror_kernelspace: 1270.Lerror_kernelspace:
1272 incl %ebx
1273 leaq native_irq_return_iret(%rip), %rcx 1271 leaq native_irq_return_iret(%rip), %rcx
1274 cmpq %rcx, RIP+8(%rsp) 1272 cmpq %rcx, RIP+8(%rsp)
1275 je .Lerror_bad_iret 1273 je .Lerror_bad_iret
@@ -1303,28 +1301,20 @@ ENTRY(error_entry)
1303 1301
1304 /* 1302 /*
1305 * Pretend that the exception came from user mode: set up pt_regs 1303 * Pretend that the exception came from user mode: set up pt_regs
1306 * as if we faulted immediately after IRET and clear EBX so that 1304 * as if we faulted immediately after IRET.
1307 * error_exit knows that we will be returning to user mode.
1308 */ 1305 */
1309 mov %rsp, %rdi 1306 mov %rsp, %rdi
1310 call fixup_bad_iret 1307 call fixup_bad_iret
1311 mov %rax, %rsp 1308 mov %rax, %rsp
1312 decl %ebx
1313 jmp .Lerror_entry_from_usermode_after_swapgs 1309 jmp .Lerror_entry_from_usermode_after_swapgs
1314END(error_entry) 1310END(error_entry)
1315 1311
1316
1317/*
1318 * On entry, EBX is a "return to kernel mode" flag:
1319 * 1: already in kernel mode, don't need SWAPGS
1320 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1321 */
1322ENTRY(error_exit) 1312ENTRY(error_exit)
1323 UNWIND_HINT_REGS 1313 UNWIND_HINT_REGS
1324 DISABLE_INTERRUPTS(CLBR_ANY) 1314 DISABLE_INTERRUPTS(CLBR_ANY)
1325 TRACE_IRQS_OFF 1315 TRACE_IRQS_OFF
1326 testl %ebx, %ebx 1316 testb $3, CS(%rsp)
1327 jnz retint_kernel 1317 jz retint_kernel
1328 jmp retint_user 1318 jmp retint_user
1329END(error_exit) 1319END(error_exit)
1330 1320
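The entry_64.S change above replaces the hand-maintained EBX flag with a direct test of the CS value saved on the stack: the low two bits of CS hold the privilege level, so a nonzero result means the exception interrupted user mode. In C the same test looks roughly like this (a sketch of the existing user_mode() idea, not code from the patch):

static inline bool came_from_user(struct pt_regs *regs)
{
	return (regs->cs & 3) != 0;	/* CPL 0 = kernel, 3 = user */
}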
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 4b98101209a1..d50bb4dc0650 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
579{ 579{
580 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); 580 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
581 struct perf_event *event = pcpu->event; 581 struct perf_event *event = pcpu->event;
582 struct hw_perf_event *hwc = &event->hw; 582 struct hw_perf_event *hwc;
583 struct perf_sample_data data; 583 struct perf_sample_data data;
584 struct perf_raw_record raw; 584 struct perf_raw_record raw;
585 struct pt_regs regs; 585 struct pt_regs regs;
@@ -602,6 +602,10 @@ fail:
602 return 0; 602 return 0;
603 } 603 }
604 604
605 if (WARN_ON_ONCE(!event))
606 goto fail;
607
608 hwc = &event->hw;
605 msr = hwc->config_base; 609 msr = hwc->config_base;
606 buf = ibs_data.regs; 610 buf = ibs_data.regs;
607 rdmsrl(msr, *buf); 611 rdmsrl(msr, *buf);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 707b2a96e516..86f0c15dcc2d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2997,6 +2997,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
2997 } 2997 }
2998 if (x86_pmu.pebs_aliases) 2998 if (x86_pmu.pebs_aliases)
2999 x86_pmu.pebs_aliases(event); 2999 x86_pmu.pebs_aliases(event);
3000
3001 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3002 event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3000 } 3003 }
3001 3004
3002 if (needs_branch_stack(event)) { 3005 if (needs_branch_stack(event)) {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8cf03f101938..8dbba77e0518 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1186,16 +1186,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
1186 } 1186 }
1187 1187
1188 /* 1188 /*
1189 * We must however always use iregs for the unwinder to stay sane; the
1190 * record BP,SP,IP can point into thin air when the record is from a
1191 * previous PMI context or an (I)RET happened between the record and
1192 * PMI.
1193 */
1194 if (sample_type & PERF_SAMPLE_CALLCHAIN)
1195 data->callchain = perf_callchain(event, iregs);
1196
1197 /*
1189 * We use the interrupt regs as a base because the PEBS record does not 1198 * We use the interrupt regs as a base because the PEBS record does not
1190 * contain a full regs set, specifically it seems to lack segment 1199 * contain a full regs set, specifically it seems to lack segment
1191 * descriptors, which get used by things like user_mode(). 1200 * descriptors, which get used by things like user_mode().
1192 * 1201 *
1193 * In the simple case fix up only the IP for PERF_SAMPLE_IP. 1202 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
1194 *
1195 * We must however always use BP,SP from iregs for the unwinder to stay
1196 * sane; the record BP,SP can point into thin air when the record is
1197 * from a previous PMI context or an (I)RET happened between the record
1198 * and PMI.
1199 */ 1203 */
1200 *regs = *iregs; 1204 *regs = *iregs;
1201 1205
@@ -1214,15 +1218,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
1214 regs->si = pebs->si; 1218 regs->si = pebs->si;
1215 regs->di = pebs->di; 1219 regs->di = pebs->di;
1216 1220
1217 /* 1221 regs->bp = pebs->bp;
1218 * Per the above; only set BP,SP if we don't need callchains. 1222 regs->sp = pebs->sp;
1219 *
1220 * XXX: does this make sense?
1221 */
1222 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
1223 regs->bp = pebs->bp;
1224 regs->sp = pebs->sp;
1225 }
1226 1223
1227#ifndef CONFIG_X86_32 1224#ifndef CONFIG_X86_32
1228 regs->r8 = pebs->r8; 1225 regs->r8 = pebs->r8;
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index c9e1e0bef3c3..e17ab885b1e9 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -28,7 +28,7 @@
28#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) 28#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
29#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) 29#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
30#define UNCORE_EXTRA_PCI_DEV 0xff 30#define UNCORE_EXTRA_PCI_DEV 0xff
31#define UNCORE_EXTRA_PCI_DEV_MAX 3 31#define UNCORE_EXTRA_PCI_DEV_MAX 4
32 32
33#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) 33#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
34 34
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 87dc0263a2e1..51d7c117e3c7 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1029,6 +1029,7 @@ void snbep_uncore_cpu_init(void)
1029enum { 1029enum {
1030 SNBEP_PCI_QPI_PORT0_FILTER, 1030 SNBEP_PCI_QPI_PORT0_FILTER,
1031 SNBEP_PCI_QPI_PORT1_FILTER, 1031 SNBEP_PCI_QPI_PORT1_FILTER,
1032 BDX_PCI_QPI_PORT2_FILTER,
1032 HSWEP_PCI_PCU_3, 1033 HSWEP_PCI_PCU_3,
1033}; 1034};
1034 1035
@@ -3286,15 +3287,18 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
3286 }, 3287 },
3287 { /* QPI Port 0 filter */ 3288 { /* QPI Port 0 filter */
3288 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86), 3289 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f86),
3289 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 0), 3290 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3291 SNBEP_PCI_QPI_PORT0_FILTER),
3290 }, 3292 },
3291 { /* QPI Port 1 filter */ 3293 { /* QPI Port 1 filter */
3292 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96), 3294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f96),
3293 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 1), 3295 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3296 SNBEP_PCI_QPI_PORT1_FILTER),
3294 }, 3297 },
3295 { /* QPI Port 2 filter */ 3298 { /* QPI Port 2 filter */
3296 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), 3299 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
3297 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), 3300 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
3301 BDX_PCI_QPI_PORT2_FILTER),
3298 }, 3302 },
3299 { /* PCU.3 (for Capability registers) */ 3303 { /* PCU.3 (for Capability registers) */
3300 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), 3304 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 9ef5ee03d2d7..159622ee0674 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -43,7 +43,7 @@ asm (".pushsection .text;"
43 "push %rdx;" 43 "push %rdx;"
44 "mov $0x1,%eax;" 44 "mov $0x1,%eax;"
45 "xor %edx,%edx;" 45 "xor %edx,%edx;"
46 "lock cmpxchg %dl,(%rdi);" 46 LOCK_PREFIX "cmpxchg %dl,(%rdi);"
47 "cmp $0x1,%al;" 47 "cmp $0x1,%al;"
48 "jne .slowpath;" 48 "jne .slowpath;"
49 "pop %rdx;" 49 "pop %rdx;"
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 2aabd4cb0e3f..adbda5847b14 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void)
573 case 0x04: return 0x02000014; 573 case 0x04: return 0x02000014;
574 } 574 }
575 575
576 if (boot_cpu_data.x86_stepping > 4)
577 return 0;
578
576 return ~0U; 579 return ~0U;
577} 580}
578 581
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8c50754c09c1..4b767284b7f5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
123{ 123{
124 memset(m, 0, sizeof(struct mce)); 124 memset(m, 0, sizeof(struct mce));
125 m->cpu = m->extcpu = smp_processor_id(); 125 m->cpu = m->extcpu = smp_processor_id();
126 /* We hope get_seconds stays lockless */ 126 /* need the internal __ version to avoid deadlocks */
127 m->time = get_seconds(); 127 m->time = __ktime_get_real_seconds();
128 m->cpuvendor = boot_cpu_data.x86_vendor; 128 m->cpuvendor = boot_cpu_data.x86_vendor;
129 m->cpuid = cpuid_eax(1); 129 m->cpuid = cpuid_eax(1);
130 m->socketid = cpu_data(m->extcpu).phys_proc_id; 130 m->socketid = cpu_data(m->extcpu).phys_proc_id;
@@ -1104,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn)
1104} 1104}
1105#endif 1105#endif
1106 1106
1107
1108/*
1109 * Cases where we avoid rendezvous handler timeout:
1110 * 1) If this CPU is offline.
1111 *
1112 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1113 * skip those CPUs which remain looping in the 1st kernel - see
1114 * crash_nmi_callback().
1115 *
1116 * Note: there still is a small window between kexec-ing and the new,
1117 * kdump kernel establishing a new #MC handler where a broadcasted MCE
1118 * might not get handled properly.
1119 */
1120static bool __mc_check_crashing_cpu(int cpu)
1121{
1122 if (cpu_is_offline(cpu) ||
1123 (crashing_cpu != -1 && crashing_cpu != cpu)) {
1124 u64 mcgstatus;
1125
1126 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
1127 if (mcgstatus & MCG_STATUS_RIPV) {
1128 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1129 return true;
1130 }
1131 }
1132 return false;
1133}
1134
1135static void __mc_scan_banks(struct mce *m, struct mce *final,
1136 unsigned long *toclear, unsigned long *valid_banks,
1137 int no_way_out, int *worst)
1138{
1139 struct mca_config *cfg = &mca_cfg;
1140 int severity, i;
1141
1142 for (i = 0; i < cfg->banks; i++) {
1143 __clear_bit(i, toclear);
1144 if (!test_bit(i, valid_banks))
1145 continue;
1146
1147 if (!mce_banks[i].ctl)
1148 continue;
1149
1150 m->misc = 0;
1151 m->addr = 0;
1152 m->bank = i;
1153
1154 m->status = mce_rdmsrl(msr_ops.status(i));
1155 if (!(m->status & MCI_STATUS_VAL))
1156 continue;
1157
1158 /*
1159 * Corrected or non-signaled errors are handled by
1160 * machine_check_poll(). Leave them alone, unless this panics.
1161 */
1162 if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1163 !no_way_out)
1164 continue;
1165
1166 /* Set taint even when machine check was not enabled. */
1167 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1168
1169 severity = mce_severity(m, cfg->tolerant, NULL, true);
1170
1171 /*
1172 * When machine check was for corrected/deferred handler don't
1173 * touch, unless we're panicking.
1174 */
1175 if ((severity == MCE_KEEP_SEVERITY ||
1176 severity == MCE_UCNA_SEVERITY) && !no_way_out)
1177 continue;
1178
1179 __set_bit(i, toclear);
1180
1181 /* Machine check event was not enabled. Clear, but ignore. */
1182 if (severity == MCE_NO_SEVERITY)
1183 continue;
1184
1185 mce_read_aux(m, i);
1186
1187 /* assuming valid severity level != 0 */
1188 m->severity = severity;
1189
1190 mce_log(m);
1191
1192 if (severity > *worst) {
1193 *final = *m;
1194 *worst = severity;
1195 }
1196 }
1197
1198 /* mce_clear_state will clear *final, save locally for use later */
1199 *m = *final;
1200}
1201
1107/* 1202/*
1108 * The actual machine check handler. This only handles real 1203 * The actual machine check handler. This only handles real
1109 * exceptions when something got corrupted coming in through int 18. 1204 * exceptions when something got corrupted coming in through int 18.
@@ -1118,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn)
1118 */ 1213 */
1119void do_machine_check(struct pt_regs *regs, long error_code) 1214void do_machine_check(struct pt_regs *regs, long error_code)
1120{ 1215{
1216 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1217 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1121 struct mca_config *cfg = &mca_cfg; 1218 struct mca_config *cfg = &mca_cfg;
1219 int cpu = smp_processor_id();
1220 char *msg = "Unknown";
1122 struct mce m, *final; 1221 struct mce m, *final;
1123 int i;
1124 int worst = 0; 1222 int worst = 0;
1125 int severity;
1126 1223
1127 /* 1224 /*
1128 * Establish sequential order between the CPUs entering the machine 1225 * Establish sequential order between the CPUs entering the machine
1129 * check handler. 1226 * check handler.
1130 */ 1227 */
1131 int order = -1; 1228 int order = -1;
1229
1132 /* 1230 /*
1133 * If no_way_out gets set, there is no safe way to recover from this 1231 * If no_way_out gets set, there is no safe way to recover from this
1134 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway. 1232 * MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
1135 */ 1233 */
1136 int no_way_out = 0; 1234 int no_way_out = 0;
1235
1137 /* 1236 /*
1138 * If kill_it gets set, there might be a way to recover from this 1237 * If kill_it gets set, there might be a way to recover from this
1139 * error. 1238 * error.
1140 */ 1239 */
1141 int kill_it = 0; 1240 int kill_it = 0;
1142 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1143 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1144 char *msg = "Unknown";
1145 1241
1146 /* 1242 /*
1147 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES 1243 * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
1148 * on Intel. 1244 * on Intel.
1149 */ 1245 */
1150 int lmce = 1; 1246 int lmce = 1;
1151 int cpu = smp_processor_id();
1152 1247
1153 /* 1248 if (__mc_check_crashing_cpu(cpu))
1154 * Cases where we avoid rendezvous handler timeout: 1249 return;
1155 * 1) If this CPU is offline.
1156 *
1157 * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1158 * skip those CPUs which remain looping in the 1st kernel - see
1159 * crash_nmi_callback().
1160 *
1161 * Note: there still is a small window between kexec-ing and the new,
1162 * kdump kernel establishing a new #MC handler where a broadcasted MCE
1163 * might not get handled properly.
1164 */
1165 if (cpu_is_offline(cpu) ||
1166 (crashing_cpu != -1 && crashing_cpu != cpu)) {
1167 u64 mcgstatus;
1168
1169 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
1170 if (mcgstatus & MCG_STATUS_RIPV) {
1171 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1172 return;
1173 }
1174 }
1175 1250
1176 ist_enter(regs); 1251 ist_enter(regs);
1177 1252
1178 this_cpu_inc(mce_exception_count); 1253 this_cpu_inc(mce_exception_count);
1179 1254
1180 if (!cfg->banks)
1181 goto out;
1182
1183 mce_gather_info(&m, regs); 1255 mce_gather_info(&m, regs);
1184 m.tsc = rdtsc(); 1256 m.tsc = rdtsc();
1185 1257
@@ -1220,67 +1292,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1220 order = mce_start(&no_way_out); 1292 order = mce_start(&no_way_out);
1221 } 1293 }
1222 1294
1223 for (i = 0; i < cfg->banks; i++) { 1295 __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
1224 __clear_bit(i, toclear);
1225 if (!test_bit(i, valid_banks))
1226 continue;
1227 if (!mce_banks[i].ctl)
1228 continue;
1229
1230 m.misc = 0;
1231 m.addr = 0;
1232 m.bank = i;
1233
1234 m.status = mce_rdmsrl(msr_ops.status(i));
1235 if ((m.status & MCI_STATUS_VAL) == 0)
1236 continue;
1237
1238 /*
1239 * Non uncorrected or non signaled errors are handled by
1240 * machine_check_poll. Leave them alone, unless this panics.
1241 */
1242 if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1243 !no_way_out)
1244 continue;
1245
1246 /*
1247 * Set taint even when machine check was not enabled.
1248 */
1249 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1250
1251 severity = mce_severity(&m, cfg->tolerant, NULL, true);
1252
1253 /*
1254 * When machine check was for corrected/deferred handler don't
1255 * touch, unless we're panicking.
1256 */
1257 if ((severity == MCE_KEEP_SEVERITY ||
1258 severity == MCE_UCNA_SEVERITY) && !no_way_out)
1259 continue;
1260 __set_bit(i, toclear);
1261 if (severity == MCE_NO_SEVERITY) {
1262 /*
1263 * Machine check event was not enabled. Clear, but
1264 * ignore.
1265 */
1266 continue;
1267 }
1268
1269 mce_read_aux(&m, i);
1270
1271 /* assuming valid severity level != 0 */
1272 m.severity = severity;
1273
1274 mce_log(&m);
1275
1276 if (severity > worst) {
1277 *final = m;
1278 worst = severity;
1279 }
1280 }
1281
1282 /* mce_clear_state will clear *final, save locally for use later */
1283 m = *final;
1284 1296
1285 if (!no_way_out) 1297 if (!no_way_out)
1286 mce_clear_state(toclear); 1298 mce_clear_state(toclear);
@@ -1319,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1319 if (worst > 0) 1331 if (worst > 0)
1320 mce_report_event(regs); 1332 mce_report_event(regs);
1321 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); 1333 mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1322out: 1334
1323 sync_core(); 1335 sync_core();
1324 1336
1325 if (worst != MCE_AR_SEVERITY && !kill_it) 1337 if (worst != MCE_AR_SEVERITY && !kill_it)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d594690d8b95..6b8f11521c41 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
890 if (cache->nobjs >= min) 890 if (cache->nobjs >= min)
891 return 0; 891 return 0;
892 while (cache->nobjs < ARRAY_SIZE(cache->objects)) { 892 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
893 page = (void *)__get_free_page(GFP_KERNEL); 893 page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
894 if (!page) 894 if (!page)
895 return -ENOMEM; 895 return -ENOMEM;
896 cache->objects[cache->nobjs++] = page; 896 cache->objects[cache->nobjs++] = page;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e30da9a2430c..5d8e317c2b04 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7893,6 +7893,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
7893 HRTIMER_MODE_REL_PINNED); 7893 HRTIMER_MODE_REL_PINNED);
7894 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; 7894 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
7895 7895
7896 vmx->nested.vpid02 = allocate_vpid();
7897
7896 vmx->nested.vmxon = true; 7898 vmx->nested.vmxon = true;
7897 return 0; 7899 return 0;
7898 7900
@@ -8480,21 +8482,20 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
8480/* Emulate the VMPTRST instruction */ 8482/* Emulate the VMPTRST instruction */
8481static int handle_vmptrst(struct kvm_vcpu *vcpu) 8483static int handle_vmptrst(struct kvm_vcpu *vcpu)
8482{ 8484{
8483 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 8485 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
8484 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); 8486 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8485 gva_t vmcs_gva; 8487 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
8486 struct x86_exception e; 8488 struct x86_exception e;
8489 gva_t gva;
8487 8490
8488 if (!nested_vmx_check_permission(vcpu)) 8491 if (!nested_vmx_check_permission(vcpu))
8489 return 1; 8492 return 1;
8490 8493
8491 if (get_vmx_mem_address(vcpu, exit_qualification, 8494 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
8492 vmx_instruction_info, true, &vmcs_gva))
8493 return 1; 8495 return 1;
8494 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ 8496 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
8495 if (kvm_write_guest_virt_system(vcpu, vmcs_gva, 8497 if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
8496 (void *)&to_vmx(vcpu)->nested.current_vmptr, 8498 sizeof(gpa_t), &e)) {
8497 sizeof(u64), &e)) {
8498 kvm_inject_page_fault(vcpu, &e); 8499 kvm_inject_page_fault(vcpu, &e);
8499 return 1; 8500 return 1;
8500 } 8501 }
@@ -10370,11 +10371,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
10370 goto free_vmcs; 10371 goto free_vmcs;
10371 } 10372 }
10372 10373
10373 if (nested) { 10374 if (nested)
10374 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, 10375 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
10375 kvm_vcpu_apicv_active(&vmx->vcpu)); 10376 kvm_vcpu_apicv_active(&vmx->vcpu));
10376 vmx->nested.vpid02 = allocate_vpid();
10377 }
10378 10377
10379 vmx->nested.posted_intr_nv = -1; 10378 vmx->nested.posted_intr_nv = -1;
10380 vmx->nested.current_vmptr = -1ull; 10379 vmx->nested.current_vmptr = -1ull;
@@ -10391,7 +10390,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
10391 return &vmx->vcpu; 10390 return &vmx->vcpu;
10392 10391
10393free_vmcs: 10392free_vmcs:
10394 free_vpid(vmx->nested.vpid02);
10395 free_loaded_vmcs(vmx->loaded_vmcs); 10393 free_loaded_vmcs(vmx->loaded_vmcs);
10396free_msrs: 10394free_msrs:
10397 kfree(vmx->guest_msrs); 10395 kfree(vmx->guest_msrs);
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 55799873ebe5..8f6cc71e0848 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth)
1441 1441
1442 /* sub esp,STACK_SIZE */ 1442 /* sub esp,STACK_SIZE */
1443 EMIT2_off32(0x81, 0xEC, STACK_SIZE); 1443 EMIT2_off32(0x81, 0xEC, STACK_SIZE);
1444 /* sub ebp,SCRATCH_SIZE+4+12*/ 1444 /* sub ebp,SCRATCH_SIZE+12*/
1445 EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16); 1445 EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
1446 /* xor ebx,ebx */ 1446 /* xor ebx,ebx */
1447 EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX)); 1447 EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
1448 1448
@@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
1475 /* mov edx,dword ptr [ebp+off]*/ 1475 /* mov edx,dword ptr [ebp+off]*/
1476 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1])); 1476 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
1477 1477
1478 /* add ebp,SCRATCH_SIZE+4+12*/ 1478 /* add ebp,SCRATCH_SIZE+12*/
1479 EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16); 1479 EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
1480 1480
1481 /* mov ebx,dword ptr [ebp-12]*/ 1481 /* mov ebx,dword ptr [ebp-12]*/
1482 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12); 1482 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 77873ce700ae..ee5d08f25ce4 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
417 if (!(md->attribute & EFI_MEMORY_WB)) 417 if (!(md->attribute & EFI_MEMORY_WB))
418 flags |= _PAGE_PCD; 418 flags |= _PAGE_PCD;
419 419
420 if (sev_active()) 420 if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
421 flags |= _PAGE_ENC; 421 flags |= _PAGE_ENC;
422 422
423 pfn = md->phys_addr >> PAGE_SHIFT; 423 pfn = md->phys_addr >> PAGE_SHIFT;
@@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm)
636#ifdef CONFIG_EFI_MIXED 636#ifdef CONFIG_EFI_MIXED
637extern efi_status_t efi64_thunk(u32, ...); 637extern efi_status_t efi64_thunk(u32, ...);
638 638
639static DEFINE_SPINLOCK(efi_runtime_lock);
640
639#define runtime_service32(func) \ 641#define runtime_service32(func) \
640({ \ 642({ \
641 u32 table = (u32)(unsigned long)efi.systab; \ 643 u32 table = (u32)(unsigned long)efi.systab; \
@@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...);
657#define efi_thunk(f, ...) \ 659#define efi_thunk(f, ...) \
658({ \ 660({ \
659 efi_status_t __s; \ 661 efi_status_t __s; \
660 unsigned long __flags; \
661 u32 __func; \ 662 u32 __func; \
662 \ 663 \
663 local_irq_save(__flags); \
664 arch_efi_call_virt_setup(); \ 664 arch_efi_call_virt_setup(); \
665 \ 665 \
666 __func = runtime_service32(f); \ 666 __func = runtime_service32(f); \
667 __s = efi64_thunk(__func, __VA_ARGS__); \ 667 __s = efi64_thunk(__func, __VA_ARGS__); \
668 \ 668 \
669 arch_efi_call_virt_teardown(); \ 669 arch_efi_call_virt_teardown(); \
670 local_irq_restore(__flags); \
671 \ 670 \
672 __s; \ 671 __s; \
673}) 672})
@@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
702{ 701{
703 efi_status_t status; 702 efi_status_t status;
704 u32 phys_tm, phys_tc; 703 u32 phys_tm, phys_tc;
704 unsigned long flags;
705 705
706 spin_lock(&rtc_lock); 706 spin_lock(&rtc_lock);
707 spin_lock_irqsave(&efi_runtime_lock, flags);
707 708
708 phys_tm = virt_to_phys_or_null(tm); 709 phys_tm = virt_to_phys_or_null(tm);
709 phys_tc = virt_to_phys_or_null(tc); 710 phys_tc = virt_to_phys_or_null(tc);
710 711
711 status = efi_thunk(get_time, phys_tm, phys_tc); 712 status = efi_thunk(get_time, phys_tm, phys_tc);
712 713
714 spin_unlock_irqrestore(&efi_runtime_lock, flags);
713 spin_unlock(&rtc_lock); 715 spin_unlock(&rtc_lock);
714 716
715 return status; 717 return status;
@@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
719{ 721{
720 efi_status_t status; 722 efi_status_t status;
721 u32 phys_tm; 723 u32 phys_tm;
724 unsigned long flags;
722 725
723 spin_lock(&rtc_lock); 726 spin_lock(&rtc_lock);
727 spin_lock_irqsave(&efi_runtime_lock, flags);
724 728
725 phys_tm = virt_to_phys_or_null(tm); 729 phys_tm = virt_to_phys_or_null(tm);
726 730
727 status = efi_thunk(set_time, phys_tm); 731 status = efi_thunk(set_time, phys_tm);
728 732
733 spin_unlock_irqrestore(&efi_runtime_lock, flags);
729 spin_unlock(&rtc_lock); 734 spin_unlock(&rtc_lock);
730 735
731 return status; 736 return status;
@@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
737{ 742{
738 efi_status_t status; 743 efi_status_t status;
739 u32 phys_enabled, phys_pending, phys_tm; 744 u32 phys_enabled, phys_pending, phys_tm;
745 unsigned long flags;
740 746
741 spin_lock(&rtc_lock); 747 spin_lock(&rtc_lock);
748 spin_lock_irqsave(&efi_runtime_lock, flags);
742 749
743 phys_enabled = virt_to_phys_or_null(enabled); 750 phys_enabled = virt_to_phys_or_null(enabled);
744 phys_pending = virt_to_phys_or_null(pending); 751 phys_pending = virt_to_phys_or_null(pending);
@@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
747 status = efi_thunk(get_wakeup_time, phys_enabled, 754 status = efi_thunk(get_wakeup_time, phys_enabled,
748 phys_pending, phys_tm); 755 phys_pending, phys_tm);
749 756
757 spin_unlock_irqrestore(&efi_runtime_lock, flags);
750 spin_unlock(&rtc_lock); 758 spin_unlock(&rtc_lock);
751 759
752 return status; 760 return status;
@@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
757{ 765{
758 efi_status_t status; 766 efi_status_t status;
759 u32 phys_tm; 767 u32 phys_tm;
768 unsigned long flags;
760 769
761 spin_lock(&rtc_lock); 770 spin_lock(&rtc_lock);
771 spin_lock_irqsave(&efi_runtime_lock, flags);
762 772
763 phys_tm = virt_to_phys_or_null(tm); 773 phys_tm = virt_to_phys_or_null(tm);
764 774
765 status = efi_thunk(set_wakeup_time, enabled, phys_tm); 775 status = efi_thunk(set_wakeup_time, enabled, phys_tm);
766 776
777 spin_unlock_irqrestore(&efi_runtime_lock, flags);
767 spin_unlock(&rtc_lock); 778 spin_unlock(&rtc_lock);
768 779
769 return status; 780 return status;
@@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
781 efi_status_t status; 792 efi_status_t status;
782 u32 phys_name, phys_vendor, phys_attr; 793 u32 phys_name, phys_vendor, phys_attr;
783 u32 phys_data_size, phys_data; 794 u32 phys_data_size, phys_data;
795 unsigned long flags;
796
797 spin_lock_irqsave(&efi_runtime_lock, flags);
784 798
785 phys_data_size = virt_to_phys_or_null(data_size); 799 phys_data_size = virt_to_phys_or_null(data_size);
786 phys_vendor = virt_to_phys_or_null(vendor); 800 phys_vendor = virt_to_phys_or_null(vendor);
@@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
791 status = efi_thunk(get_variable, phys_name, phys_vendor, 805 status = efi_thunk(get_variable, phys_name, phys_vendor,
792 phys_attr, phys_data_size, phys_data); 806 phys_attr, phys_data_size, phys_data);
793 807
808 spin_unlock_irqrestore(&efi_runtime_lock, flags);
809
794 return status; 810 return status;
795} 811}
796 812
@@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
800{ 816{
801 u32 phys_name, phys_vendor, phys_data; 817 u32 phys_name, phys_vendor, phys_data;
802 efi_status_t status; 818 efi_status_t status;
819 unsigned long flags;
820
821 spin_lock_irqsave(&efi_runtime_lock, flags);
822
823 phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
824 phys_vendor = virt_to_phys_or_null(vendor);
825 phys_data = virt_to_phys_or_null_size(data, data_size);
826
827 /* If data_size is > sizeof(u32) we've got problems */
828 status = efi_thunk(set_variable, phys_name, phys_vendor,
829 attr, data_size, phys_data);
830
831 spin_unlock_irqrestore(&efi_runtime_lock, flags);
832
833 return status;
834}
835
836static efi_status_t
837efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
838 u32 attr, unsigned long data_size,
839 void *data)
840{
841 u32 phys_name, phys_vendor, phys_data;
842 efi_status_t status;
843 unsigned long flags;
844
845 if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
846 return EFI_NOT_READY;
803 847
804 phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); 848 phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
805 phys_vendor = virt_to_phys_or_null(vendor); 849 phys_vendor = virt_to_phys_or_null(vendor);
@@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
809 status = efi_thunk(set_variable, phys_name, phys_vendor, 853 status = efi_thunk(set_variable, phys_name, phys_vendor,
810 attr, data_size, phys_data); 854 attr, data_size, phys_data);
811 855
856 spin_unlock_irqrestore(&efi_runtime_lock, flags);
857
812 return status; 858 return status;
813} 859}
814 860
@@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
819{ 865{
820 efi_status_t status; 866 efi_status_t status;
821 u32 phys_name_size, phys_name, phys_vendor; 867 u32 phys_name_size, phys_name, phys_vendor;
868 unsigned long flags;
869
870 spin_lock_irqsave(&efi_runtime_lock, flags);
822 871
823 phys_name_size = virt_to_phys_or_null(name_size); 872 phys_name_size = virt_to_phys_or_null(name_size);
824 phys_vendor = virt_to_phys_or_null(vendor); 873 phys_vendor = virt_to_phys_or_null(vendor);
@@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size,
827 status = efi_thunk(get_next_variable, phys_name_size, 876 status = efi_thunk(get_next_variable, phys_name_size,
828 phys_name, phys_vendor); 877 phys_name, phys_vendor);
829 878
879 spin_unlock_irqrestore(&efi_runtime_lock, flags);
880
830 return status; 881 return status;
831} 882}
832 883
@@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count)
835{ 886{
836 efi_status_t status; 887 efi_status_t status;
837 u32 phys_count; 888 u32 phys_count;
889 unsigned long flags;
890
891 spin_lock_irqsave(&efi_runtime_lock, flags);
838 892
839 phys_count = virt_to_phys_or_null(count); 893 phys_count = virt_to_phys_or_null(count);
840 status = efi_thunk(get_next_high_mono_count, phys_count); 894 status = efi_thunk(get_next_high_mono_count, phys_count);
841 895
896 spin_unlock_irqrestore(&efi_runtime_lock, flags);
897
842 return status; 898 return status;
843} 899}
844 900
@@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
847 unsigned long data_size, efi_char16_t *data) 903 unsigned long data_size, efi_char16_t *data)
848{ 904{
849 u32 phys_data; 905 u32 phys_data;
906 unsigned long flags;
907
908 spin_lock_irqsave(&efi_runtime_lock, flags);
850 909
851 phys_data = virt_to_phys_or_null_size(data, data_size); 910 phys_data = virt_to_phys_or_null_size(data, data_size);
852 911
853 efi_thunk(reset_system, reset_type, status, data_size, phys_data); 912 efi_thunk(reset_system, reset_type, status, data_size, phys_data);
913
914 spin_unlock_irqrestore(&efi_runtime_lock, flags);
854} 915}
855 916
856static efi_status_t 917static efi_status_t
@@ -872,10 +933,40 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
872{ 933{
873 efi_status_t status; 934 efi_status_t status;
874 u32 phys_storage, phys_remaining, phys_max; 935 u32 phys_storage, phys_remaining, phys_max;
936 unsigned long flags;
937
938 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
939 return EFI_UNSUPPORTED;
940
941 spin_lock_irqsave(&efi_runtime_lock, flags);
942
943 phys_storage = virt_to_phys_or_null(storage_space);
944 phys_remaining = virt_to_phys_or_null(remaining_space);
945 phys_max = virt_to_phys_or_null(max_variable_size);
946
947 status = efi_thunk(query_variable_info, attr, phys_storage,
948 phys_remaining, phys_max);
949
950 spin_unlock_irqrestore(&efi_runtime_lock, flags);
951
952 return status;
953}
954
955static efi_status_t
956efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
957 u64 *remaining_space,
958 u64 *max_variable_size)
959{
960 efi_status_t status;
961 u32 phys_storage, phys_remaining, phys_max;
962 unsigned long flags;
875 963
876 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 964 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
877 return EFI_UNSUPPORTED; 965 return EFI_UNSUPPORTED;
878 966
967 if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
968 return EFI_NOT_READY;
969
879 phys_storage = virt_to_phys_or_null(storage_space); 970 phys_storage = virt_to_phys_or_null(storage_space);
880 phys_remaining = virt_to_phys_or_null(remaining_space); 971 phys_remaining = virt_to_phys_or_null(remaining_space);
881 phys_max = virt_to_phys_or_null(max_variable_size); 972 phys_max = virt_to_phys_or_null(max_variable_size);
@@ -883,6 +974,8 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
883 status = efi_thunk(query_variable_info, attr, phys_storage, 974 status = efi_thunk(query_variable_info, attr, phys_storage,
884 phys_remaining, phys_max); 975 phys_remaining, phys_max);
885 976
977 spin_unlock_irqrestore(&efi_runtime_lock, flags);
978
886 return status; 979 return status;
887} 980}
888 981
@@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void)
908 efi.get_variable = efi_thunk_get_variable; 1001 efi.get_variable = efi_thunk_get_variable;
909 efi.get_next_variable = efi_thunk_get_next_variable; 1002 efi.get_next_variable = efi_thunk_get_next_variable;
910 efi.set_variable = efi_thunk_set_variable; 1003 efi.set_variable = efi_thunk_set_variable;
1004 efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
911 efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count; 1005 efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
912 efi.reset_system = efi_thunk_reset_system; 1006 efi.reset_system = efi_thunk_reset_system;
913 efi.query_variable_info = efi_thunk_query_variable_info; 1007 efi.query_variable_info = efi_thunk_query_variable_info;
1008 efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
914 efi.update_capsule = efi_thunk_update_capsule; 1009 efi.update_capsule = efi_thunk_update_capsule;
915 efi.query_capsule_caps = efi_thunk_query_capsule_caps; 1010 efi.query_capsule_caps = efi_thunk_query_capsule_caps;
916} 1011}
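The new *_nonblocking thunks above share one pattern worth spelling out: where the blocking variants take efi_runtime_lock unconditionally, the nonblocking ones attempt the lock exactly once and hand EFI_NOT_READY back to the caller instead of spinning. A stripped-down sketch (the actual efi_thunk() call is elided and stubbed with EFI_SUCCESS):

static efi_status_t example_thunk_nonblocking(void)
{
	unsigned long flags;
	efi_status_t status;

	if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
		return EFI_NOT_READY;	/* caller may retry or fall back */

	status = EFI_SUCCESS;		/* placeholder for the efi_thunk() call */

	spin_unlock_irqrestore(&efi_runtime_lock, flags);
	return status;
}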
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 36c1f8b9f7e0..844d31cb8a0c 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia);
105*/ 105*/
106void efi_delete_dummy_variable(void) 106void efi_delete_dummy_variable(void)
107{ 107{
108 efi.set_variable((efi_char16_t *)efi_dummy_name, 108 efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
109 &EFI_DUMMY_GUID, 109 &EFI_DUMMY_GUID,
110 EFI_VARIABLE_NON_VOLATILE | 110 EFI_VARIABLE_NON_VOLATILE |
111 EFI_VARIABLE_BOOTSERVICE_ACCESS | 111 EFI_VARIABLE_BOOTSERVICE_ACCESS |
112 EFI_VARIABLE_RUNTIME_ACCESS, 112 EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
113 0, NULL);
114} 113}
115 114
116/* 115/*
@@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
249 int num_entries; 248 int num_entries;
250 void *new; 249 void *new;
251 250
252 if (efi_mem_desc_lookup(addr, &md)) { 251 if (efi_mem_desc_lookup(addr, &md) ||
252 md.type != EFI_BOOT_SERVICES_DATA) {
253 pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr); 253 pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
254 return; 254 return;
255 } 255 }
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 744afdc18cf3..56c44d865f7b 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
16 if (!FIXADDR_USER_START) 16 if (!FIXADDR_USER_START)
17 return 0; 17 return 0;
18 18
19 gate_vma.vm_mm = NULL; 19 vma_init(&gate_vma, NULL);
20 gate_vma.vm_start = FIXADDR_USER_START; 20 gate_vma.vm_start = FIXADDR_USER_START;
21 gate_vma.vm_end = FIXADDR_USER_END; 21 gate_vma.vm_end = FIXADDR_USER_END;
22 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 22 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/block/bio.c b/block/bio.c
index 67eff5eddc49..047c5dca6d90 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
903EXPORT_SYMBOL(bio_add_page); 903EXPORT_SYMBOL(bio_add_page);
904 904
905/** 905/**
906 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio 906 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
907 * @bio: bio to add pages to 907 * @bio: bio to add pages to
908 * @iter: iov iterator describing the region to be mapped 908 * @iter: iov iterator describing the region to be mapped
909 * 909 *
910 * Pins as many pages from *iter and appends them to @bio's bvec array. The 910 * Pins pages from *iter and appends them to @bio's bvec array. The
911 * pages will have to be released using put_page() when done. 911 * pages will have to be released using put_page() when done.
912 * For multi-segment *iter, this function only adds pages from
913 * the next non-empty segment of the iov iterator.
912 */ 914 */
913int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) 915static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
914{ 916{
915 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; 917 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
916 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; 918 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
917 struct page **pages = (struct page **)bv; 919 struct page **pages = (struct page **)bv;
918 size_t offset, diff; 920 size_t offset;
919 ssize_t size; 921 ssize_t size;
920 922
921 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); 923 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
922 if (unlikely(size <= 0)) 924 if (unlikely(size <= 0))
923 return size ? size : -EFAULT; 925 return size ? size : -EFAULT;
924 nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; 926 idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
925 927
926 /* 928 /*
927 * Deep magic below: We need to walk the pinned pages backwards 929 * Deep magic below: We need to walk the pinned pages backwards
@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
934 bio->bi_iter.bi_size += size; 936 bio->bi_iter.bi_size += size;
935 bio->bi_vcnt += nr_pages; 937 bio->bi_vcnt += nr_pages;
936 938
937 diff = (nr_pages * PAGE_SIZE - offset) - size; 939 while (idx--) {
938 while (nr_pages--) { 940 bv[idx].bv_page = pages[idx];
939 bv[nr_pages].bv_page = pages[nr_pages]; 941 bv[idx].bv_len = PAGE_SIZE;
940 bv[nr_pages].bv_len = PAGE_SIZE; 942 bv[idx].bv_offset = 0;
941 bv[nr_pages].bv_offset = 0;
942 } 943 }
943 944
944 bv[0].bv_offset += offset; 945 bv[0].bv_offset += offset;
945 bv[0].bv_len -= offset; 946 bv[0].bv_len -= offset;
946 if (diff) 947 bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
947 bv[bio->bi_vcnt - 1].bv_len -= diff;
948 948
949 iov_iter_advance(iter, size); 949 iov_iter_advance(iter, size);
950 return 0; 950 return 0;
951} 951}
952
953/**
954 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
955 * @bio: bio to add pages to
956 * @iter: iov iterator describing the region to be mapped
957 *
958 * Pins pages from *iter and appends them to @bio's bvec array. The
959 * pages will have to be released using put_page() when done.
960 * The function tries, but does not guarantee, to pin as many pages as
961 * fit into the bio, or are requested in *iter, whatever is smaller.
962 * If MM encounters an error pinning the requested pages, it stops.
963 * Error is returned only if 0 pages could be pinned.
964 */
965int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
966{
967 unsigned short orig_vcnt = bio->bi_vcnt;
968
969 do {
970 int ret = __bio_iov_iter_get_pages(bio, iter);
971
972 if (unlikely(ret))
973 return bio->bi_vcnt > orig_vcnt ? 0 : ret;
974
975 } while (iov_iter_count(iter) && !bio_full(bio));
976
977 return 0;
978}
952EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); 979EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
953 980
954static void submit_bio_wait_endio(struct bio *bio) 981static void submit_bio_wait_endio(struct bio *bio)
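
A note on the wrapper's contract: __bio_iov_iter_get_pages() consumes at
most one iovec segment per call, so bio_iov_iter_get_pages() loops until
the bio is full or the iterator is drained, and reports an error only when
no pages at all were added. A stand-alone sketch of that shape, assuming a
hypothetical single-segment step fill_once() (not a kernel API):

    #include <stddef.h>

    struct sink { size_t used, cap; };

    /* Hypothetical single-step filler: adds at most one chunk per call. */
    static int fill_once(struct sink *s, size_t *remaining)
    {
            size_t chunk = *remaining < 4 ? *remaining : 4;

            if (s->cap - s->used < chunk)
                    chunk = s->cap - s->used;
            if (!chunk)
                    return -1;              /* no progress possible */
            s->used += chunk;
            *remaining -= chunk;
            return 0;
    }

    /* Same shape as the wrapper above: retry until full or drained. */
    int fill_all(struct sink *s, size_t *remaining)
    {
            size_t orig_used = s->used;

            do {
                    int ret = fill_once(s, remaining);

                    /* Partial progress suppresses a late error. */
                    if (ret)
                            return s->used > orig_used ? 0 : ret;
            } while (*remaining && s->used < s->cap);

            return 0;
    }
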
@@ -1866,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
1866 bio_integrity_trim(split); 1893 bio_integrity_trim(split);
1867 1894
1868 bio_advance(bio, split->bi_iter.bi_size); 1895 bio_advance(bio, split->bi_iter.bi_size);
1896 bio->bi_iter.bi_done = 0;
1869 1897
1870 if (bio_flagged(bio, BIO_TRACE_COMPLETION)) 1898 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1871 bio_set_flag(split, BIO_TRACE_COMPLETION); 1899 bio_set_flag(split, BIO_TRACE_COMPLETION);
diff --git a/block/blk-core.c b/block/blk-core.c
index f84a9b7b6f5a..ee33590f54eb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2155,11 +2155,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
2155 if (part->policy && op_is_write(bio_op(bio))) { 2155 if (part->policy && op_is_write(bio_op(bio))) {
2156 char b[BDEVNAME_SIZE]; 2156 char b[BDEVNAME_SIZE];
2157 2157
2158 printk(KERN_ERR 2158 WARN_ONCE(1,
2159 "generic_make_request: Trying to write " 2159 "generic_make_request: Trying to write "
2160 "to read-only block-device %s (partno %d)\n", 2160 "to read-only block-device %s (partno %d)\n",
2161 bio_devname(bio, b), part->partno); 2161 bio_devname(bio, b), part->partno);
2162 return true; 2162 /* Older lvm-tools actually trigger this */
2163 return false;
2163 } 2164 }
2164 2165
2165 return false; 2166 return false;
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 09b2ee6694fb..3de0836163c2 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -271,7 +271,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 271 * test and set the bit before assigning ->rqs[]. 271 * test and set the bit before assigning ->rqs[].
272 */ 272 */
273 rq = tags->rqs[bitnr]; 273 rq = tags->rqs[bitnr];
274 if (rq && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) 274 if (rq && blk_mq_request_started(rq))
275 iter_data->fn(rq, iter_data->data, reserved); 275 iter_data->fn(rq, iter_data->data, reserved);
276 276
277 return true; 277 return true;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 95919268564b..654b0dc7e001 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
558 bool shared = false; 558 bool shared = false;
559 int cpu; 559 int cpu;
560 560
561 if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) != 561 if (!blk_mq_mark_complete(rq))
562 MQ_RQ_IN_FLIGHT)
563 return; 562 return;
564
565 if (rq->internal_tag != -1) 563 if (rq->internal_tag != -1)
566 blk_mq_sched_completed_request(rq); 564 blk_mq_sched_completed_request(rq);
567 565
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f8fecfec5df9..9706613eecf9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
879#define LPSS_GPIODEF0_DMA_LLP BIT(13) 879#define LPSS_GPIODEF0_DMA_LLP BIT(13)
880 880
881static DEFINE_MUTEX(lpss_iosf_mutex); 881static DEFINE_MUTEX(lpss_iosf_mutex);
882static bool lpss_iosf_d3_entered;
882 883
883static void lpss_iosf_enter_d3_state(void) 884static void lpss_iosf_enter_d3_state(void)
884{ 885{
@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)
921 922
922 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, 923 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
923 LPSS_IOSF_GPIODEF0, value1, mask1); 924 LPSS_IOSF_GPIODEF0, value1, mask1);
925
926 lpss_iosf_d3_entered = true;
927
924exit: 928exit:
925 mutex_unlock(&lpss_iosf_mutex); 929 mutex_unlock(&lpss_iosf_mutex);
926} 930}
@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)
935 939
936 mutex_lock(&lpss_iosf_mutex); 940 mutex_lock(&lpss_iosf_mutex);
937 941
942 if (!lpss_iosf_d3_entered)
943 goto exit;
944
945 lpss_iosf_d3_entered = false;
946
938 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE, 947 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
939 LPSS_IOSF_GPIODEF0, value1, mask1); 948 LPSS_IOSF_GPIODEF0, value1, mask1);
940 949
@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
944 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE, 953 iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
945 LPSS_IOSF_PMCSR, value2, mask2); 954 LPSS_IOSF_PMCSR, value2, mask2);
946 955
956exit:
947 mutex_unlock(&lpss_iosf_mutex); 957 mutex_unlock(&lpss_iosf_mutex);
948} 958}
949 959
950static int acpi_lpss_suspend(struct device *dev, bool runtime) 960static int acpi_lpss_suspend(struct device *dev, bool wakeup)
951{ 961{
952 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 962 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
953 bool wakeup = runtime || device_may_wakeup(dev);
954 int ret; 963 int ret;
955 964
956 if (pdata->dev_desc->flags & LPSS_SAVE_CTX) 965 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
963 * wrong status for devices being about to be powered off. See 972 * wrong status for devices being about to be powered off. See
964 * lpss_iosf_enter_d3_state() for further information. 973 * lpss_iosf_enter_d3_state() for further information.
965 */ 974 */
966 if ((runtime || !pm_suspend_via_firmware()) && 975 if (acpi_target_system_state() == ACPI_STATE_S0 &&
967 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available()) 976 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
968 lpss_iosf_enter_d3_state(); 977 lpss_iosf_enter_d3_state();
969 978
970 return ret; 979 return ret;
971} 980}
972 981
973static int acpi_lpss_resume(struct device *dev, bool runtime) 982static int acpi_lpss_resume(struct device *dev)
974{ 983{
975 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 984 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
976 int ret; 985 int ret;
@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
979 * This call is kept first to be in symmetry with 988 * This call is kept first to be in symmetry with
980 * acpi_lpss_runtime_suspend() one. 989 * acpi_lpss_runtime_suspend() one.
981 */ 990 */
982 if ((runtime || !pm_resume_via_firmware()) && 991 if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
983 lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
984 lpss_iosf_exit_d3_state(); 992 lpss_iosf_exit_d3_state();
985 993
986 ret = acpi_dev_resume(dev); 994 ret = acpi_dev_resume(dev);
@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
1004 return 0; 1012 return 0;
1005 1013
1006 ret = pm_generic_suspend_late(dev); 1014 ret = pm_generic_suspend_late(dev);
1007 return ret ? ret : acpi_lpss_suspend(dev, false); 1015 return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
1008} 1016}
1009 1017
1010static int acpi_lpss_resume_early(struct device *dev) 1018static int acpi_lpss_resume_early(struct device *dev)
1011{ 1019{
1012 int ret = acpi_lpss_resume(dev, false); 1020 int ret = acpi_lpss_resume(dev);
1013 1021
1014 return ret ? ret : pm_generic_resume_early(dev); 1022 return ret ? ret : pm_generic_resume_early(dev);
1015} 1023}
@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
1024 1032
1025static int acpi_lpss_runtime_resume(struct device *dev) 1033static int acpi_lpss_runtime_resume(struct device *dev)
1026{ 1034{
1027 int ret = acpi_lpss_resume(dev, true); 1035 int ret = acpi_lpss_resume(dev);
1028 1036
1029 return ret ? ret : pm_generic_runtime_resume(dev); 1037 return ret ? ret : pm_generic_runtime_resume(dev);
1030} 1038}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index bc5f05906bd1..44f35ab3347d 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
497 status = 497 status =
498 acpi_ps_create_op(walk_state, aml_op_start, &op); 498 acpi_ps_create_op(walk_state, aml_op_start, &op);
499 if (ACPI_FAILURE(status)) { 499 if (ACPI_FAILURE(status)) {
500 /*
501 * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
502 * executing it as a control method. However, if we encounter
503 * an error while loading the table, we need to keep trying to
504 * load the table rather than aborting the table load. Set the
505 * status to AE_OK to proceed with the table load.
506 */
507 if ((walk_state->
508 parse_flags & ACPI_PARSE_MODULE_LEVEL)
509 && status == AE_ALREADY_EXISTS) {
510 status = AE_OK;
511 }
500 if (status == AE_CTRL_PARSE_CONTINUE) { 512 if (status == AE_CTRL_PARSE_CONTINUE) {
501 continue; 513 continue;
502 } 514 }
@@ -694,6 +706,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
694 acpi_ps_next_parse_state(walk_state, op, status); 706 acpi_ps_next_parse_state(walk_state, op, status);
695 if (status == AE_CTRL_PENDING) { 707 if (status == AE_CTRL_PENDING) {
696 status = AE_OK; 708 status = AE_OK;
709 } else
710 if ((walk_state->
711 parse_flags & ACPI_PARSE_MODULE_LEVEL)
712 && status != AE_CTRL_TRANSFER
713 && ACPI_FAILURE(status)) {
714 /*
715 * ACPI_PARSE_MODULE_LEVEL flag means that we are currently
716 * loading a table by executing it as a control method.
717 * However, if we encounter an error while loading the table,
718 * we need to keep trying to load the table rather than
719 * aborting the table load (setting the status to AE_OK
720 * continues the table load). If we get a failure at this
721 * point, it means that the dispatcher got an error while
722 * processing Op (most likely an AML operand error) or a
723 * control method was called from module level and the
724 * dispatcher returned AE_CTRL_TRANSFER. In the latter case,
 725 * leave the status alone; there's nothing wrong with it.
726 */
727 status = AE_OK;
697 } 728 }
698 } 729 }
699 730
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1435d7281c66..6ebcd65d64b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -434,14 +434,6 @@ re_probe:
434 goto probe_failed; 434 goto probe_failed;
435 } 435 }
436 436
437 /*
438 * Ensure devices are listed in devices_kset in correct order
439 * It's important to move Dev to the end of devices_kset before
440 * calling .probe, because it could be recursive and parent Dev
441 * should always go first
442 */
443 devices_kset_move_last(dev);
444
445 if (dev->bus->probe) { 437 if (dev->bus->probe) {
446 ret = dev->bus->probe(dev); 438 ret = dev->bus->probe(dev);
447 if (ret) 439 if (ret)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 74a05561b620..3fb95c8d9fd8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -112,12 +112,16 @@ struct nbd_device {
112 struct task_struct *task_setup; 112 struct task_struct *task_setup;
113}; 113};
114 114
115#define NBD_CMD_REQUEUED 1
116
115struct nbd_cmd { 117struct nbd_cmd {
116 struct nbd_device *nbd; 118 struct nbd_device *nbd;
119 struct mutex lock;
117 int index; 120 int index;
118 int cookie; 121 int cookie;
119 struct completion send_complete;
120 blk_status_t status; 122 blk_status_t status;
123 unsigned long flags;
124 u32 cmd_cookie;
121}; 125};
122 126
123#if IS_ENABLED(CONFIG_DEBUG_FS) 127#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
146 return disk_to_dev(nbd->disk); 150 return disk_to_dev(nbd->disk);
147} 151}
148 152
153static void nbd_requeue_cmd(struct nbd_cmd *cmd)
154{
155 struct request *req = blk_mq_rq_from_pdu(cmd);
156
157 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
158 blk_mq_requeue_request(req, true);
159}
160
161#define NBD_COOKIE_BITS 32
162
163static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
164{
165 struct request *req = blk_mq_rq_from_pdu(cmd);
166 u32 tag = blk_mq_unique_tag(req);
167 u64 cookie = cmd->cmd_cookie;
168
169 return (cookie << NBD_COOKIE_BITS) | tag;
170}
171
172static u32 nbd_handle_to_tag(u64 handle)
173{
174 return (u32)handle;
175}
176
177static u32 nbd_handle_to_cookie(u64 handle)
178{
179 return (u32)(handle >> NBD_COOKIE_BITS);
180}
181
149static const char *nbdcmd_to_ascii(int cmd) 182static const char *nbdcmd_to_ascii(int cmd)
150{ 183{
151 switch (cmd) { 184 switch (cmd) {
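
The handle layout introduced here is simple: the upper 32 bits carry
cmd_cookie (bumped each time the request slot is reused) and the lower 32
bits carry the blk-mq unique tag, which is how nbd_read_stat() can reject
a reply that belongs to an earlier use of the same slot. A stand-alone
round-trip with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    #define NBD_COOKIE_BITS 32

    int main(void)
    {
            uint32_t cookie = 7, tag = 0x00010003;  /* illustrative values */
            uint64_t handle = ((uint64_t)cookie << NBD_COOKIE_BITS) | tag;

            /* Unpacking recovers both halves independently. */
            printf("tag=0x%08x cookie=%u\n",
                   (uint32_t)handle, (uint32_t)(handle >> NBD_COOKIE_BITS));
            return 0;       /* prints tag=0x00010003 cookie=7 */
    }
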
@@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
319 } 352 }
320 config = nbd->config; 353 config = nbd->config;
321 354
355 if (!mutex_trylock(&cmd->lock))
356 return BLK_EH_RESET_TIMER;
357
322 if (config->num_connections > 1) { 358 if (config->num_connections > 1) {
323 dev_err_ratelimited(nbd_to_dev(nbd), 359 dev_err_ratelimited(nbd_to_dev(nbd),
324 "Connection timed out, retrying (%d/%d alive)\n", 360 "Connection timed out, retrying (%d/%d alive)\n",
@@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
343 nbd_mark_nsock_dead(nbd, nsock, 1); 379 nbd_mark_nsock_dead(nbd, nsock, 1);
344 mutex_unlock(&nsock->tx_lock); 380 mutex_unlock(&nsock->tx_lock);
345 } 381 }
346 blk_mq_requeue_request(req, true); 382 mutex_unlock(&cmd->lock);
383 nbd_requeue_cmd(cmd);
347 nbd_config_put(nbd); 384 nbd_config_put(nbd);
348 return BLK_EH_DONE; 385 return BLK_EH_DONE;
349 } 386 }
@@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
353 } 390 }
354 set_bit(NBD_TIMEDOUT, &config->runtime_flags); 391 set_bit(NBD_TIMEDOUT, &config->runtime_flags);
355 cmd->status = BLK_STS_IOERR; 392 cmd->status = BLK_STS_IOERR;
393 mutex_unlock(&cmd->lock);
356 sock_shutdown(nbd); 394 sock_shutdown(nbd);
357 nbd_config_put(nbd); 395 nbd_config_put(nbd);
358done: 396done:
@@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
430 struct iov_iter from; 468 struct iov_iter from;
431 unsigned long size = blk_rq_bytes(req); 469 unsigned long size = blk_rq_bytes(req);
432 struct bio *bio; 470 struct bio *bio;
471 u64 handle;
433 u32 type; 472 u32 type;
434 u32 nbd_cmd_flags = 0; 473 u32 nbd_cmd_flags = 0;
435 u32 tag = blk_mq_unique_tag(req);
436 int sent = nsock->sent, skip = 0; 474 int sent = nsock->sent, skip = 0;
437 475
438 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 476 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
474 goto send_pages; 512 goto send_pages;
475 } 513 }
476 iov_iter_advance(&from, sent); 514 iov_iter_advance(&from, sent);
515 } else {
516 cmd->cmd_cookie++;
477 } 517 }
478 cmd->index = index; 518 cmd->index = index;
479 cmd->cookie = nsock->cookie; 519 cmd->cookie = nsock->cookie;
@@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
482 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 522 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
483 request.len = htonl(size); 523 request.len = htonl(size);
484 } 524 }
485 memcpy(request.handle, &tag, sizeof(tag)); 525 handle = nbd_cmd_handle(cmd);
526 memcpy(request.handle, &handle, sizeof(handle));
486 527
487 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", 528 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
488 req, nbdcmd_to_ascii(type), 529 req, nbdcmd_to_ascii(type),
@@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
500 nsock->pending = req; 541 nsock->pending = req;
501 nsock->sent = sent; 542 nsock->sent = sent;
502 } 543 }
544 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
503 return BLK_STS_RESOURCE; 545 return BLK_STS_RESOURCE;
504 } 546 }
505 dev_err_ratelimited(disk_to_dev(nbd->disk), 547 dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -541,6 +583,7 @@ send_pages:
541 */ 583 */
542 nsock->pending = req; 584 nsock->pending = req;
543 nsock->sent = sent; 585 nsock->sent = sent;
586 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
544 return BLK_STS_RESOURCE; 587 return BLK_STS_RESOURCE;
545 } 588 }
546 dev_err(disk_to_dev(nbd->disk), 589 dev_err(disk_to_dev(nbd->disk),
@@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
573 struct nbd_reply reply; 616 struct nbd_reply reply;
574 struct nbd_cmd *cmd; 617 struct nbd_cmd *cmd;
575 struct request *req = NULL; 618 struct request *req = NULL;
619 u64 handle;
576 u16 hwq; 620 u16 hwq;
577 u32 tag; 621 u32 tag;
578 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; 622 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
579 struct iov_iter to; 623 struct iov_iter to;
624 int ret = 0;
580 625
581 reply.magic = 0; 626 reply.magic = 0;
582 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 627 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
594 return ERR_PTR(-EPROTO); 639 return ERR_PTR(-EPROTO);
595 } 640 }
596 641
597 memcpy(&tag, reply.handle, sizeof(u32)); 642 memcpy(&handle, reply.handle, sizeof(handle));
598 643 tag = nbd_handle_to_tag(handle);
599 hwq = blk_mq_unique_tag_to_hwq(tag); 644 hwq = blk_mq_unique_tag_to_hwq(tag);
600 if (hwq < nbd->tag_set.nr_hw_queues) 645 if (hwq < nbd->tag_set.nr_hw_queues)
601 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], 646 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
606 return ERR_PTR(-ENOENT); 651 return ERR_PTR(-ENOENT);
607 } 652 }
608 cmd = blk_mq_rq_to_pdu(req); 653 cmd = blk_mq_rq_to_pdu(req);
654
655 mutex_lock(&cmd->lock);
656 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
657 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
658 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
659 ret = -ENOENT;
660 goto out;
661 }
662 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
663 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
664 req);
665 ret = -ENOENT;
666 goto out;
667 }
609 if (ntohl(reply.error)) { 668 if (ntohl(reply.error)) {
610 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 669 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
611 ntohl(reply.error)); 670 ntohl(reply.error));
612 cmd->status = BLK_STS_IOERR; 671 cmd->status = BLK_STS_IOERR;
613 return cmd; 672 goto out;
614 } 673 }
615 674
616 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); 675 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
635 if (nbd_disconnected(config) || 694 if (nbd_disconnected(config) ||
636 config->num_connections <= 1) { 695 config->num_connections <= 1) {
637 cmd->status = BLK_STS_IOERR; 696 cmd->status = BLK_STS_IOERR;
638 return cmd; 697 goto out;
639 } 698 }
640 return ERR_PTR(-EIO); 699 ret = -EIO;
700 goto out;
641 } 701 }
642 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 702 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
643 req, bvec.bv_len); 703 req, bvec.bv_len);
644 } 704 }
645 } else {
646 /* See the comment in nbd_queue_rq. */
647 wait_for_completion(&cmd->send_complete);
648 } 705 }
649 return cmd; 706out:
707 mutex_unlock(&cmd->lock);
708 return ret ? ERR_PTR(ret) : cmd;
650} 709}
651 710
652static void recv_work(struct work_struct *work) 711static void recv_work(struct work_struct *work)
@@ -805,7 +864,7 @@ again:
805 */ 864 */
806 blk_mq_start_request(req); 865 blk_mq_start_request(req);
807 if (unlikely(nsock->pending && nsock->pending != req)) { 866 if (unlikely(nsock->pending && nsock->pending != req)) {
808 blk_mq_requeue_request(req, true); 867 nbd_requeue_cmd(cmd);
809 ret = 0; 868 ret = 0;
810 goto out; 869 goto out;
811 } 870 }
@@ -818,7 +877,7 @@ again:
818 dev_err_ratelimited(disk_to_dev(nbd->disk), 877 dev_err_ratelimited(disk_to_dev(nbd->disk),
819 "Request send failed, requeueing\n"); 878 "Request send failed, requeueing\n");
820 nbd_mark_nsock_dead(nbd, nsock, 1); 879 nbd_mark_nsock_dead(nbd, nsock, 1);
821 blk_mq_requeue_request(req, true); 880 nbd_requeue_cmd(cmd);
822 ret = 0; 881 ret = 0;
823 } 882 }
824out: 883out:
@@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
842 * that the server is misbehaving (or there was an error) before we're 901 * that the server is misbehaving (or there was an error) before we're
843 * done sending everything over the wire. 902 * done sending everything over the wire.
844 */ 903 */
845 init_completion(&cmd->send_complete); 904 mutex_lock(&cmd->lock);
905 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
846 906
847 /* We can be called directly from the user space process, which means we 907 /* We can be called directly from the user space process, which means we
848 * could possibly have signals pending so our sendmsg will fail. In 908 * could possibly have signals pending so our sendmsg will fail. In
@@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
854 ret = BLK_STS_IOERR; 914 ret = BLK_STS_IOERR;
855 else if (!ret) 915 else if (!ret)
856 ret = BLK_STS_OK; 916 ret = BLK_STS_OK;
857 complete(&cmd->send_complete); 917 mutex_unlock(&cmd->lock);
858 918
859 return ret; 919 return ret;
860} 920}
@@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1460{ 1520{
1461 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1521 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1462 cmd->nbd = set->driver_data; 1522 cmd->nbd = set->driver_data;
1523 cmd->flags = 0;
1524 mutex_init(&cmd->lock);
1463 return 0; 1525 return 0;
1464} 1526}
1465 1527
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7436b2d27fa3..a390c6d4f72d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
298 zram->backing_dev = NULL; 298 zram->backing_dev = NULL;
299 zram->old_block_size = 0; 299 zram->old_block_size = 0;
300 zram->bdev = NULL; 300 zram->bdev = NULL;
301 301 zram->disk->queue->backing_dev_info->capabilities |=
302 BDI_CAP_SYNCHRONOUS_IO;
302 kvfree(zram->bitmap); 303 kvfree(zram->bitmap);
303 zram->bitmap = NULL; 304 zram->bitmap = NULL;
304} 305}
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
400 zram->backing_dev = backing_dev; 401 zram->backing_dev = backing_dev;
401 zram->bitmap = bitmap; 402 zram->bitmap = bitmap;
402 zram->nr_pages = nr_pages; 403 zram->nr_pages = nr_pages;
404 /*
 405 * With the writeback feature, zram does asynchronous IO, so it is no
 406 * longer a synchronous device; remove the synchronous io flag. Otherwise,
 407 * the upper layer (e.g., swap) could wait for IO completion rather than
 408 * submitting and returning, which would make the system sluggish.
 409 * Furthermore, when the IO function returns (e.g., swap_readpage), the
 410 * upper layer expects the IO to be done and may free the page, while
 411 * the IO is in fact still in flight, eventually causing a
 412 * use-after-free when the IO really completes.
413 */
414 zram->disk->queue->backing_dev_info->capabilities &=
415 ~BDI_CAP_SYNCHRONOUS_IO;
403 up_write(&zram->init_lock); 416 up_write(&zram->init_lock);
404 417
405 pr_info("setup backing device %s\n", file_name); 418 pr_info("setup backing device %s\n", file_name);
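
The two zram hunks form a matched pair: attaching a backing device clears
BDI_CAP_SYNCHRONOUS_IO so the upper layer stops assuming completion on
return, and reset_bdev() restores the flag. A minimal sketch of that
toggle, assuming an illustrative bit value rather than the real kernel
constant:

    #include <stdio.h>

    #define BDI_CAP_SYNCHRONOUS_IO (1u << 13)       /* illustrative bit */

    int main(void)
    {
            unsigned int caps = BDI_CAP_SYNCHRONOUS_IO;     /* plain zram */

            caps &= ~BDI_CAP_SYNCHRONOUS_IO;        /* backing_dev_store() */
            printf("writeback: sync=%d\n", !!(caps & BDI_CAP_SYNCHRONOUS_IO));

            caps |= BDI_CAP_SYNCHRONOUS_IO;         /* reset_bdev() */
            printf("reset: sync=%d\n", !!(caps & BDI_CAP_SYNCHRONOUS_IO));
            return 0;
    }
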
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ffeb60d3434c..df66a9dd0aae 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
708#endif 708#endif
709 if (vma->vm_flags & VM_SHARED) 709 if (vma->vm_flags & VM_SHARED)
710 return shmem_zero_setup(vma); 710 return shmem_zero_setup(vma);
711 vma_set_anonymous(vma);
711 return 0; 712 return 0;
712} 713}
713 714
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cd888d4ee605..bd449ad52442 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1895,14 +1895,22 @@ static int
1895write_pool(struct entropy_store *r, const char __user *buffer, size_t count) 1895write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1896{ 1896{
1897 size_t bytes; 1897 size_t bytes;
1898 __u32 buf[16]; 1898 __u32 t, buf[16];
1899 const char __user *p = buffer; 1899 const char __user *p = buffer;
1900 1900
1901 while (count > 0) { 1901 while (count > 0) {
1902 int b, i = 0;
1903
1902 bytes = min(count, sizeof(buf)); 1904 bytes = min(count, sizeof(buf));
1903 if (copy_from_user(&buf, p, bytes)) 1905 if (copy_from_user(&buf, p, bytes))
1904 return -EFAULT; 1906 return -EFAULT;
1905 1907
1908 for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
1909 if (!arch_get_random_int(&t))
1910 break;
1911 buf[i] ^= t;
1912 }
1913
1906 count -= bytes; 1914 count -= bytes;
1907 p += bytes; 1915 p += bytes;
1908 1916
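
The new write_pool() loop XORs one CPU-supplied word (arch_get_random_int(),
e.g. RDRAND where present) over each 32-bit word of the user buffer before
it reaches the pool, and simply stops mixing if the CPU source is
unavailable. A user-space sketch of the same step, with rand_word()
standing in for the arch hook:

    #include <stdint.h>
    #include <stddef.h>

    /* Stand-in for arch_get_random_int(): 1 on success, 0 if unavailable. */
    static int rand_word(uint32_t *out)
    {
            *out = 0xdeadbeef;      /* a real CPU RNG would fill this in */
            return 1;
    }

    void whiten(uint32_t *buf, size_t bytes)
    {
            uint32_t t;
            size_t i = 0;
            long b;

            for (b = (long)bytes; b > 0; b -= sizeof(uint32_t), i++) {
                    if (!rand_word(&t))
                            break;  /* leave the remaining words as-is */
                    buf[i] ^= t;
            }
    }
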
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..7b70a074095d 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
24#define ASPEED_MPLL_PARAM 0x20 24#define ASPEED_MPLL_PARAM 0x20
25#define ASPEED_HPLL_PARAM 0x24 25#define ASPEED_HPLL_PARAM 0x24
26#define AST2500_HPLL_BYPASS_EN BIT(20) 26#define AST2500_HPLL_BYPASS_EN BIT(20)
27#define AST2400_HPLL_STRAPPED BIT(18) 27#define AST2400_HPLL_PROGRAMMED BIT(18)
28#define AST2400_HPLL_BYPASS_EN BIT(17) 28#define AST2400_HPLL_BYPASS_EN BIT(17)
29#define ASPEED_MISC_CTRL 0x2c 29#define ASPEED_MISC_CTRL 0x2c
30#define UART_DIV13_EN BIT(12) 30#define UART_DIV13_EN BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
91 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ 91 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
92 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ 92 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
93 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ 93 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
94 [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ 94 [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
95 [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */ 95 [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
96 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL }, 96 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
97 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */ 97 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
98 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */ 98 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
212{ 212{
213 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); 213 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
214 u32 clk = BIT(gate->clock_idx); 214 u32 clk = BIT(gate->clock_idx);
215 u32 rst = BIT(gate->reset_idx);
215 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; 216 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
216 u32 reg; 217 u32 reg;
217 218
219 /*
220 * If the IP is in reset, treat the clock as not enabled,
221 * this happens with some clocks such as the USB one when
222 * coming from cold reset. Without this, aspeed_clk_enable()
223 * will fail to lift the reset.
224 */
225 if (gate->reset_idx >= 0) {
226 regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
227 if (reg & rst)
228 return 0;
229 }
230
218 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg); 231 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
219 232
220 return ((reg & clk) == enval) ? 1 : 0; 233 return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
565static void __init aspeed_ast2400_cc(struct regmap *map) 578static void __init aspeed_ast2400_cc(struct regmap *map)
566{ 579{
567 struct clk_hw *hw; 580 struct clk_hw *hw;
568 u32 val, freq, div; 581 u32 val, div, clkin, hpll;
582 const u16 hpll_rates[][4] = {
583 {384, 360, 336, 408},
584 {400, 375, 350, 425},
585 };
586 int rate;
569 587
570 /* 588 /*
571 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by 589 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
572 * strapping 590 * strapping
573 */ 591 */
574 regmap_read(map, ASPEED_STRAP, &val); 592 regmap_read(map, ASPEED_STRAP, &val);
575 if (val & CLKIN_25MHZ_EN) 593 rate = (val >> 8) & 3;
576 freq = 25000000; 594 if (val & CLKIN_25MHZ_EN) {
577 else if (val & AST2400_CLK_SOURCE_SEL) 595 clkin = 25000000;
578 freq = 48000000; 596 hpll = hpll_rates[1][rate];
579 else 597 } else if (val & AST2400_CLK_SOURCE_SEL) {
580 freq = 24000000; 598 clkin = 48000000;
581 hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq); 599 hpll = hpll_rates[0][rate];
582 pr_debug("clkin @%u MHz\n", freq / 1000000); 600 } else {
601 clkin = 24000000;
602 hpll = hpll_rates[0][rate];
603 }
604 hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
605 pr_debug("clkin @%u MHz\n", clkin / 1000000);
583 606
584 /* 607 /*
 585 * High-speed PLL clock derived from the crystal. This is the CPU clock, 608 * High-speed PLL clock derived from the crystal. This is the CPU clock,
586 * and we assume that it is enabled 609 * and we assume that it is enabled. It can be configured through the
610 * HPLL_PARAM register, or set to a specified frequency by strapping.
587 */ 611 */
588 regmap_read(map, ASPEED_HPLL_PARAM, &val); 612 regmap_read(map, ASPEED_HPLL_PARAM, &val);
589 WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured"); 613 if (val & AST2400_HPLL_PROGRAMMED)
590 aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val); 614 hw = aspeed_ast2400_calc_pll("hpll", val);
615 else
616 hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
617 hpll * 1000000);
618
619 aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
591 620
592 /* 621 /*
593 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK) 622 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
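
The decoding above combines two strap fields: CLKIN_25MHZ_EN and
AST2400_CLK_SOURCE_SEL pick the row of hpll_rates[] via the CLKIN
frequency, while strap bits 9:8 pick the column. A stand-alone lookup with
an illustrative strap value:

    #include <stdio.h>

    int main(void)
    {
            const unsigned short hpll_rates[][4] = {
                    {384, 360, 336, 408},   /* 24/48 MHz CLKIN row */
                    {400, 375, 350, 425},   /* 25 MHz CLKIN row */
            };
            unsigned int strap = 0x00000100;        /* illustrative value */
            int rate = (strap >> 8) & 3;            /* column index, here 1 */

            printf("hpll = %d MHz\n", hpll_rates[0][rate]);  /* 360 MHz */
            return 0;
    }
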
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..e2ed078abd90 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
24#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/clkdev.h> 26#include <linux/clkdev.h>
27#include <linux/stringify.h>
28 27
29#include "clk.h" 28#include "clk.h"
30 29
@@ -2559,7 +2558,7 @@ static const struct {
2559 unsigned long flag; 2558 unsigned long flag;
2560 const char *name; 2559 const char *name;
2561} clk_flags[] = { 2560} clk_flags[] = {
2562#define ENTRY(f) { f, __stringify(f) } 2561#define ENTRY(f) { f, #f }
2563 ENTRY(CLK_SET_RATE_GATE), 2562 ENTRY(CLK_SET_RATE_GATE),
2564 ENTRY(CLK_SET_PARENT_GATE), 2563 ENTRY(CLK_SET_PARENT_GATE),
2565 ENTRY(CLK_SET_RATE_PARENT), 2564 ENTRY(CLK_SET_RATE_PARENT),
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 58f546e04807..e4cf96ba704e 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
51 struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk); 51 struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
52 unsigned long divider; 52 unsigned long divider;
53 53
54 divider = meson_parm_read(clk->map, &adiv->div); 54 divider = meson_parm_read(clk->map, &adiv->div) + 1;
55 55
56 return DIV_ROUND_UP_ULL((u64)parent_rate, divider); 56 return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
57} 57}
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..177fffb9ebef 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
498 .ops = &clk_regmap_gate_ops, 498 .ops = &clk_regmap_gate_ops,
499 .parent_names = (const char *[]){ "fclk_div2_div" }, 499 .parent_names = (const char *[]){ "fclk_div2_div" },
500 .num_parents = 1, 500 .num_parents = 1,
501 .flags = CLK_IS_CRITICAL,
501 }, 502 },
502}; 503};
503 504
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..44e4e27eddad 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -35,6 +35,7 @@
35#define CLK_SEL 0x10 35#define CLK_SEL 0x10
36#define CLK_DIS 0x14 36#define CLK_DIS 0x14
37 37
38#define ARMADA_37XX_DVFS_LOAD_1 1
38#define LOAD_LEVEL_NR 4 39#define LOAD_LEVEL_NR 4
39 40
40#define ARMADA_37XX_NB_L0L1 0x18 41#define ARMADA_37XX_NB_L0L1 0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
507 return -EINVAL; 508 return -EINVAL;
508} 509}
509 510
511/*
 512 * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
 513 * respectively) to the L0 frequency (1.2 GHz) requires a significant
514 * amount of time to let VDD stabilize to the appropriate
515 * voltage. This amount of time is large enough that it cannot be
516 * covered by the hardware countdown register. Due to this, the CPU
517 * might start operating at L0 before the voltage is stabilized,
518 * leading to CPU stalls.
519 *
520 * To work around this problem, we prevent switching directly from the
521 * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
522 * frequency in-between. The sequence therefore becomes:
 523 * 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
 524 * 2. Sleep 20 ms to let the VDD voltage stabilize
 525 * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
526 */
527static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
528{
529 unsigned int cur_level;
530
531 if (rate != 1200 * 1000 * 1000)
532 return;
533
534 regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
535 cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
536 if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
537 return;
538
539 regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
540 ARMADA_37XX_NB_CPU_LOAD_MASK,
541 ARMADA_37XX_DVFS_LOAD_1);
542 msleep(20);
543}
544
510static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, 545static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
511 unsigned long parent_rate) 546 unsigned long parent_rate)
512{ 547{
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
537 */ 572 */
538 reg = ARMADA_37XX_NB_CPU_LOAD; 573 reg = ARMADA_37XX_NB_CPU_LOAD;
539 mask = ARMADA_37XX_NB_CPU_LOAD_MASK; 574 mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
575
576 clk_pm_cpu_set_rate_wa(rate, base);
577
540 regmap_update_bits(base, reg, mask, load_level); 578 regmap_update_bits(base, reg, mask, load_level);
541 579
542 return rate; 580 return rate;
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..ff8d66fd94e6 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
2781 2781
2782static struct clk_branch gcc_ufs_tx_symbol_0_clk = { 2782static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
2783 .halt_reg = 0x75018, 2783 .halt_reg = 0x75018,
2784 .halt_check = BRANCH_HALT_SKIP,
2784 .clkr = { 2785 .clkr = {
2785 .enable_reg = 0x75018, 2786 .enable_reg = 0x75018,
2786 .enable_mask = BIT(0), 2787 .enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..4b20d1b67a1b 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
2910 .name = "mmagic_bimc", 2910 .name = "mmagic_bimc",
2911 }, 2911 },
2912 .pwrsts = PWRSTS_OFF_ON, 2912 .pwrsts = PWRSTS_OFF_ON,
2913 .flags = ALWAYS_ON,
2913}; 2914};
2914 2915
2915static struct gdsc mmagic_video_gdsc = { 2916static struct gdsc mmagic_video_gdsc = {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3c3971256130..d4ed0022b0dd 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock);
311 311
312#ifdef CONFIG_ACPI 312#ifdef CONFIG_ACPI
313 313
314static bool intel_pstate_get_ppc_enable_status(void) 314static bool intel_pstate_acpi_pm_profile_server(void)
315{ 315{
316 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || 316 if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
317 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) 317 acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
318 return true; 318 return true;
319 319
320 return false;
321}
322
323static bool intel_pstate_get_ppc_enable_status(void)
324{
325 if (intel_pstate_acpi_pm_profile_server())
326 return true;
327
320 return acpi_ppc; 328 return acpi_ppc;
321} 329}
322 330
@@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol
459static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) 467static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
460{ 468{
461} 469}
470
471static inline bool intel_pstate_acpi_pm_profile_server(void)
472{
473 return false;
474}
462#endif 475#endif
463 476
464static inline void update_turbo_state(void) 477static inline void update_turbo_state(void)
@@ -1841,7 +1854,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
1841 intel_pstate_hwp_enable(cpu); 1854 intel_pstate_hwp_enable(cpu);
1842 1855
1843 id = x86_match_cpu(intel_pstate_hwp_boost_ids); 1856 id = x86_match_cpu(intel_pstate_hwp_boost_ids);
1844 if (id) 1857 if (id && intel_pstate_acpi_pm_profile_server())
1845 hwp_boost = true; 1858 hwp_boost = true;
1846 } 1859 }
1847 1860
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 29389accf3e9..efc9a7ae4857 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -183,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
183static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = { 183static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
184 { .compatible = "qcom,apq8096", }, 184 { .compatible = "qcom,apq8096", },
185 { .compatible = "qcom,msm8996", }, 185 { .compatible = "qcom,msm8996", },
186 {}
186}; 187};
187 188
188/* 189/*
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 1c6cbda56afe..09d823d36d3a 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
266 return; 266 return;
267 } 267 }
268 268
269 count -= initial;
270
269 if (initial) 271 if (initial)
270 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ 272 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
271 : "+S"(input), "+D"(output) 273 : "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
273 275
274 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ 276 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
275 : "+S"(input), "+D"(output) 277 : "+S"(input), "+D"(output)
276 : "d"(control_word), "b"(key), "c"(count - initial)); 278 : "d"(control_word), "b"(key), "c"(count));
277} 279}
278 280
279static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, 281static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
284 if (count < cbc_fetch_blocks) 286 if (count < cbc_fetch_blocks)
285 return cbc_crypt(input, output, key, iv, control_word, count); 287 return cbc_crypt(input, output, key, iv, control_word, count);
286 288
289 count -= initial;
290
287 if (initial) 291 if (initial)
288 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ 292 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
289 : "+S" (input), "+D" (output), "+a" (iv) 293 : "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
291 295
292 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ 296 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
293 : "+S" (input), "+D" (output), "+a" (iv) 297 : "+S" (input), "+D" (output), "+a" (iv)
294 : "d" (control_word), "b" (key), "c" (count-initial)); 298 : "d" (control_word), "b" (key), "c" (count));
295 return iv; 299 return iv;
296} 300}
297 301
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 781a4a337557..d8e159feb573 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
87config EFI_ARMSTUB 87config EFI_ARMSTUB
88 bool 88 bool
89 89
90config EFI_ARMSTUB_DTB_LOADER
91 bool "Enable the DTB loader"
92 depends on EFI_ARMSTUB
93 help
94 Select this config option to add support for the dtb= command
95 line parameter, allowing a device tree blob to be loaded into
96 memory from the EFI System Partition by the stub.
97
98 The device tree is typically provided by the platform or by
 99 the bootloader, so this option is intended mostly for
 100 development purposes.
101
90config EFI_BOOTLOADER_CONTROL 102config EFI_BOOTLOADER_CONTROL
91 tristate "EFI Bootloader Control" 103 tristate "EFI Bootloader Control"
92 depends on EFI_VARS 104 depends on EFI_VARS
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 3bf0dca378a6..a7902fccdcfa 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
48{ 48{
49 static atomic64_t seq; 49 static atomic64_t seq;
50 50
51 if (!atomic64_read(&seq)) 51 if (!atomic64_read(&seq)) {
52 atomic64_set(&seq, ((u64)get_seconds()) << 32); 52 time64_t time = ktime_get_real_seconds();
53
54 /*
 55 * This code is unlikely to still be needed in the year 2106,
56 * but just in case, let's use a few more bits for timestamps
57 * after y2038 to be sure they keep increasing monotonically
58 * for the next few hundred years...
59 */
60 if (time < 0x80000000)
61 atomic64_set(&seq, (ktime_get_real_seconds()) << 32);
62 else
63 atomic64_set(&seq, 0x8000000000000000ull |
64 ktime_get_real_seconds() << 24);
65 }
53 66
54 return atomic64_inc_return(&seq); 67 return atomic64_inc_return(&seq);
55} 68}
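
The seeding above uses two layouts: until 2038 the epoch seconds fit in 32
bits and occupy bits 63:32, leaving 32 bits of sequence space; afterwards
the top bit flags the new format and the seconds are shifted by only 24,
trading sequence space for timestamp range while keeping the values
monotonically increasing. A worked example with illustrative timestamps:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t seed(int64_t time)
    {
            if (time < 0x80000000)
                    return (uint64_t)time << 32;    /* pre-2038 layout */
            return 0x8000000000000000ull | ((uint64_t)time << 24);
    }

    int main(void)
    {
            /* 2018-08-01 and a post-2038 date (~2040), in UNIX seconds. */
            printf("2018: %#llx\n", (unsigned long long)seed(1533081600));
            printf("2040: %#llx\n", (unsigned long long)seed(2217542400));
            return 0;
    }
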
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
459 else 472 else
460 goto err_section_too_small; 473 goto err_section_too_small;
461#if defined(CONFIG_ARM64) || defined(CONFIG_ARM) 474#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
462 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) { 475 } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
463 struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata); 476 struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
464 477
465 printk("%ssection_type: ARM processor error\n", newpfx); 478 printk("%ssection_type: ARM processor error\n", newpfx);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 232f4915223b..d8a33a781a57 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -84,6 +84,8 @@ struct mm_struct efi_mm = {
84 .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), 84 .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
85}; 85};
86 86
87struct workqueue_struct *efi_rts_wq;
88
87static bool disable_runtime; 89static bool disable_runtime;
88static int __init setup_noefi(char *arg) 90static int __init setup_noefi(char *arg)
89{ 91{
@@ -337,6 +339,18 @@ static int __init efisubsys_init(void)
337 if (!efi_enabled(EFI_BOOT)) 339 if (!efi_enabled(EFI_BOOT))
338 return 0; 340 return 0;
339 341
342 /*
343 * Since we process only one efi_runtime_service() at a time, an
344 * ordered workqueue (which creates only one execution context)
 345 * should suffice for all our needs.
346 */
347 efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
348 if (!efi_rts_wq) {
349 pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
350 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
351 return 0;
352 }
353
340 /* We register the efi directory at /sys/firmware/efi */ 354 /* We register the efi directory at /sys/firmware/efi */
341 efi_kobj = kobject_create_and_add("efi", firmware_kobj); 355 efi_kobj = kobject_create_and_add("efi", firmware_kobj);
342 if (!efi_kobj) { 356 if (!efi_kobj) {
@@ -388,7 +402,7 @@ subsys_initcall(efisubsys_init);
388 * and if so, populate the supplied memory descriptor with the appropriate 402 * and if so, populate the supplied memory descriptor with the appropriate
389 * data. 403 * data.
390 */ 404 */
391int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md) 405int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
392{ 406{
393 efi_memory_desc_t *md; 407 efi_memory_desc_t *md;
394 408
@@ -406,12 +420,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
406 u64 size; 420 u64 size;
407 u64 end; 421 u64 end;
408 422
409 if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
410 md->type != EFI_BOOT_SERVICES_DATA &&
411 md->type != EFI_RUNTIME_SERVICES_DATA) {
412 continue;
413 }
414
415 size = md->num_pages << EFI_PAGE_SHIFT; 423 size = md->num_pages << EFI_PAGE_SHIFT;
416 end = md->phys_addr + size; 424 end = md->phys_addr + size;
417 if (phys_addr >= md->phys_addr && phys_addr < end) { 425 if (phys_addr >= md->phys_addr && phys_addr < end) {
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 1ab80e06e7c5..5d06bd247d07 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
250 return; 250 return;
251 251
252 rc = efi_mem_desc_lookup(efi.esrt, &md); 252 rc = efi_mem_desc_lookup(efi.esrt, &md);
253 if (rc < 0) { 253 if (rc < 0 ||
254 (!(md.attribute & EFI_MEMORY_RUNTIME) &&
255 md.type != EFI_BOOT_SERVICES_DATA &&
256 md.type != EFI_RUNTIME_SERVICES_DATA)) {
254 pr_warn("ESRT header is not in the memory map.\n"); 257 pr_warn("ESRT header is not in the memory map.\n");
255 return; 258 return;
256 } 259 }
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
326 329
327 end = esrt_data + size; 330 end = esrt_data + size;
328 pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end); 331 pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
329 efi_mem_reserve(esrt_data, esrt_data_size); 332 if (md.type == EFI_BOOT_SERVICES_DATA)
333 efi_mem_reserve(esrt_data, esrt_data_size);
330 334
331 pr_debug("esrt-init: loaded.\n"); 335 pr_debug("esrt-init: loaded.\n");
332} 336}
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 01a9d78ee415..6920033de6d4 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -40,31 +40,6 @@
40 40
41static u64 virtmap_base = EFI_RT_VIRTUAL_BASE; 41static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
42 42
43efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
44 void *__image, void **__fh)
45{
46 efi_file_io_interface_t *io;
47 efi_loaded_image_t *image = __image;
48 efi_file_handle_t *fh;
49 efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
50 efi_status_t status;
51 void *handle = (void *)(unsigned long)image->device_handle;
52
53 status = sys_table_arg->boottime->handle_protocol(handle,
54 &fs_proto, (void **)&io);
55 if (status != EFI_SUCCESS) {
56 efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
57 return status;
58 }
59
60 status = io->open_volume(io, &fh);
61 if (status != EFI_SUCCESS)
62 efi_printk(sys_table_arg, "Failed to open volume\n");
63
64 *__fh = fh;
65 return status;
66}
67
68void efi_char16_printk(efi_system_table_t *sys_table_arg, 43void efi_char16_printk(efi_system_table_t *sys_table_arg,
69 efi_char16_t *str) 44 efi_char16_t *str)
70{ 45{
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
202 * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure 177 * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure
203 * boot is enabled if we can't determine its state. 178 * boot is enabled if we can't determine its state.
204 */ 179 */
205 if (secure_boot != efi_secureboot_mode_disabled && 180 if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
206 strstr(cmdline_ptr, "dtb=")) { 181 secure_boot != efi_secureboot_mode_disabled) {
207 pr_efi(sys_table, "Ignoring DTB from command line.\n"); 182 if (strstr(cmdline_ptr, "dtb="))
183 pr_efi(sys_table, "Ignoring DTB from command line.\n");
208 } else { 184 } else {
209 status = handle_cmdline_files(sys_table, image, cmdline_ptr, 185 status = handle_cmdline_files(sys_table, image, cmdline_ptr,
210 "dtb=", 186 "dtb=",
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 50a9cab5a834..e94975f4655b 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
413 return efi_call_proto(efi_file_handle, close, handle); 413 return efi_call_proto(efi_file_handle, close, handle);
414} 414}
415 415
416static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
417 efi_loaded_image_t *image,
418 efi_file_handle_t **__fh)
419{
420 efi_file_io_interface_t *io;
421 efi_file_handle_t *fh;
422 efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
423 efi_status_t status;
424 void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
425 device_handle,
426 image);
427
428 status = efi_call_early(handle_protocol, handle,
429 &fs_proto, (void **)&io);
430 if (status != EFI_SUCCESS) {
431 efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
432 return status;
433 }
434
435 status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
436 if (status != EFI_SUCCESS)
437 efi_printk(sys_table_arg, "Failed to open volume\n");
438 else
439 *__fh = fh;
440
441 return status;
442}
443
416/* 444/*
417 * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= 445 * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
418 * option, e.g. efi=nochunk. 446 * option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
563 591
564 /* Only open the volume once. */ 592 /* Only open the volume once. */
565 if (!i) { 593 if (!i) {
566 status = efi_open_volume(sys_table_arg, image, 594 status = efi_open_volume(sys_table_arg, image, &fh);
567 (void **)&fh);
568 if (status != EFI_SUCCESS) 595 if (status != EFI_SUCCESS)
569 goto free_files; 596 goto free_files;
570 } 597 }
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index f59564b72ddc..32799cf039ef 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
36 36
37void efi_char16_printk(efi_system_table_t *, efi_char16_t *); 37void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
38 38
39efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
40 void **__fh);
41
42unsigned long get_dram_base(efi_system_table_t *sys_table_arg); 39unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
43 40
44efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, 41efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index ae54870b2788..aa66cbf23512 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -1,6 +1,15 @@
1/* 1/*
2 * runtime-wrappers.c - Runtime Services function call wrappers 2 * runtime-wrappers.c - Runtime Services function call wrappers
3 * 3 *
4 * Implementation summary:
5 * -----------------------
 6 * 1. When a user/kernel thread requests execution of an
 7 * efi_runtime_service(), enqueue the work on efi_rts_wq.
 8 * 2. The caller thread waits for completion until the work is finished,
 9 * because it depends on the return status and execution of the
 10 * efi_runtime_service().
11 * For instance, get_variable() and get_next_variable().
12 *
4 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> 13 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
5 * 14 *
6 * Split off from arch/x86/platform/efi/efi.c 15 * Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
22#include <linux/mutex.h> 31#include <linux/mutex.h>
23#include <linux/semaphore.h> 32#include <linux/semaphore.h>
24#include <linux/stringify.h> 33#include <linux/stringify.h>
34#include <linux/workqueue.h>
35#include <linux/completion.h>
36
25#include <asm/efi.h> 37#include <asm/efi.h>
26 38
27/* 39/*
@@ -33,6 +45,76 @@
33#define __efi_call_virt(f, args...) \ 45#define __efi_call_virt(f, args...) \
34 __efi_call_virt_pointer(efi.systab->runtime, f, args) 46 __efi_call_virt_pointer(efi.systab->runtime, f, args)
35 47
48/* efi_runtime_service() function identifiers */
49enum efi_rts_ids {
50 GET_TIME,
51 SET_TIME,
52 GET_WAKEUP_TIME,
53 SET_WAKEUP_TIME,
54 GET_VARIABLE,
55 GET_NEXT_VARIABLE,
56 SET_VARIABLE,
57 QUERY_VARIABLE_INFO,
58 GET_NEXT_HIGH_MONO_COUNT,
59 UPDATE_CAPSULE,
60 QUERY_CAPSULE_CAPS,
61};
62
63/*
64 * efi_runtime_work: Details of EFI Runtime Service work
65 * @arg<1-5>: EFI Runtime Service function arguments
66 * @status: Status of executing EFI Runtime Service
67 * @efi_rts_id: EFI Runtime Service function identifier
68 * @efi_rts_comp: Struct used for handling completions
69 */
70struct efi_runtime_work {
71 void *arg1;
72 void *arg2;
73 void *arg3;
74 void *arg4;
75 void *arg5;
76 efi_status_t status;
77 struct work_struct work;
78 enum efi_rts_ids efi_rts_id;
79 struct completion efi_rts_comp;
80};
81
82/*
83 * efi_queue_work: Queue efi_runtime_service() and wait until it's done
84 * @rts: efi_runtime_service() function identifier
85 * @rts_arg<1-5>: efi_runtime_service() function arguments
86 *
87 * Accesses to efi_runtime_services() are serialized by a binary
 88 * semaphore (efi_runtime_lock) and the caller waits until the work is
 89 * finished, hence _only_ one work item is queued at a time and the
 90 * caller thread waits for completion.
91 */
92#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
93({ \
94 struct efi_runtime_work efi_rts_work; \
95 efi_rts_work.status = EFI_ABORTED; \
96 \
97 init_completion(&efi_rts_work.efi_rts_comp); \
98 INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
99 efi_rts_work.arg1 = _arg1; \
100 efi_rts_work.arg2 = _arg2; \
101 efi_rts_work.arg3 = _arg3; \
102 efi_rts_work.arg4 = _arg4; \
103 efi_rts_work.arg5 = _arg5; \
104 efi_rts_work.efi_rts_id = _rts; \
105 \
106 /* \
107 * queue_work() returns 0 if work was already on queue, \
108 * _ideally_ this should never happen. \
109 */ \
110 if (queue_work(efi_rts_wq, &efi_rts_work.work)) \
111 wait_for_completion(&efi_rts_work.efi_rts_comp); \
112 else \
113 pr_err("Failed to queue work to efi_rts_wq.\n"); \
114 \
115 efi_rts_work.status; \
116})
117
36void efi_call_virt_check_flags(unsigned long flags, const char *call) 118void efi_call_virt_check_flags(unsigned long flags, const char *call)
37{ 119{
38 unsigned long cur_flags, mismatch; 120 unsigned long cur_flags, mismatch;
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
  */
 static DEFINE_SEMAPHORE(efi_runtime_lock);
 
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * Semantics followed by efi_call_rts() to understand efi_runtime_work:
+ * 1. If argument was a pointer, recast it from void pointer to original
+ * pointer type.
+ * 2. If argument was a value, recast it from void pointer to original
+ * pointer type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+	struct efi_runtime_work *efi_rts_work;
+	void *arg1, *arg2, *arg3, *arg4, *arg5;
+	efi_status_t status = EFI_NOT_FOUND;
+
+	efi_rts_work = container_of(work, struct efi_runtime_work, work);
+	arg1 = efi_rts_work->arg1;
+	arg2 = efi_rts_work->arg2;
+	arg3 = efi_rts_work->arg3;
+	arg4 = efi_rts_work->arg4;
+	arg5 = efi_rts_work->arg5;
+
+	switch (efi_rts_work->efi_rts_id) {
+	case GET_TIME:
+		status = efi_call_virt(get_time, (efi_time_t *)arg1,
+				       (efi_time_cap_t *)arg2);
+		break;
+	case SET_TIME:
+		status = efi_call_virt(set_time, (efi_time_t *)arg1);
+		break;
+	case GET_WAKEUP_TIME:
+		status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+				       (efi_bool_t *)arg2, (efi_time_t *)arg3);
+		break;
+	case SET_WAKEUP_TIME:
+		status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+				       (efi_time_t *)arg2);
+		break;
+	case GET_VARIABLE:
+		status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+				       (efi_guid_t *)arg2, (u32 *)arg3,
+				       (unsigned long *)arg4, (void *)arg5);
+		break;
+	case GET_NEXT_VARIABLE:
+		status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+				       (efi_char16_t *)arg2,
+				       (efi_guid_t *)arg3);
+		break;
+	case SET_VARIABLE:
+		status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+				       (efi_guid_t *)arg2, *(u32 *)arg3,
+				       *(unsigned long *)arg4, (void *)arg5);
+		break;
+	case QUERY_VARIABLE_INFO:
+		status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+				       (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+		break;
+	case GET_NEXT_HIGH_MONO_COUNT:
+		status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+		break;
+	case UPDATE_CAPSULE:
+		status = efi_call_virt(update_capsule,
+				       (efi_capsule_header_t **)arg1,
+				       *(unsigned long *)arg2,
+				       *(unsigned long *)arg3);
+		break;
+	case QUERY_CAPSULE_CAPS:
+		status = efi_call_virt(query_capsule_caps,
+				       (efi_capsule_header_t **)arg1,
+				       *(unsigned long *)arg2, (u64 *)arg3,
+				       (int *)arg4);
+		break;
+	default:
+		/*
+		 * Ideally, we should never reach here because a caller of this
+		 * function should have put the right efi_runtime_service()
+		 * function identifier into efi_rts_work->efi_rts_id
+		 */
+		pr_err("Requested executing invalid EFI Runtime Service.\n");
+	}
+	efi_rts_work->status = status;
+	complete(&efi_rts_work->efi_rts_comp);
+}
+
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
 	efi_status_t status;
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(get_time, tm, tc);
+	status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(set_time, tm);
+	status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+	status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+				NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(set_wakeup_time, enabled, tm);
+	status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+				NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(get_variable, name, vendor, attr, data_size,
-			       data);
+	status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+				data);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(get_next_variable, name_size, name, vendor);
+	status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+				NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(set_variable, name, vendor, attr, data_size,
-			       data);
+	status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+				data);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(query_variable_info, attr, storage_space,
-			       remaining_space, max_variable_size);
+	status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+				remaining_space, max_variable_size, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(get_next_high_mono_count, count);
+	status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+				NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(update_capsule, capsules, count, sg_list);
+	status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+				NULL, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
 	if (down_interruptible(&efi_runtime_lock))
 		return EFI_ABORTED;
-	status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
-			       reset_type);
+	status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+				max_size, reset_type, NULL);
 	up(&efi_runtime_lock);
 	return status;
 }
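
The dispatch pattern above, an on-stack work item plus a completion that the caller blocks on, generalizes beyond EFI. A minimal sketch of the same idea; the names (call_work, run_on_wq) are hypothetical and not part of the kernel's EFI code:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	/* Hypothetical helper: run op(arg) on wq and wait for the result. */
	struct call_work {
		struct work_struct work;
		long (*op)(void *arg);
		void *arg;
		long ret;
		struct completion done;
	};

	static void call_work_fn(struct work_struct *work)
	{
		struct call_work *cw = container_of(work, struct call_work, work);

		cw->ret = cw->op(cw->arg);
		complete(&cw->done);		/* wake the queueing thread */
	}

	static long run_on_wq(struct workqueue_struct *wq,
			      long (*op)(void *), void *arg)
	{
		struct call_work cw = { .op = op, .arg = arg, .ret = -EAGAIN };

		init_completion(&cw.done);
		INIT_WORK_ONSTACK(&cw.work, call_work_fn);
		if (queue_work(wq, &cw.work))
			wait_for_completion(&cw.done);
		destroy_work_on_stack(&cw.work);
		return cw.ret;
	}

Because the work item lives on the caller's stack, the caller must not return before complete() has run; that is why the wait is unconditional whenever the item was actually queued.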
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d3cf9502e7e7..58faeb1cef63 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
 	fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
 	fwspec.param_count = 2;
 	fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
-	fwspec.param[1] = IRQ_TYPE_NONE;
+	/*
+	 * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+	 * temporarily. Anyway, ->irq_set_type() will override it later.
+	 */
+	fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
 
 	return irq_create_fwspec_mapping(&fwspec);
 }
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index e2232cbcec8b..addd9fecc198 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -25,6 +25,7 @@
 
 struct acpi_gpio_event {
 	struct list_head node;
+	struct list_head initial_sync_list;
 	acpi_handle handle;
 	unsigned int pin;
 	unsigned int irq;
@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
 	struct list_head events;
 };
 
+static LIST_HEAD(acpi_gpio_initial_sync_list);
+static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
 	if (!gc->parent)
@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
 	return gpiochip_get_desc(chip, pin);
 }
 
+static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+{
+	mutex_lock(&acpi_gpio_initial_sync_list_lock);
+	list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+	mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
+static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+{
+	mutex_lock(&acpi_gpio_initial_sync_list_lock);
+	if (!list_empty(&event->initial_sync_list))
+		list_del_init(&event->initial_sync_list);
+	mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
 	struct acpi_gpio_event *event = data;
@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	irq_handler_t handler = NULL;
 	struct gpio_desc *desc;
 	unsigned long irqflags;
-	int ret, pin, irq;
+	int ret, pin, irq, value;
 
 	if (!acpi_gpio_get_irq_resource(ares, &agpio))
 		return AE_OK;
@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
 	gpiod_direction_input(desc);
 
+	value = gpiod_get_value(desc);
+
 	ret = gpiochip_lock_as_irq(chip, pin);
 	if (ret) {
 		dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	event->irq = irq;
 	event->pin = pin;
 	event->desc = desc;
+	INIT_LIST_HEAD(&event->initial_sync_list);
 
 	ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
 				   "ACPI:Event", event);
@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 		enable_irq_wake(irq);
 
 	list_add_tail(&event->node, &acpi_gpio->events);
+
+	/*
+	 * Make sure we trigger the initial state of the IRQ when using RISING
+	 * or FALLING. Note we run the handlers at late_init; the AML code
+	 * may refer to OperationRegions from other (builtin) drivers which
+	 * may be probed after us.
+	 */
+	if (handler == acpi_gpio_irq_handler &&
+	    (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+	     ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+		acpi_gpio_add_to_initial_sync_list(event);
+
 	return AE_OK;
 
 fail_free_event:
@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
 	list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
 		struct gpio_desc *desc;
 
+		acpi_gpio_del_from_initial_sync_list(event);
+
 		if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
 			disable_irq_wake(event->irq);
 
@@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
 
 	return con_id == NULL;
 }
+
+/* Sync the initial state of handlers after all builtin drivers have probed */
+static int acpi_gpio_initial_sync(void)
+{
+	struct acpi_gpio_event *event, *ep;
+
+	mutex_lock(&acpi_gpio_initial_sync_list_lock);
+	list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+				 initial_sync_list) {
+		acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+		list_del_init(&event->initial_sync_list);
+	}
+	mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+	return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_initial_sync);
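
The gpiolib-acpi hunk is an instance of a general "record now, replay after all builtin drivers have probed" pattern. A hedged sketch of just that pattern, with hypothetical names rather than the gpiolib code itself:

	#include <linux/init.h>
	#include <linux/list.h>
	#include <linux/mutex.h>

	struct pending_action {
		struct list_head entry;
		void (*fn)(void *data);
		void *data;
	};

	static LIST_HEAD(pending_list);
	static DEFINE_MUTEX(pending_lock);

	/* Called at probe time: remember the action instead of running it. */
	static void defer_action(struct pending_action *a)
	{
		mutex_lock(&pending_lock);
		list_add_tail(&a->entry, &pending_list);
		mutex_unlock(&pending_lock);
	}

	/* Runs once, after every plain late_initcall has finished. */
	static int __init replay_pending(void)
	{
		struct pending_action *a, *tmp;

		mutex_lock(&pending_lock);
		list_for_each_entry_safe(a, tmp, &pending_list, entry) {
			a->fn(a->data);
			list_del_init(&a->entry);
		}
		mutex_unlock(&pending_lock);
		return 0;
	}
	late_initcall_sync(replay_pending);

The _sync level is the key detail: it orders the replay after the first deferred-probe pass, exactly as the comment in the patch notes.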
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 28d968088131..53a14ee8ad6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
 	 * Note that active low is the default.
 	 */
 	if (IS_ENABLED(CONFIG_REGULATOR) &&
-	    (of_device_is_compatible(np, "reg-fixed-voltage") ||
+	    (of_device_is_compatible(np, "regulator-fixed") ||
+	     of_device_is_compatible(np, "reg-fixed-voltage") ||
 	     of_device_is_compatible(np, "regulator-gpio"))) {
 		/*
 		 * The regulator GPIO handles are specified such that the
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 73021b388e12..dd3ff2f2cdce 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work)
 	else
 		status = connector_status_disconnected;
 
+	/*
+	 * The bridge resets its registers on unplug. So when we get a plug
+	 * event and we're already supposed to be powered, cycle the bridge to
+	 * restore its state.
+	 */
+	if (status == connector_status_connected &&
+	    adv7511->connector.status == connector_status_disconnected &&
+	    adv7511->powered) {
+		regcache_mark_dirty(adv7511->regmap);
+		adv7511_power_on(adv7511);
+	}
+
 	if (adv7511->connector.status != status) {
 		adv7511->connector.status = status;
 		if (status == connector_status_disconnected)
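
adv7511_power_on() is presumably where the cached registers get written back; the underlying regmap idiom for recovering from a device-side reset is small enough to show. A hedged sketch, assuming `map` is a regmap created with a register cache:

	#include <linux/regmap.h>

	/* The device lost its registers (e.g. the unplug cut its power):
	 * declare the cache authoritative, then rewrite the hardware.
	 */
	static int restore_after_reset(struct regmap *map)
	{
		regcache_mark_dirty(map);
		return regcache_sync(map);
	}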
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 130da5195f3b..81e32199d3ef 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1510,8 +1510,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
 {
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	struct drm_plane *plane;
-	struct drm_plane_state *old_plane_state, *new_plane_state;
+	struct drm_plane *plane = NULL;
+	struct drm_plane_state *old_plane_state = NULL;
+	struct drm_plane_state *new_plane_state = NULL;
 	const struct drm_plane_helper_funcs *funcs;
 	int i, n_planes = 0;
 
@@ -1527,7 +1528,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
 	if (n_planes != 1)
 		return -EINVAL;
 
-	if (!new_plane_state->crtc)
+	if (!new_plane_state->crtc ||
+	    old_plane_state->crtc != new_plane_state->crtc)
 		return -EINVAL;
 
 	funcs = plane->helper_private;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 3c4000facb36..f973d287696a 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
 		ctx->handle = drm_legacy_ctxbitmap_next(dev);
 	}
 	DRM_DEBUG("%d\n", ctx->handle);
-	if (ctx->handle == -1) {
+	if (ctx->handle < 0) {
 		DRM_DEBUG("Not enough free contexts.\n");
 		/* Should this return -EBUSY instead? */
 		return -ENOMEM;
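
The `== -1` test was wrong because drm_legacy_ctxbitmap_next() can fail with any negative errno, not only -1. For an allocator with "non-negative index or negative errno" semantics, the robust caller looks like this sketch (alloc_index() is hypothetical):

	/* Hypothetical allocator: returns an index >= 0, or -errno. */
	extern int alloc_index(void);

	static int get_handle(void)
	{
		int idx = alloc_index();

		if (idx < 0)	/* catches -ENOMEM, -ENOSPC, ..., not just -1 */
			return idx;
		return idx;	/* valid handle */
	}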
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 52f3b91d14fd..71e1aa54f774 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -652,6 +652,7 @@ enum intel_sbi_destination {
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 #define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
 
 struct intel_fbdev;
 struct intel_fbc_work;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f4a8598a2d39..fed26d6e4e27 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }
 
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-				       enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 	uint32_t val = I915_READ(reg);
 
 	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 	val |= TRANS_DDI_PORT_NONE;
 	I915_WRITE(reg, val);
+
+	if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+		DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+		/* Quirk time at 100ms for reliable operation */
+		msleep(100);
+	}
 }
 
 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2cc6faa1daa8..dec0d60921bf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 	intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
 
 	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+		intel_ddi_disable_transcoder_func(old_crtc_state);
 
 	if (INTEL_GEN(dev_priv) >= 9)
 		skylake_scaler_disable(intel_crtc);
@@ -14646,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
 	DRM_INFO("Applying T12 delay quirk\n");
 }
 
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+	DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -14732,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {
 
 	/* Toshiba Satellite P50-C-18C */
 	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+	/* GeminiLake NUC */
+	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	/* ASRock ITX */
+	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0361130500a6..b8eefbffc77d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-				       enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
 struct intel_encoder *
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 56dd7a9a8e25..dd5312b02a8d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		return PTR_ERR(imx_ldb->regmap);
 	}
 
+	/* disable LDB by resetting the control register to POR default */
+	regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
 	imx_ldb->dev = dev;
 
 	if (of_id)
@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		if (ret || i < 0 || i > 1)
 			return -EINVAL;
 
+		if (!of_device_is_available(child))
+			continue;
+
 		if (dual && i > 0) {
 			dev_warn(dev, "dual-channel mode, ignoring second output\n");
 			continue;
 		}
 
-		if (!of_device_is_available(child))
-			continue;
-
 		channel = &imx_ldb->channel[i];
 		channel->ldb = imx_ldb;
 		channel->chno = i;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 1d34619eb3fe..a951ec75d01f 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -320,6 +320,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
 		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
 			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+	} else {
+		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
 	}
 
 	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index caa05b0702e1..5450a2db1219 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
 		break;
 	case V4L2_MBUS_BT656:
 		csicfg->ext_vsync = 0;
-		if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+		if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+		    mbus_fmt->field == V4L2_FIELD_ALTERNATE)
 			csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
 		else
 			csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 75d6ab177055..7379043711df 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
 	/*
 	 * It's not always possible to have 1 to 2 ratio when d=7, so fall back
 	 * to minimal possible clkh in this case.
+	 *
+	 * Note:
+	 * CLKH is not allowed to be 0; in that case the I2C clock is not
+	 * generated at all.
 	 */
-	if (clk >= clkl + d) {
+	if (clk > clkl + d) {
 		clkh = clk - clkl - d;
 		clkl -= d;
 	} else {
-		clkh = 0;
+		clkh = 1;
 		clkl = clk - (d << 1);
 	}
 
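
A worked example of the corrected fallback, under assumed input values: with clk = 20, clkl = 13 and d = 7, the old test `clk >= clkl + d` took the first branch and computed clkh = 20 - 13 - 7 = 0, which stops SCL generation entirely; the new strict test takes the fallback instead:

	/* Assumed values, for illustration only. */
	unsigned int clk = 20, clkl = 13, d = 7, clkh;

	if (clk > clkl + d) {		/* 20 > 20 is false ... */
		clkh = clk - clkl - d;
		clkl -= d;
	} else {			/* ... so fall back */
		clkh = 1;		/* CLKH = 0 would disable the clock */
		clkl = clk - (d << 1);	/* 20 - 14 = 6 */
	}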
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0207e194f84b..498c5e891649 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
 		goto err_desc;
 	}
 
+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
 	 * The first byte must be transmitted by the CPU.
 	 */
 	imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	if (result)
 		return result;
 
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
 	i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
 							  "gpio");
 	rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
 
 	if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
 	    PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
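
The i2c-imx change is purely an ordering fix: once dmaengine_submit() has been called, the DMA callback may invoke complete() at any moment, and a reinit_completion() that runs afterwards would erase that wakeup, so the subsequent wait times out. The rule, as a hedged sketch (assumed context: a descriptor and channel prepared as in the driver):

	#include <linux/completion.h>
	#include <linux/dmaengine.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static int submit_and_wait(struct completion *done,
				   struct dma_async_tx_descriptor *txdesc,
				   struct dma_chan *chan)
	{
		reinit_completion(done);		/* arm first */
		if (dma_submit_error(dmaengine_submit(txdesc)))
			return -EIO;
		dma_async_issue_pending(chan);		/* callback may fire now */
		if (!wait_for_completion_timeout(done, msecs_to_jiffies(1000)))
			return -ETIMEDOUT;
		return 0;
	}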
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5e310efd9446..3c1c817f6968 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -32,6 +32,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 /* register offsets */
@@ -111,8 +112,9 @@
 #define ID_ARBLOST	(1 << 3)
 #define ID_NACK		(1 << 4)
 /* persistent flags */
+#define ID_P_NO_RXDMA	(1 << 30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED	(1 << 31)
-#define ID_P_MASK	ID_P_PM_BLOCKED
+#define ID_P_MASK	(ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
 
 enum rcar_i2c_type {
 	I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
 	struct dma_chan *dma_rx;
 	struct scatterlist sg;
 	enum dma_data_direction dma_direction;
+
+	struct reset_control *rstc;
 };
 
 #define rcar_i2c_priv_to_dev(p)	((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
 	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
 			 sg_dma_len(&priv->sg), priv->dma_direction);
 
+	/* Gen3 can only do one RXDMA per transfer and we just completed it */
+	if (priv->devtype == I2C_RCAR_GEN3 &&
+	    priv->dma_direction == DMA_FROM_DEVICE)
+		priv->flags |= ID_P_NO_RXDMA;
+
 	priv->dma_direction = DMA_NONE;
 }
 
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
 	unsigned char *buf;
 	int len;
 
-	/* Do not use DMA if it's not available or for messages < 8 bytes */
-	if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+	/* Do various checks to see if DMA is feasible at all */
+	if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+	    (read && priv->flags & ID_P_NO_RXDMA))
 		return;
 
 	if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
 	}
 }
 
+/* I2C is a special case; we need to poll the status of the reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+	int i, ret;
+
+	ret = reset_control_reset(priv->rstc);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < LOOP_TIMEOUT; i++) {
+		ret = reset_control_status(priv->rstc);
+		if (ret == 0)
+			return 0;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 				struct i2c_msg *msgs,
 				int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
 	pm_runtime_get_sync(dev);
 
+	/* Gen3 needs a reset before allowing RXDMA once */
+	if (priv->devtype == I2C_RCAR_GEN3) {
+		priv->flags |= ID_P_NO_RXDMA;
+		if (!IS_ERR(priv->rstc)) {
+			ret = rcar_i2c_do_reset(priv);
+			if (ret == 0)
+				priv->flags &= ~ID_P_NO_RXDMA;
+		}
+	}
+
 	rcar_i2c_init(priv);
 
 	ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto out_pm_put;
 
+	if (priv->devtype == I2C_RCAR_GEN3) {
+		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+		if (!IS_ERR(priv->rstc)) {
+			ret = reset_control_status(priv->rstc);
+			if (ret < 0)
+				priv->rstc = ERR_PTR(-ENOTSUPP);
+		}
+	}
+
 	/* Stay always active when multi-master to keep arbitration working */
 	if (of_property_read_bool(dev->of_node, "multi-master"))
 		priv->flags |= ID_P_PM_BLOCKED;
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 1f41a4f89c08..8a873975cf12 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
 	if (priv->len_recv) {
 		/* read length byte */
 		rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+		/*
+		 * We expect at least 2 interrupts for I2C_M_RECV_LEN
+		 * transactions. The length is updated during the first
+		 * interrupt, and the buffer contents are only copied
+		 * during subsequent interrupts. If the interrupts get
+		 * merged, we would complete the transaction without
+		 * copying out the bytes from the RX fifo. To avoid this,
+		 * we now drain the fifo as and when data is available.
+		 * We drained the rlen byte already, so decrement the
+		 * total length by one.
+		 */
+
+		len--;
 		if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
 			rlen = 0;	/*abort transfer */
 			priv->msg_buf_remaining = 0;
 			priv->msg_len = 0;
-		} else {
-			*buf++ = rlen;
-			if (priv->client_pec)
-				++rlen; /* account for error check byte */
-			/* update remaining bytes and message length */
-			priv->msg_buf_remaining = rlen;
-			priv->msg_len = rlen + 1;
+			xlp9xx_i2c_update_rlen(priv);
+			return;
 		}
+
+		*buf++ = rlen;
+		if (priv->client_pec)
+			++rlen; /* account for error check byte */
+		/* update remaining bytes and message length */
+		priv->msg_buf_remaining = rlen;
+		priv->msg_len = rlen + 1;
 		xlp9xx_i2c_update_rlen(priv);
 		priv->len_recv = false;
-	} else {
-		len = min(priv->msg_buf_remaining, len);
-		for (i = 0; i < len; i++, buf++)
-			*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
-		priv->msg_buf_remaining -= len;
 	}
 
+	len = min(priv->msg_buf_remaining, len);
+	for (i = 0; i < len; i++, buf++)
+		*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+	priv->msg_buf_remaining -= len;
 	priv->msg_buf = buf;
 
 	if (priv->msg_buf_remaining)
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 301285c54603..15c95aaa484c 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -624,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
 static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
 				 unsigned int flags)
 {
-	rt_mutex_lock(&adapter->bus_lock);
+	rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
 }
 
 /**
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 300ab4b672e4..29646aa6132e 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
 	struct i2c_mux_priv *priv = adapter->algo_data;
 	struct i2c_adapter *parent = priv->muxc->parent;
 
-	rt_mutex_lock(&parent->mux_lock);
+	rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
 	if (!(flags & I2C_LOCK_ROOT_ADAPTER))
 		return;
 	i2c_lock_bus(parent, flags);
@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
 	struct i2c_mux_priv *priv = adapter->algo_data;
 	struct i2c_adapter *parent = priv->muxc->parent;
 
-	rt_mutex_lock(&parent->mux_lock);
+	rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
 	i2c_lock_bus(parent, flags);
 }
 
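
Both i2c hunks exist for lockdep: a mux adapter's lock and its parent's lock belong to the same lock class, so taking them in a chain looks like a recursive deadlock unless each nesting level is given its own subclass, and i2c_adapter_depth() supplies exactly that level. The same idea with a plain mutex, as a sketch with a hypothetical node type:

	#include <linux/mutex.h>

	struct node {
		struct node *parent;
		struct mutex lock;	/* one lock class for every level */
	};

	static unsigned int node_depth(struct node *n)
	{
		unsigned int depth = 0;

		while ((n = n->parent))
			depth++;
		return depth;
	}

	/* Locking child-to-root is fine: each level uses its own subclass. */
	static void lock_chain(struct node *n)
	{
		for (; n; n = n->parent)
			mutex_lock_nested(&n->lock, node_depth(n));
	}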
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index cc06e8404e9b..583d3a10b940 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1984,15 +1984,64 @@ static int modify_qp(struct ib_uverbs_file *file,
 		goto release_qp;
 	}
 
-	if ((cmd->base.attr_mask & IB_QP_AV) &&
-	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
-		ret = -EINVAL;
-		goto release_qp;
+	if ((cmd->base.attr_mask & IB_QP_AV)) {
+		if (!rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+			ret = -EINVAL;
+			goto release_qp;
+		}
+
+		if (cmd->base.attr_mask & IB_QP_STATE &&
+		    cmd->base.qp_state == IB_QPS_RTR) {
+			/* We are in INIT->RTR TRANSITION (if we are not,
+			 * this transition will be rejected in subsequent checks).
+			 * In the INIT->RTR transition, we cannot have IB_QP_PORT set,
+			 * but the IB_QP_STATE flag is required.
+			 *
+			 * Since kernel 3.14 (commit dbf727de7440), the uverbs driver,
+			 * when IB_QP_AV is set, has required inclusion of a valid
+			 * port number in the primary AV. (AVs are created and handled
+			 * differently for infiniband and ethernet (RoCE) ports).
+			 *
+			 * Check the port number included in the primary AV against
+			 * the port number in the qp struct, which was set (and saved)
+			 * in the RST->INIT transition.
+			 */
+			if (cmd->base.dest.port_num != qp->real_qp->port) {
+				ret = -EINVAL;
+				goto release_qp;
+			}
+		} else {
+			/* We are in SQD->SQD. (If we are not, this transition will
+			 * be rejected later in the verbs layer checks).
+			 * Check for both IB_QP_PORT and IB_QP_AV, these can be set
+			 * together in the SQD->SQD transition.
+			 *
+			 * If only IB_QP_AV was set, add in IB_QP_PORT as well (the
+			 * verbs layer driver does not track primary port changes
+			 * resulting from path migration. Thus, in SQD, if the primary
+			 * AV is modified, the primary port should also be modified).
+			 *
+			 * Note that in this transition, the IB_QP_STATE flag
+			 * is not allowed.
+			 */
+			if (((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+			     == (IB_QP_AV | IB_QP_PORT)) &&
+			    cmd->base.port_num != cmd->base.dest.port_num) {
+				ret = -EINVAL;
+				goto release_qp;
+			}
+			if ((cmd->base.attr_mask & (IB_QP_AV | IB_QP_PORT))
+			    == IB_QP_AV) {
+				cmd->base.attr_mask |= IB_QP_PORT;
+				cmd->base.port_num = cmd->base.dest.port_num;
+			}
+		}
 	}
 
 	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
 	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
-	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
+	     !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num) ||
+	     cmd->base.alt_port_num != cmd->base.alt_dest.port_num)) {
 		ret = -EINVAL;
 		goto release_qp;
 	}
diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c
index a4e404aaf64b..5c7afdec192c 100644
--- a/drivers/input/keyboard/hilkbd.c
+++ b/drivers/input/keyboard/hilkbd.c
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
 #define HIL_DATA		0x1
 #define HIL_CMD			0x3
 #define HIL_IRQ			2
-#define hil_readb(p)		readb(p)
-#define hil_writeb(v,p)		writeb((v),(p))
+#define hil_readb(p)		readb((const volatile void __iomem *)(p))
+#define hil_writeb(v, p)	writeb((v), (volatile void __iomem *)(p))
 
 #else
 #error "HIL is not supported on this platform"
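
The new casts are about sparse's address-space checking: readb()/writeb() take a `void __iomem *`, while this platform carries the HIL base address around as a plain value. Annotating at the accessor boundary, sketched with an assumed integer base address:

	#include <asm/io.h>
	#include <linux/types.h>

	/* Sketch: cast a plain address to __iomem at the accessor boundary,
	 * so sparse stops warning about an address-space mismatch.
	 */
	static u8 hil_peek(unsigned long base, unsigned long off)
	{
		return readb((const volatile void __iomem *)(base + off));
	}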
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 1f9cd7d8b7ad..f5ae24865355 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
 	{ "ELAN0618", 0 },
+	{ "ELAN061D", 0 },
+	{ "ELAN0622", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b353d494ad40..136f6e7bf797 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
 		},
 	},
+	{
+		/* Lenovo LaVie Z */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index e9233db16e03..d564d21245c5 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -8,7 +8,7 @@ config ARM_GIC
 	bool
 	select IRQ_DOMAIN
 	select IRQ_DOMAIN_HIERARCHY
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
 config ARM_GIC_V3
 	bool
 	select IRQ_DOMAIN
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 	select IRQ_DOMAIN_HIERARCHY
 	select PARTITION_PERCPU
 	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
 config ARM_VIC
 	bool
 	select IRQ_DOMAIN
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 
 config ARM_VIC_NR
 	int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 	select SPARSE_IRQ
 
 config ATMEL_AIC5_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 	select SPARSE_IRQ
 
 config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
 config FARADAY_FTINTC010
 	bool
 	select IRQ_DOMAIN
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 	select SPARSE_IRQ
 
 config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
 	bool
 	depends on ARCH_CLPS711X
 	select IRQ_DOMAIN
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 	select SPARSE_IRQ
 	default y
 
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
 config ORION_IRQCHIP
 	bool
 	select IRQ_DOMAIN
-	select MULTI_IRQ_HANDLER
+	select GENERIC_IRQ_MULTI_HANDLER
 
 config PIC32_EVIC
 	bool
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 4eca5c763766..606efa64adff 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
 	 */
 	info->scratchpad[0].ul = mc_bus_dev->icid;
 	msi_info = msi_get_domain_info(msi_domain->parent);
+
+	/* Allocate at least 32 MSIs, and always as a power of 2 */
+	nvec = max_t(int, 32, roundup_pow_of_two(nvec));
 	return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
 }
 
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 25a98de5cfb2..8d6d009d1d58 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 {
 	struct pci_dev *pdev, *alias_dev;
 	struct msi_domain_info *msi_info;
-	int alias_count = 0;
+	int alias_count = 0, minnvec = 1;
 
 	if (!dev_is_pci(dev))
 		return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 	/* ITS specific DeviceID, as the core ITS ignores dev. */
 	info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
 
-	return msi_info->ops->msi_prepare(domain->parent,
-					  dev, max(nvec, alias_count), info);
+	/*
+	 * Always allocate a power of 2, and special case device 0 for
+	 * broken systems where the DevID is not wired (and all devices
+	 * appear as DevID 0). For that reason, we generously allocate a
+	 * minimum of 32 MSIs for DevID 0. If you want more because all
+	 * your devices are aliasing to DevID 0, consider fixing your HW.
+	 */
+	nvec = max(nvec, alias_count);
+	if (!info->scratchpad[0].ul)
+		minnvec = 32;
+	nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+	return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
 }
 
 static struct msi_domain_ops its_pci_msi_ops = {
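
The effect of the rounding is easiest to see with numbers: a request for 5 vectors from a normal device reserves roundup_pow_of_two(5) = 8 vectors, while any request from DevID 0 reserves at least 32. As a sketch of just the arithmetic (roundup_pow_of_two() and max_t() are the real kernel helpers):

	#include <linux/kernel.h>
	#include <linux/log2.h>

	/* e.g. nvec = 5, normal DevID -> 8; nvec = 5, DevID 0 -> 32 */
	static int its_round_nvec(int nvec, bool devid_is_zero)
	{
		int minnvec = devid_is_zero ? 32 : 1;

		return max_t(int, minnvec, roundup_pow_of_two(nvec));
	}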
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 8881a053c173..7b8e87b493fe 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
 	/* ITS specific DeviceID, as the core ITS ignores dev. */
 	info->scratchpad[0].ul = dev_id;
 
+	/* Allocate at least 32 MSIs, and always as a power of 2 */
+	nvec = max_t(int, 32, roundup_pow_of_two(nvec));
 	return msi_info->ops->msi_prepare(domain->parent,
 					  dev, nvec, info);
 }
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d7842d312d3e..316a57530f6d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -23,6 +23,8 @@
 #include <linux/dma-iommu.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
 #include <linux/log2.h>
 #include <linux/mm.h>
 #include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
 } vpe_proxy;
 
 static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
@@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
1421 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, 1423 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1422}; 1424};
1423 1425
1426
1424/* 1427/*
1425 * How we allocate LPIs: 1428 * How we allocate LPIs:
1426 * 1429 *
1427 * The GIC has id_bits bits for interrupt identifiers. From there, we 1430 * lpi_range_list contains ranges of LPIs that are to available to
1428 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as 1431 * allocate from. To allocate LPIs, just pick the first range that
1429 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 1432 * fits the required allocation, and reduce it by the required
1430 * bits to the right. 1433 * amount. Once empty, remove the range from the list.
1434 *
1435 * To free a range of LPIs, add a free range to the list, sort it and
1436 * merge the result if the new range happens to be adjacent to an
1437 * already free block.
1431 * 1438 *
1432 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 1439 * The consequence of the above is that allocation is cost is low, but
1440 * freeing is expensive. We assumes that freeing rarely occurs.
1433 */ 1441 */
1434#define IRQS_PER_CHUNK_SHIFT 5
1435#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
1436#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1437 1442
1438static unsigned long *lpi_bitmap; 1443static DEFINE_MUTEX(lpi_range_lock);
1439static u32 lpi_chunks; 1444static LIST_HEAD(lpi_range_list);
1440static DEFINE_SPINLOCK(lpi_lock); 1445
1446struct lpi_range {
1447 struct list_head entry;
1448 u32 base_id;
1449 u32 span;
1450};
1441 1451
1442static int its_lpi_to_chunk(int lpi) 1452static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1443{ 1453{
1444 return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; 1454 struct lpi_range *range;
1455
1456 range = kzalloc(sizeof(*range), GFP_KERNEL);
1457 if (range) {
1458 INIT_LIST_HEAD(&range->entry);
1459 range->base_id = base;
1460 range->span = span;
1461 }
1462
1463 return range;
1445} 1464}
1446 1465
1447static int its_chunk_to_lpi(int chunk) 1466static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
1448{ 1467{
1449 return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; 1468 struct lpi_range *ra, *rb;
1469
1470 ra = container_of(a, struct lpi_range, entry);
1471 rb = container_of(b, struct lpi_range, entry);
1472
1473 return rb->base_id - ra->base_id;
1450} 1474}
1451 1475
1452static int __init its_lpi_init(u32 id_bits) 1476static void merge_lpi_ranges(void)
1453{ 1477{
1454 lpi_chunks = its_lpi_to_chunk(1UL << id_bits); 1478 struct lpi_range *range, *tmp;
1455 1479
1456 lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long), 1480 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1457 GFP_KERNEL); 1481 if (!list_is_last(&range->entry, &lpi_range_list) &&
1458 if (!lpi_bitmap) { 1482 (tmp->base_id == (range->base_id + range->span))) {
1459 lpi_chunks = 0; 1483 tmp->base_id = range->base_id;
1460 return -ENOMEM; 1484 tmp->span += range->span;
1485 list_del(&range->entry);
1486 kfree(range);
1487 }
1461 } 1488 }
1489}
1462 1490
1463 pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); 1491static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1464 return 0; 1492{
1493 struct lpi_range *range, *tmp;
1494 int err = -ENOSPC;
1495
1496 mutex_lock(&lpi_range_lock);
1497
1498 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1499 if (range->span >= nr_lpis) {
1500 *base = range->base_id;
1501 range->base_id += nr_lpis;
1502 range->span -= nr_lpis;
1503
1504 if (range->span == 0) {
1505 list_del(&range->entry);
1506 kfree(range);
1507 }
1508
1509 err = 0;
1510 break;
1511 }
1512 }
1513
1514 mutex_unlock(&lpi_range_lock);
1515
1516 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1517 return err;
1465} 1518}
1466 1519
1467static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
1520static int free_lpi_range(u32 base, u32 nr_lpis)
1468{ 1521{
1469 unsigned long *bitmap = NULL;
1470 int chunk_id;
1471 int nr_chunks;
1472 int i;
1522 struct lpi_range *new;
1523 int err = 0;
1524
1525 mutex_lock(&lpi_range_lock);
1526
1527 new = mk_lpi_range(base, nr_lpis);
1528 if (!new) {
1529 err = -ENOMEM;
1530 goto out;
1531 }
1532
1533 list_add(&new->entry, &lpi_range_list);
1534 list_sort(NULL, &lpi_range_list, lpi_range_cmp);
1535 merge_lpi_ranges();
1536out:
1537 mutex_unlock(&lpi_range_lock);
1538 return err;
1539}
1540
1541static int __init its_lpi_init(u32 id_bits)
1542{
1543 u32 lpis = (1UL << id_bits) - 8192;
1544 u32 numlpis;
1545 int err;
1546
1547 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1548
1549 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1550 lpis = numlpis;
1551 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1552 lpis);
1553 }
1473 1554
1474 nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
1555 /*
1556 * Initializing the allocator is just the same as freeing the
1557 * full range of LPIs.
1558 */
1559 err = free_lpi_range(8192, lpis);
1560 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1561 return err;
1562}
1475 1563
1476 spin_lock(&lpi_lock);
1564static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1565{
1566 unsigned long *bitmap = NULL;
1567 int err = 0;
1477 1568
1478 do { 1569 do {
1479 chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
1480 0, nr_chunks, 0);
1481 if (chunk_id < lpi_chunks)
1570 err = alloc_lpi_range(nr_irqs, base);
1571 if (!err)
1482 break; 1572 break;
1483 1573
1484 nr_chunks--;
1574 nr_irqs /= 2;
1485 } while (nr_chunks > 0);
1575 } while (nr_irqs > 0);
1486 1576
1487 if (!nr_chunks)
1577 if (err)
1488 goto out; 1578 goto out;
1489 1579
1490 bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
1580 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
1491 sizeof(long),
1492 GFP_ATOMIC);
1493 if (!bitmap) 1581 if (!bitmap)
1494 goto out; 1582 goto out;
1495 1583
1496 for (i = 0; i < nr_chunks; i++)
1584 *nr_ids = nr_irqs;
1497 set_bit(chunk_id + i, lpi_bitmap);
1498
1499 *base = its_chunk_to_lpi(chunk_id);
1500 *nr_ids = nr_chunks * IRQS_PER_CHUNK;
1501 1585
1502out: 1586out:
1503 spin_unlock(&lpi_lock);
1504
1505 if (!bitmap) 1587 if (!bitmap)
1506 *base = *nr_ids = 0; 1588 *base = *nr_ids = 0;
1507 1589
1508 return bitmap; 1590 return bitmap;
1509} 1591}
1510 1592
1511static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
1593static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1512{ 1594{
1513 int lpi;
1595 WARN_ON(free_lpi_range(base, nr_ids));
1514
1515 spin_lock(&lpi_lock);
1516
1517 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
1518 int chunk = its_lpi_to_chunk(lpi);
1519
1520 BUG_ON(chunk > lpi_chunks);
1521 if (test_bit(chunk, lpi_bitmap)) {
1522 clear_bit(chunk, lpi_bitmap);
1523 } else {
1524 pr_err("Bad LPI chunk %d\n", chunk);
1525 }
1526 }
1527
1528 spin_unlock(&lpi_lock);
1529
1530 kfree(bitmap); 1596 kfree(bitmap);
1531} 1597}
1532 1598
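The comment block above specifies the replacement allocator completely: allocation is first-fit from a list of free ranges, and freeing re-inserts a range, sorts, and merges neighbours. A minimal userspace sketch of the same scheme, with an array plus qsort() standing in for the kernel's list and list_sort(); all names below are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy model of the ITS LPI range allocator: cheap first-fit
 * allocation, expensive sort-and-merge free. */
struct range { unsigned int base, span; };

static struct range pool[32];
static int nranges;

static int cmp(const void *a, const void *b)
{
	const struct range *ra = a, *rb = b;

	return (ra->base > rb->base) - (ra->base < rb->base);
}

/* Allocation: pick the first range that fits and shrink it. */
static int alloc_range(unsigned int span, unsigned int *base)
{
	for (int i = 0; i < nranges; i++) {
		if (pool[i].span >= span) {
			*base = pool[i].base;
			pool[i].base += span;
			pool[i].span -= span;
			if (!pool[i].span)
				pool[i] = pool[--nranges];
			return 0;
		}
	}
	return -1; /* -ENOSPC in the kernel */
}

/* Free: append the range, sort, merge adjacent blocks. */
static void free_range(unsigned int base, unsigned int span)
{
	pool[nranges].base = base;
	pool[nranges++].span = span;
	qsort(pool, nranges, sizeof(pool[0]), cmp);
	for (int i = 0; i + 1 < nranges; ) {
		if (pool[i].base + pool[i].span == pool[i + 1].base) {
			pool[i].span += pool[i + 1].span;
			memmove(&pool[i + 1], &pool[i + 2],
				(--nranges - (i + 1)) * sizeof(pool[0]));
		} else {
			i++;
		}
	}
}

int main(void)
{
	unsigned int base;

	free_range(8192, 65536);	/* init is just one big free */
	alloc_range(32, &base);		/* first fit: base == 8192 */
	free_range(base, 32);		/* merges back into one range */
	printf("base %u, ranges left %d\n", base, nranges);
	return 0;
}

The asymmetry matches the comment's claim: alloc_range() is a single scan, while free_range() pays for the sort, which is acceptable if freeing is rare.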
@@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
1559{ 1625{
1560 phys_addr_t paddr; 1626 phys_addr_t paddr;
1561 1627
1562 lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
1628 lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
1563 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); 1629 gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
1564 if (!gic_rdists->prop_page) { 1630 if (!gic_rdists->prop_page) {
1565 pr_err("Failed to allocate PROPBASE\n"); 1631 pr_err("Failed to allocate PROPBASE\n");
@@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void)
1997{ 2063{
1998 struct its_node *its; 2064 struct its_node *its;
1999 2065
2000 spin_lock(&its_lock);
2066 raw_spin_lock(&its_lock);
2001 2067
2002 list_for_each_entry(its, &its_nodes, entry) 2068 list_for_each_entry(its, &its_nodes, entry)
2003 its_cpu_init_collection(its); 2069 its_cpu_init_collection(its);
2004 2070
2005 spin_unlock(&its_lock);
2071 raw_spin_unlock(&its_lock);
2006} 2072}
2007 2073
2008static struct its_device *its_find_device(struct its_node *its, u32 dev_id) 2074static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2134 if (!its_alloc_device_table(its, dev_id)) 2200 if (!its_alloc_device_table(its, dev_id))
2135 return NULL; 2201 return NULL;
2136 2202
2203 if (WARN_ON(!is_power_of_2(nvecs)))
2204 nvecs = roundup_pow_of_two(nvecs);
2205
2137 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2206 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2138 /* 2207 /*
2139 * We allocate at least one chunk worth of LPIs bet device,
2140 * and thus that many ITEs. The device may require less though.
2208 * Even if the device wants a single LPI, the ITT must be
2209 * sized as a power of two (and you need at least one bit...).
2141 */ 2210 */
2142 nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
2211 nr_ites = max(2, nvecs);
2143 sz = nr_ites * its->ite_size; 2212 sz = nr_ites * its->ite_size;
2144 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 2213 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2145 itt = kzalloc(sz, GFP_KERNEL); 2214 itt = kzalloc(sz, GFP_KERNEL);
2146 if (alloc_lpis) { 2215 if (alloc_lpis) {
2147 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
2216 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2148 if (lpi_map) 2217 if (lpi_map)
2149 col_map = kcalloc(nr_lpis, sizeof(*col_map), 2218 col_map = kcalloc(nr_lpis, sizeof(*col_map),
2150 GFP_KERNEL); 2219 GFP_KERNEL);
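The nvecs rounding plus the nr_ites floor above can be checked with plain arithmetic. A sketch of the sizing computation; the 256-byte alignment constant mirrors ITS_ITT_ALIGN but is an assumption here:

#include <stdio.h>

/* Sketch of the ITT sizing arithmetic. ITT_ALIGN is assumed to
 * be 256 bytes, standing in for ITS_ITT_ALIGN. */
#define ITT_ALIGN 256

static unsigned long roundup_p2(unsigned long v)
{
	unsigned long p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

static unsigned long itt_bytes(unsigned int nvecs, unsigned int ite_size)
{
	unsigned long nr_ites, sz;

	nvecs = roundup_p2(nvecs);          /* allocator wants 2^n LPIs */
	nr_ites = nvecs > 2 ? nvecs : 2;    /* at least one EventID bit */
	sz = nr_ites * ite_size;
	sz = (sz > ITT_ALIGN ? sz : ITT_ALIGN) + ITT_ALIGN - 1;
	return sz;
}

int main(void)
{
	/* a device asking for one vector, with 8-byte ITEs */
	printf("ITT allocation: %lu bytes\n", itt_bytes(1, 8));
	return 0;
}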
@@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2379 /* If all interrupts have been freed, start mopping the floor */ 2448 /* If all interrupts have been freed, start mopping the floor */
2380 if (bitmap_empty(its_dev->event_map.lpi_map, 2449 if (bitmap_empty(its_dev->event_map.lpi_map,
2381 its_dev->event_map.nr_lpis)) { 2450 its_dev->event_map.nr_lpis)) {
2382 its_lpi_free_chunks(its_dev->event_map.lpi_map,
2451 its_lpi_free(its_dev->event_map.lpi_map,
2383 its_dev->event_map.lpi_base, 2452 its_dev->event_map.lpi_base,
2384 its_dev->event_map.nr_lpis); 2453 its_dev->event_map.nr_lpis);
2385 kfree(its_dev->event_map.col_map); 2454 kfree(its_dev->event_map.col_map);
2386 2455
2387 /* Unmap device/itt */ 2456 /* Unmap device/itt */
@@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
2780 } 2849 }
2781 2850
2782 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { 2851 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2783 its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2852 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2784 its_free_prop_table(vm->vprop_page); 2853 its_free_prop_table(vm->vprop_page);
2785 } 2854 }
2786} 2855}
@@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
2795 2864
2796 BUG_ON(!vm); 2865 BUG_ON(!vm);
2797 2866
2798 bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
2867 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
2799 if (!bitmap) 2868 if (!bitmap)
2800 return -ENOMEM; 2869 return -ENOMEM;
2801 2870
2802 if (nr_ids < nr_irqs) { 2871 if (nr_ids < nr_irqs) {
2803 its_lpi_free_chunks(bitmap, base, nr_ids);
2872 its_lpi_free(bitmap, base, nr_ids);
2804 return -ENOMEM; 2873 return -ENOMEM;
2805 } 2874 }
2806 2875
2807 vprop_page = its_allocate_prop_table(GFP_KERNEL); 2876 vprop_page = its_allocate_prop_table(GFP_KERNEL);
2808 if (!vprop_page) { 2877 if (!vprop_page) {
2809 its_lpi_free_chunks(bitmap, base, nr_ids);
2878 its_lpi_free(bitmap, base, nr_ids);
2810 return -ENOMEM; 2879 return -ENOMEM;
2811 } 2880 }
2812 2881
@@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
2833 if (i > 0) 2902 if (i > 0)
2834 its_vpe_irq_domain_free(domain, virq, i - 1); 2903 its_vpe_irq_domain_free(domain, virq, i - 1);
2835 2904
2836 its_lpi_free_chunks(bitmap, base, nr_ids);
2905 its_lpi_free(bitmap, base, nr_ids);
2837 its_free_prop_table(vprop_page); 2906 its_free_prop_table(vprop_page);
2838 } 2907 }
2839 2908
@@ -3070,7 +3139,7 @@ static int its_save_disable(void)
3070 struct its_node *its; 3139 struct its_node *its;
3071 int err = 0; 3140 int err = 0;
3072 3141
3073 spin_lock(&its_lock);
3142 raw_spin_lock(&its_lock);
3074 list_for_each_entry(its, &its_nodes, entry) { 3143 list_for_each_entry(its, &its_nodes, entry) {
3075 void __iomem *base; 3144 void __iomem *base;
3076 3145
@@ -3102,7 +3171,7 @@ err:
3102 writel_relaxed(its->ctlr_save, base + GITS_CTLR); 3171 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3103 } 3172 }
3104 } 3173 }
3105 spin_unlock(&its_lock);
3174 raw_spin_unlock(&its_lock);
3106 3175
3107 return err; 3176 return err;
3108} 3177}
@@ -3112,7 +3181,7 @@ static void its_restore_enable(void)
3112 struct its_node *its; 3181 struct its_node *its;
3113 int ret; 3182 int ret;
3114 3183
3115 spin_lock(&its_lock);
3184 raw_spin_lock(&its_lock);
3116 list_for_each_entry(its, &its_nodes, entry) { 3185 list_for_each_entry(its, &its_nodes, entry) {
3117 void __iomem *base; 3186 void __iomem *base;
3118 int i; 3187 int i;
@@ -3164,7 +3233,7 @@ static void its_restore_enable(void)
3164 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) 3233 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3165 its_cpu_init_collection(its); 3234 its_cpu_init_collection(its);
3166 } 3235 }
3167 spin_unlock(&its_lock);
3236 raw_spin_unlock(&its_lock);
3168} 3237}
3169 3238
3170static struct syscore_ops its_syscore_ops = { 3239static struct syscore_ops its_syscore_ops = {
@@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
3398 if (err) 3467 if (err)
3399 goto out_free_tables; 3468 goto out_free_tables;
3400 3469
3401 spin_lock(&its_lock);
3470 raw_spin_lock(&its_lock);
3402 list_add(&its->entry, &its_nodes); 3471 list_add(&its->entry, &its_nodes);
3403 spin_unlock(&its_lock);
3472 raw_spin_unlock(&its_lock);
3404 3473
3405 return 0; 3474 return 0;
3406 3475
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 76ea56d779a1..e214181b77b7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
877 .flags = IRQCHIP_SET_TYPE_MASKED, 877 .flags = IRQCHIP_SET_TYPE_MASKED,
878}; 878};
879 879
880#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
880#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
881 881
882static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 882static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
883 irq_hw_number_t hw) 883 irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
1091 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) 1091 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
1092 */ 1092 */
1093 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); 1093 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
1094 gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
1094 gic_data.rdists.gicd_typer = typer;
1095 gic_irqs = GICD_TYPER_IRQS(typer); 1095 gic_irqs = GICD_TYPER_IRQS(typer);
1096 if (gic_irqs > 1020) 1096 if (gic_irqs > 1020)
1097 gic_irqs = 1020; 1097 gic_irqs = 1020;
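Storing the raw GICD_TYPER value lets both files share one derivation via GICD_TYPER_ID_BITS(). Assuming the usual GICv3 layout, where TYPER[23:19] holds the number of interrupt-ID bits minus one, a sketch of the computation (bit positions are an assumption here, not quoted from the header):

#include <stdio.h>
#include <stdint.h>

/* Assumed GICv3 encoding: IDbits lives in TYPER[23:19],
 * stored as "number of bits minus one". */
#define TYPER_ID_BITS(typer)	((((typer) >> 19) & 0x1f) + 1)

int main(void)
{
	uint32_t typer = 0x9u << 19;	/* field value 9 -> 10 ID bits */

	printf("ID bits: %u, GIC_ID_NR: %u\n",
	       TYPER_ID_BITS(typer), 1u << TYPER_ID_BITS(typer));
	return 0;
}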
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index fc5953dea509..2ff08986b536 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
165 return ingenic_intc_of_init(node, 1); 165 return ingenic_intc_of_init(node, 1);
166} 166}
167IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init); 167IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
168IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
168 169
169static int __init intc_2chip_of_init(struct device_node *node, 170static int __init intc_2chip_of_init(struct device_node *node,
170 struct device_node *parent) 171 struct device_node *parent)
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 3a7e8905a97e..3df527fcf4e1 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
159}; 159};
160 160
161static const struct stm32_desc_irq stm32mp1_desc_irq[] = { 161static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
162 { .exti = 0, .irq_parent = 6 },
162 { .exti = 1, .irq_parent = 7 }, 163 { .exti = 1, .irq_parent = 7 },
163 { .exti = 2, .irq_parent = 8 }, 164 { .exti = 2, .irq_parent = 8 },
164 { .exti = 3, .irq_parent = 9 }, 165 { .exti = 3, .irq_parent = 9 },
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index edb35a5c57ea..a99fc0ced7a7 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -728,9 +728,6 @@ EXPORT_SYMBOL_GPL(vsp1_du_setup_lif);
728 */ 728 */
729void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index) 729void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index)
730{ 730{
731 struct vsp1_device *vsp1 = dev_get_drvdata(dev);
732
733 mutex_lock(&vsp1->drm->lock);
734} 731}
735EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); 732EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
736 733
@@ -846,6 +843,7 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
846 843
847 drm_pipe->crc = cfg->crc; 844 drm_pipe->crc = cfg->crc;
848 845
846 mutex_lock(&vsp1->drm->lock);
849 vsp1_du_pipeline_setup_inputs(vsp1, pipe); 847 vsp1_du_pipeline_setup_inputs(vsp1, pipe);
850 vsp1_du_pipeline_configure(pipe); 848 vsp1_du_pipeline_configure(pipe);
851 mutex_unlock(&vsp1->drm->lock); 849 mutex_unlock(&vsp1->drm->lock);
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index fcfab6635f9c..81b150e5dfdb 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -174,6 +174,7 @@ static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
174 174
175 rcu_assign_pointer(raw->progs, new_array); 175 rcu_assign_pointer(raw->progs, new_array);
176 bpf_prog_array_free(old_array); 176 bpf_prog_array_free(old_array);
177 bpf_prog_put(prog);
177unlock: 178unlock:
178 mutex_unlock(&ir_raw_handler_lock); 179 mutex_unlock(&ir_raw_handler_lock);
179 return ret; 180 return ret;
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 2e0066b1a31c..e7948908e78c 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,13 +30,13 @@ static int ir_raw_event_thread(void *data)
30 while (kfifo_out(&raw->kfifo, &ev, 1)) { 30 while (kfifo_out(&raw->kfifo, &ev, 1)) {
31 if (is_timing_event(ev)) { 31 if (is_timing_event(ev)) {
32 if (ev.duration == 0) 32 if (ev.duration == 0)
33 dev_err(&dev->dev, "nonsensical timing event of duration 0"); 33 dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
34 if (is_timing_event(raw->prev_ev) && 34 if (is_timing_event(raw->prev_ev) &&
35 !is_transition(&ev, &raw->prev_ev)) 35 !is_transition(&ev, &raw->prev_ev))
36 dev_err(&dev->dev, "two consecutive events of type %s", 36 dev_warn_once(&dev->dev, "two consecutive events of type %s",
37 TO_STR(ev.pulse)); 37 TO_STR(ev.pulse));
38 if (raw->prev_ev.reset && ev.pulse == 0) 38 if (raw->prev_ev.reset && ev.pulse == 0)
39 dev_err(&dev->dev, "timing event after reset should be pulse"); 39 dev_warn_once(&dev->dev, "timing event after reset should be pulse");
40 } 40 }
41 list_for_each_entry(handler, &ir_raw_handler_list, list) 41 list_for_each_entry(handler, &ir_raw_handler_list, list)
42 if (dev->enabled_protocols & 42 if (dev->enabled_protocols &
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 2e222d9ee01f..ca68e1d2b2f9 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -679,6 +679,14 @@ static void ir_timer_repeat(struct timer_list *t)
679 spin_unlock_irqrestore(&dev->keylock, flags); 679 spin_unlock_irqrestore(&dev->keylock, flags);
680} 680}
681 681
682static unsigned int repeat_period(int protocol)
683{
684 if (protocol >= ARRAY_SIZE(protocols))
685 return 100;
686
687 return protocols[protocol].repeat_period;
688}
689
682/** 690/**
683 * rc_repeat() - signals that a key is still pressed 691 * rc_repeat() - signals that a key is still pressed
684 * @dev: the struct rc_dev descriptor of the device 692 * @dev: the struct rc_dev descriptor of the device
@@ -691,7 +699,7 @@ void rc_repeat(struct rc_dev *dev)
691{ 699{
692 unsigned long flags; 700 unsigned long flags;
693 unsigned int timeout = nsecs_to_jiffies(dev->timeout) + 701 unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
694 msecs_to_jiffies(protocols[dev->last_protocol].repeat_period);
702 msecs_to_jiffies(repeat_period(dev->last_protocol));
695 struct lirc_scancode sc = { 703 struct lirc_scancode sc = {
696 .scancode = dev->last_scancode, .rc_proto = dev->last_protocol, 704 .scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
697 .keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED, 705 .keycode = dev->keypressed ? dev->last_keycode : KEY_RESERVED,
@@ -803,7 +811,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
803 811
804 if (dev->keypressed) { 812 if (dev->keypressed) {
805 dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) + 813 dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
806 msecs_to_jiffies(protocols[protocol].repeat_period);
814 msecs_to_jiffies(repeat_period(protocol));
807 mod_timer(&dev->timer_keyup, dev->keyup_jiffies); 815 mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
808 } 816 }
809 spin_unlock_irqrestore(&dev->keylock, flags); 817 spin_unlock_irqrestore(&dev->keylock, flags);
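The new repeat_period() helper is a plain bounds-checked table lookup: any protocol value outside the table falls back to a default instead of indexing past the end. A standalone sketch of the pattern, with invented table contents (the extra negative-index guard is added caution, not part of the patch):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative per-protocol repeat periods in ms. */
static const unsigned int periods_ms[] = { 250, 114, 114, 108 };

static unsigned int repeat_period(int protocol)
{
	if (protocol < 0 || (unsigned int)protocol >= ARRAY_SIZE(periods_ms))
		return 100;	/* default, as in the patch */
	return periods_ms[protocol];
}

int main(void)
{
	/* in-range lookup vs. out-of-range fallback */
	printf("%u %u\n", repeat_period(1), repeat_period(42));
	return 0;
}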
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 75f781c11e89..de4e6e5bf304 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
293 int i; 293 int i;
294 294
295 for_each_sg(data->sg, sg, data->sg_len, i) { 295 for_each_sg(data->sg, sg, data->sg_len, i) {
296 void *buf = kmap_atomic(sg_page(sg) + sg->offset;
296 void *buf = kmap_atomic(sg_page(sg) + sg->offset);
297 buffer_swap32(buf, sg->length); 297 buffer_swap32(buf, sg->length);
298 kunmap_atomic(buf); 298 kunmap_atomic(buf);
299 }
299} 300}
300#else 301#else
301static inline void mxcmci_swap_buffers(struct mmc_data *data) {} 302static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 63e3844c5bec..217b790d22ed 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1717 goto err_upper_unlink; 1717 goto err_upper_unlink;
1718 } 1718 }
1719 1719
1720 bond->nest_level = dev_get_nest_level(bond_dev) + 1;
1721
1720 /* If the mode uses primary, then the following is handled by 1722 /* If the mode uses primary, then the following is handled by
1721 * bond_change_active_slave(). 1723 * bond_change_active_slave().
1722 */ 1724 */
@@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1764 if (bond_mode_can_use_xmit_hash(bond)) 1766 if (bond_mode_can_use_xmit_hash(bond))
1765 bond_update_slave_arr(bond, NULL); 1767 bond_update_slave_arr(bond, NULL);
1766 1768
1767 bond->nest_level = dev_get_nest_level(bond_dev);
1768 1769
1769 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n", 1770 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
1770 slave_dev->name, 1771 slave_dev->name,
@@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3415 } 3416 }
3416} 3417}
3417 3418
3419static int bond_get_nest_level(struct net_device *bond_dev)
3420{
3421 struct bonding *bond = netdev_priv(bond_dev);
3422
3423 return bond->nest_level;
3424}
3425
3418static void bond_get_stats(struct net_device *bond_dev, 3426static void bond_get_stats(struct net_device *bond_dev,
3419 struct rtnl_link_stats64 *stats) 3427 struct rtnl_link_stats64 *stats)
3420{ 3428{
@@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev,
3423 struct list_head *iter; 3431 struct list_head *iter;
3424 struct slave *slave; 3432 struct slave *slave;
3425 3433
3426 spin_lock(&bond->stats_lock);
3434 spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
3427 memcpy(stats, &bond->bond_stats, sizeof(*stats)); 3435 memcpy(stats, &bond->bond_stats, sizeof(*stats));
3428 3436
3429 rcu_read_lock(); 3437 rcu_read_lock();
@@ -4227,6 +4235,7 @@ static const struct net_device_ops bond_netdev_ops = {
4227 .ndo_neigh_setup = bond_neigh_setup, 4235 .ndo_neigh_setup = bond_neigh_setup,
4228 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4236 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4229 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4237 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4238 .ndo_get_lock_subclass = bond_get_nest_level,
4230#ifdef CONFIG_NET_POLL_CONTROLLER 4239#ifdef CONFIG_NET_POLL_CONTROLLER
4231 .ndo_netpoll_setup = bond_netpoll_setup, 4240 .ndo_netpoll_setup = bond_netpoll_setup,
4232 .ndo_netpoll_cleanup = bond_netpoll_cleanup, 4241 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
@@ -4725,6 +4734,7 @@ static int bond_init(struct net_device *bond_dev)
4725 if (!bond->wq) 4734 if (!bond->wq)
4726 return -ENOMEM; 4735 return -ENOMEM;
4727 4736
4737 bond->nest_level = SINGLE_DEPTH_NESTING;
4728 netdev_lockdep_set_classes(bond_dev); 4738 netdev_lockdep_set_classes(bond_dev);
4729 4739
4730 list_add_tail(&bond->bond_list, &bn->dev_list); 4740 list_add_tail(&bond->bond_list, &bn->dev_list);
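The nest_level plumbing exists so that stacked bonds (a bond enslaved to another bond) do not look like recursive locking to lockdep: each level passes its nesting depth as the lock subclass. A userspace model of the stacked stats walk; the kernel annotation is spin_lock_nested(), modelled here with ordinary mutexes and the subclass noted in comments:

#include <pthread.h>
#include <stdio.h>

/* Two stacked bonds: upper's stats lock is taken, then the slave
 * bond's. Both locks share one lock class in the kernel, so without
 * spin_lock_nested(&bond->stats_lock, nest_level) lockdep would
 * report this as recursive locking. */
struct bond {
	pthread_mutex_t stats_lock;
	int nest_level;		/* dev_get_nest_level() + 1 in the patch */
	struct bond *slave;	/* another bond, or NULL */
};

static void get_stats(struct bond *b)
{
	pthread_mutex_lock(&b->stats_lock);	/* _nested(..., nest_level) */
	if (b->slave)
		get_stats(b->slave);		/* one level deeper */
	pthread_mutex_unlock(&b->stats_lock);
}

int main(void)
{
	struct bond lower = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };
	struct bond upper = { PTHREAD_MUTEX_INITIALIZER, 2, &lower };

	get_stats(&upper);
	puts("stacked stats walk completed");
	return 0;
}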
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 98663c50ded0..4d5d01cb8141 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
743static int bond_option_mode_set(struct bonding *bond, 743static int bond_option_mode_set(struct bonding *bond,
744 const struct bond_opt_value *newval) 744 const struct bond_opt_value *newval)
745{ 745{
746 if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
747 netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
748 newval->string);
749 /* disable arp monitoring */
750 bond->params.arp_interval = 0;
751 /* set miimon to default value */
752 bond->params.miimon = BOND_DEFAULT_MIIMON;
753 netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
754 bond->params.miimon);
746 if (!bond_mode_uses_arp(newval->value)) {
747 if (bond->params.arp_interval) {
748 netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
749 newval->string);
750 /* disable arp monitoring */
751 bond->params.arp_interval = 0;
752 }
753
754 if (!bond->params.miimon) {
755 /* set miimon to default value */
756 bond->params.miimon = BOND_DEFAULT_MIIMON;
757 netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
758 bond->params.miimon);
759 }
755 } 760 }
756 761
757 if (newval->value == BOND_MODE_ALB) 762 if (newval->value == BOND_MODE_ALB)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b397a33f3d32..9b449400376b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
634 int err; 634 int err;
635 635
636 err = pm_runtime_get_sync(priv->device); 636 err = pm_runtime_get_sync(priv->device);
637 if (err)
637 if (err < 0) {
638 pm_runtime_put_noidle(priv->device); 638 pm_runtime_put_noidle(priv->device);
639 return err;
640 }
639 641
640 return err;
642 return 0;
641} 643}
642 644
643static void m_can_clk_stop(struct m_can_priv *priv) 645static void m_can_clk_stop(struct m_can_priv *priv)
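The fix encodes two properties of pm_runtime_get_sync(): it can return a positive value on success, so testing plain `if (err)` is wrong, and it bumps the usage count even on failure, so the error path must drop that reference. A userspace model of the contract (the fake_* helpers are stand-ins for the PM API, not the real thing):

#include <stdio.h>

static int usage_count;
static int resume_should_fail;

/* Models pm_runtime_get_sync(): the usage count is raised
 * unconditionally; positive return values are not errors. */
static int fake_pm_runtime_get_sync(void)
{
	usage_count++;
	if (resume_should_fail)
		return -13;		/* resume failed */
	return usage_count > 1;		/* 1 if already active, else 0 */
}

static void fake_pm_runtime_put_noidle(void)
{
	usage_count--;
}

static int clk_start(void)
{
	int err = fake_pm_runtime_get_sync();

	if (err < 0) {			/* NOT "if (err)": 1 is success */
		fake_pm_runtime_put_noidle();
		return err;
	}
	return 0;
}

int main(void)
{
	int ret = clk_start();

	printf("ok start:  %d (count %d)\n", ret, usage_count);
	resume_should_fail = 1;
	ret = clk_start();
	printf("bad start: %d (count %d)\n", ret, usage_count);
	return 0;
}

After the failed start the count is back to the single held reference, which is exactly what the added pm_runtime_put_noidle() guarantees.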
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
1109 1111
1110 } else { 1112 } else {
1111 /* Version 3.1.x or 3.2.x */ 1113 /* Version 3.1.x or 3.2.x */
1112 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
1114 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
1115 CCCR_NISO);
1113 1116
1114 /* Only 3.2.x has NISO Bit implemented */ 1117 /* Only 3.2.x has NISO Bit implemented */
1115 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 1118 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
1642 priv->can.clock.freq = clk_get_rate(cclk); 1645 priv->can.clock.freq = clk_get_rate(cclk);
1643 priv->mram_base = mram_addr; 1646 priv->mram_base = mram_addr;
1644 1647
1645 m_can_of_parse_mram(priv, mram_config_vals);
1646
1647 platform_set_drvdata(pdev, dev); 1648 platform_set_drvdata(pdev, dev);
1648 SET_NETDEV_DEV(dev, &pdev->dev); 1649 SET_NETDEV_DEV(dev, &pdev->dev);
1649 1650
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
1666 goto clk_disable; 1667 goto clk_disable;
1667 } 1668 }
1668 1669
1670 m_can_of_parse_mram(priv, mram_config_vals);
1671
1669 devm_can_led_init(dev); 1672 devm_can_led_init(dev);
1670 1673
1671 of_can_transceiver(dev); 1674 of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
1687 return ret; 1690 return ret;
1688} 1691}
1689 1692
1690/* TODO: runtime PM with power down or sleep mode */
1691
1692static __maybe_unused int m_can_suspend(struct device *dev) 1693static __maybe_unused int m_can_suspend(struct device *dev)
1693{ 1694{
1694 struct net_device *ndev = dev_get_drvdata(dev); 1695 struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
1715 1716
1716 pinctrl_pm_select_default_state(dev); 1717 pinctrl_pm_select_default_state(dev);
1717 1718
1718 m_can_init_ram(priv);
1719
1720 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1719 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1721 1720
1722 if (netif_running(ndev)) { 1721 if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
1726 if (ret) 1725 if (ret)
1727 return ret; 1726 return ret;
1728 1727
1728 m_can_init_ram(priv);
1729 m_can_start(ndev); 1729 m_can_start(ndev);
1730 netif_device_attach(ndev); 1730 netif_device_attach(ndev);
1731 netif_start_queue(ndev); 1731 netif_start_queue(ndev);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
86 return 0; 86 return 0;
87 } 87 }
88 cdm = of_iomap(np_cdm, 0); 88 cdm = of_iomap(np_cdm, 0);
89 if (!cdm) {
90 of_node_put(np_cdm);
91 dev_err(&ofdev->dev, "can't map clock node!\n");
92 return 0;
93 }
89 94
90 if (in_8(&cdm->ipb_clk_sel) & 0x1) 95 if (in_8(&cdm->ipb_clk_sel) & 0x1)
91 freq *= 2; 96 freq *= 2;
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b9e28578bc7b..455a3797a200 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
58#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ 58#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
59#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ 59#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
60 60
61#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
62 ((u32)(y) << 16) | \
63 ((u32)(z) << 8))
64
61/* System Control Registers Bits */ 65/* System Control Registers Bits */
62#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ 66#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
63#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ 67#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
782 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, 786 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
783 hw_ver_major, hw_ver_minor, hw_ver_sub); 787 hw_ver_major, hw_ver_minor, hw_ver_sub);
784 788
789#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
790 /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
791 * 64-bit logical addresses: this workaround forces usage of 32-bit
792 * DMA addresses only when such a fw is detected.
793 */
794 if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
795 PCIEFD_FW_VERSION(3, 3, 0)) {
796 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
797 if (err)
798 dev_warn(&pdev->dev,
799 "warning: can't set DMA mask %llxh (err %d)\n",
800 DMA_BIT_MASK(32), err);
801 }
802#endif
803
785 /* stop system clock */ 804 /* stop system clock */
786 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, 805 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
787 PCIEFD_REG_SYS_CTL_CLR); 806 PCIEFD_REG_SYS_CTL_CLR);
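PCIEFD_FW_VERSION() works because major/minor/sub land in descending byte positions, so packed values order the same way as the dotted versions they encode, as long as each component fits in one byte. A small standalone check:

#include <stdio.h>
#include <stdint.h>

/* Same packing trick as the macro above: x.y.z compares
 * numerically once each component occupies its own byte. */
#define FW_VERSION(x, y, z) (((uint32_t)(x) << 24) | \
			     ((uint32_t)(y) << 16) | \
			     ((uint32_t)(z) << 8))

int main(void)
{
	/* e.g. a v3.2.9 FPGA needs the 32-bit DMA workaround */
	printf("3.2.9 < 3.3.0: %d\n",
	       FW_VERSION(3, 2, 9) < FW_VERSION(3, 3, 0));
	printf("3.3.1 < 3.3.0: %d\n",
	       FW_VERSION(3, 3, 1) < FW_VERSION(3, 3, 0));
	return 0;
}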
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 12ff0020ecd6..b7dfd4109d24 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
1072 usb_free_urb(dev->intr_urb); 1072 usb_free_urb(dev->intr_urb);
1073 1073
1074 kfree(dev->intr_in_buffer); 1074 kfree(dev->intr_in_buffer);
1075 kfree(dev->tx_msg_buffer);
1075 } 1076 }
1076} 1077}
1077 1078
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 89aec07c225f..5a24039733ef 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright (C) 2012 - 2014 Xilinx, Inc. 3 * Copyright (C) 2012 - 2014 Xilinx, Inc.
4 * Copyright (C) 2009 PetaLogix. All rights reserved. 4 * Copyright (C) 2009 PetaLogix. All rights reserved.
5 * Copyright (C) 2017 Sandvik Mining and Construction Oy
5 * 6 *
6 * Description: 7 * Description:
7 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. 8 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/netdevice.h> 27#include <linux/netdevice.h>
27#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/of_device.h>
28#include <linux/platform_device.h> 30#include <linux/platform_device.h>
29#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
30#include <linux/string.h> 33#include <linux/string.h>
31#include <linux/types.h> 34#include <linux/types.h>
32#include <linux/can/dev.h> 35#include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
101#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ 104#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
102 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ 105 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
103 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ 106 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
104 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
107 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
105 108
106/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ 109/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
107#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ 110#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
118/** 121/**
119 * struct xcan_priv - This definition define CAN driver instance 122 * struct xcan_priv - This definition define CAN driver instance
120 * @can: CAN private data structure. 123 * @can: CAN private data structure.
124 * @tx_lock: Lock for synchronizing TX interrupt handling
121 * @tx_head: Tx CAN packets ready to send on the queue 125 * @tx_head: Tx CAN packets ready to send on the queue
122 * @tx_tail: Tx CAN packets successfully sended on the queue 126 * @tx_tail: Tx CAN packets successfully sended on the queue
123 * @tx_max: Maximum number packets the driver can send 127 * @tx_max: Maximum number packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
132 */ 136 */
133struct xcan_priv { 137struct xcan_priv {
134 struct can_priv can; 138 struct can_priv can;
139 spinlock_t tx_lock;
135 unsigned int tx_head; 140 unsigned int tx_head;
136 unsigned int tx_tail; 141 unsigned int tx_tail;
137 unsigned int tx_max; 142 unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
159 .brp_inc = 1, 164 .brp_inc = 1,
160}; 165};
161 166
167#define XCAN_CAP_WATERMARK 0x0001
168struct xcan_devtype_data {
169 unsigned int caps;
170};
171
162/** 172/**
163 * xcan_write_reg_le - Write a value to the device register little endian 173 * xcan_write_reg_le - Write a value to the device register little endian
164 * @priv: Driver private data structure 174 * @priv: Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
238 usleep_range(500, 10000); 248 usleep_range(500, 10000);
239 } 249 }
240 250
251 /* reset clears FIFOs */
252 priv->tx_head = 0;
253 priv->tx_tail = 0;
254
241 return 0; 255 return 0;
242} 256}
243 257
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
392 struct net_device_stats *stats = &ndev->stats; 406 struct net_device_stats *stats = &ndev->stats;
393 struct can_frame *cf = (struct can_frame *)skb->data; 407 struct can_frame *cf = (struct can_frame *)skb->data;
394 u32 id, dlc, data[2] = {0, 0}; 408 u32 id, dlc, data[2] = {0, 0};
409 unsigned long flags;
395 410
396 if (can_dropped_invalid_skb(ndev, skb)) 411 if (can_dropped_invalid_skb(ndev, skb))
397 return NETDEV_TX_OK; 412 return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
439 data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); 454 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
440 455
441 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 456 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
457
458 spin_lock_irqsave(&priv->tx_lock, flags);
459
442 priv->tx_head++; 460 priv->tx_head++;
443 461
444 /* Write the Frame to Xilinx CAN TX FIFO */ 462 /* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
454 stats->tx_bytes += cf->can_dlc; 472 stats->tx_bytes += cf->can_dlc;
455 } 473 }
456 474
475 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
476 if (priv->tx_max > 1)
477 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
478
457 /* Check if the TX buffer is full */ 479 /* Check if the TX buffer is full */
458 if ((priv->tx_head - priv->tx_tail) == priv->tx_max) 480 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
459 netif_stop_queue(ndev); 481 netif_stop_queue(ndev);
460 482
483 spin_unlock_irqrestore(&priv->tx_lock, flags);
484
461 return NETDEV_TX_OK; 485 return NETDEV_TX_OK;
462} 486}
463 487
@@ -530,6 +554,123 @@ static int xcan_rx(struct net_device *ndev)
530} 554}
531 555
532/** 556/**
557 * xcan_current_error_state - Get current error state from HW
558 * @ndev: Pointer to net_device structure
559 *
560 * Checks the current CAN error state from the HW. Note that this
561 * only checks for ERROR_PASSIVE and ERROR_WARNING.
562 *
563 * Return:
564 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
565 * otherwise.
566 */
567static enum can_state xcan_current_error_state(struct net_device *ndev)
568{
569 struct xcan_priv *priv = netdev_priv(ndev);
570 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
571
572 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
573 return CAN_STATE_ERROR_PASSIVE;
574 else if (status & XCAN_SR_ERRWRN_MASK)
575 return CAN_STATE_ERROR_WARNING;
576 else
577 return CAN_STATE_ERROR_ACTIVE;
578}
579
580/**
581 * xcan_set_error_state - Set new CAN error state
582 * @ndev: Pointer to net_device structure
583 * @new_state: The new CAN state to be set
584 * @cf: Error frame to be populated or NULL
585 *
586 * Set new CAN error state for the device, updating statistics and
587 * populating the error frame if given.
588 */
589static void xcan_set_error_state(struct net_device *ndev,
590 enum can_state new_state,
591 struct can_frame *cf)
592{
593 struct xcan_priv *priv = netdev_priv(ndev);
594 u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
595 u32 txerr = ecr & XCAN_ECR_TEC_MASK;
596 u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
597
598 priv->can.state = new_state;
599
600 if (cf) {
601 cf->can_id |= CAN_ERR_CRTL;
602 cf->data[6] = txerr;
603 cf->data[7] = rxerr;
604 }
605
606 switch (new_state) {
607 case CAN_STATE_ERROR_PASSIVE:
608 priv->can.can_stats.error_passive++;
609 if (cf)
610 cf->data[1] = (rxerr > 127) ?
611 CAN_ERR_CRTL_RX_PASSIVE :
612 CAN_ERR_CRTL_TX_PASSIVE;
613 break;
614 case CAN_STATE_ERROR_WARNING:
615 priv->can.can_stats.error_warning++;
616 if (cf)
617 cf->data[1] |= (txerr > rxerr) ?
618 CAN_ERR_CRTL_TX_WARNING :
619 CAN_ERR_CRTL_RX_WARNING;
620 break;
621 case CAN_STATE_ERROR_ACTIVE:
622 if (cf)
623 cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
624 break;
625 default:
626 /* non-ERROR states are handled elsewhere */
627 WARN_ON(1);
628 break;
629 }
630}
631
632/**
633 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
634 * @ndev: Pointer to net_device structure
635 *
636 * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
637 * the performed RX/TX has caused it to drop to a lesser state and set
638 * the interface state accordingly.
639 */
640static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
641{
642 struct xcan_priv *priv = netdev_priv(ndev);
643 enum can_state old_state = priv->can.state;
644 enum can_state new_state;
645
646 /* changing error state due to successful frame RX/TX can only
647 * occur from these states
648 */
649 if (old_state != CAN_STATE_ERROR_WARNING &&
650 old_state != CAN_STATE_ERROR_PASSIVE)
651 return;
652
653 new_state = xcan_current_error_state(ndev);
654
655 if (new_state != old_state) {
656 struct sk_buff *skb;
657 struct can_frame *cf;
658
659 skb = alloc_can_err_skb(ndev, &cf);
660
661 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
662
663 if (skb) {
664 struct net_device_stats *stats = &ndev->stats;
665
666 stats->rx_packets++;
667 stats->rx_bytes += cf->can_dlc;
668 netif_rx(skb);
669 }
670 }
671}
672
673/**
533 * xcan_err_interrupt - error frame Isr 674 * xcan_err_interrupt - error frame Isr
534 * @ndev: net_device pointer 675 * @ndev: net_device pointer
535 * @isr: interrupt status register value 676 * @isr: interrupt status register value
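xcan_current_error_state() condenses the checks that xcan_err_interrupt() used to open-code: both ESTAT bits set means error-passive, the warning bit alone means error-warning, anything else is error-active. A sketch of the decode with invented mask values (the real XCAN_SR_* masks differ):

#include <stdio.h>
#include <stdint.h>

/* Illustrative status-register layout, not the controller's. */
#define SR_ERRWRN  (1u << 0)	/* error-warning flag */
#define SR_ESTAT   (3u << 7)	/* 2-bit error-status field */

enum state { ACTIVE, WARNING, PASSIVE };

static enum state decode(uint32_t status)
{
	if ((status & SR_ESTAT) == SR_ESTAT)	/* both bits: passive */
		return PASSIVE;
	if (status & SR_ERRWRN)
		return WARNING;
	return ACTIVE;
}

int main(void)
{
	printf("%d %d %d\n",
	       decode(0),			/* ACTIVE  */
	       decode(SR_ERRWRN),		/* WARNING */
	       decode(SR_ESTAT | SR_ERRWRN));	/* PASSIVE */
	return 0;
}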
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
544 struct net_device_stats *stats = &ndev->stats; 685 struct net_device_stats *stats = &ndev->stats;
545 struct can_frame *cf; 686 struct can_frame *cf;
546 struct sk_buff *skb; 687 struct sk_buff *skb;
547 u32 err_status, status, txerr = 0, rxerr = 0;
688 u32 err_status;
548 689
549 skb = alloc_can_err_skb(ndev, &cf); 690 skb = alloc_can_err_skb(ndev, &cf);
550 691
551 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); 692 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
552 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); 693 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
553 txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
554 rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
555 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
556 status = priv->read_reg(priv, XCAN_SR_OFFSET);
557 694
558 if (isr & XCAN_IXR_BSOFF_MASK) { 695 if (isr & XCAN_IXR_BSOFF_MASK) {
559 priv->can.state = CAN_STATE_BUS_OFF; 696 priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
563 can_bus_off(ndev); 700 can_bus_off(ndev);
564 if (skb) 701 if (skb)
565 cf->can_id |= CAN_ERR_BUSOFF; 702 cf->can_id |= CAN_ERR_BUSOFF;
566 } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
567 priv->can.state = CAN_STATE_ERROR_PASSIVE;
568 priv->can.can_stats.error_passive++;
569 if (skb) {
703 } else {
704 enum can_state new_state = xcan_current_error_state(ndev);
705
706 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
570 cf->can_id |= CAN_ERR_CRTL;
571 cf->data[1] = (rxerr > 127) ?
572 CAN_ERR_CRTL_RX_PASSIVE :
573 CAN_ERR_CRTL_TX_PASSIVE;
574 cf->data[6] = txerr;
575 cf->data[7] = rxerr;
576 }
577 } else if (status & XCAN_SR_ERRWRN_MASK) {
578 priv->can.state = CAN_STATE_ERROR_WARNING;
579 priv->can.can_stats.error_warning++;
580 if (skb) {
581 cf->can_id |= CAN_ERR_CRTL;
582 cf->data[1] |= (txerr > rxerr) ?
583 CAN_ERR_CRTL_TX_WARNING :
584 CAN_ERR_CRTL_RX_WARNING;
585 cf->data[6] = txerr;
586 cf->data[7] = rxerr;
587 }
588 } 707 }
589 708
590 /* Check for Arbitration lost interrupt */ 709 /* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
600 if (isr & XCAN_IXR_RXOFLW_MASK) { 719 if (isr & XCAN_IXR_RXOFLW_MASK) {
601 stats->rx_over_errors++; 720 stats->rx_over_errors++;
602 stats->rx_errors++; 721 stats->rx_errors++;
603 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
604 if (skb) { 722 if (skb) {
605 cf->can_id |= CAN_ERR_CRTL; 723 cf->can_id |= CAN_ERR_CRTL;
606 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; 724 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
709 827
710 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 828 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
711 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { 829 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
712 if (isr & XCAN_IXR_RXOK_MASK) {
830 work_done += xcan_rx(ndev);
713 priv->write_reg(priv, XCAN_ICR_OFFSET,
714 XCAN_IXR_RXOK_MASK);
715 work_done += xcan_rx(ndev);
716 } else {
717 priv->write_reg(priv, XCAN_ICR_OFFSET,
718 XCAN_IXR_RXNEMP_MASK);
719 break;
720 }
721 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); 831 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
722 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 832 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
723 } 833 }
724 834
725 if (work_done)
835 if (work_done) {
726 can_led_event(ndev, CAN_LED_EVENT_RX); 836 can_led_event(ndev, CAN_LED_EVENT_RX);
837 xcan_update_error_state_after_rxtx(ndev);
838 }
727 839
728 if (work_done < quota) { 840 if (work_done < quota) {
729 napi_complete_done(napi, work_done); 841 napi_complete_done(napi, work_done);
730 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 842 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
731 ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
843 ier |= XCAN_IXR_RXNEMP_MASK;
732 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 844 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
733 } 845 }
734 return work_done; 846 return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
743{ 855{
744 struct xcan_priv *priv = netdev_priv(ndev); 856 struct xcan_priv *priv = netdev_priv(ndev);
745 struct net_device_stats *stats = &ndev->stats; 857 struct net_device_stats *stats = &ndev->stats;
858 unsigned int frames_in_fifo;
859 int frames_sent = 1; /* TXOK => at least 1 frame was sent */
860 unsigned long flags;
861 int retries = 0;
862
863 /* Synchronize with xmit as we need to know the exact number
864 * of frames in the FIFO to stay in sync due to the TXFEMP
865 * handling.
866 * This also prevents a race between netif_wake_queue() and
867 * netif_stop_queue().
868 */
869 spin_lock_irqsave(&priv->tx_lock, flags);
870
871 frames_in_fifo = priv->tx_head - priv->tx_tail;
872
873 if (WARN_ON_ONCE(frames_in_fifo == 0)) {
874 /* clear TXOK anyway to avoid getting back here */
875 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
876 spin_unlock_irqrestore(&priv->tx_lock, flags);
877 return;
878 }
879
880 /* Check if 2 frames were sent (TXOK only means that at least 1
881 * frame was sent).
882 */
883 if (frames_in_fifo > 1) {
884 WARN_ON(frames_in_fifo > priv->tx_max);
885
886 /* Synchronize TXOK and isr so that after the loop:
887 * (1) isr variable is up-to-date at least up to TXOK clear
888 * time. This avoids us clearing a TXOK of a second frame
889 * but not noticing that the FIFO is now empty and thus
890 * marking only a single frame as sent.
891 * (2) No TXOK is left. Having one could mean leaving a
892 * stray TXOK as we might process the associated frame
893 * via TXFEMP handling as we read TXFEMP *after* TXOK
894 * clear to satisfy (1).
895 */
896 while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
897 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
898 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
899 }
746 900
747 while ((priv->tx_head - priv->tx_tail > 0) &&
748 (isr & XCAN_IXR_TXOK_MASK)) {
901 if (isr & XCAN_IXR_TXFEMP_MASK) {
902 /* nothing in FIFO anymore */
903 frames_sent = frames_in_fifo;
904 }
905 } else {
906 /* single frame in fifo, just clear TXOK */
749 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 907 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
908 }
909
910 while (frames_sent--) {
750 can_get_echo_skb(ndev, priv->tx_tail % 911 can_get_echo_skb(ndev, priv->tx_tail %
751 priv->tx_max); 912 priv->tx_max);
752 priv->tx_tail++; 913 priv->tx_tail++;
753 stats->tx_packets++; 914 stats->tx_packets++;
754 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
755 } 915 }
756 can_led_event(ndev, CAN_LED_EVENT_TX);
916
757 netif_wake_queue(ndev); 917 netif_wake_queue(ndev);
918
919 spin_unlock_irqrestore(&priv->tx_lock, flags);
920
921 can_led_event(ndev, CAN_LED_EVENT_TX);
922 xcan_update_error_state_after_rxtx(ndev);
758} 923}
759 924
760/** 925/**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
773 struct net_device *ndev = (struct net_device *)dev_id; 938 struct net_device *ndev = (struct net_device *)dev_id;
774 struct xcan_priv *priv = netdev_priv(ndev); 939 struct xcan_priv *priv = netdev_priv(ndev);
775 u32 isr, ier; 940 u32 isr, ier;
941 u32 isr_errors;
776 942
777 /* Get the interrupt status from Xilinx CAN */ 943 /* Get the interrupt status from Xilinx CAN */
778 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 944 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
791 xcan_tx_interrupt(ndev, isr); 957 xcan_tx_interrupt(ndev, isr);
792 958
793 /* Check for the type of error interrupt and Processing it */ 959 /* Check for the type of error interrupt and Processing it */
794 if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
795 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
796 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
797 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
798 XCAN_IXR_ARBLST_MASK));
960 isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
961 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
962 if (isr_errors) {
963 priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
799 xcan_err_interrupt(ndev, isr); 964 xcan_err_interrupt(ndev, isr);
800 } 965 }
801 966
802 /* Check for the type of receive interrupt and Processing it */ 967 /* Check for the type of receive interrupt and Processing it */
803 if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
968 if (isr & XCAN_IXR_RXNEMP_MASK) {
804 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 969 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
805 ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
970 ier &= ~XCAN_IXR_RXNEMP_MASK;
806 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 971 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
807 napi_schedule(&priv->napi); 972 napi_schedule(&priv->napi);
808 } 973 }
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
819static void xcan_chip_stop(struct net_device *ndev) 984static void xcan_chip_stop(struct net_device *ndev)
820{ 985{
821 struct xcan_priv *priv = netdev_priv(ndev); 986 struct xcan_priv *priv = netdev_priv(ndev);
822 u32 ier;
823 987
824 /* Disable interrupts and leave the can in configuration mode */ 988 /* Disable interrupts and leave the can in configuration mode */
989 set_reset_mode(ndev);
825 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 989 set_reset_mode(ndev);
826 ier &= ~XCAN_INTR_ALL;
827 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
828 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
829 priv->can.state = CAN_STATE_STOPPED; 990 priv->can.state = CAN_STATE_STOPPED;
830} 991}
831 992
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
958 */ 1119 */
959static int __maybe_unused xcan_suspend(struct device *dev) 1120static int __maybe_unused xcan_suspend(struct device *dev)
960{ 1121{
961 if (!device_may_wakeup(dev))
1122 struct net_device *ndev = dev_get_drvdata(dev);
962 return pm_runtime_force_suspend(dev);
963 1123
964 return 0;
1124 if (netif_running(ndev)) {
1125 netif_stop_queue(ndev);
1126 netif_device_detach(ndev);
1127 xcan_chip_stop(ndev);
1128 }
1129
1130 return pm_runtime_force_suspend(dev);
965} 1131}
966 1132
967/** 1133/**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
973 */ 1139 */
974static int __maybe_unused xcan_resume(struct device *dev) 1140static int __maybe_unused xcan_resume(struct device *dev)
975{ 1141{
976 if (!device_may_wakeup(dev))
977 return pm_runtime_force_resume(dev);
1142 struct net_device *ndev = dev_get_drvdata(dev);
1143 int ret;
978 1144
979 return 0;
1145 ret = pm_runtime_force_resume(dev);
1146 if (ret) {
1147 dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1148 return ret;
1149 }
1150
1151 if (netif_running(ndev)) {
1152 ret = xcan_chip_start(ndev);
1153 if (ret) {
1154 dev_err(dev, "xcan_chip_start failed on resume\n");
1155 return ret;
1156 }
1157
1158 netif_device_attach(ndev);
1159 netif_start_queue(ndev);
1160 }
980 1161
1162 return 0;
981} 1163}
982 1164
983/** 1165/**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
992 struct net_device *ndev = dev_get_drvdata(dev); 1174 struct net_device *ndev = dev_get_drvdata(dev);
993 struct xcan_priv *priv = netdev_priv(ndev); 1175 struct xcan_priv *priv = netdev_priv(ndev);
994 1176
995 if (netif_running(ndev)) {
996 netif_stop_queue(ndev);
997 netif_device_detach(ndev);
998 }
999
1000 priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
1001 priv->can.state = CAN_STATE_SLEEPING;
1002
1003 clk_disable_unprepare(priv->bus_clk); 1177 clk_disable_unprepare(priv->bus_clk);
1004 clk_disable_unprepare(priv->can_clk); 1178 clk_disable_unprepare(priv->can_clk);
1005 1179
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1018 struct net_device *ndev = dev_get_drvdata(dev); 1192 struct net_device *ndev = dev_get_drvdata(dev);
1019 struct xcan_priv *priv = netdev_priv(ndev); 1193 struct xcan_priv *priv = netdev_priv(ndev);
1020 int ret; 1194 int ret;
1021 u32 isr, status;
1022 1195
1023 ret = clk_prepare_enable(priv->bus_clk); 1196 ret = clk_prepare_enable(priv->bus_clk);
1024 if (ret) { 1197 if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1032 return ret; 1205 return ret;
1033 } 1206 }
1034 1207
1035 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1036 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1037 status = priv->read_reg(priv, XCAN_SR_OFFSET);
1038
1039 if (netif_running(ndev)) {
1040 if (isr & XCAN_IXR_BSOFF_MASK) {
1041 priv->can.state = CAN_STATE_BUS_OFF;
1042 priv->write_reg(priv, XCAN_SRR_OFFSET,
1043 XCAN_SRR_RESET_MASK);
1044 } else if ((status & XCAN_SR_ESTAT_MASK) ==
1045 XCAN_SR_ESTAT_MASK) {
1046 priv->can.state = CAN_STATE_ERROR_PASSIVE;
1047 } else if (status & XCAN_SR_ERRWRN_MASK) {
1048 priv->can.state = CAN_STATE_ERROR_WARNING;
1049 } else {
1050 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1051 }
1052 netif_device_attach(ndev);
1053 netif_start_queue(ndev);
1054 }
1055
1056 return 0; 1208 return 0;
1057} 1209}
1058 1210
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
1061 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) 1213 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1062}; 1214};
1063 1215
1216static const struct xcan_devtype_data xcan_zynq_data = {
1217 .caps = XCAN_CAP_WATERMARK,
1218};
1219
1220/* Match table for OF platform binding */
1221static const struct of_device_id xcan_of_match[] = {
1222 { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1223 { .compatible = "xlnx,axi-can-1.00.a", },
1224 { /* end of list */ },
1225};
1226MODULE_DEVICE_TABLE(of, xcan_of_match);
1227
1064/** 1228/**
1065 * xcan_probe - Platform registration call 1229 * xcan_probe - Platform registration call
1066 * @pdev: Handle to the platform device structure 1230 * @pdev: Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
1075 struct resource *res; /* IO mem resources */ 1239 struct resource *res; /* IO mem resources */
1076 struct net_device *ndev; 1240 struct net_device *ndev;
1077 struct xcan_priv *priv; 1241 struct xcan_priv *priv;
1242 const struct of_device_id *of_id;
1243 int caps = 0;
1078 void __iomem *addr; 1244 void __iomem *addr;
1079 int ret, rx_max, tx_max;
1245 int ret, rx_max, tx_max, tx_fifo_depth;
1080 1246
1081 /* Get the virtual base address for the device */ 1247 /* Get the virtual base address for the device */
1082 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1248 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
1086 goto err; 1252 goto err;
1087 } 1253 }
1088 1254
1089 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
1255 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1256 &tx_fifo_depth);
1090 if (ret < 0) 1257 if (ret < 0)
1091 goto err; 1258 goto err;
1092 1259
@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
1094 if (ret < 0) 1261 if (ret < 0)
1095 goto err; 1262 goto err;
1096 1263
1264 of_id = of_match_device(xcan_of_match, &pdev->dev);
1265 if (of_id) {
1266 const struct xcan_devtype_data *devtype_data = of_id->data;
1267
1268 if (devtype_data)
1269 caps = devtype_data->caps;
1270 }
1271
1272 /* There is no way to directly figure out how many frames have been
1273 * sent when the TXOK interrupt is processed. If watermark programming
1274 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1275 * to determine if 1 or 2 frames have been sent.
1276 * Theoretically we should be able to use TXFWMEMP to determine up
1277 * to 3 frames, but it seems that after putting a second frame in the
1278 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1279 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1280 * sent), which is not a sensible state - possibly TXFWMEMP is not
1281 * completely synchronized with the rest of the bits?
1282 */
1283 if (caps & XCAN_CAP_WATERMARK)
1284 tx_max = min(tx_fifo_depth, 2);
1285 else
1286 tx_max = 1;
1287
1097 /* Create a CAN device instance */ 1288 /* Create a CAN device instance */
1098 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); 1289 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1099 if (!ndev) 1290 if (!ndev)
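The probe-time policy that falls out of the watermark discussion above reduces to a pure function: without TXFEMP-based accounting only one in-flight frame can be tracked reliably, with it at most two, regardless of the hardware FIFO depth. A sketch (the labels in main() are just illustrations):

#include <stdio.h>

static int pick_tx_max(int tx_fifo_depth, int has_watermark)
{
	if (!has_watermark)
		return 1;	/* no way to count completions */
	return tx_fifo_depth < 2 ? tx_fifo_depth : 2;
}

int main(void)
{
	printf("depth 64, zynq (watermark): %d\n", pick_tx_max(64, 1));
	printf("depth 64, axi-can:          %d\n", pick_tx_max(64, 0));
	return 0;
}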
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
1108 CAN_CTRLMODE_BERR_REPORTING; 1299 CAN_CTRLMODE_BERR_REPORTING;
1109 priv->reg_base = addr; 1300 priv->reg_base = addr;
1110 priv->tx_max = tx_max; 1301 priv->tx_max = tx_max;
1302 spin_lock_init(&priv->tx_lock);
1111 1303
1112 /* Get IRQ for the device */ 1304 /* Get IRQ for the device */
1113 ndev->irq = platform_get_irq(pdev, 0); 1305 ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
1172 1364
1173 pm_runtime_put(&pdev->dev); 1365 pm_runtime_put(&pdev->dev);
1174 1366
1175 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
1367 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
1176 priv->reg_base, ndev->irq, priv->can.clock.freq, 1368 priv->reg_base, ndev->irq, priv->can.clock.freq,
1177 priv->tx_max); 1369 tx_fifo_depth, priv->tx_max);
1178 1370
1179 return 0; 1371 return 0;
1180 1372
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
1208 return 0; 1400 return 0;
1209} 1401}
1210 1402
1211/* Match table for OF platform binding */
1212static const struct of_device_id xcan_of_match[] = {
1213 { .compatible = "xlnx,zynq-can-1.0", },
1214 { .compatible = "xlnx,axi-can-1.00.a", },
1215 { /* end of list */ },
1216};
1217MODULE_DEVICE_TABLE(of, xcan_of_match);
1218
1219static struct platform_driver xcan_driver = { 1403static struct platform_driver xcan_driver = {
1220 .probe = xcan_probe, 1404 .probe = xcan_probe,
1221 .remove = xcan_remove, 1405 .remove = xcan_remove,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 437cd6eb4faa..bb28c701381a 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
343 .xlate = irq_domain_xlate_twocell, 343 .xlate = irq_domain_xlate_twocell,
344}; 344};
345 345
346/* To be called with reg_lock held */
346static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip) 347static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
347{ 348{
348 int irq, virq; 349 int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
362 363
363static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) 364static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
364{ 365{
365 mv88e6xxx_g1_irq_free_common(chip); 366 /*
366 367 * free_irq must be called without reg_lock taken because the irq
368 * handler takes this lock, too.
369 */
367 free_irq(chip->irq, chip); 370 free_irq(chip->irq, chip);
371
372 mutex_lock(&chip->reg_lock);
373 mv88e6xxx_g1_irq_free_common(chip);
374 mutex_unlock(&chip->reg_lock);
368} 375}
369 376
370static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) 377static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
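The reordering above (and the matching change to the poll-free path in the next hunk) breaks a lock-versus-free_irq deadlock. A sketch of the safe teardown ordering, with an illustrative function name and the usual kernel context assumed:

/* free_irq() waits for a running handler to finish, and the mv88e6xxx
 * handler takes reg_lock, so calling free_irq() with reg_lock held can
 * deadlock. Release the IRQ first, then take the lock only for the
 * register-level cleanup. */
static void g1_irq_teardown(struct mv88e6xxx_chip *chip)
{
	free_irq(chip->irq, chip);          /* no lock held: may sleep */
	mutex_lock(&chip->reg_lock);
	mv88e6xxx_g1_irq_free_common(chip); /* register access under lock */
	mutex_unlock(&chip->reg_lock);
}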
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
469 476
470static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip) 477static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
471{ 478{
472 mv88e6xxx_g1_irq_free_common(chip);
473
474 kthread_cancel_delayed_work_sync(&chip->irq_poll_work); 479 kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
475 kthread_destroy_worker(chip->kworker); 480 kthread_destroy_worker(chip->kworker);
481
482 mutex_lock(&chip->reg_lock);
483 mv88e6xxx_g1_irq_free_common(chip);
484 mutex_unlock(&chip->reg_lock);
476} 485}
477 486
478int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) 487int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -2608,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
2608 .rmu_disable = mv88e6085_g1_rmu_disable, 2617 .rmu_disable = mv88e6085_g1_rmu_disable,
2609 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2618 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2610 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2619 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2611 .serdes_power = mv88e6341_serdes_power,
2612}; 2620};
2613 2621
2614static const struct mv88e6xxx_ops mv88e6095_ops = { 2622static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -2774,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
2774 .reset = mv88e6352_g1_reset, 2782 .reset = mv88e6352_g1_reset,
2775 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2783 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2776 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2784 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2785 .serdes_power = mv88e6341_serdes_power,
2777 .gpio_ops = &mv88e6352_gpio_ops, 2786 .gpio_ops = &mv88e6352_gpio_ops,
2778}; 2787};
2779 2788
@@ -2951,7 +2960,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
2951 .reset = mv88e6352_g1_reset, 2960 .reset = mv88e6352_g1_reset,
2952 .vtu_getnext = mv88e6352_g1_vtu_getnext, 2961 .vtu_getnext = mv88e6352_g1_vtu_getnext,
2953 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 2962 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
2954 .serdes_power = mv88e6341_serdes_power,
2955}; 2963};
2956 2964
2957static const struct mv88e6xxx_ops mv88e6176_ops = { 2965static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -3327,6 +3335,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
3327 .reset = mv88e6352_g1_reset, 3335 .reset = mv88e6352_g1_reset,
3328 .vtu_getnext = mv88e6352_g1_vtu_getnext, 3336 .vtu_getnext = mv88e6352_g1_vtu_getnext,
3329 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 3337 .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
3338 .serdes_power = mv88e6341_serdes_power,
3330 .gpio_ops = &mv88e6352_gpio_ops, 3339 .gpio_ops = &mv88e6352_gpio_ops,
3331 .avb_ops = &mv88e6390_avb_ops, 3340 .avb_ops = &mv88e6390_avb_ops,
3332}; 3341};
@@ -4506,12 +4515,10 @@ out_g2_irq:
4506 if (chip->info->g2_irqs > 0) 4515 if (chip->info->g2_irqs > 0)
4507 mv88e6xxx_g2_irq_free(chip); 4516 mv88e6xxx_g2_irq_free(chip);
4508out_g1_irq: 4517out_g1_irq:
4509 mutex_lock(&chip->reg_lock);
4510 if (chip->irq > 0) 4518 if (chip->irq > 0)
4511 mv88e6xxx_g1_irq_free(chip); 4519 mv88e6xxx_g1_irq_free(chip);
4512 else 4520 else
4513 mv88e6xxx_irq_poll_free(chip); 4521 mv88e6xxx_irq_poll_free(chip);
4514 mutex_unlock(&chip->reg_lock);
4515out: 4522out:
4516 if (pdata) 4523 if (pdata)
4517 dev_put(pdata->netdev); 4524 dev_put(pdata->netdev);
@@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
4539 if (chip->info->g2_irqs > 0) 4546 if (chip->info->g2_irqs > 0)
4540 mv88e6xxx_g2_irq_free(chip); 4547 mv88e6xxx_g2_irq_free(chip);
4541 4548
4542 mutex_lock(&chip->reg_lock);
4543 if (chip->irq > 0) 4549 if (chip->irq > 0)
4544 mv88e6xxx_g1_irq_free(chip); 4550 mv88e6xxx_g1_irq_free(chip);
4545 else 4551 else
4546 mv88e6xxx_irq_poll_free(chip); 4552 mv88e6xxx_irq_poll_free(chip);
4547 mutex_unlock(&chip->reg_lock);
4548} 4553}
4549 4554
4550static const struct of_device_id mv88e6xxx_of_match[] = { 4555static const struct of_device_id mv88e6xxx_of_match[] = {
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf020..5c3ef9fc8207 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
32 32
33config 3C515 33config 3C515
34 tristate "3c515 ISA \"Fast EtherLink\"" 34 tristate "3c515 ISA \"Fast EtherLink\""
35 depends on ISA && ISA_DMA_API 35 depends on ISA && ISA_DMA_API && !PPC32
36 ---help--- 36 ---help---
37 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet 37 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
38 network card, say Y here. 38 network card, say Y here.
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b6d735bf8011..342ae08ec3c2 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
153static void dayna_block_output(struct net_device *dev, int count, 153static void dayna_block_output(struct net_device *dev, int count,
154 const unsigned char *buf, int start_page); 154 const unsigned char *buf, int start_page);
155 155
156#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
157#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
158
159#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c)) 156#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
160 157
161/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 158/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
239 unsigned long outdata = 0xA5A0B5B0; 236 unsigned long outdata = 0xA5A0B5B0;
240 unsigned long indata = 0x00000000; 237 unsigned long indata = 0x00000000;
241 /* Try writing 32 bits */ 238 /* Try writing 32 bits */
242 memcpy_toio(membase, &outdata, 4); 239 memcpy_toio((void __iomem *)membase, &outdata, 4);
243 /* Now compare them */ 240 /* Now compare them */
244 if (memcmp_withio(&outdata, membase, 4) == 0) 241 if (memcmp_withio(&outdata, membase, 4) == 0)
245 return ACCESS_32; 242 return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
711 struct e8390_pkt_hdr *hdr, int ring_page) 708 struct e8390_pkt_hdr *hdr, int ring_page)
712{ 709{
713 unsigned long hdr_start = (ring_page - WD_START_PG)<<8; 710 unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
714 memcpy_fromio(hdr, dev->mem_start + hdr_start, 4); 711 memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
715 /* Fix endianness */ 712 /* Fix endianness */
716 hdr->count = swab16(hdr->count); 713 hdr->count = swab16(hdr->count);
717} 714}
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
725 if (xfer_start + count > ei_status.rmem_end) { 722 if (xfer_start + count > ei_status.rmem_end) {
726 /* We must wrap the input move. */ 723 /* We must wrap the input move. */
727 int semi_count = ei_status.rmem_end - xfer_start; 724 int semi_count = ei_status.rmem_end - xfer_start;
728 memcpy_fromio(skb->data, dev->mem_start + xfer_base, 725 memcpy_fromio(skb->data,
726 (void __iomem *)dev->mem_start + xfer_base,
729 semi_count); 727 semi_count);
730 count -= semi_count; 728 count -= semi_count;
731 memcpy_fromio(skb->data + semi_count, ei_status.rmem_start, 729 memcpy_fromio(skb->data + semi_count,
732 count); 730 (void __iomem *)ei_status.rmem_start, count);
733 } else { 731 } else {
734 memcpy_fromio(skb->data, dev->mem_start + xfer_base, count); 732 memcpy_fromio(skb->data,
733 (void __iomem *)dev->mem_start + xfer_base,
734 count);
735 } 735 }
736} 736}
737 737
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
740{ 740{
741 long shmem = (start_page - WD_START_PG)<<8; 741 long shmem = (start_page - WD_START_PG)<<8;
742 742
743 memcpy_toio(dev->mem_start + shmem, buf, count); 743 memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
744} 744}
745 745
746/* dayna block input/output */ 746/* dayna block input/output */
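Dropping the local macros means every call site now goes through the real __iomem-aware accessors. A fragment sketching the resulting pattern, assuming the standard kernel I/O helpers:

/* dev->mem_start is a plain unsigned long, so each call site casts it
 * back to the __iomem cookie the generic accessors expect; the removed
 * local macros had silently bypassed that type distinction. */
void __iomem *shmem = (void __iomem *)dev->mem_start + offset;

memcpy_toio(shmem, buf, count);    /* CPU buffer -> card memory */
memcpy_fromio(buf, shmem, count);  /* card memory -> CPU buffer */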
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 1b9d3130af4d..17f12c18d225 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
333 333
334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); 334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
335 335
336 io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
336 io_sq->desc_entry_size = 337 io_sq->desc_entry_size =
337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? 338 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
338 sizeof(struct ena_eth_io_tx_desc) : 339 sizeof(struct ena_eth_io_tx_desc) :
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index f273af136fc7..9e5cf5583c87 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
44 44
45config LANCE 45config LANCE
46 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 46 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
47 depends on ISA && ISA_DMA_API && !ARM 47 depends on ISA && ISA_DMA_API && !ARM && !PPC32
48 ---help--- 48 ---help---
49 If you have a network (Ethernet) card of this type, say Y here. 49 If you have a network (Ethernet) card of this type, say Y here.
50 Some LinkSys cards are of this type. 50 Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
138 138
139config NI65 139config NI65
140 tristate "NI6510 support" 140 tristate "NI6510 support"
141 depends on ISA && ISA_DMA_API && !ARM 141 depends on ISA && ISA_DMA_API && !ARM && !PPC32
142 ---help--- 142 ---help---
143 If you have a network (Ethernet) card of this type, say Y here. 143 If you have a network (Ethernet) card of this type, say Y here.
144 144
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4b5d625de8f0..8a3a60bb2688 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
1111 1111
1112 if (pdata->tx_pause != pdata->phy.tx_pause) { 1112 if (pdata->tx_pause != pdata->phy.tx_pause) {
1113 new_state = 1; 1113 new_state = 1;
1114 pdata->hw_if.config_tx_flow_control(pdata);
1115 pdata->tx_pause = pdata->phy.tx_pause; 1114 pdata->tx_pause = pdata->phy.tx_pause;
1115 pdata->hw_if.config_tx_flow_control(pdata);
1116 } 1116 }
1117 1117
1118 if (pdata->rx_pause != pdata->phy.rx_pause) { 1118 if (pdata->rx_pause != pdata->phy.rx_pause) {
1119 new_state = 1; 1119 new_state = 1;
1120 pdata->hw_if.config_rx_flow_control(pdata);
1121 pdata->rx_pause = pdata->phy.rx_pause; 1120 pdata->rx_pause = pdata->phy.rx_pause;
1121 pdata->hw_if.config_rx_flow_control(pdata);
1122 } 1122 }
1123 1123
1124 /* Speed support */ 1124 /* Speed support */
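The swap above is an ordering fix: the flow-control callback presumably consumes the cached pause field, so the field has to be updated before the callback runs. In sketch form:

/* Publish the new pause state first, then program the hardware from
 * it; in the old order the callback still saw the stale value. */
pdata->tx_pause = pdata->phy.tx_pause;
pdata->hw_if.config_tx_flow_control(pdata);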
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 956860a69797..3bdab972420b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -762,7 +762,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
762 762
763 hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC)); 763 hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
764 hw_atl_rpfl2multicast_flr_en_set(self, 764 hw_atl_rpfl2multicast_flr_en_set(self,
765 IS_FILTER_ENABLED(IFF_MULTICAST), 0); 765 IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
766 766
767 hw_atl_rpfl2_accept_all_mc_packets_set(self, 767 hw_atl_rpfl2_accept_all_mc_packets_set(self,
768 IS_FILTER_ENABLED(IFF_ALLMULTI)); 768 IS_FILTER_ENABLED(IFF_ALLMULTI));
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 94270f654b3b..7087b88550db 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
1686 skb = build_skb(page_address(page) + adapter->rx_page_offset, 1686 skb = build_skb(page_address(page) + adapter->rx_page_offset,
1687 adapter->rx_frag_size); 1687 adapter->rx_frag_size);
1688 if (likely(skb)) { 1688 if (likely(skb)) {
1689 skb_reserve(skb, NET_SKB_PAD);
1689 adapter->rx_page_offset += adapter->rx_frag_size; 1690 adapter->rx_page_offset += adapter->rx_frag_size;
1690 if (adapter->rx_page_offset >= PAGE_SIZE) 1691 if (adapter->rx_page_offset >= PAGE_SIZE)
1691 adapter->rx_page = NULL; 1692 adapter->rx_page = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index da18aa239acb..a4a90b6cdb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3388 DP(BNX2X_MSG_ETHTOOL, 3388 DP(BNX2X_MSG_ETHTOOL,
3389 "rss re-configured, UDP 4-tuple %s\n", 3389 "rss re-configured, UDP 4-tuple %s\n",
3390 udp_rss_requested ? "enabled" : "disabled"); 3390 udp_rss_requested ? "enabled" : "disabled");
3391 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); 3391 if (bp->state == BNX2X_STATE_OPEN)
3392 return bnx2x_rss(bp, &bp->rss_conf_obj, false,
3393 true);
3392 } else if ((info->flow_type == UDP_V6_FLOW) && 3394 } else if ((info->flow_type == UDP_V6_FLOW) &&
3393 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { 3395 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
3394 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; 3396 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
3395 DP(BNX2X_MSG_ETHTOOL, 3397 DP(BNX2X_MSG_ETHTOOL,
3396 "rss re-configured, UDP 4-tuple %s\n", 3398 "rss re-configured, UDP 4-tuple %s\n",
3397 udp_rss_requested ? "enabled" : "disabled"); 3399 udp_rss_requested ? "enabled" : "disabled");
3398 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); 3400 if (bp->state == BNX2X_STATE_OPEN)
3401 return bnx2x_rss(bp, &bp->rss_conf_obj, false,
3402 true);
3399 } 3403 }
3400 return 0; 3404 return 0;
3401 3405
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
3509 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; 3513 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
3510 } 3514 }
3511 3515
3512 return bnx2x_config_rss_eth(bp, false); 3516 if (bp->state == BNX2X_STATE_OPEN)
3517 return bnx2x_config_rss_eth(bp, false);
3518
3519 return 0;
3513} 3520}
3514 3521
3515/** 3522/**
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 5d08d2aeb172..e337da6ba2a4 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
1083 lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count); 1083 lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
1084 lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs), 1084 lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
1085 GFP_KERNEL); 1085 GFP_KERNEL);
1086 if (!lmac->dmacs)
1087 return -ENOMEM;
1086 1088
1087 /* Enable lmac */ 1089 /* Enable lmac */
1088 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); 1090 bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 00fc5f1afb1d..7dddb9e748b8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
1038 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid)); 1038 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
1039 req->local_port = cpu_to_be16(f->fs.val.lport); 1039 req->local_port = cpu_to_be16(f->fs.val.lport);
1040 req->peer_port = cpu_to_be16(f->fs.val.fport); 1040 req->peer_port = cpu_to_be16(f->fs.val.fport);
1041 req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 | 1041 memcpy(&req->local_ip, f->fs.val.lip, 4);
1042 f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24; 1042 memcpy(&req->peer_ip, f->fs.val.fip, 4);
1043 req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
1044 f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
1045 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE || 1043 req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1046 f->fs.newvlan == VLAN_REWRITE) | 1044 f->fs.newvlan == VLAN_REWRITE) |
1047 DELACK_V(f->fs.hitcnts) | 1045 DELACK_V(f->fs.hitcnts) |
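The memcpy form above is an endianness fix: lip[]/fip[] already hold the address bytes in network order, so copying them verbatim is correct on any host, while the old shift-assembly produced a host-endian value. A standalone sketch:

#include <stdint.h>
#include <string.h>

/* The byte array is already in network (wire) order, so a straight
 * copy preserves it on little- and big-endian hosts alike; assembling
 * a host-endian u32 from shifted bytes only happened to work on
 * little-endian machines. */
static void set_be32_field(uint32_t *field, const uint8_t ip[4])
{
	memcpy(field, ip, 4);   /* no byte swapping */
}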
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index bc03c175a3cd..a8926e97935e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3072,6 +3072,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,
3072 3072
3073 adapter->geneve_port = 0; 3073 adapter->geneve_port = 0;
3074 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); 3074 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3075 break;
3075 default: 3076 default:
3076 return; 3077 return;
3077 } 3078 }
@@ -3157,6 +3158,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
3157 3158
3158 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 3159 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3159 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); 3160 GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3161 break;
3160 default: 3162 default:
3161 return; 3163 return;
3162 } 3164 }
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab912937aff..ec0b545197e2 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
19config CS89x0 19config CS89x0
20 tristate "CS89x0 support" 20 tristate "CS89x0 support"
21 depends on ISA || EISA || ARM 21 depends on ISA || EISA || ARM
22 depends on !PPC32
22 ---help--- 23 ---help---
23 Support for CS89x0 chipset based Ethernet cards. If you have a 24 Support for CS89x0 chipset based Ethernet cards. If you have a
24 network (Ethernet) card of this type, say Y and read the file 25 network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 90c645b8538e..60641e202534 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev)
2047 return 0; 2047 return 0;
2048} 2048}
2049 2049
2050static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
2051{
2052 bool running = netif_running(netdev);
2053 int err = 0;
2054
2055 ASSERT_RTNL();
2056 if (running) {
2057 err = enic_stop(netdev);
2058 if (err)
2059 return err;
2060 }
2061
2062 netdev->mtu = new_mtu;
2063
2064 if (running) {
2065 err = enic_open(netdev);
2066 if (err)
2067 return err;
2068 }
2069
2070 return 0;
2071}
2072
2050static int enic_change_mtu(struct net_device *netdev, int new_mtu) 2073static int enic_change_mtu(struct net_device *netdev, int new_mtu)
2051{ 2074{
2052 struct enic *enic = netdev_priv(netdev); 2075 struct enic *enic = netdev_priv(netdev);
2053 int running = netif_running(netdev);
2054 2076
2055 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) 2077 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
2056 return -EOPNOTSUPP; 2078 return -EOPNOTSUPP;
2057 2079
2058 if (running)
2059 enic_stop(netdev);
2060
2061 netdev->mtu = new_mtu;
2062
2063 if (netdev->mtu > enic->port_mtu) 2080 if (netdev->mtu > enic->port_mtu)
2064 netdev_warn(netdev, 2081 netdev_warn(netdev,
2065 "interface MTU (%d) set higher than port MTU (%d)\n", 2082 "interface MTU (%d) set higher than port MTU (%d)\n",
2066 netdev->mtu, enic->port_mtu); 2083 netdev->mtu, enic->port_mtu);
2067 2084
2068 if (running) 2085 return _enic_change_mtu(netdev, new_mtu);
2069 enic_open(netdev);
2070
2071 return 0;
2072} 2086}
2073 2087
2074static void enic_change_mtu_work(struct work_struct *work) 2088static void enic_change_mtu_work(struct work_struct *work)
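The refactor funnels both MTU paths through one helper that assumes RTNL; the work handler rewritten in the next hunk takes the lock itself. A sketch of the two resulting call shapes, with illustrative names:

static int example_ndo_change_mtu(struct net_device *netdev, int new_mtu)
{
	/* ndo path: the core holds RTNL before calling into the driver,
	 * so _enic_change_mtu()'s ASSERT_RTNL() is already satisfied. */
	return _enic_change_mtu(netdev, new_mtu);
}

static void example_change_mtu_work(struct net_device *netdev, int new_mtu)
{
	/* deferred, firmware-initiated path: no locks are held in the
	 * work handler, so RTNL must be taken around the same helper. */
	rtnl_lock();
	(void)_enic_change_mtu(netdev, new_mtu);
	rtnl_unlock();
}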
@@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work)
2076 struct enic *enic = container_of(work, struct enic, change_mtu_work); 2090 struct enic *enic = container_of(work, struct enic, change_mtu_work);
2077 struct net_device *netdev = enic->netdev; 2091 struct net_device *netdev = enic->netdev;
2078 int new_mtu = vnic_dev_mtu(enic->vdev); 2092 int new_mtu = vnic_dev_mtu(enic->vdev);
2079 int err;
2080 unsigned int i;
2081
2082 new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
2083 2093
2084 rtnl_lock(); 2094 rtnl_lock();
2085 2095 (void)_enic_change_mtu(netdev, new_mtu);
2086 /* Stop RQ */
2087 del_timer_sync(&enic->notify_timer);
2088
2089 for (i = 0; i < enic->rq_count; i++)
2090 napi_disable(&enic->napi[i]);
2091
2092 vnic_intr_mask(&enic->intr[0]);
2093 enic_synchronize_irqs(enic);
2094 err = vnic_rq_disable(&enic->rq[0]);
2095 if (err) {
2096 rtnl_unlock();
2097 netdev_err(netdev, "Unable to disable RQ.\n");
2098 return;
2099 }
2100 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
2101 vnic_cq_clean(&enic->cq[0]);
2102 vnic_intr_clean(&enic->intr[0]);
2103
2104 /* Fill RQ with new_mtu-sized buffers */
2105 netdev->mtu = new_mtu;
2106 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
2107 /* Need at least one buffer on ring to get going */
2108 if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
2109 rtnl_unlock();
2110 netdev_err(netdev, "Unable to alloc receive buffers.\n");
2111 return;
2112 }
2113
2114 /* Start RQ */
2115 vnic_rq_enable(&enic->rq[0]);
2116 napi_enable(&enic->napi[0]);
2117 vnic_intr_unmask(&enic->intr[0]);
2118 enic_notify_timer_start(enic);
2119
2120 rtnl_unlock(); 2096 rtnl_unlock();
2121 2097
2122 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu); 2098 netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2916 */ 2892 */
2917 2893
2918 enic->port_mtu = enic->config.mtu; 2894 enic->port_mtu = enic->config.mtu;
2919 (void)enic_change_mtu(netdev, enic->port_mtu);
2920 2895
2921 err = enic_set_mac_addr(netdev, enic->mac_addr); 2896 err = enic_set_mac_addr(netdev, enic->mac_addr);
2922 if (err) { 2897 if (err) {
@@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3006 /* MTU range: 68 - 9000 */ 2981 /* MTU range: 68 - 9000 */
3007 netdev->min_mtu = ENIC_MIN_MTU; 2982 netdev->min_mtu = ENIC_MIN_MTU;
3008 netdev->max_mtu = ENIC_MAX_MTU; 2983 netdev->max_mtu = ENIC_MAX_MTU;
2984 netdev->mtu = enic->port_mtu;
3009 2985
3010 err = register_netdev(netdev); 2986 err = register_netdev(netdev);
3011 if (err) { 2987 if (err) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 5b122728dcb4..09e9da10b786 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev)
983 hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, 983 hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
984 nic_dev, link_status_event_handler); 984 nic_dev, link_status_event_handler);
985 985
986 SET_NETDEV_DEV(netdev, &pdev->dev);
986 err = register_netdev(netdev); 987 err = register_netdev(netdev);
987 if (err) { 988 if (err) {
988 dev_err(&pdev->dev, "Failed to register netdev\n"); 989 dev_err(&pdev->dev, "Failed to register netdev\n");
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9128858479c4..2353ec829c04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
229 txq->txq_stats.tx_busy++; 229 txq->txq_stats.tx_busy++;
230 u64_stats_update_end(&txq->txq_stats.syncp); 230 u64_stats_update_end(&txq->txq_stats.syncp);
231 err = NETDEV_TX_BUSY; 231 err = NETDEV_TX_BUSY;
232 wqe_size = 0;
232 goto flush_skbs; 233 goto flush_skbs;
233 } 234 }
234 235
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 7b1b5ac986d0..31bd56727022 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2958 u32 srqn = qp_get_srqn(qpc) & 0xffffff; 2958 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2959 int use_srq = (qp_get_srqn(qpc) >> 24) & 1; 2959 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2960 struct res_srq *srq; 2960 struct res_srq *srq;
2961 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; 2961 int local_qpn = vhcr->in_modifier & 0xffffff;
2962 2962
2963 err = adjust_qp_sched_queue(dev, slave, qpc, inbox); 2963 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2964 if (err) 2964 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 323ffe8bf7e4..456f30007ad6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
123 int i; 123 int i;
124 124
125 buf->size = size; 125 buf->size = size;
126 buf->npages = 1 << get_order(size); 126 buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
127 buf->page_shift = PAGE_SHIFT; 127 buf->page_shift = PAGE_SHIFT;
128 buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list), 128 buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
129 GFP_KERNEL); 129 GFP_KERNEL);
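The arithmetic change is worth spelling out: get_order() rounds up to a power-of-two page count, so any non-power-of-two size over-reserved fragments. A standalone demonstration:

#include <stdio.h>
#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE;
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)   /* get_order(), simplified */
		order++;
	printf("pow2 pages: %u, exact pages: %lu\n",
	       1u << order, DIV_ROUND_UP(size, PAGE_SIZE));
	/* prints: pow2 pages: 4, exact pages: 3 */
	return 0;
}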
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index eb9eb7aa953a..405236cf0b04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -858,8 +858,6 @@ struct mlx5e_profile {
858 mlx5e_fp_handle_rx_cqe handle_rx_cqe; 858 mlx5e_fp_handle_rx_cqe handle_rx_cqe;
859 mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; 859 mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
860 } rx_handlers; 860 } rx_handlers;
861 void (*netdev_registered_init)(struct mlx5e_priv *priv);
862 void (*netdev_registered_remove)(struct mlx5e_priv *priv);
863 int max_tc; 861 int max_tc;
864}; 862};
865 863
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 75e4308ba786..d258bb679271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
381 HLIST_HEAD(del_list); 381 HLIST_HEAD(del_list);
382 spin_lock_bh(&priv->fs.arfs.arfs_lock); 382 spin_lock_bh(&priv->fs.arfs.arfs_lock);
383 mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { 383 mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
384 if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
385 break;
386 if (!work_pending(&arfs_rule->arfs_work) && 384 if (!work_pending(&arfs_rule->arfs_work) &&
387 rps_may_expire_flow(priv->netdev, 385 rps_may_expire_flow(priv->netdev,
388 arfs_rule->rxq, arfs_rule->flow_id, 386 arfs_rule->rxq, arfs_rule->flow_id,
389 arfs_rule->filter_id)) { 387 arfs_rule->filter_id)) {
390 hlist_del_init(&arfs_rule->hlist); 388 hlist_del_init(&arfs_rule->hlist);
391 hlist_add_head(&arfs_rule->hlist, &del_list); 389 hlist_add_head(&arfs_rule->hlist, &del_list);
390 if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
391 break;
392 } 392 }
393 } 393 }
394 spin_unlock_bh(&priv->fs.arfs.arfs_lock); 394 spin_unlock_bh(&priv->fs.arfs.arfs_lock);
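Moving the quota check changes what the loop counts: before, every rule inspected consumed quota; now only rules actually queued for deletion do, so a table full of live rules can no longer starve expiry. A minimal standalone sketch with hypothetical types:

#include <stdbool.h>
#include <stddef.h>

#define EXPIRY_QUOTA 20  /* per-pass cap, mirroring the constant above */

struct rule { bool expired; bool queued_for_del; };

/* Only rules actually selected for deletion count against the quota;
 * merely scanning a live rule no longer does. */
static void expire_pass(struct rule *rules, size_t n)
{
	size_t quota = 0;

	for (size_t i = 0; i < n; i++) {
		if (!rules[i].expired)
			continue;
		rules[i].queued_for_del = true;
		if (quota++ > EXPIRY_QUOTA)
			break;
	}
}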
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
711 skb->protocol != htons(ETH_P_IPV6)) 711 skb->protocol != htons(ETH_P_IPV6))
712 return -EPROTONOSUPPORT; 712 return -EPROTONOSUPPORT;
713 713
714 if (skb->encapsulation)
715 return -EPROTONOSUPPORT;
716
714 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); 717 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
715 if (!arfs_t) 718 if (!arfs_t)
716 return -EPROTONOSUPPORT; 719 return -EPROTONOSUPPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0a52f31fef37..722998d68564 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
275} 275}
276 276
277static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, 277static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
278 struct ieee_ets *ets) 278 struct ieee_ets *ets,
279 bool zero_sum_allowed)
279{ 280{
280 bool have_ets_tc = false; 281 bool have_ets_tc = false;
281 int bw_sum = 0; 282 int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
300 } 301 }
301 302
302 if (have_ets_tc && bw_sum != 100) { 303 if (have_ets_tc && bw_sum != 100) {
303 netdev_err(netdev, 304 if (bw_sum || (!bw_sum && !zero_sum_allowed))
304 "Failed to validate ETS: BW sum is illegal\n"); 305 netdev_err(netdev,
306 "Failed to validate ETS: BW sum is illegal\n");
305 return -EINVAL; 307 return -EINVAL;
306 } 308 }
307 return 0; 309 return 0;
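A standalone restatement of the check above (ignoring the have_ets_tc qualifier for brevity): any bandwidth sum other than 100 still fails; zero_sum_allowed merely suppresses the log line for the all-zero configuration that the setall path, further below, legitimately passes through.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int validate_ets_bw(const unsigned char *bw, int ntc,
			   bool zero_sum_allowed)
{
	int i, sum = 0;

	for (i = 0; i < ntc; i++)
		sum += bw[i];
	if (sum == 100)
		return 0;
	if (sum || !zero_sum_allowed)   /* silent only for all-zero + allowed */
		fprintf(stderr, "ETS: illegal bandwidth sum %d\n", sum);
	return -EINVAL;
}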
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
316 if (!MLX5_CAP_GEN(priv->mdev, ets)) 318 if (!MLX5_CAP_GEN(priv->mdev, ets))
317 return -EOPNOTSUPP; 319 return -EOPNOTSUPP;
318 320
319 err = mlx5e_dbcnl_validate_ets(netdev, ets); 321 err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
320 if (err) 322 if (err)
321 return err; 323 return err;
322 324
@@ -441,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
441 bool is_new; 443 bool is_new;
442 int err; 444 int err;
443 445
444 if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) 446 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
445 return -EINVAL; 447 !MLX5_DSCP_SUPPORTED(priv->mdev))
446 448 return -EOPNOTSUPP;
447 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
448 return -EINVAL;
449
450 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
451 return -EINVAL;
452 449
453 if (app->protocol >= MLX5E_MAX_DSCP) 450 if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
451 (app->protocol >= MLX5E_MAX_DSCP))
454 return -EINVAL; 452 return -EINVAL;
455 453
456 /* Save the old entry info */ 454 /* Save the old entry info */
@@ -498,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
498 struct mlx5e_priv *priv = netdev_priv(dev); 496 struct mlx5e_priv *priv = netdev_priv(dev);
499 int err; 497 int err;
500 498
501 if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) 499 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
502 return -EINVAL; 500 !MLX5_DSCP_SUPPORTED(priv->mdev))
503 501 return -EOPNOTSUPP;
504 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
505 return -EINVAL;
506
507 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
508 return -EINVAL;
509 502
510 if (app->protocol >= MLX5E_MAX_DSCP) 503 if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
504 (app->protocol >= MLX5E_MAX_DSCP))
511 return -EINVAL; 505 return -EINVAL;
512 506
513 /* Skip if no dscp app entry */ 507 /* Skip if no dscp app entry */
@@ -642,12 +636,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
642 ets.prio_tc[i]); 636 ets.prio_tc[i]);
643 } 637 }
644 638
645 err = mlx5e_dbcnl_validate_ets(netdev, &ets); 639 err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
646 if (err) { 640 if (err)
647 netdev_err(netdev,
648 "%s, Failed to validate ETS: %d\n", __func__, err);
649 goto out; 641 goto out;
650 }
651 642
652 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets); 643 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
653 if (err) { 644 if (err) {
@@ -1147,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
1147{ 1138{
1148 int err; 1139 int err;
1149 1140
1150 err = mlx5_set_trust_state(priv->mdev, trust_state); 1141 err = mlx5_set_trust_state(priv->mdev, trust_state);
1151 if (err) 1142 if (err)
1152 return err; 1143 return err;
1153 priv->dcbx_dp.trust_state = trust_state; 1144 priv->dcbx_dp.trust_state = trust_state;
@@ -1173,6 +1164,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1173 struct mlx5_core_dev *mdev = priv->mdev; 1164 struct mlx5_core_dev *mdev = priv->mdev;
1174 int err; 1165 int err;
1175 1166
1167 priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
1168
1176 if (!MLX5_DSCP_SUPPORTED(mdev)) 1169 if (!MLX5_DSCP_SUPPORTED(mdev))
1177 return 0; 1170 return 0;
1178 1171
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dae4156a710d..c592678ab5f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3712,7 +3712,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
3712 3712
3713 if (!reset) { 3713 if (!reset) {
3714 params->sw_mtu = new_mtu; 3714 params->sw_mtu = new_mtu;
3715 set_mtu_cb(priv); 3715 if (set_mtu_cb)
3716 set_mtu_cb(priv);
3716 netdev->mtu = params->sw_mtu; 3717 netdev->mtu = params->sw_mtu;
3717 goto out; 3718 goto out;
3718 } 3719 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0edf4751a8ba..dfbcda0d0e08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
1957 else 1957 else
1958 actions = flow->nic_attr->action; 1958 actions = flow->nic_attr->action;
1959 1959
1960 if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
1961 !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
1962 return false;
1963
1960 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 1964 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1961 return modify_header_match_supported(&parse_attr->spec, exts); 1965 return modify_header_match_supported(&parse_attr->spec, exts);
1962 1966
@@ -1966,15 +1970,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
1966static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 1970static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
1967{ 1971{
1968 struct mlx5_core_dev *fmdev, *pmdev; 1972 struct mlx5_core_dev *fmdev, *pmdev;
1969 u16 func_id, peer_id; 1973 u64 fsystem_guid, psystem_guid;
1970 1974
1971 fmdev = priv->mdev; 1975 fmdev = priv->mdev;
1972 pmdev = peer_priv->mdev; 1976 pmdev = peer_priv->mdev;
1973 1977
1974 func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn)); 1978 mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
1975 peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn)); 1979 mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
1976 1980
1977 return (func_id == peer_id); 1981 return (fsystem_guid == psystem_guid);
1978} 1982}
1979 1983
1980static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 1984static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index b79d74860a30..40dba9e8af92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1696,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
1696 int vport_num; 1696 int vport_num;
1697 int err; 1697 int err;
1698 1698
1699 if (!MLX5_VPORT_MANAGER(dev)) 1699 if (!MLX5_ESWITCH_MANAGER(dev))
1700 return 0; 1700 return 0;
1701 1701
1702 esw_info(dev, 1702 esw_info(dev,
@@ -1765,7 +1765,7 @@ abort:
1765 1765
1766void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) 1766void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
1767{ 1767{
1768 if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) 1768 if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
1769 return; 1769 return;
1770 1770
1771 esw_info(esw->dev, "cleanup\n"); 1771 esw_info(esw->dev, "cleanup\n");
@@ -2216,6 +2216,6 @@ free_out:
2216 2216
2217u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) 2217u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
2218{ 2218{
2219 return esw->mode; 2219 return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
2220} 2220}
2221EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); 2221EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f1a86cea86a0..6ddb2565884d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1887,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1887 if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { 1887 if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1888 if (!fwd_next_prio_supported(ft)) 1888 if (!fwd_next_prio_supported(ft))
1889 return ERR_PTR(-EOPNOTSUPP); 1889 return ERR_PTR(-EOPNOTSUPP);
1890 if (dest) 1890 if (dest_num)
1891 return ERR_PTR(-EINVAL); 1891 return ERR_PTR(-EINVAL);
1892 mutex_lock(&root->chain_lock); 1892 mutex_lock(&root->chain_lock);
1893 next_ft = find_next_chained_ft(prio); 1893 next_ft = find_next_chained_ft(prio);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index af3bb2f7a504..b7c21eb21a21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
76 void *ppriv) 76 void *ppriv)
77{ 77{
78 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 78 struct mlx5e_priv *priv = mlx5i_epriv(netdev);
79 u16 max_mtu;
79 80
80 /* priv init */ 81 /* priv init */
81 priv->mdev = mdev; 82 priv->mdev = mdev;
@@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
84 priv->ppriv = ppriv; 85 priv->ppriv = ppriv;
85 mutex_init(&priv->state_lock); 86 mutex_init(&priv->state_lock);
86 87
88 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
89 netdev->mtu = max_mtu;
90
87 mlx5e_build_nic_params(mdev, &priv->channels.params, 91 mlx5e_build_nic_params(mdev, &priv->channels.params,
88 profile->max_nch(mdev), netdev->mtu); 92 profile->max_nch(mdev), netdev->mtu);
89 mlx5i_build_nic_params(mdev, &priv->channels.params); 93 mlx5i_build_nic_params(mdev, &priv->channels.params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 1e062e6b2587..3f767cde4c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
488void mlx5_init_clock(struct mlx5_core_dev *mdev) 488void mlx5_init_clock(struct mlx5_core_dev *mdev)
489{ 489{
490 struct mlx5_clock *clock = &mdev->clock; 490 struct mlx5_clock *clock = &mdev->clock;
491 u64 overflow_cycles;
491 u64 ns; 492 u64 ns;
492 u64 frac = 0; 493 u64 frac = 0;
493 u32 dev_freq; 494 u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
511 512
512 /* Calculate period in seconds to call the overflow watchdog - to make 513 /* Calculate period in seconds to call the overflow watchdog - to make
513 * sure counter is checked at least once every wrap around. 514 * sure counter is checked at least once every wrap around.
515 * The period is calculated as the minimum between the max HW cycles
516 * count (the clock source mask) and the max number of cycles that
517 * can be multiplied by the clock multiplier without the result
518 * exceeding 64 bits.
514 */ 519 */
515 ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask, 520 overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
521 overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
522
523 ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
516 frac, &frac); 524 frac, &frac);
517 do_div(ns, NSEC_PER_SEC / 2 / HZ); 525 do_div(ns, NSEC_PER_SEC / HZ);
518 clock->overflow_period = ns; 526 clock->overflow_period = ns;
519 527
520 mdev->clock_info_page = alloc_page(GFP_KERNEL); 528 mdev->clock_info_page = alloc_page(GFP_KERNEL);
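The overflow computation above encodes two independent bounds, and the halving moved from the do_div() divisor into the cycle count itself. A standalone restatement:

#include <stdint.h>

/* The watchdog period must respect two limits: cycles * mult has to
 * stay below 2^63 so the cyc2ns multiplication cannot overflow, and
 * the raw counter must be sampled before it wraps more than half of
 * its mask. Take the stricter of the two. */
static uint64_t overflow_cycles(uint64_t cycles_mask, uint32_t mult)
{
	uint64_t by_mult = (~0ULL >> 1) / mult;  /* keep cyc * mult < 2^63 */
	uint64_t by_mask = cycles_mask >> 1;     /* wrap-detection headroom */

	return by_mult < by_mask ? by_mult : by_mask;
}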
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index b97bb72b4db4..86478a6b99c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -113,35 +113,45 @@ err_db_free:
113 return err; 113 return err;
114} 114}
115 115
116static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf, 116static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
117 struct mlx5_wq_qp *qp) 117 struct mlx5_wq_qp *qp)
118{ 118{
119 struct mlx5_frag_buf_ctrl *sq_fbc;
119 struct mlx5_frag_buf *rqb, *sqb; 120 struct mlx5_frag_buf *rqb, *sqb;
120 121
121 rqb = &qp->rq.fbc.frag_buf; 122 rqb = &qp->rq.fbc.frag_buf;
122 *rqb = *buf; 123 *rqb = *buf;
123 rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); 124 rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
124 rqb->npages = 1 << get_order(rqb->size); 125 rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
125 126
126 sqb = &qp->sq.fbc.frag_buf; 127 sq_fbc = &qp->sq.fbc;
127 *sqb = *buf; 128 sqb = &sq_fbc->frag_buf;
128 sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); 129 *sqb = *buf;
129 sqb->npages = 1 << get_order(sqb->size); 130 sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
131 sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
130 sqb->frags += rqb->npages; /* first part is for the rq */ 132 sqb->frags += rqb->npages; /* first part is for the rq */
133 if (sq_fbc->strides_offset)
134 sqb->frags--;
131} 135}
132 136
133int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 137int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
134 void *qpc, struct mlx5_wq_qp *wq, 138 void *qpc, struct mlx5_wq_qp *wq,
135 struct mlx5_wq_ctrl *wq_ctrl) 139 struct mlx5_wq_ctrl *wq_ctrl)
136{ 140{
141 u32 sq_strides_offset;
137 int err; 142 int err;
138 143
139 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, 144 mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
140 MLX5_GET(qpc, qpc, log_rq_size), 145 MLX5_GET(qpc, qpc, log_rq_size),
141 &wq->rq.fbc); 146 &wq->rq.fbc);
142 mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB), 147
143 MLX5_GET(qpc, qpc, log_sq_size), 148 sq_strides_offset =
144 &wq->sq.fbc); 149 ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
150
151 mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
152 MLX5_GET(qpc, qpc, log_sq_size),
153 sq_strides_offset,
154 &wq->sq.fbc);
145 155
146 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); 156 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
147 if (err) { 157 if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
156 goto err_db_free; 166 goto err_db_free;
157 } 167 }
158 168
159 mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq); 169 mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
160 170
161 wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; 171 wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
162 wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; 172 wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index 3c0d882ba183..f6f6a568d66a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -327,12 +327,16 @@ static void mlxsw_afa_resource_add(struct mlxsw_afa_block *block,
327 list_add(&resource->list, &block->resource_list); 327 list_add(&resource->list, &block->resource_list);
328} 328}
329 329
330static void mlxsw_afa_resource_del(struct mlxsw_afa_resource *resource)
331{
332 list_del(&resource->list);
333}
334
330static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block) 335static void mlxsw_afa_resources_destroy(struct mlxsw_afa_block *block)
331{ 336{
332 struct mlxsw_afa_resource *resource, *tmp; 337 struct mlxsw_afa_resource *resource, *tmp;
333 338
334 list_for_each_entry_safe(resource, tmp, &block->resource_list, list) { 339 list_for_each_entry_safe(resource, tmp, &block->resource_list, list) {
335 list_del(&resource->list);
336 resource->destructor(block, resource); 340 resource->destructor(block, resource);
337 } 341 }
338} 342}
@@ -530,6 +534,7 @@ static void
530mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block, 534mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
531 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref) 535 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
532{ 536{
537 mlxsw_afa_resource_del(&fwd_entry_ref->resource);
533 mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry); 538 mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
534 kfree(fwd_entry_ref); 539 kfree(fwd_entry_ref);
535} 540}
@@ -579,6 +584,7 @@ static void
579mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block, 584mlxsw_afa_counter_destroy(struct mlxsw_afa_block *block,
580 struct mlxsw_afa_counter *counter) 585 struct mlxsw_afa_counter *counter)
581{ 586{
587 mlxsw_afa_resource_del(&counter->resource);
582 block->afa->ops->counter_index_put(block->afa->ops_priv, 588 block->afa->ops->counter_index_put(block->afa->ops_priv,
583 counter->counter_index); 589 counter->counter_index);
584 kfree(counter); 590 kfree(counter);
@@ -626,8 +632,8 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
626 char *oneact; 632 char *oneact;
627 char *actions; 633 char *actions;
628 634
629 if (WARN_ON(block->finished)) 635 if (block->finished)
630 return NULL; 636 return ERR_PTR(-EINVAL);
631 if (block->cur_act_index + action_size > 637 if (block->cur_act_index + action_size >
632 block->afa->max_acts_per_set) { 638 block->afa->max_acts_per_set) {
633 struct mlxsw_afa_set *set; 639 struct mlxsw_afa_set *set;
@@ -637,7 +643,7 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
637 */ 643 */
638 set = mlxsw_afa_set_create(false); 644 set = mlxsw_afa_set_create(false);
639 if (!set) 645 if (!set)
640 return NULL; 646 return ERR_PTR(-ENOBUFS);
641 set->prev = block->cur_set; 647 set->prev = block->cur_set;
642 block->cur_act_index = 0; 648 block->cur_act_index = 0;
643 block->cur_set->next = set; 649 block->cur_set->next = set;
@@ -724,8 +730,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
724 MLXSW_AFA_VLAN_CODE, 730 MLXSW_AFA_VLAN_CODE,
725 MLXSW_AFA_VLAN_SIZE); 731 MLXSW_AFA_VLAN_SIZE);
726 732
727 if (!act) 733 if (IS_ERR(act))
728 return -ENOBUFS; 734 return PTR_ERR(act);
729 mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP, 735 mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
730 MLXSW_AFA_VLAN_CMD_SET_OUTER, vid, 736 MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
731 MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp, 737 MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
@@ -806,8 +812,8 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
806 MLXSW_AFA_TRAPDISC_CODE, 812 MLXSW_AFA_TRAPDISC_CODE,
807 MLXSW_AFA_TRAPDISC_SIZE); 813 MLXSW_AFA_TRAPDISC_SIZE);
808 814
809 if (!act) 815 if (IS_ERR(act))
810 return -ENOBUFS; 816 return PTR_ERR(act);
811 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP, 817 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
812 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0); 818 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 0);
813 return 0; 819 return 0;
@@ -820,8 +826,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
820 MLXSW_AFA_TRAPDISC_CODE, 826 MLXSW_AFA_TRAPDISC_CODE,
821 MLXSW_AFA_TRAPDISC_SIZE); 827 MLXSW_AFA_TRAPDISC_SIZE);
822 828
823 if (!act) 829 if (IS_ERR(act))
824 return -ENOBUFS; 830 return PTR_ERR(act);
825 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, 831 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
826 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, 832 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD,
827 trap_id); 833 trap_id);
@@ -836,8 +842,8 @@ int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
836 MLXSW_AFA_TRAPDISC_CODE, 842 MLXSW_AFA_TRAPDISC_CODE,
837 MLXSW_AFA_TRAPDISC_SIZE); 843 MLXSW_AFA_TRAPDISC_SIZE);
838 844
839 if (!act) 845 if (IS_ERR(act))
840 return -ENOBUFS; 846 return PTR_ERR(act);
841 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, 847 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP,
842 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 848 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD,
843 trap_id); 849 trap_id);
@@ -856,6 +862,7 @@ static void
856mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block, 862mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
857 struct mlxsw_afa_mirror *mirror) 863 struct mlxsw_afa_mirror *mirror)
858{ 864{
865 mlxsw_afa_resource_del(&mirror->resource);
859 block->afa->ops->mirror_del(block->afa->ops_priv, 866 block->afa->ops->mirror_del(block->afa->ops_priv,
860 mirror->local_in_port, 867 mirror->local_in_port,
861 mirror->span_id, 868 mirror->span_id,
@@ -908,8 +915,8 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
908 char *act = mlxsw_afa_block_append_action(block, 915 char *act = mlxsw_afa_block_append_action(block,
909 MLXSW_AFA_TRAPDISC_CODE, 916 MLXSW_AFA_TRAPDISC_CODE,
910 MLXSW_AFA_TRAPDISC_SIZE); 917 MLXSW_AFA_TRAPDISC_SIZE);
911 if (!act) 918 if (IS_ERR(act))
912 return -ENOBUFS; 919 return PTR_ERR(act);
913 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP, 920 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_NOP,
914 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0); 921 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, 0);
915 mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent); 922 mlxsw_afa_trapdisc_mirror_pack(act, true, mirror_agent);
@@ -996,8 +1003,8 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
996 1003
997 act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE, 1004 act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
998 MLXSW_AFA_FORWARD_SIZE); 1005 MLXSW_AFA_FORWARD_SIZE);
999 if (!act) { 1006 if (IS_ERR(act)) {
1000 err = -ENOBUFS; 1007 err = PTR_ERR(act);
1001 goto err_append_action; 1008 goto err_append_action;
1002 } 1009 }
1003 mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS, 1010 mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
@@ -1052,8 +1059,8 @@ int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
1052{ 1059{
1053 char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE, 1060 char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_POLCNT_CODE,
1054 MLXSW_AFA_POLCNT_SIZE); 1061 MLXSW_AFA_POLCNT_SIZE);
1055 if (!act) 1062 if (IS_ERR(act))
1056 return -ENOBUFS; 1063 return PTR_ERR(act);
1057 mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES, 1064 mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
1058 counter_index); 1065 counter_index);
1059 return 0; 1066 return 0;
@@ -1123,8 +1130,8 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
1123 char *act = mlxsw_afa_block_append_action(block, 1130 char *act = mlxsw_afa_block_append_action(block,
1124 MLXSW_AFA_VIRFWD_CODE, 1131 MLXSW_AFA_VIRFWD_CODE,
1125 MLXSW_AFA_VIRFWD_SIZE); 1132 MLXSW_AFA_VIRFWD_SIZE);
1126 if (!act) 1133 if (IS_ERR(act))
1127 return -ENOBUFS; 1134 return PTR_ERR(act);
1128 mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid); 1135 mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
1129 return 0; 1136 return 0;
1130} 1137}
@@ -1193,8 +1200,8 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
1193 char *act = mlxsw_afa_block_append_action(block, 1200 char *act = mlxsw_afa_block_append_action(block,
1194 MLXSW_AFA_MCROUTER_CODE, 1201 MLXSW_AFA_MCROUTER_CODE,
1195 MLXSW_AFA_MCROUTER_SIZE); 1202 MLXSW_AFA_MCROUTER_SIZE);
1196 if (!act) 1203 if (IS_ERR(act))
1197 return -ENOBUFS; 1204 return PTR_ERR(act);
1198 mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP, 1205 mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP,
1199 expected_irif, min_mtu, rmid_valid, kvdl_index); 1206 expected_irif, min_mtu, rmid_valid, kvdl_index);
1200 return 0; 1207 return 0;
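Every caller above moves from a bare NULL check to the kernel's ERR_PTR convention, which lets append_action() report why it failed. A simplified standalone sketch of that convention:

#define MAX_ERRNO 4095

/* An errno is encoded in the last MAX_ERRNO values of the address
 * space, so one returned pointer can carry either a valid buffer or a
 * specific error (-EINVAL for "block already finished", -ENOBUFS for
 * "no room for another action set"). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}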
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 1decf3a1cad3..e57d23746585 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
80 return NFP_REPR_TYPE_VF; 80 return NFP_REPR_TYPE_VF;
81 } 81 }
82 82
83 return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC; 83 return __NFP_REPR_TYPE_MAX;
84} 84}
85 85
86static struct net_device * 86static struct net_device *
@@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
91 u8 port = 0; 91 u8 port = 0;
92 92
93 repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port); 93 repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
94 if (repr_type > NFP_REPR_TYPE_MAX)
95 return NULL;
94 96
95 reprs = rcu_dereference(app->reprs[repr_type]); 97 reprs = rcu_dereference(app->reprs[repr_type]);
96 if (!reprs) 98 if (!reprs)
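The two hunks above pair a sentinel return with a bounds check before the array lookup. The general shape, with illustrative names:

enum repr_type { REPR_PHYS, REPR_PF, REPR_VF, __REPR_TYPE_MAX };
#define REPR_TYPE_MAX (__REPR_TYPE_MAX - 1)

/* Unknown port ids map to the one-past-the-end sentinel, and the
 * lookup bails out before it can index the reprs[] array with it;
 * previously the "unspec" value could alias a valid array slot. */
static int repr_type_valid(enum repr_type t)
{
	return t <= REPR_TYPE_MAX;
}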
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 78afe75129ab..382bb93cb090 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
317 payload.dst_ipv4 = flow->daddr; 317 payload.dst_ipv4 = flow->daddr;
318 318
319 /* If entry has expired send dst IP with all other fields 0. */ 319 /* If entry has expired send dst IP with all other fields 0. */
320 if (!(neigh->nud_state & NUD_VALID)) { 320 if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
321 nfp_tun_del_route_from_cache(app, payload.dst_ipv4); 321 nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
322 /* Trigger ARP to verify invalid neighbour state. */ 322 /* Trigger ARP to verify invalid neighbour state. */
323 neigh_event_send(neigh, NULL); 323 neigh_event_send(neigh, NULL);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 99973e10b179..5ede6408649d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
665 665
666 p_ramrod->common.update_approx_mcast_flg = 1; 666 p_ramrod->common.update_approx_mcast_flg = 1;
667 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 667 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
668 u32 *p_bins = (u32 *)p_params->bins; 668 u32 *p_bins = p_params->bins;
669 669
670 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); 670 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
671 } 671 }
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1476 enum spq_mode comp_mode, 1476 enum spq_mode comp_mode,
1477 struct qed_spq_comp_cb *p_comp_data) 1477 struct qed_spq_comp_cb *p_comp_data)
1478{ 1478{
1479 unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1480 struct vport_update_ramrod_data *p_ramrod = NULL; 1479 struct vport_update_ramrod_data *p_ramrod = NULL;
1480 u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1481 struct qed_spq_entry *p_ent = NULL; 1481 struct qed_spq_entry *p_ent = NULL;
1482 struct qed_sp_init_data init_data; 1482 struct qed_sp_init_data init_data;
1483 u8 abs_vport_id = 0; 1483 u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1513 /* explicitly clear out the entire vector */ 1513 /* explicitly clear out the entire vector */
1514 memset(&p_ramrod->approx_mcast.bins, 0, 1514 memset(&p_ramrod->approx_mcast.bins, 0,
1515 sizeof(p_ramrod->approx_mcast.bins)); 1515 sizeof(p_ramrod->approx_mcast.bins));
1516 memset(bins, 0, sizeof(unsigned long) * 1516 memset(bins, 0, sizeof(bins));
1517 ETH_MULTICAST_MAC_BINS_IN_REGS);
1518 /* filter ADD op is explicit set op and it removes 1517 /* filter ADD op is explicit set op and it removes
1519 * any existing filters for the vport 1518 * any existing filters for the vport
1520 */ 1519 */
1521 if (p_filter_cmd->opcode == QED_FILTER_ADD) { 1520 if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1522 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1521 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1523 u32 bit; 1522 u32 bit, nbits;
1524 1523
1525 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1524 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1526 __set_bit(bit, bins); 1525 nbits = sizeof(u32) * BITS_PER_BYTE;
1526 bins[bit / nbits] |= 1 << (bit % nbits);
1527 } 1527 }
1528 1528
1529 /* Convert to correct endianity */ 1529 /* Convert to correct endianity */
1530 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { 1530 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1531 struct vport_update_ramrod_mcast *p_ramrod_bins; 1531 struct vport_update_ramrod_mcast *p_ramrod_bins;
1532 u32 *p_bins = (u32 *)bins;
1533 1532
1534 p_ramrod_bins = &p_ramrod->approx_mcast; 1533 p_ramrod_bins = &p_ramrod->approx_mcast;
1535 p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]); 1534 p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
1536 } 1535 }
1537 } 1536 }
1538 1537
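
The qed hunks above change the multicast bin vector from an unsigned long array driven by __set_bit() to a u32 array with open-coded shifts. The firmware interface consumes the bins as 32-bit registers, and on 64-bit big-endian hosts a long-based bitmap places bits in different 32-bit words than the HSI expects. A stand-alone demonstration of the index mismatch (output matches on little-endian; the two views diverge on 64-bit big-endian):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int bit = 40;	/* an example multicast bin index */
	unsigned int nbits = sizeof(unsigned int) * 8;
	unsigned int as_u32[4] = { 0 };
	unsigned long as_long[sizeof(as_u32) / sizeof(unsigned long)] = { 0 };
	unsigned int view[4];

	/* old scheme: __set_bit(bit, bins) on an unsigned long array */
	as_long[bit / (sizeof(long) * 8)] |= 1UL << (bit % (sizeof(long) * 8));
	/* new scheme: open-coded on u32, matching the 32-bit HSI registers */
	as_u32[bit / nbits] |= 1U << (bit % nbits);

	memcpy(view, as_long, sizeof(view));
	for (int i = 0; i < 4; i++)
		printf("reg %d: long-backed %#010x, u32-backed %#010x\n",
		       i, view[i], as_u32[i]);
	/* Identical on little-endian; on a 64-bit big-endian host the
	 * long-backed copy lands bit 40 in reg 0 instead of reg 1. */
	return 0;
}
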
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 806a8da257e9..8d80f1095d17 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
215 u8 anti_spoofing_en; 215 u8 anti_spoofing_en;
216 u8 update_accept_any_vlan_flg; 216 u8 update_accept_any_vlan_flg;
217 u8 accept_any_vlan; 217 u8 accept_any_vlan;
218 unsigned long bins[8]; 218 u32 bins[8];
219 struct qed_rss_params *rss_params; 219 struct qed_rss_params *rss_params;
220 struct qed_filter_accept_flags accept_flags; 220 struct qed_filter_accept_flags accept_flags;
221 struct qed_sge_tpa_params *sge_tpa_params; 221 struct qed_sge_tpa_params *sge_tpa_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9d9e533bccdc..cdd645024a32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1211,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1211 break; 1211 break;
1212 default: 1212 default:
1213 p_link->speed = 0; 1213 p_link->speed = 0;
1214 p_link->link_up = 0;
1214 } 1215 }
1215 1216
1216 if (p_link->link_up && p_link->speed) 1217 if (p_link->link_up && p_link->speed)
@@ -1308,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1308 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; 1309 phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1309 phy_cfg.adv_speed = params->speed.advertised_speeds; 1310 phy_cfg.adv_speed = params->speed.advertised_speeds;
1310 phy_cfg.loopback_mode = params->loopback_mode; 1311 phy_cfg.loopback_mode = params->loopback_mode;
1311 if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { 1312
1312 if (params->eee.enable) 1313 /* There are MFWs that share this capability regardless of whether
1313 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; 1314 * this is feasible or not. And given that at the very least adv_caps
1315 * would be set internally by qed, we want to make sure LFA would
1316 * still work.
1317 */
1318 if ((p_hwfn->mcp_info->capabilities &
1319 FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1320 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1314 if (params->eee.tx_lpi_enable) 1321 if (params->eee.tx_lpi_enable)
1315 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; 1322 phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1316 if (params->eee.adv_caps & QED_EEE_1G_ADV) 1323 if (params->eee.adv_caps & QED_EEE_1G_ADV)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index fd59cf45f4be..26e918d7f2f9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2831 2831
2832 p_data->update_approx_mcast_flg = 1; 2832 p_data->update_approx_mcast_flg = 1;
2833 memcpy(p_data->bins, p_mcast_tlv->bins, 2833 memcpy(p_data->bins, p_mcast_tlv->bins,
2834 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2834 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2835 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 2835 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2836} 2836}
2837 2837
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 2d7fcd6a0777..be6ddde1a104 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
1126 resp_size += sizeof(struct pfvf_def_resp_tlv); 1126 resp_size += sizeof(struct pfvf_def_resp_tlv);
1127 1127
1128 memcpy(p_mcast_tlv->bins, p_params->bins, 1128 memcpy(p_mcast_tlv->bins, p_params->bins,
1129 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1129 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1130 } 1130 }
1131 1131
1132 update_rx = p_params->accept_flags.update_rx_mode_config; 1132 update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
1272 u32 bit; 1272 u32 bit;
1273 1273
1274 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1274 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1275 __set_bit(bit, sp_params.bins); 1275 sp_params.bins[bit / 32] |= 1 << (bit % 32);
1276 } 1276 }
1277 } 1277 }
1278 1278
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 4f05d5eb3cf5..033409db86ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
392 struct channel_tlv tl; 392 struct channel_tlv tl;
393 u8 padding[4]; 393 u8 padding[4];
394 394
395 u64 bins[8]; 395 /* There are only 256 approx bins, and in HSI they're divided into
 396 * 32-bit values. Since old VFs used to set bits on their side,
397 * the upper half of the array is never expected to contain any data.
398 */
399 u64 bins[4];
400 u64 obsolete_bins[4];
396}; 401};
397 402
398struct vfpf_vport_update_accept_param_tlv { 403struct vfpf_vport_update_accept_param_tlv {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a3f69901ac87..eaedc11ed686 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7734 return rc; 7734 return rc;
7735 } 7735 }
7736 7736
7737 /* override BIOS settings, use userspace tools to enable WOL */ 7737 tp->saved_wolopts = __rtl8169_get_wol(tp);
7738 __rtl8169_set_wol(tp, 0);
7739 7738
7740 if (rtl_tbi_enabled(tp)) { 7739 if (rtl_tbi_enabled(tp)) {
7741 tp->set_speed = rtl8169_set_speed_tbi; 7740 tp->set_speed = rtl8169_set_speed_tbi;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 60f59abab009..ef6a8d39db2f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -53,7 +53,7 @@
53#include "dwmac1000.h" 53#include "dwmac1000.h"
54#include "hwif.h" 54#include "hwif.h"
55 55
56#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) 56#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
57#define TSO_MAX_BUFF_SIZE (SZ_16K - 1) 57#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
58 58
59/* Module parameters */ 59/* Module parameters */
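
STMMAC_ALIGN() now rounds up to the run-time SMP_CACHE_BYTES instead of the compile-time L1_CACHE_ALIGN(), presumably so buffer alignment follows the cache line size of the machine actually booted rather than the build configuration. For reference, the semantics of __ALIGN_KERNEL() restated as stand-alone C:

#include <stdio.h>

#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)	__ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)

int main(void)
{
	unsigned int smp_cache_bytes = 64;	/* a run-time value in the kernel */

	/* rounds 100 up to the next cache-line multiple: prints 128 */
	printf("%u\n", __ALIGN_KERNEL(100u, smp_cache_bytes));
	return 0;
}
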
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 8d375e51a526..6a393b16a1fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
257 return -ENOMEM; 257 return -ENOMEM;
258 258
259 /* Enable pci device */ 259 /* Enable pci device */
260 ret = pcim_enable_device(pdev); 260 ret = pci_enable_device(pdev);
261 if (ret) { 261 if (ret) {
262 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", 262 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
263 __func__); 263 __func__);
@@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
300static void stmmac_pci_remove(struct pci_dev *pdev) 300static void stmmac_pci_remove(struct pci_dev *pdev)
301{ 301{
302 stmmac_dvr_remove(&pdev->dev); 302 stmmac_dvr_remove(&pdev->dev);
303 pci_disable_device(pdev);
303} 304}
304 305
305static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); 306static int stmmac_pci_suspend(struct device *dev)
307{
308 struct pci_dev *pdev = to_pci_dev(dev);
309 int ret;
310
311 ret = stmmac_suspend(dev);
312 if (ret)
313 return ret;
314
315 ret = pci_save_state(pdev);
316 if (ret)
317 return ret;
318
319 pci_disable_device(pdev);
320 pci_wake_from_d3(pdev, true);
321 return 0;
322}
323
324static int stmmac_pci_resume(struct device *dev)
325{
326 struct pci_dev *pdev = to_pci_dev(dev);
327 int ret;
328
329 pci_restore_state(pdev);
330 pci_set_power_state(pdev, PCI_D0);
331
332 ret = pci_enable_device(pdev);
333 if (ret)
334 return ret;
335
336 pci_set_master(pdev);
337
338 return stmmac_resume(dev);
339}
340
341static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
306 342
307/* synthetic ID, no official vendor */ 343/* synthetic ID, no official vendor */
308#define PCI_VENDOR_ID_STMMAC 0x700 344#define PCI_VENDOR_ID_STMMAC 0x700
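
The stmmac_pci hunks drop the managed pcim_enable_device() in favour of explicit enable/disable and add PCI-aware suspend/resume wrappers around the generic stmmac_suspend()/stmmac_resume(). The point is ordering: the MAC is quiesced before PCI config space is saved and the device disabled, and the mirror-image sequence runs on resume. A hedged user-space sketch of that ordering; every *_demo function is a stand-in, not kernel API:

#include <stdio.h>

static int stmmac_suspend_demo(void)  { puts("stop DMA, detach netdev"); return 0; }
static int pci_save_state_demo(void)  { puts("save PCI config space");   return 0; }
static void pci_disable_demo(void)    { puts("disable device, arm D3 wake"); }

static void pci_restore_demo(void)    { puts("restore config, back to D0"); }
static int pci_enable_demo(void)      { puts("re-enable device, bus master"); return 0; }
static int stmmac_resume_demo(void)   { puts("restart DMA, attach netdev"); return 0; }

static int do_suspend(void)
{
	if (stmmac_suspend_demo())	/* driver quiesces first */
		return -1;
	if (pci_save_state_demo())
		return -1;
	pci_disable_demo();		/* bus goes down last */
	return 0;
}

static int do_resume(void)
{
	pci_restore_demo();		/* bus comes up first */
	if (pci_enable_demo())
		return -1;
	return stmmac_resume_demo();	/* driver restarts last */
}

int main(void)
{
	return do_suspend() || do_resume();
}
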
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 358edab9e72e..3e34cb8ac1d3 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2086,14 +2086,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
2086 int i; 2086 int i;
2087 2087
2088 for (i = 0; i < cpsw->data.slaves; i++) { 2088 for (i = 0; i < cpsw->data.slaves; i++) {
2089 if (vid == cpsw->slaves[i].port_vlan) 2089 if (vid == cpsw->slaves[i].port_vlan) {
2090 return -EINVAL; 2090 ret = -EINVAL;
2091 goto err;
2092 }
2091 } 2093 }
2092 } 2094 }
2093 2095
2094 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); 2096 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
2095 ret = cpsw_add_vlan_ale_entry(priv, vid); 2097 ret = cpsw_add_vlan_ale_entry(priv, vid);
2096 2098err:
2097 pm_runtime_put(cpsw->dev); 2099 pm_runtime_put(cpsw->dev);
2098 return ret; 2100 return ret;
2099} 2101}
@@ -2119,22 +2121,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
2119 2121
2120 for (i = 0; i < cpsw->data.slaves; i++) { 2122 for (i = 0; i < cpsw->data.slaves; i++) {
2121 if (vid == cpsw->slaves[i].port_vlan) 2123 if (vid == cpsw->slaves[i].port_vlan)
2122 return -EINVAL; 2124 goto err;
2123 } 2125 }
2124 } 2126 }
2125 2127
2126 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid); 2128 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
2127 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0); 2129 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
2128 if (ret != 0) 2130 ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2129 return ret; 2131 HOST_PORT_NUM, ALE_VLAN, vid);
2130 2132 ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2131 ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, 2133 0, ALE_VLAN, vid);
2132 HOST_PORT_NUM, ALE_VLAN, vid); 2134err:
2133 if (ret != 0)
2134 return ret;
2135
2136 ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2137 0, ALE_VLAN, vid);
2138 pm_runtime_put(cpsw->dev); 2135 pm_runtime_put(cpsw->dev);
2139 return ret; 2136 return ret;
2140} 2137}
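
The cpsw hunks above rewrite both VLAN ndo handlers around a single err: exit so that the runtime-PM reference taken earlier in the function is dropped on every path; the old early returns leaked it. In kill_vid the individual ALE delete results are now OR-ed together rather than returned one by one, trading precise errnos for balanced cleanup. A tiny stand-alone illustration of the single-exit pattern (user-space stand-ins, not the kernel API):

#include <stdio.h>

static int refs;

static void pm_get_demo(void) { refs++; }
static void pm_put_demo(void) { refs--; }

static int add_vid(int vid, int reserved_vid)
{
	int ret = 0;

	pm_get_demo();
	if (vid == reserved_vid) {
		ret = -1;		/* was: return -EINVAL (leaked the ref) */
		goto err;
	}
	printf("added vid %d\n", vid);
err:
	pm_put_demo();			/* reached on success and failure */
	return ret;
}

int main(void)
{
	add_vid(5, 100);
	add_vid(100, 100);
	printf("refcount balanced: %s\n", refs == 0 ? "yes" : "no");
	return 0;
}
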
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 93dc05c194d3..5766225a4ce1 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
394 394
395 idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0); 395 idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
396 if (idx < 0) 396 if (idx < 0)
397 return -EINVAL; 397 return -ENOENT;
398 398
399 cpsw_ale_read(ale, idx, ale_entry); 399 cpsw_ale_read(ale, idx, ale_entry);
400 400
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 16c3bfbe1992..757a3b37ae8a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -218,6 +218,7 @@ issue:
218 ret = of_mdiobus_register(bus, np1); 218 ret = of_mdiobus_register(bus, np1);
219 if (ret) { 219 if (ret) {
220 mdiobus_free(bus); 220 mdiobus_free(bus);
221 lp->mii_bus = NULL;
221 return ret; 222 return ret;
222 } 223 }
223 return 0; 224 return 0;
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
index ba663e5af168..5135fc371f01 100644
--- a/drivers/net/netdevsim/devlink.c
+++ b/drivers/net/netdevsim/devlink.c
@@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns)
207 struct net *net = nsim_to_net(ns); 207 struct net *net = nsim_to_net(ns);
208 bool *reg_devlink = net_generic(net, nsim_devlink_id); 208 bool *reg_devlink = net_generic(net, nsim_devlink_id);
209 209
210 devlink_resources_unregister(ns->devlink, NULL);
210 devlink_unregister(ns->devlink); 211 devlink_unregister(ns->devlink);
211 devlink_free(ns->devlink); 212 devlink_free(ns->devlink);
212 ns->devlink = NULL; 213 ns->devlink = NULL;
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
index 0831b7142df7..0c5b68e7da51 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
@@ -218,7 +218,7 @@ out:
218 218
219static int mdio_mux_iproc_remove(struct platform_device *pdev) 219static int mdio_mux_iproc_remove(struct platform_device *pdev)
220{ 220{
221 struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); 221 struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
222 222
223 mdio_mux_uninit(md->mux_handle); 223 mdio_mux_uninit(md->mux_handle);
224 mdiobus_unregister(md->mii_bus); 224 mdiobus_unregister(md->mii_bus);
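
In mdio_mux_iproc_remove() the descriptor is now fetched with platform_get_drvdata(), matching the platform_set_drvdata() done at probe time; dev_get_platdata() reads the board-supplied platform_data pointer, which is NULL for this DT-probed device. A minimal illustration of the two distinct per-device pointers (stand-in types, not the kernel API):

#include <assert.h>
#include <stddef.h>

struct device_demo {
	void *platform_data;	/* filled in by board/firmware code */
	void *driver_data;	/* filled in by the driver's probe()  */
};

static void set_drvdata(struct device_demo *d, void *p) { d->driver_data = p; }
static void *get_drvdata(struct device_demo *d)  { return d->driver_data; }
static void *get_platdata(struct device_demo *d) { return d->platform_data; }

int main(void)
{
	struct device_demo dev = { 0 };
	int desc;

	set_drvdata(&dev, &desc);		/* what probe() did */
	assert(get_platdata(&dev) == NULL);	/* what the old remove() read */
	assert(get_drvdata(&dev) == &desc);	/* what the fixed remove() reads */
	return 0;
}
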
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 537297d2b4b4..6c9b24fe3148 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
514 * negotiation may already be done and aneg interrupt may not be 514 * negotiation may already be done and aneg interrupt may not be
515 * generated. 515 * generated.
516 */ 516 */
517 if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { 517 if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
518 err = phy_aneg_done(phydev); 518 err = phy_aneg_done(phydev);
519 if (err > 0) { 519 if (err > 0) {
520 trigger = true; 520 trigger = true;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index ed10d49eb5e0..aeca484a75b8 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1242 mod_timer(&dev->stat_monitor, 1242 mod_timer(&dev->stat_monitor,
1243 jiffies + STAT_UPDATE_TIMER); 1243 jiffies + STAT_UPDATE_TIMER);
1244 } 1244 }
1245
1246 tasklet_schedule(&dev->bh);
1245 } 1247 }
1246 1248
1247 return ret; 1249 return ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 38502809420b..cb0cc30c3d6a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1246,7 +1246,7 @@ static const struct usb_device_id products[] = {
1246 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 1246 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1247 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 1247 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1248 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 1248 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1249 {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ 1249 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
1250 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 1250 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
1251 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ 1251 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
1252 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 1252 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 53085c63277b..2b6ec927809e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -586,7 +586,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
586 struct receive_queue *rq, 586 struct receive_queue *rq,
587 void *buf, void *ctx, 587 void *buf, void *ctx,
588 unsigned int len, 588 unsigned int len,
589 unsigned int *xdp_xmit) 589 unsigned int *xdp_xmit,
590 unsigned int *rbytes)
590{ 591{
591 struct sk_buff *skb; 592 struct sk_buff *skb;
592 struct bpf_prog *xdp_prog; 593 struct bpf_prog *xdp_prog;
@@ -601,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
601 int err; 602 int err;
602 603
603 len -= vi->hdr_len; 604 len -= vi->hdr_len;
605 *rbytes += len;
604 606
605 rcu_read_lock(); 607 rcu_read_lock();
606 xdp_prog = rcu_dereference(rq->xdp_prog); 608 xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -705,11 +707,13 @@ static struct sk_buff *receive_big(struct net_device *dev,
705 struct virtnet_info *vi, 707 struct virtnet_info *vi,
706 struct receive_queue *rq, 708 struct receive_queue *rq,
707 void *buf, 709 void *buf,
708 unsigned int len) 710 unsigned int len,
711 unsigned int *rbytes)
709{ 712{
710 struct page *page = buf; 713 struct page *page = buf;
711 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); 714 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
712 715
716 *rbytes += len - vi->hdr_len;
713 if (unlikely(!skb)) 717 if (unlikely(!skb))
714 goto err; 718 goto err;
715 719
@@ -727,7 +731,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
727 void *buf, 731 void *buf,
728 void *ctx, 732 void *ctx,
729 unsigned int len, 733 unsigned int len,
730 unsigned int *xdp_xmit) 734 unsigned int *xdp_xmit,
735 unsigned int *rbytes)
731{ 736{
732 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 737 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
733 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 738 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -740,6 +745,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
740 int err; 745 int err;
741 746
742 head_skb = NULL; 747 head_skb = NULL;
748 *rbytes += len - vi->hdr_len;
743 749
744 rcu_read_lock(); 750 rcu_read_lock();
745 xdp_prog = rcu_dereference(rq->xdp_prog); 751 xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -877,6 +883,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
877 goto err_buf; 883 goto err_buf;
878 } 884 }
879 885
886 *rbytes += len;
880 page = virt_to_head_page(buf); 887 page = virt_to_head_page(buf);
881 888
882 truesize = mergeable_ctx_to_truesize(ctx); 889 truesize = mergeable_ctx_to_truesize(ctx);
@@ -932,6 +939,7 @@ err_skb:
932 dev->stats.rx_length_errors++; 939 dev->stats.rx_length_errors++;
933 break; 940 break;
934 } 941 }
942 *rbytes += len;
935 page = virt_to_head_page(buf); 943 page = virt_to_head_page(buf);
936 put_page(page); 944 put_page(page);
937 } 945 }
@@ -942,14 +950,13 @@ xdp_xmit:
942 return NULL; 950 return NULL;
943} 951}
944 952
945static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 953static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
946 void *buf, unsigned int len, void **ctx, 954 void *buf, unsigned int len, void **ctx,
947 unsigned int *xdp_xmit) 955 unsigned int *xdp_xmit, unsigned int *rbytes)
948{ 956{
949 struct net_device *dev = vi->dev; 957 struct net_device *dev = vi->dev;
950 struct sk_buff *skb; 958 struct sk_buff *skb;
951 struct virtio_net_hdr_mrg_rxbuf *hdr; 959 struct virtio_net_hdr_mrg_rxbuf *hdr;
952 int ret;
953 960
954 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { 961 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
955 pr_debug("%s: short packet %i\n", dev->name, len); 962 pr_debug("%s: short packet %i\n", dev->name, len);
@@ -961,23 +968,22 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
961 } else { 968 } else {
962 put_page(virt_to_head_page(buf)); 969 put_page(virt_to_head_page(buf));
963 } 970 }
964 return 0; 971 return;
965 } 972 }
966 973
967 if (vi->mergeable_rx_bufs) 974 if (vi->mergeable_rx_bufs)
968 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit); 975 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
976 rbytes);
969 else if (vi->big_packets) 977 else if (vi->big_packets)
970 skb = receive_big(dev, vi, rq, buf, len); 978 skb = receive_big(dev, vi, rq, buf, len, rbytes);
971 else 979 else
972 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit); 980 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);
973 981
974 if (unlikely(!skb)) 982 if (unlikely(!skb))
975 return 0; 983 return;
976 984
977 hdr = skb_vnet_hdr(skb); 985 hdr = skb_vnet_hdr(skb);
978 986
979 ret = skb->len;
980
981 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) 987 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
982 skb->ip_summed = CHECKSUM_UNNECESSARY; 988 skb->ip_summed = CHECKSUM_UNNECESSARY;
983 989
@@ -994,12 +1000,11 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
994 ntohs(skb->protocol), skb->len, skb->pkt_type); 1000 ntohs(skb->protocol), skb->len, skb->pkt_type);
995 1001
996 napi_gro_receive(&rq->napi, skb); 1002 napi_gro_receive(&rq->napi, skb);
997 return ret; 1003 return;
998 1004
999frame_err: 1005frame_err:
1000 dev->stats.rx_frame_errors++; 1006 dev->stats.rx_frame_errors++;
1001 dev_kfree_skb(skb); 1007 dev_kfree_skb(skb);
1002 return 0;
1003} 1008}
1004 1009
1005/* Unlike mergeable buffers, all buffers are allocated to the 1010/* Unlike mergeable buffers, all buffers are allocated to the
@@ -1249,13 +1254,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
1249 1254
1250 while (received < budget && 1255 while (received < budget &&
1251 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) { 1256 (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
1252 bytes += receive_buf(vi, rq, buf, len, ctx, xdp_xmit); 1257 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
1253 received++; 1258 received++;
1254 } 1259 }
1255 } else { 1260 } else {
1256 while (received < budget && 1261 while (received < budget &&
1257 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { 1262 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
1258 bytes += receive_buf(vi, rq, buf, len, NULL, xdp_xmit); 1263 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
1259 received++; 1264 received++;
1260 } 1265 }
1261 } 1266 }
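
The virtio_net hunks change receive_buf() from returning skb->len, which was 0 whenever the buffer was dropped, errored, or consumed by XDP, to accumulating the received payload length into a caller-owned counter in every receive path, including the error unwinding in receive_mergeable(). That keeps the rx byte statistics in step with what was actually pulled off the ring. An illustrative sketch of the out-parameter accounting:

#include <stdio.h>

static void receive_buf_demo(unsigned int len, unsigned int hdr_len,
			     int dropped, unsigned int *rbytes)
{
	*rbytes += len - hdr_len;	/* counted before any early return */
	if (dropped)
		return;			/* old code would have reported 0 */
	/* ... build and deliver the skb ... */
}

int main(void)
{
	unsigned int bytes = 0;

	receive_buf_demo(1500, 12, 0, &bytes);
	receive_buf_demo(1500, 12, 1, &bytes);	/* dropped, still counted */
	printf("rx bytes: %u\n", bytes);	/* 2976, not 1488 */
	return 0;
}
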
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f6bb1d54d4bd..e857cb3335f6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -636,9 +636,62 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
636 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 636 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
637} 637}
638 638
639/* Add new entry to forwarding table -- assumes lock held */ 639static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
640 const u8 *mac, __u16 state,
641 __be32 src_vni, __u8 ndm_flags)
642{
643 struct vxlan_fdb *f;
644
645 f = kmalloc(sizeof(*f), GFP_ATOMIC);
646 if (!f)
647 return NULL;
648 f->state = state;
649 f->flags = ndm_flags;
650 f->updated = f->used = jiffies;
651 f->vni = src_vni;
652 INIT_LIST_HEAD(&f->remotes);
653 memcpy(f->eth_addr, mac, ETH_ALEN);
654
655 return f;
656}
657
640static int vxlan_fdb_create(struct vxlan_dev *vxlan, 658static int vxlan_fdb_create(struct vxlan_dev *vxlan,
641 const u8 *mac, union vxlan_addr *ip, 659 const u8 *mac, union vxlan_addr *ip,
660 __u16 state, __be16 port, __be32 src_vni,
661 __be32 vni, __u32 ifindex, __u8 ndm_flags,
662 struct vxlan_fdb **fdb)
663{
664 struct vxlan_rdst *rd = NULL;
665 struct vxlan_fdb *f;
666 int rc;
667
668 if (vxlan->cfg.addrmax &&
669 vxlan->addrcnt >= vxlan->cfg.addrmax)
670 return -ENOSPC;
671
672 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
673 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
674 if (!f)
675 return -ENOMEM;
676
677 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
678 if (rc < 0) {
679 kfree(f);
680 return rc;
681 }
682
683 ++vxlan->addrcnt;
684 hlist_add_head_rcu(&f->hlist,
685 vxlan_fdb_head(vxlan, mac, src_vni));
686
687 *fdb = f;
688
689 return 0;
690}
691
692/* Add new entry to forwarding table -- assumes lock held */
693static int vxlan_fdb_update(struct vxlan_dev *vxlan,
694 const u8 *mac, union vxlan_addr *ip,
642 __u16 state, __u16 flags, 695 __u16 state, __u16 flags,
643 __be16 port, __be32 src_vni, __be32 vni, 696 __be16 port, __be32 src_vni, __be32 vni,
644 __u32 ifindex, __u8 ndm_flags) 697 __u32 ifindex, __u8 ndm_flags)
@@ -687,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
687 if (!(flags & NLM_F_CREATE)) 740 if (!(flags & NLM_F_CREATE))
688 return -ENOENT; 741 return -ENOENT;
689 742
690 if (vxlan->cfg.addrmax &&
691 vxlan->addrcnt >= vxlan->cfg.addrmax)
692 return -ENOSPC;
693
694 /* Disallow replace to add a multicast entry */ 743 /* Disallow replace to add a multicast entry */
695 if ((flags & NLM_F_REPLACE) && 744 if ((flags & NLM_F_REPLACE) &&
696 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) 745 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
697 return -EOPNOTSUPP; 746 return -EOPNOTSUPP;
698 747
699 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 748 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
700 f = kmalloc(sizeof(*f), GFP_ATOMIC); 749 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
701 if (!f) 750 vni, ifindex, ndm_flags, &f);
702 return -ENOMEM; 751 if (rc < 0)
703
704 notify = 1;
705 f->state = state;
706 f->flags = ndm_flags;
707 f->updated = f->used = jiffies;
708 f->vni = src_vni;
709 INIT_LIST_HEAD(&f->remotes);
710 memcpy(f->eth_addr, mac, ETH_ALEN);
711
712 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
713 if (rc < 0) {
714 kfree(f);
715 return rc; 752 return rc;
716 } 753 notify = 1;
717
718 ++vxlan->addrcnt;
719 hlist_add_head_rcu(&f->hlist,
720 vxlan_fdb_head(vxlan, mac, src_vni));
721 } 754 }
722 755
723 if (notify) { 756 if (notify) {
@@ -741,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
741 kfree(f); 774 kfree(f);
742} 775}
743 776
744static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 777static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
778 bool do_notify)
745{ 779{
746 netdev_dbg(vxlan->dev, 780 netdev_dbg(vxlan->dev,
747 "delete %pM\n", f->eth_addr); 781 "delete %pM\n", f->eth_addr);
748 782
749 --vxlan->addrcnt; 783 --vxlan->addrcnt;
750 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 784 if (do_notify)
785 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
751 786
752 hlist_del_rcu(&f->hlist); 787 hlist_del_rcu(&f->hlist);
753 call_rcu(&f->rcu, vxlan_fdb_free); 788 call_rcu(&f->rcu, vxlan_fdb_free);
@@ -863,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
863 return -EAFNOSUPPORT; 898 return -EAFNOSUPPORT;
864 899
865 spin_lock_bh(&vxlan->hash_lock); 900 spin_lock_bh(&vxlan->hash_lock);
866 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, 901 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
867 port, src_vni, vni, ifindex, ndm->ndm_flags); 902 port, src_vni, vni, ifindex, ndm->ndm_flags);
868 spin_unlock_bh(&vxlan->hash_lock); 903 spin_unlock_bh(&vxlan->hash_lock);
869 904
@@ -897,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
897 goto out; 932 goto out;
898 } 933 }
899 934
900 vxlan_fdb_destroy(vxlan, f); 935 vxlan_fdb_destroy(vxlan, f, true);
901 936
902out: 937out:
903 return 0; 938 return 0;
@@ -1006,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
1006 1041
1007 /* close off race between vxlan_flush and incoming packets */ 1042 /* close off race between vxlan_flush and incoming packets */
1008 if (netif_running(dev)) 1043 if (netif_running(dev))
1009 vxlan_fdb_create(vxlan, src_mac, src_ip, 1044 vxlan_fdb_update(vxlan, src_mac, src_ip,
1010 NUD_REACHABLE, 1045 NUD_REACHABLE,
1011 NLM_F_EXCL|NLM_F_CREATE, 1046 NLM_F_EXCL|NLM_F_CREATE,
1012 vxlan->cfg.dst_port, 1047 vxlan->cfg.dst_port,
@@ -2364,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t)
2364 "garbage collect %pM\n", 2399 "garbage collect %pM\n",
2365 f->eth_addr); 2400 f->eth_addr);
2366 f->state = NUD_STALE; 2401 f->state = NUD_STALE;
2367 vxlan_fdb_destroy(vxlan, f); 2402 vxlan_fdb_destroy(vxlan, f, true);
2368 } else if (time_before(timeout, next_timer)) 2403 } else if (time_before(timeout, next_timer))
2369 next_timer = timeout; 2404 next_timer = timeout;
2370 } 2405 }
@@ -2415,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2415 spin_lock_bh(&vxlan->hash_lock); 2450 spin_lock_bh(&vxlan->hash_lock);
2416 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); 2451 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2417 if (f) 2452 if (f)
2418 vxlan_fdb_destroy(vxlan, f); 2453 vxlan_fdb_destroy(vxlan, f, true);
2419 spin_unlock_bh(&vxlan->hash_lock); 2454 spin_unlock_bh(&vxlan->hash_lock);
2420} 2455}
2421 2456
@@ -2469,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2469 continue; 2504 continue;
2470 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2505 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2471 if (!is_zero_ether_addr(f->eth_addr)) 2506 if (!is_zero_ether_addr(f->eth_addr))
2472 vxlan_fdb_destroy(vxlan, f); 2507 vxlan_fdb_destroy(vxlan, f, true);
2473 } 2508 }
2474 } 2509 }
2475 spin_unlock_bh(&vxlan->hash_lock); 2510 spin_unlock_bh(&vxlan->hash_lock);
@@ -3160,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3160{ 3195{
3161 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3196 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3162 struct vxlan_dev *vxlan = netdev_priv(dev); 3197 struct vxlan_dev *vxlan = netdev_priv(dev);
3198 struct vxlan_fdb *f = NULL;
3163 int err; 3199 int err;
3164 3200
3165 err = vxlan_dev_configure(net, dev, conf, false, extack); 3201 err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3173,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3173 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3209 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3174 &vxlan->default_dst.remote_ip, 3210 &vxlan->default_dst.remote_ip,
3175 NUD_REACHABLE | NUD_PERMANENT, 3211 NUD_REACHABLE | NUD_PERMANENT,
3176 NLM_F_EXCL | NLM_F_CREATE,
3177 vxlan->cfg.dst_port, 3212 vxlan->cfg.dst_port,
3178 vxlan->default_dst.remote_vni, 3213 vxlan->default_dst.remote_vni,
3179 vxlan->default_dst.remote_vni, 3214 vxlan->default_dst.remote_vni,
3180 vxlan->default_dst.remote_ifindex, 3215 vxlan->default_dst.remote_ifindex,
3181 NTF_SELF); 3216 NTF_SELF, &f);
3182 if (err) 3217 if (err)
3183 return err; 3218 return err;
3184 } 3219 }
3185 3220
3186 err = register_netdevice(dev); 3221 err = register_netdevice(dev);
3222 if (err)
3223 goto errout;
3224
3225 err = rtnl_configure_link(dev, NULL);
3187 if (err) { 3226 if (err) {
3188 vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); 3227 unregister_netdevice(dev);
3189 return err; 3228 goto errout;
3190 } 3229 }
3191 3230
3231 /* notify default fdb entry */
3232 if (f)
3233 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
3234
3192 list_add(&vxlan->next, &vn->vxlan_list); 3235 list_add(&vxlan->next, &vn->vxlan_list);
3193 return 0; 3236 return 0;
3237errout:
3238 if (f)
3239 vxlan_fdb_destroy(vxlan, f, false);
3240 return err;
3194} 3241}
3195 3242
3196static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], 3243static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3425,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3425 struct vxlan_rdst *dst = &vxlan->default_dst; 3472 struct vxlan_rdst *dst = &vxlan->default_dst;
3426 struct vxlan_rdst old_dst; 3473 struct vxlan_rdst old_dst;
3427 struct vxlan_config conf; 3474 struct vxlan_config conf;
3475 struct vxlan_fdb *f = NULL;
3428 int err; 3476 int err;
3429 3477
3430 err = vxlan_nl2conf(tb, data, 3478 err = vxlan_nl2conf(tb, data,
@@ -3453,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3453 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3501 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3454 &dst->remote_ip, 3502 &dst->remote_ip,
3455 NUD_REACHABLE | NUD_PERMANENT, 3503 NUD_REACHABLE | NUD_PERMANENT,
3456 NLM_F_CREATE | NLM_F_APPEND,
3457 vxlan->cfg.dst_port, 3504 vxlan->cfg.dst_port,
3458 dst->remote_vni, 3505 dst->remote_vni,
3459 dst->remote_vni, 3506 dst->remote_vni,
3460 dst->remote_ifindex, 3507 dst->remote_ifindex,
3461 NTF_SELF); 3508 NTF_SELF, &f);
3462 if (err) { 3509 if (err) {
3463 spin_unlock_bh(&vxlan->hash_lock); 3510 spin_unlock_bh(&vxlan->hash_lock);
3464 return err; 3511 return err;
3465 } 3512 }
3513 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
3466 } 3514 }
3467 spin_unlock_bh(&vxlan->hash_lock); 3515 spin_unlock_bh(&vxlan->hash_lock);
3468 } 3516 }
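
The vxlan diff splits the old vxlan_fdb_create() into a pure allocate-and-insert helper plus vxlan_fdb_update() for the netlink and snooping paths, and gives vxlan_fdb_destroy() a do_notify flag. That lets __vxlan_dev_create() insert the default all-zeros entry silently, send RTM_NEWNEIGH only after register_netdevice() and rtnl_configure_link() have succeeded, and tear the entry down without a spurious RTM_DELNEIGH when registration fails. A stand-alone sketch of that ordering (stand-in functions, not the kernel API):

#include <stdio.h>

static int register_netdevice_demo(int fail) { return fail ? -1 : 0; }

static int dev_create(int fail_register)
{
	int have_fdb = 1;	/* vxlan_fdb_create(): no RTM_NEWNEIGH yet */

	if (register_netdevice_demo(fail_register)) {
		if (have_fdb)	/* vxlan_fdb_destroy(..., do_notify=false) */
			puts("destroy fdb silently");
		return -1;
	}
	if (have_fdb)		/* only now is the entry made visible */
		puts("notify RTM_NEWNEIGH");
	return 0;
}

int main(void)
{
	dev_create(0);
	dev_create(1);
	return 0;
}
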
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 90a4ad9a2d08..b3a1b6f5c406 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1362,7 +1362,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1362 case 0x001: 1362 case 0x001:
1363 printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name); 1363 printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1364 break; 1364 break;
1365 case 0x010: 1365 case 0x002:
1366 printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name); 1366 printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1367 break; 1367 break;
1368 default: 1368 default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 45928b5b8d97..4fffa6988087 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
1785 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; 1785 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
1786 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; 1786 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
1787 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; 1787 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
1788 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus); 1788 /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
1789 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
1789 fwreq->bus_nr = devinfo->pdev->bus->number; 1790 fwreq->bus_nr = devinfo->pdev->bus->number;
1790 1791
1791 return fwreq; 1792 return fwreq;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e20c30b29c03..c8ea63d02619 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -178,6 +178,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
178 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 178 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
179}; 179};
180 180
181const struct iwl_cfg iwl9260_killer_2ac_cfg = {
182 .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
183 .fw_name_pre = IWL9260A_FW_PRE,
184 .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
185 IWL_DEVICE_9000,
186 .ht_params = &iwl9000_ht_params,
187 .nvm_ver = IWL9000_NVM_VERSION,
188 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
189 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
190};
191
181const struct iwl_cfg iwl9270_2ac_cfg = { 192const struct iwl_cfg iwl9270_2ac_cfg = {
182 .name = "Intel(R) Dual Band Wireless AC 9270", 193 .name = "Intel(R) Dual Band Wireless AC 9270",
183 .fw_name_pre = IWL9260A_FW_PRE, 194 .fw_name_pre = IWL9260A_FW_PRE,
@@ -267,6 +278,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
267 .soc_latency = 5000, 278 .soc_latency = 5000,
268}; 279};
269 280
281const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
282 .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
283 .fw_name_pre = IWL9000A_FW_PRE,
284 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
285 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
286 IWL_DEVICE_9000,
287 .ht_params = &iwl9000_ht_params,
288 .nvm_ver = IWL9000_NVM_VERSION,
289 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
290 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
291 .integrated = true,
292 .soc_latency = 5000,
293};
294
295const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
296 .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
297 .fw_name_pre = IWL9000A_FW_PRE,
298 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
299 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
300 IWL_DEVICE_9000,
301 .ht_params = &iwl9000_ht_params,
302 .nvm_ver = IWL9000_NVM_VERSION,
303 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
304 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
305 .integrated = true,
306 .soc_latency = 5000,
307};
308
270const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { 309const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
271 .name = "Intel(R) Dual Band Wireless AC 9460", 310 .name = "Intel(R) Dual Band Wireless AC 9460",
272 .fw_name_pre = IWL9000A_FW_PRE, 311 .fw_name_pre = IWL9000A_FW_PRE,
@@ -327,6 +366,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
327 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK 366 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
328}; 367};
329 368
369const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
370 .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
371 .fw_name_pre = IWL9000A_FW_PRE,
372 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
373 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
374 IWL_DEVICE_9000,
375 .ht_params = &iwl9000_ht_params,
376 .nvm_ver = IWL9000_NVM_VERSION,
377 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
378 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
379 .integrated = true,
380 .soc_latency = 5000,
381 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
382};
383
384const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
385 .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
386 .fw_name_pre = IWL9000A_FW_PRE,
387 .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
388 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
389 IWL_DEVICE_9000,
390 .ht_params = &iwl9000_ht_params,
391 .nvm_ver = IWL9000_NVM_VERSION,
392 .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
393 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
394 .integrated = true,
395 .soc_latency = 5000,
396 .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
397};
398
330MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); 399MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
331MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); 400MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
332MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); 401MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c503b26793f6..84a816809723 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -551,6 +551,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
551extern const struct iwl_cfg iwl4165_2ac_cfg; 551extern const struct iwl_cfg iwl4165_2ac_cfg;
552extern const struct iwl_cfg iwl9160_2ac_cfg; 552extern const struct iwl_cfg iwl9160_2ac_cfg;
553extern const struct iwl_cfg iwl9260_2ac_cfg; 553extern const struct iwl_cfg iwl9260_2ac_cfg;
554extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
554extern const struct iwl_cfg iwl9270_2ac_cfg; 555extern const struct iwl_cfg iwl9270_2ac_cfg;
555extern const struct iwl_cfg iwl9460_2ac_cfg; 556extern const struct iwl_cfg iwl9460_2ac_cfg;
556extern const struct iwl_cfg iwl9560_2ac_cfg; 557extern const struct iwl_cfg iwl9560_2ac_cfg;
@@ -558,10 +559,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
558extern const struct iwl_cfg iwl9461_2ac_cfg_soc; 559extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
559extern const struct iwl_cfg iwl9462_2ac_cfg_soc; 560extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
560extern const struct iwl_cfg iwl9560_2ac_cfg_soc; 561extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
562extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
563extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
561extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; 564extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
562extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; 565extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
563extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; 566extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
564extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; 567extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
568extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
569extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
565extern const struct iwl_cfg iwl22000_2ac_cfg_hr; 570extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
566extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; 571extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
567extern const struct iwl_cfg iwl22000_2ac_cfg_jf; 572extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 38234bda9017..8520523b91b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
545 {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, 545 {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
546 {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, 546 {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
547 {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, 547 {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
548 {IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
549 {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
550 {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
548 {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, 551 {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
549 {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, 552 {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
550 {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, 553 {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
554 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, 557 {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
555 {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, 558 {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
556 {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, 559 {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
560 {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
557 {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, 561 {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
558 {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, 562 {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
559 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, 563 {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
@@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
578 {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)}, 582 {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
579 {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)}, 583 {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
580 {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, 584 {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
585 {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
586 {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
581 {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, 587 {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
582 {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, 588 {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
583 {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, 589 {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
@@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
604 {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)}, 610 {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
605 {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)}, 611 {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
606 {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, 612 {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
613 {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
614 {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
607 {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, 615 {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
608 {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, 616 {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
609 {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, 617 {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
630 {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)}, 638 {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
631 {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)}, 639 {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
632 {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, 640 {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
641 {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
642 {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
633 {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, 643 {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
634 {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, 644 {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
635 {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, 645 {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
@@ -656,6 +666,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
656 {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)}, 666 {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
657 {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)}, 667 {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
658 {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)}, 668 {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
669 {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
670 {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
659 {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)}, 671 {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
660 {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)}, 672 {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
661 {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)}, 673 {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
682 {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)}, 694 {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
683 {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)}, 695 {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
684 {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, 696 {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
697 {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
698 {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
685 {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, 699 {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
686 {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, 700 {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
687 {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, 701 {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
708 {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)}, 722 {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
709 {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)}, 723 {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
710 {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, 724 {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
725 {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
726 {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
711 {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, 727 {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
712 {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, 728 {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
713 {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, 729 {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
743 {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)}, 759 {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
744 {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)}, 760 {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
745 {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)}, 761 {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
762 {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
763 {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
746 {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, 764 {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
747 {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, 765 {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
748 {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, 766 {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
@@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
771 {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)}, 789 {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
772 {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)}, 790 {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
773 {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, 791 {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
792 {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
793 {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
774 {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, 794 {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
775 {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, 795 {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
776 {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, 796 {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
797 {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)}, 817 {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
798 {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, 818 {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
799 {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, 819 {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
820 {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
821 {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
800 {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, 822 {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
801 {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, 823 {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
802 {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, 824 {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a57daecf1d57..9dd2ca62d84a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,6 +87,7 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
+static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
 static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
 
 struct netfront_stats {
@@ -893,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 				  struct sk_buff *skb,
 				  struct sk_buff_head *list)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -902,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
 			BUG_ON(pull_to <= skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				skb_frag_page(nfrag),
 				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
@@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
 	netif_carrier_off(netdev);
 
 	xenbus_switch_state(dev, XenbusStateInitialising);
+	wait_event(module_load_q,
+		   xenbus_read_driver_state(dev->otherend) !=
+		   XenbusStateClosed &&
+		   xenbus_read_driver_state(dev->otherend) !=
+		   XenbusStateUnknown);
 	return netdev;
 
  exit:
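Note: the new module_load_q wait keeps xennet_create_dev() from returning until the backend has left the Closed/Unknown states, closing a window where a half-connected frontend could be handed back to the caller. A minimal sketch of the handshake, assuming a simplified state variable in place of the real xenbus watch plumbing:

#include <linux/wait.h>
#include <xen/xenbus.h>

static DECLARE_WAIT_QUEUE_HEAD(load_q);
static enum xenbus_state peer_state = XenbusStateUnknown;

/* watch-callback side: publish the peer's state, then wake any waiter */
static void peer_changed(enum xenbus_state new_state)
{
	peer_state = new_state;
	wake_up(&load_q);
}

/* creator side: sleep until the peer is neither Closed nor Unknown */
static void wait_for_peer(void)
{
	wait_event(load_q, peer_state != XenbusStateClosed &&
			   peer_state != XenbusStateUnknown);
}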
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
index a59b6c4bb5b8..ad3d17c42e23 100644
--- a/drivers/nubus/bus.c
+++ b/drivers/nubus/bus.c
@@ -5,6 +5,7 @@
 // Copyright (C) 2017 Finn Thain
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/nubus.h>
 #include <linux/seq_file.h>
@@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board)
 	board->dev.release = nubus_device_release;
 	board->dev.bus = &nubus_bus_type;
 	dev_set_name(&board->dev, "slot.%X", board->slot);
+	board->dev.dma_mask = &board->dev.coherent_dma_mask;
+	dma_set_mask(&board->dev, DMA_BIT_MASK(32));
 	return device_register(&board->dev);
 }
 
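Note: the two added lines give every NuBus board device a 32-bit DMA mask before it is registered, so drivers can use the generic DMA API on board->dev. A hedged sketch of the idiom for any bus that instantiates its own devices:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int demo_register(struct device *dev)
{
	/* point dma_mask at coherent_dma_mask so both masks track together */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* declare 32-bit addressing; the core validates and stores the mask */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	return device_register(dev);
}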
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 903eb4545e26..f7efe5a58cc7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For something we're not in a state to send to the device the default action
  * is to busy it and retry it after the controller state is recovered. However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq)
 {
-	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+	if (ctrl->state != NVME_CTRL_DELETING &&
+	    ctrl->state != NVME_CTRL_DEAD &&
+	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 	return BLK_STS_IOERR;
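Note: passing the controller into nvmf_fail_nonready_command() lets the helper fail requests outright once the controller is deleting or dead, instead of returning BLK_STS_RESOURCE and requeueing them forever against a controller that will never recover. The decision reduces to a small predicate; a runnable restatement with stub types (not the nvme API):

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_DELETING, CTRL_DEAD };
enum verdict { REQUEUE, FAIL };

/* requeue only if the controller can still come back and the request
 * is neither failfast nor a multipath command */
static enum verdict fail_nonready(enum ctrl_state st, bool noretry, bool mpath)
{
	if (st != CTRL_DELETING && st != CTRL_DEAD && !noretry && !mpath)
		return REQUEUE;	/* BLK_STS_RESOURCE in the driver */
	return FAIL;		/* BLK_STS_IOERR in the driver */
}

int main(void)
{
	printf("live+normal -> %s\n",
	       fail_nonready(CTRL_LIVE, false, false) == REQUEUE ? "requeue" : "fail");
	printf("deleting    -> %s\n",
	       fail_nonready(CTRL_DELETING, false, false) == REQUEUE ? "requeue" : "fail");
	return 0;
}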
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e1818a27aa2d..aa2fdb2a2e8f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live);
 
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 41d45a1b5c62..9bac912173ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 518c5b09038c..66ec5985c9f3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(rq->tag < 0);
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3ec4d1a..ebea1373d1b7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
 	struct nvmet_subsys *subsys = ns->subsys;
+	size_t len;
 	int ret;
 
 	mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 	if (ns->enabled)
 		goto out_unlock;
 
-	kfree(ns->device_path);
+	ret = -EINVAL;
+	len = strcspn(page, "\n");
+	if (!len)
+		goto out_unlock;
 
+	kfree(ns->device_path);
 	ret = -ENOMEM;
-	ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+	ns->device_path = kstrndup(page, len, GFP_KERNEL);
 	if (!ns->device_path)
 		goto out_unlock;
 
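Note: validating the length before freeing the old path means an empty write (or a lone newline) now fails with -EINVAL instead of silently discarding a configured device path. The strcspn()-based trim is the same idiom in userspace; a runnable re-creation of the kstrndup(page, strcspn(page, "\n")) pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* duplicate everything up to the first newline; reject empty input */
static char *dup_first_line(const char *page)
{
	size_t len = strcspn(page, "\n");	/* length before the newline */

	if (!len)
		return NULL;			/* maps to -EINVAL above */
	return strndup(page, len);
}

int main(void)
{
	char *p = dup_first_line("/dev/nullb0\n");

	printf("%s\n", p ? p : "(rejected)");
	free(p);
	return 0;
}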
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 74d4b785d2da..9838103f2d62 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
-	if (ret)
+	if (ret == -ENOTBLK)
 		ret = nvmet_file_ns_enable(ns);
 	if (ret)
 		goto out_unlock;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 408279cb6f2c..29b4b236afd8 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct		work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu		cmdiubuf;
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
 	struct scatterlist		*data_sg;
 	int				data_sg_cnt;
 	u32				offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 			struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->req.transfer_len - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->req.transfer_len - fod->offset));
+	/*
+	 * for next sequence:
+	 *   break at a sg element boundary
+	 *   attempt to keep sequence length capped at
+	 *     NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *     be longer if a single sg element is larger
+	 *     than that amount. This is done to avoid creating
+	 *     a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.
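Note: the rewritten loop builds each FC sequence directly on the DMA-mapped scatterlist, breaking at element boundaries rather than assuming PAGE_SIZE entries, and deliberately lets one oversized element exceed the 256 KiB target so no replacement sg list has to be allocated. A runnable model of the accumulation rule, with an array of segment lengths standing in for the scatterlist:

#include <stdio.h>

#define MAX_SEQ_LEN (256 * 1024)	/* cap, like NVMET_FC_MAX_SEQ_LENGTH */
#define MAX_SG_CNT 4			/* cap, like tgtport->max_sg_cnt */

/* Pick how many sg elements the next sequence covers; returns the element
 * count, and *tlen gets the byte total.  A single element bigger than the
 * cap is still sent whole, mirroring the fallback branch in the driver.
 */
static int next_seq(const unsigned *sg_len, int nents, int first,
		    unsigned remaining, unsigned *tlen)
{
	int i = first, cnt = 0;

	*tlen = 0;
	while (*tlen < remaining && cnt < MAX_SG_CNT && i < nents &&
	       *tlen + sg_len[i] < MAX_SEQ_LEN) {
		*tlen += sg_len[i++];
		cnt++;
	}
	if (*tlen < remaining && cnt == 0 && i < nents) {	/* huge element */
		*tlen += sg_len[i] < remaining ? sg_len[i] : remaining;
		cnt = 1;
	}
	return cnt;
}

int main(void)
{
	const unsigned sg[] = { 128 * 1024, 128 * 1024, 512 * 1024 };
	unsigned tlen;
	int cnt = next_seq(sg, 3, 0, 768 * 1024, &tlen);

	printf("first sequence: %d element(s), %u bytes\n", cnt, tlen);
	return 0;
}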
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d8d91f04bd7e..ae7586b8be07 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvmf_fail_nonready_command(req);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 35b7fc87eac5..5cb40b2518f9 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -330,7 +330,7 @@ void pci_bus_add_device(struct pci_dev *dev)
 		return;
 	}
 
-	dev->is_added = 1;
+	pci_dev_assign_added(dev, true);
 }
 EXPORT_SYMBOL_GPL(pci_bus_add_device);
 
@@ -347,14 +347,14 @@ void pci_bus_add_devices(const struct pci_bus *bus)
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		/* Skip already-added devices */
-		if (dev->is_added)
+		if (pci_dev_is_added(dev))
 			continue;
 		pci_bus_add_device(dev);
 	}
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		/* Skip if device attach failed */
-		if (!dev->is_added)
+		if (!pci_dev_is_added(dev))
 			continue;
 		child = dev->subordinate;
 		if (child)
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index 4d6c20e47bed..cf0aa7cee5b0 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -107,7 +107,7 @@
 #define CFG_WINDOW_TYPE	0
 #define IO_WINDOW_TYPE	1
 #define MEM_WINDOW_TYPE	2
-#define IB_WIN_SIZE	(256 * 1024 * 1024 * 1024)
+#define IB_WIN_SIZE	((u64)256 * 1024 * 1024 * 1024)
 #define MAX_PIO_WINDOWS	8
 
 /* Parameters for the waiting for link up routine */
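Note: the one-character fix matters because 256 * 1024 * 1024 * 1024 is evaluated in int: the product is 2^38, which overflows 32-bit signed arithmetic (undefined behavior that on common compilers wraps to exactly 0), so the inbound window would have been sized zero. Casting the first operand to u64 forces the whole product into 64-bit math. A runnable demonstration:

#include <stdint.h>
#include <stdio.h>

#define IB_WIN_SIZE_BAD  (256 * 1024 * 1024 * 1024)		/* int math */
#define IB_WIN_SIZE_GOOD ((uint64_t)256 * 1024 * 1024 * 1024)	/* 64-bit math */

int main(void)
{
	/* GCC warns "integer overflow in expression"; 2^38 mod 2^32 == 0 */
	printf("bad:  %lld\n", (long long)IB_WIN_SIZE_BAD);
	printf("good: %llu\n", (unsigned long long)IB_WIN_SIZE_GOOD);
	return 0;
}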
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 3a17b290df5d..ef0b1b6ba86f 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -509,7 +509,7 @@ static void enable_slot(struct acpiphp_slot *slot)
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		/* Assume that newly added devices are powered on already. */
-		if (!dev->is_added)
+		if (!pci_dev_is_added(dev))
 			dev->current_state = PCI_D0;
 	}
 
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 882f1f9596df..08817253c8a2 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -288,6 +288,7 @@ struct pci_sriov {
 
 /* pci_dev priv_flags */
 #define PCI_DEV_DISCONNECTED 0
+#define PCI_DEV_ADDED 1
 
 static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
 {
@@ -300,6 +301,16 @@ static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
 	return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
 }
 
+static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
+{
+	assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
+}
+
+static inline bool pci_dev_is_added(const struct pci_dev *dev)
+{
+	return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
+}
+
 #ifdef CONFIG_PCI_ATS
 void pci_restore_ats_state(struct pci_dev *dev);
 #else
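Note: replacing the is_added struct bitfield with a priv_flags bit matters because bitfield writes are read-modify-write on the containing word and can race with concurrent updates to neighboring bitfields, whereas the set_bit()/test_bit() family operates atomically per bit. A runnable userspace model of assign_bit()/test_bit(), assuming GCC/Clang __atomic builtins:

#include <stdbool.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's atomic bit helpers */
static void assign_bit(int nr, unsigned long *addr, bool value)
{
	if (value)
		__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
	else
		__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static bool test_bit(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

#define DEV_DISCONNECTED 0
#define DEV_ADDED	 1

int main(void)
{
	unsigned long priv_flags = 0;

	assign_bit(DEV_ADDED, &priv_flags, true);
	printf("added=%d disconnected=%d\n",
	       test_bit(DEV_ADDED, &priv_flags),
	       test_bit(DEV_DISCONNECTED, &priv_flags));
	return 0;
}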
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index f7ce0cb0b0b7..f02e334beb45 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 
 	parent = udev->subordinate;
 	pci_lock_rescan_remove();
+	pci_dev_get(dev);
 	list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
 					 bus_list) {
 		pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 		pci_info(dev, "Device recovery from fatal error failed\n");
 	}
 
+	pci_dev_put(dev);
 	pci_unlock_rescan_remove();
 }
 
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ac876e32de4b..611adcd9c169 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2433,13 +2433,13 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
 	dev = pci_scan_single_device(bus, devfn);
 	if (!dev)
 		return 0;
-	if (!dev->is_added)
+	if (!pci_dev_is_added(dev))
 		nr++;
 
 	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
 		dev = pci_scan_single_device(bus, devfn + fn);
 		if (dev) {
-			if (!dev->is_added)
+			if (!pci_dev_is_added(dev))
 				nr++;
 			dev->multifunction = 1;
 		}
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 6f072eae4f7a..5e3d0dced2b8 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -19,11 +19,12 @@ static void pci_stop_dev(struct pci_dev *dev)
 {
 	pci_pme_active(dev, false);
 
-	if (dev->is_added) {
+	if (pci_dev_is_added(dev)) {
 		device_release_driver(&dev->dev);
 		pci_proc_detach_device(dev);
 		pci_remove_sysfs_dev_files(dev);
-		dev->is_added = 0;
+
+		pci_dev_assign_added(dev, false);
 	}
 
 	if (dev->bus->self)
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 1b7febc43da9..29d2c3b1913a 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 {
 	void __iomem *ctrl = params->ctrl_regs;
 
+	USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+	/* 1 millisecond - for USB clocks to settle down */
+	usleep_range(1000, 2000);
+
 	if (BRCM_ID(params->family_id) == 0x7366) {
 		/*
 		 * The PHY3_SOFT_RESETB bits default to the wrong state.
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 23705e1a0023..0075fb0bef8c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
 	ddata = container_of(work, struct phy_mdm6600, status_work.work);
 	dev = ddata->dev;
 
-	error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+	error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
 					       ddata->status_gpios->desc,
 					       values);
 	if (error)
 		return;
 
-	for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+	for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
 		val |= values[i] << i;
 		dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
 			__func__, i, values[i], val);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index ea23c8dffc25..ffec695e0bfb 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 	case ELS_LOGO:
 		if (fip->mode == FIP_MODE_VN2VN) {
 			if (fip->state != FIP_ST_VNMP_UP)
-				return -EINVAL;
+				goto drop;
 			if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
-				return -EINVAL;
+				goto drop;
 		} else {
 			if (fip->state != FIP_ST_ENABLED)
 				return 0;
@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
 	fip->send(fip, skb);
 	return -EINPROGRESS;
 drop:
-	kfree_skb(skb);
 	LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
 			op, ntoh24(fh->fh_d_id));
+	kfree_skb(skb);
 	return -EINVAL;
 }
 EXPORT_SYMBOL(fcoe_ctlr_els_send);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 31d31aad3de1..89b1f1af2fd4 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 			     fc_rport_state(rdata));
 
+		rdata->flags &= ~FC_RP_STARTED;
 		fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 		mutex_unlock(&rdata->rp_mutex);
 		kref_put(&rdata->kref, fc_rport_destroy);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index d6093838f5f2..c972cc2b3d5b 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
 		 */
 		if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
 			iscsi_conn_printk(KERN_INFO, conn,
-					  "task [op %x/%x itt "
+					  "task [op %x itt "
 					  "0x%x/0x%x] "
 					  "rejected.\n",
-					  task->hdr->opcode, opcode,
-					  task->itt, task->hdr_itt);
+					  opcode, task->itt,
+					  task->hdr_itt);
 			return -EACCES;
 		}
 		/*
@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
 		 */
 		if (conn->session->fast_abort) {
 			iscsi_conn_printk(KERN_INFO, conn,
-					  "task [op %x/%x itt "
+					  "task [op %x itt "
 					  "0x%x/0x%x] fast abort.\n",
-					  task->hdr->opcode, opcode,
-					  task->itt, task->hdr_itt);
+					  opcode, task->itt,
+					  task->hdr_itt);
 			return -EACCES;
 		}
 		break;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 569392d0d4c9..e44c91edf92d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3343,11 +3343,10 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
 				    spinlock_t *writeq_lock)
 {
 	unsigned long flags;
-	__u64 data_out = b;
 
 	spin_lock_irqsave(writeq_lock, flags);
-	writel((u32)(data_out), addr);
-	writel((u32)(data_out >> 32), (addr + 4));
+	__raw_writel((u32)(b), addr);
+	__raw_writel((u32)(b >> 32), (addr + 4));
 	mmiowb();
 	spin_unlock_irqrestore(writeq_lock, flags);
 }
@@ -3367,7 +3366,8 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
-	writeq(b, addr);
+	__raw_writeq(b, addr);
+	mmiowb();
 }
 #else
 static inline void
@@ -5268,7 +5268,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
 	/* send message 32-bits at a time */
 	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
-		writel((u32)(request[i]), &ioc->chip->Doorbell);
+		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
 		if ((_base_wait_for_doorbell_ack(ioc, 5)))
 			failed = 1;
 	}
@@ -5289,7 +5289,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	}
 
 	/* read the first two 16-bits, it gives the total length of the reply */
-	reply[0] = (u16)(readl(&ioc->chip->Doorbell)
+	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 	if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5298,7 +5298,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 		    ioc->name, __LINE__);
 		return -EFAULT;
 	}
-	reply[1] = (u16)(readl(&ioc->chip->Doorbell)
+	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5312,7 +5312,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 		if (i >= reply_bytes/2) /* overflow case */
 			readl(&ioc->chip->Doorbell);
 		else
-			reply[i] = (u16)(readl(&ioc->chip->Doorbell)
+			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
 			    & MPI2_DOORBELL_DATA_MASK);
 		writel(0, &ioc->chip->HostInterruptStatus);
 	}
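Note: the mpt3sas hunks separate byte-swapping from MMIO: __raw_writel()/__raw_writeq() emit the doorbell halves without the implicit little-endian conversion that writel()/writeq() perform (which would double-swap values on big-endian hosts), while the explicit cpu_to_le32()/le16_to_cpu() at the handshake sites document where conversion actually happens; mmiowb() keeps the posted writes ordered before the lock is released. A hedged kernel-style sketch of the locked 64-bit pattern, not the driver itself:

static inline void demo_writeq(u64 b, void __iomem *addr, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	__raw_writel((u32)b, addr);		/* low 32 bits, no byte swap */
	__raw_writel((u32)(b >> 32), addr + 4);	/* high 32 bits */
	mmiowb();				/* order MMIO before unlock */
	spin_unlock_irqrestore(lock, flags);
}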
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 091ec1207bea..cff83b9457f7 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -888,7 +888,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
 	ipv6_en = !!(block->generic.ctrl_flags &
 		     NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
 
-	snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+	snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
 		 block->target[index].target_name.byte);
 
 	tgt->ipv6_en = ipv6_en;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 89a4999fa631..c8731568f9c4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 		msleep(1000);
 
 	qla24xx_disable_vp(vha);
+	qla2x00_wait_for_sess_deletion(vha);
 
 	vha->flags.delete_progress = 1;
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f68eb6096559..2660a48d918a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -214,6 +214,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_async_abort_cmd(srb_t *);
 int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
+void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
 
 /*
  * Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 2c35b0b2baa0..7a3744006419 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3708,6 +3708,10 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
 	return rval;
 
 done_free_sp:
+	spin_lock_irqsave(&vha->hw->vport_slock, flags);
+	list_del(&sp->elem);
+	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+
 	if (sp->u.iocb_cmd.u.ctarg.req) {
 		dma_free_coherent(&vha->hw->pdev->dev,
 			sizeof(struct ct_sns_pkt),
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index db0e3279e07a..1b19b954bbae 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1489,11 +1489,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
 	wait_for_completion(&tm_iocb->u.tmf.comp);
 
-	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+	rval = tm_iocb->u.tmf.data;
 
-	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8030,
 		    "TM IOCB failed (%x).\n", rval);
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 37ae0f6d8ae5..59fd5a9dfeb8 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -222,6 +222,8 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
 	sp->fcport = fcport;
 	sp->iocbs = 1;
 	sp->vha = qpair->vha;
+	INIT_LIST_HEAD(&sp->elem);
+
 done:
 	if (!sp)
 		QLA_QPAIR_MARK_NOT_BUSY(qpair);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a91cca52b5d5..dd93a22fe843 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
 	req_cnt = 1;
 	handle = 0;
 
-	if (!sp)
-		goto skip_cmd_array;
-
-	/* Check for room in outstanding command list. */
-	handle = req->current_outstanding_cmd;
-	for (index = 1; index < req->num_outstanding_cmds; index++) {
-		handle++;
-		if (handle == req->num_outstanding_cmds)
-			handle = 1;
-		if (!req->outstanding_cmds[handle])
-			break;
-	}
-	if (index == req->num_outstanding_cmds) {
-		ql_log(ql_log_warn, vha, 0x700b,
-		    "No room on outstanding cmd array.\n");
-		goto queuing_error;
-	}
-
-	/* Prep command array. */
-	req->current_outstanding_cmd = handle;
-	req->outstanding_cmds[handle] = sp;
-	sp->handle = handle;
-
-	/* Adjust entry-counts as needed. */
-	if (sp->type != SRB_SCSI_CMD)
+	if (sp && (sp->type != SRB_SCSI_CMD)) {
+		/* Adjust entry-counts as needed. */
 		req_cnt = sp->iocbs;
+	}
 
-skip_cmd_array:
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt + 2) {
 		if (qpair->use_shadow_reg)
@@ -2183,6 +2160,28 @@ skip_cmd_array:
 	if (req->cnt < req_cnt + 2)
 		goto queuing_error;
 
+	if (sp) {
+		/* Check for room in outstanding command list. */
+		handle = req->current_outstanding_cmd;
+		for (index = 1; index < req->num_outstanding_cmds; index++) {
+			handle++;
+			if (handle == req->num_outstanding_cmds)
+				handle = 1;
+			if (!req->outstanding_cmds[handle])
+				break;
+		}
+		if (index == req->num_outstanding_cmds) {
+			ql_log(ql_log_warn, vha, 0x700b,
+			    "No room on outstanding cmd array.\n");
+			goto queuing_error;
+		}
+
+		/* Prep command array. */
+		req->current_outstanding_cmd = handle;
+		req->outstanding_cmds[handle] = sp;
+		sp->handle = handle;
+	}
+
 	/* Prep packet */
 	req->cnt -= req_cnt;
 	pkt = req->ring_ptr;
@@ -2195,6 +2194,8 @@ skip_cmd_array:
 		pkt->handle = handle;
 	}
 
+	return pkt;
+
 queuing_error:
 	qpair->tgt_counters.num_alloc_iocb_failed++;
 	return pkt;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9fa5a2557f2c..7756106d4555 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -631,6 +631,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 	unsigned long	flags;
 	fc_port_t	*fcport = NULL;
 
+	if (!vha->hw->flags.fw_started)
+		return;
+
 	/* Setup to process RIO completion. */
 	handle_cnt = 0;
 	if (IS_CNA_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7e875f575229..f0ec13d48bf3 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4220,6 +4220,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
+	if (!ha->flags.fw_started)
+		return QLA_SUCCESS;
+
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
 	    "Entered %s.\n", __func__);
 
@@ -4289,6 +4292,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	mbx_cmd_t *mcp = &mc;
 	struct qla_hw_data *ha = vha->hw;
 
+	if (!ha->flags.fw_started)
+		return QLA_SUCCESS;
+
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
 	    "Entered %s.\n", __func__);
 
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f6f0a759a7c2..aa727d07b702 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -152,11 +152,18 @@ int
 qla24xx_disable_vp(scsi_qla_host_t *vha)
 {
 	unsigned long flags;
-	int ret;
+	int ret = QLA_SUCCESS;
+	fc_port_t *fcport;
+
+	if (vha->hw->flags.fw_started)
+		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 
-	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 	atomic_set(&vha->loop_state, LOOP_DOWN);
 	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		fcport->logout_on_delete = 0;
+
+	qla2x00_mark_all_devices_lost(vha, 0);
 
 	/* Remove port id from vp target map */
 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 9f309e572be4..1fbd16c8c9a7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -303,6 +303,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
 static int qla2xxx_map_queues(struct Scsi_Host *shost);
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
+
 struct scsi_host_template qla2xxx_driver_template = {
 	.module			= THIS_MODULE,
 	.name			= QLA2XXX_DRIVER_NAME,
@@ -1147,7 +1148,7 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
  * qla2x00_wait_for_sess_deletion can only be called from remove_one.
  * it has dependency on UNLOADING flag to stop device discovery
  */
-static void
+void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
 	qla2x00_mark_all_devices_lost(vha, 0);
@@ -3603,6 +3604,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	base_vha = pci_get_drvdata(pdev);
 	ha = base_vha->hw;
+	ql_log(ql_log_info, base_vha, 0xb079,
+	    "Removing driver\n");
 
 	/* Indicate device removal to prevent future board_disable and wait
 	 * until any pending board_disable has completed. */
@@ -3625,6 +3628,21 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	}
 	qla2x00_wait_for_hba_ready(base_vha);
 
+	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+		if (ha->flags.fw_started)
+			qla2x00_abort_isp_cleanup(base_vha);
+	} else if (!IS_QLAFX00(ha)) {
+		if (IS_QLA8031(ha)) {
+			ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+			    "Clearing fcoe driver presence.\n");
+			if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+				ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+				    "Error while clearing DRV-Presence.\n");
+		}
+
+		qla2x00_try_to_stop_firmware(base_vha);
+	}
+
 	qla2x00_wait_for_sess_deletion(base_vha);
 
 	/*
@@ -3648,14 +3666,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	qla2x00_delete_all_vps(ha, base_vha);
 
-	if (IS_QLA8031(ha)) {
-		ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
-		    "Clearing fcoe driver presence.\n");
-		if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
-			ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
-			    "Error while clearing DRV-Presence.\n");
-	}
-
 	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
 
 	qla2x00_dfs_remove(base_vha);
@@ -3715,24 +3725,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 	qla2x00_stop_timer(vha);
 
 	qla25xx_delete_queues(vha);
-
-	if (ha->flags.fce_enabled)
-		qla2x00_disable_fce_trace(vha, NULL, NULL);
-
-	if (ha->eft)
-		qla2x00_disable_eft_trace(vha);
-
-	if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
-		if (ha->flags.fw_started)
-			qla2x00_abort_isp_cleanup(vha);
-	} else {
-		if (ha->flags.fw_started) {
-			/* Stop currently executing firmware. */
-			qla2x00_try_to_stop_firmware(vha);
-			ha->flags.fw_started = 0;
-		}
-	}
 
 	vha->flags.online = 0;
 
 	/* turn-off interrupts on the card */
@@ -6028,8 +6020,9 @@ qla2x00_do_dpc(void *data)
 			set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
 		}
 
-		if (test_and_clear_bit(ISP_ABORT_NEEDED,
-		    &base_vha->dpc_flags)) {
+		if (test_and_clear_bit
+		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
 			    "ISP abort scheduled.\n");
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 04458eb19d38..4499c787165f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1880,6 +1880,9 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
 	if (IS_P3P_TYPE(ha))
 		return QLA_SUCCESS;
 
+	if (!ha->flags.fw_started)
+		return QLA_SUCCESS;
+
 	ha->beacon_blink_led = 0;
 
 	if (IS_QLA2031(ha) || IS_QLA27XX(ha))
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 8932ae81a15a..2715cdaa669c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
 		rtn = host->hostt->eh_timed_out(scmd);
 
 	if (rtn == BLK_EH_DONE) {
+		/*
+		 * For blk-mq, we must set the request state to complete now
+		 * before sending the request to the scsi error handler. This
+		 * will prevent a use-after-free in the event the LLD manages
+		 * to complete the request before the error handler finishes
+		 * processing this timed out request.
+		 *
+		 * If the request was already completed, then the LLD beat the
+		 * time out handler from transferring the request to the scsi
+		 * error handler. In that case we can return immediately as no
+		 * further action is required.
+		 */
+		if (req->q->mq_ops && !blk_mq_mark_complete(req))
+			return rtn;
 		if (scsi_abort_command(scmd) != SUCCESS) {
 			set_host_byte(scmd, DID_TIME_OUT);
 			scsi_eh_scmd_add(scmd);
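Note: blk_mq_mark_complete() here acts as an atomic claim: whichever of the timeout handler and the LLD completion path flips the request state first owns the request, which closes exactly the use-after-free window the new comment describes. A runnable model of the claim-once rule using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum req_state { IN_FLIGHT, COMPLETE };

/* model of the claim: true only for the first caller to flip the state */
static bool mark_complete(_Atomic int *state)
{
	int expected = IN_FLIGHT;

	return atomic_compare_exchange_strong(state, &expected, COMPLETE);
}

int main(void)
{
	_Atomic int state = IN_FLIGHT;

	if (mark_complete(&state))
		printf("timeout path owns the request\n");
	if (!mark_complete(&state))
		printf("second claimant (the LLD) backs off\n");
	return 0;
}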
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cd2fdac000c9..ba9ba0e04f42 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1741,15 +1741,11 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
 	 *
 	 * With scsi-mq enabled, there are a fixed number of preallocated
 	 * requests equal in number to shost->can_queue. If all of the
-	 * preallocated requests are already in use, then using GFP_ATOMIC with
-	 * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
-	 * will cause blk_get_request() to sleep until an active command
-	 * completes, freeing up a request. Neither option is ideal, but
-	 * GFP_KERNEL is the better choice to prevent userspace from getting an
-	 * unexpected EWOULDBLOCK.
-	 *
-	 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
-	 * does not sleep except under memory pressure.
+	 * preallocated requests are already in use, then blk_get_request()
+	 * will sleep until an active command completes, freeing up a request.
+	 * Although waiting in an asynchronous interface is less than ideal, we
+	 * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
+	 * not expect an EWOULDBLOCK from this condition.
 	 */
 	rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
 			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
@@ -2185,6 +2181,7 @@ sg_add_sfp(Sg_device * sdp)
 	write_lock_irqsave(&sdp->sfd_lock, iflags);
 	if (atomic_read(&sdp->detaching)) {
 		write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+		kfree(sfp);
 		return ERR_PTR(-ENODEV);
 	}
 	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 3f3cb72e0c0c..d0389b20574d 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
 {
 	struct scsi_cd *cd;
+	struct scsi_device *sdev;
 	int ret = -ENXIO;
 
+	cd = scsi_cd_get(bdev->bd_disk);
+	if (!cd)
+		goto out;
+
+	sdev = cd->device;
+	scsi_autopm_get_device(sdev);
 	check_disk_change(bdev);
 
 	mutex_lock(&sr_mutex);
-	cd = scsi_cd_get(bdev->bd_disk);
-	if (cd) {
-		ret = cdrom_open(&cd->cdi, bdev, mode);
-		if (ret)
-			scsi_cd_put(cd);
-	}
+	ret = cdrom_open(&cd->cdi, bdev, mode);
 	mutex_unlock(&sr_mutex);
+
+	scsi_autopm_put_device(sdev);
+	if (ret)
+		scsi_cd_put(cd);
+
+out:
 	return ret;
 }
 
@@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	if (ret)
 		goto out;
 
+	scsi_autopm_get_device(sdev);
+
 	/*
 	 * Send SCSI addressing ioctls directly to mid level, send other
 	 * ioctls to cdrom/block level.
@@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case SCSI_IOCTL_GET_IDLUN:
 	case SCSI_IOCTL_GET_BUS_NUMBER:
 		ret = scsi_ioctl(sdev, cmd, argp);
-		goto out;
+		goto put;
 	}
 
 	ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
 	if (ret != -ENOSYS)
-		goto out;
+		goto put;
 
 	ret = scsi_ioctl(sdev, cmd, argp);
 
+put:
+	scsi_autopm_put_device(sdev);
+
 out:
 	mutex_unlock(&sr_mutex);
 	return ret;
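Note: the sr reordering takes both references (the scsi_cd ref and a runtime-PM ref) before the device is touched and drops them on every exit path, so check_disk_change() can no longer run against a suspended or already-gone device. A hedged kernel-style sketch of the get/put bracket the ioctl path now follows (do_command() is a placeholder, not a real API):

static int demo_ioctl(struct scsi_device *sdev)
{
	int ret;

	scsi_autopm_get_device(sdev);	/* resume device and pin it awake */
	ret = do_command(sdev);		/* placeholder for the real work */
	scsi_autopm_put_device(sdev);	/* re-enable runtime suspend */

	return ret;
}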
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 777e5f1e52d1..0cd947f78b5b 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
 	    (btstat == BTSTAT_SUCCESS ||
 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
-		cmd->result = (DID_OK << 16) | sdstat;
-		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
-			cmd->result |= (DRIVER_SENSE << 24);
+		if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+			cmd->result = (DID_RESET << 16);
+		} else {
+			cmd->result = (DID_OK << 16) | sdstat;
+			if (sdstat == SAM_STAT_CHECK_CONDITION &&
+			    cmd->sense_buffer)
+				cmd->result |= (DRIVER_SENSE << 24);
+		}
 	} else
 		switch (btstat) {
 		case BTSTAT_SUCCESS:
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a1a0025b59e0..d5d33e12e952 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -402,6 +402,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 			fput(asma->file);
 			goto out;
 		}
+	} else {
+		vma_set_anonymous(vma);
 	}
 
 	if (vma->vm_file)
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 0ecffab52ec2..abdaf7cf8162 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
 	memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
 
 	if (dev->flags & IFF_PROMISC) {
-		hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
+		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
 					    MCAST_FILTER_PROMISC);
 		goto spin_unlock;
 	}
 
 	if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
 	    (dev->flags & IFF_ALLMULTI)) {
-		hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
+		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
 					    MCAST_FILTER_MCASTALL);
 		goto spin_unlock;
 	}
 
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
 				  ETH_ALEN * mc_count);
 	} else {
 		priv->sme_i.sme_flag |= SME_MULTICAST;
-		hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
+		hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
 					    MCAST_FILTER_MCAST);
 	}
 
 spin_unlock:
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index a3a83424a926..16478fe9e3f8 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -24,6 +23,8 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-mc.h>
 
+#include <asm/cacheflush.h>
+
 #include "iss_video.h"
 #include "iss.h"
 
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index 673fdce25530..ff7832798a77 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -7,7 +7,6 @@ config R8188EU
 	select LIB80211
 	select LIB80211_CRYPT_WEP
 	select LIB80211_CRYPT_CCMP
-	select LIB80211_CRYPT_TKIP
 	---help---
 	This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
 	If built as a module, it will be called r8188eu.
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 05936a45eb93..c6857a5be12a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -23,7 +23,6 @@
23#include <mon.h> 23#include <mon.h>
24#include <wifi.h> 24#include <wifi.h>
25#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
26#include <net/lib80211.h>
27 26
28#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */ 27#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
29#define LLC_HEADER_SIZE 6 /* LLC Header Length */ 28#define LLC_HEADER_SIZE 6 /* LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
221static int recvframe_chkmic(struct adapter *adapter, 220static int recvframe_chkmic(struct adapter *adapter,
222 struct recv_frame *precvframe) 221 struct recv_frame *precvframe)
223{ 222{
224 int res = _SUCCESS; 223 int i, res = _SUCCESS;
225 struct rx_pkt_attrib *prxattrib = &precvframe->attrib; 224 u32 datalen;
226 struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta); 225 u8 miccode[8];
226 u8 bmic_err = false, brpt_micerror = true;
227 u8 *pframe, *payload, *pframemic;
228 u8 *mickey;
229 struct sta_info *stainfo;
230 struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
231 struct security_priv *psecuritypriv = &adapter->securitypriv;
232
233 struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
234 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
235
236 stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
227 237
228 if (prxattrib->encrypt == _TKIP_) { 238 if (prxattrib->encrypt == _TKIP_) {
239 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
240 ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
241 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
242 ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
243 __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
244 prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
245
246 /* calculate mic code */
229 if (stainfo) { 247 if (stainfo) {
230 int key_idx;
231 const int iv_len = 8, icv_len = 4, key_length = 32;
232 struct sk_buff *skb = precvframe->pkt;
233 u8 key[32], iv[8], icv[4], *pframe = skb->data;
234 void *crypto_private = NULL;
235 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
236 struct security_priv *psecuritypriv = &adapter->securitypriv;
237
238 if (IS_MCAST(prxattrib->ra)) { 248 if (IS_MCAST(prxattrib->ra)) {
239 if (!psecuritypriv) { 249 if (!psecuritypriv) {
240 res = _FAIL; 250 res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
243 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__); 253 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
244 goto exit; 254 goto exit;
245 } 255 }
246 key_idx = prxattrib->key_index; 256 mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
247 memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16); 257
248 memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16); 258 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
259 ("\n %s: bcmc key\n", __func__));
249 } else { 260 } else {
250 key_idx = 0; 261 mickey = &stainfo->dot11tkiprxmickey.skey[0];
251 memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 262 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
252 memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16); 263 ("\n %s: unicast key\n", __func__));
253 } 264 }
254 265
255 if (!crypto_ops) { 266 /* icv_len includes the mic code */
256 res = _FAIL; 267 datalen = precvframe->pkt->len-prxattrib->hdrlen -
257 goto exit_lib80211_tkip; 268 prxattrib->iv_len-prxattrib->icv_len-8;
258 } 269 pframe = precvframe->pkt->data;
270 payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
259 271
260 memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 272 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
261 memcpy(icv, pframe + skb->len - icv_len, icv_len); 273 rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
262 memmove(pframe + iv_len, pframe, prxattrib->hdrlen); 274 (unsigned char)prxattrib->priority); /* mind the length of the data */
263 275
264 skb_pull(skb, iv_len); 276 pframemic = payload+datalen;
265 skb_trim(skb, skb->len - icv_len);
266 277
267 crypto_private = crypto_ops->init(key_idx); 278 bmic_err = false;
268 if (!crypto_private) { 279
269 res = _FAIL; 280 for (i = 0; i < 8; i++) {
270 goto exit_lib80211_tkip; 281 if (miccode[i] != *(pframemic+i)) {
271 } 282 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
272 if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) { 283 ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
273 res = _FAIL; 284 __func__, i, miccode[i], i, *(pframemic + i)));
274 goto exit_lib80211_tkip; 285 bmic_err = true;
275 } 286 }
276 if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
277 res = _FAIL;
278 goto exit_lib80211_tkip;
279 } 287 }
280 288
281 memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 289 if (bmic_err) {
282 skb_push(skb, iv_len); 290 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
283 skb_put(skb, icv_len); 291 ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
292 *(pframemic-8), *(pframemic-7), *(pframemic-6),
293 *(pframemic-5), *(pframemic-4), *(pframemic-3),
294 *(pframemic-2), *(pframemic-1)));
295 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
296 ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
297 *(pframemic-16), *(pframemic-15), *(pframemic-14),
298 *(pframemic-13), *(pframemic-12), *(pframemic-11),
299 *(pframemic-10), *(pframemic-9)));
300 {
301 uint i;
284 302
285 memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 303 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
286 memcpy(pframe + skb->len - icv_len, icv, icv_len); 304 ("\n ======dump packet (len=%d)======\n",
305 precvframe->pkt->len));
306 for (i = 0; i < precvframe->pkt->len; i += 8) {
307 RT_TRACE(_module_rtl871x_recv_c_,
308 _drv_err_,
309 ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
310 *(precvframe->pkt->data+i),
311 *(precvframe->pkt->data+i+1),
312 *(precvframe->pkt->data+i+2),
313 *(precvframe->pkt->data+i+3),
314 *(precvframe->pkt->data+i+4),
315 *(precvframe->pkt->data+i+5),
316 *(precvframe->pkt->data+i+6),
317 *(precvframe->pkt->data+i+7)));
318 }
319 RT_TRACE(_module_rtl871x_recv_c_,
320 _drv_err_,
321 ("\n ====== demp packet end [len=%d]======\n",
322 precvframe->pkt->len));
323 RT_TRACE(_module_rtl871x_recv_c_,
324 _drv_err_,
325 ("\n hrdlen=%d,\n",
326 prxattrib->hdrlen));
327 }
287 328
288exit_lib80211_tkip: 329 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
289 if (crypto_ops && crypto_private) 330 ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
290 crypto_ops->deinit(crypto_private); 331 prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
332 prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
333
334 /* double-check key_index to work around a timing issue; */
335 /* comparing with psecuritypriv->dot118021XGrpKeyid can hit the same race */
336 if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
337 brpt_micerror = false;
338
339 if ((prxattrib->bdecrypted) && (brpt_micerror)) {
340 rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
341 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
342 DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
343 } else {
344 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
345 DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
346 }
347 res = _FAIL;
348 } else {
349 /* mic checked ok */
350 if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
351 psecuritypriv->bcheck_grpkey = true;
352 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
353 }
354 }
291 } else { 355 } else {
292 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 356 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
293 ("%s: rtw_get_stainfo==NULL!!!\n", __func__)); 357 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
294 } 358 }
359
360 skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
295 } 361 }
296 362
297exit: 363exit:
364
298 return res; 365 return res;
299} 366}
300 367
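The restored MIC check computes the Michael MIC over the cleartext data and compares it with the eight bytes that trail the payload. A minimal sketch of the offset arithmetic it relies on, with hypothetical lengths and assuming the usual | hdr | IV(8) | data | MIC(8) | ICV(4) | TKIP layout:

	/* hypothetical lengths, for illustration only */
	static u8 *demo_mic_location(u8 *pframe)
	{
		u32 pkt_len = 120, hdrlen = 24, iv_len = 8, icv_len = 4;
		u32 datalen = pkt_len - hdrlen - iv_len - icv_len - 8;	/* 76 */
		u8 *payload = pframe + hdrlen + iv_len;	/* cleartext data */

		return payload + datalen;	/* 8 MIC bytes, then the ICV */
	}

The final skb_trim() in the hunk then drops those eight MIC bytes before the frame moves up the stack.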
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index bfe0b217e679..67a2490f055e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -650,71 +650,71 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
650 return res; 650 return res;
651} 651}
652 652
653/* The hlen doesn't include the IV */
653u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe) 654u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
654{ 655{ /* exclude ICV */
655 struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib; 656 u16 pnl;
656 u32 res = _SUCCESS; 657 u32 pnh;
658 u8 rc4key[16];
659 u8 ttkey[16];
660 u8 crc[4];
661 struct arc4context mycontext;
662 int length;
663
664 u8 *pframe, *payload, *iv, *prwskey;
665 union pn48 dot11txpn;
666 struct sta_info *stainfo;
667 struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
668 struct security_priv *psecuritypriv = &padapter->securitypriv;
669 u32 res = _SUCCESS;
670
671
672 pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
657 673
658 /* 4 start to decrypt recvframe */ 674 /* 4 start to decrypt recvframe */
659 if (prxattrib->encrypt == _TKIP_) { 675 if (prxattrib->encrypt == _TKIP_) {
660 struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta); 676 stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
661
662 if (stainfo) { 677 if (stainfo) {
663 int key_idx;
664 const int iv_len = 8, icv_len = 4, key_length = 32;
665 void *crypto_private = NULL;
666 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
667 u8 key[32], iv[8], icv[4], *pframe = skb->data;
668 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
669 struct security_priv *psecuritypriv = &padapter->securitypriv;
670
671 if (IS_MCAST(prxattrib->ra)) { 678 if (IS_MCAST(prxattrib->ra)) {
672 if (!psecuritypriv->binstallGrpkey) { 679 if (!psecuritypriv->binstallGrpkey) {
673 res = _FAIL; 680 res = _FAIL;
674 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__); 681 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
675 goto exit; 682 goto exit;
676 } 683 }
677 key_idx = prxattrib->key_index; 684 prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
678 memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
679 memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
680 } else { 685 } else {
681 key_idx = 0; 686 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
682 memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 687 prwskey = &stainfo->dot118021x_UncstKey.skey[0];
683 memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
684 } 688 }
685 689
686 if (!crypto_ops) { 690 iv = pframe+prxattrib->hdrlen;
687 res = _FAIL; 691 payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
688 goto exit_lib80211_tkip; 692 length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
689 }
690 693
691 memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 694 GET_TKIP_PN(iv, dot11txpn);
692 memcpy(icv, pframe + skb->len - icv_len, icv_len);
693 695
694 crypto_private = crypto_ops->init(key_idx); 696 pnl = (u16)(dot11txpn.val);
695 if (!crypto_private) { 697 pnh = (u32)(dot11txpn.val>>16);
696 res = _FAIL;
697 goto exit_lib80211_tkip;
698 }
699 if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
700 res = _FAIL;
701 goto exit_lib80211_tkip;
702 }
703 if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
704 res = _FAIL;
705 goto exit_lib80211_tkip;
706 }
707 698
708 memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 699 phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
709 skb_push(skb, iv_len); 700 phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
710 skb_put(skb, icv_len);
711 701
712 memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 702 /* 4 decrypt payload include icv */
713 memcpy(pframe + skb->len - icv_len, icv, icv_len);
714 703
715exit_lib80211_tkip: 704 arcfour_init(&mycontext, rc4key, 16);
716 if (crypto_ops && crypto_private) 705 arcfour_encrypt(&mycontext, payload, payload, length);
717 crypto_ops->deinit(crypto_private); 706
707 *((__le32 *)crc) = getcrc32(payload, length-4);
708
709 if (crc[3] != payload[length-1] ||
710 crc[2] != payload[length-2] ||
711 crc[1] != payload[length-3] ||
712 crc[0] != payload[length-4]) {
713 RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
714 ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
715 &crc, &payload[length-4]));
716 res = _FAIL;
717 }
718 } else { 718 } else {
719 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n")); 719 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
720 res = _FAIL; 720 res = _FAIL;
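The restored ICV test compares the driver's getcrc32() output byte-for-byte with the last four payload bytes. Assuming getcrc32() matches the standard little-endian CRC-32 (an assumption, since the driver carries its own implementation; the kernel's crc32_le() gives the same result with an inverted seed and a final complement), an equivalent sketch:

	#include <linux/crc32.h>
	#include <linux/string.h>

	/* sketch: verify the 4-byte ICV that trails a decrypted TKIP payload,
	 * assuming the ICV is a standard CRC-32 stored little-endian */
	static bool icv_ok(const u8 *payload, int length)
	{
		__le32 icv = cpu_to_le32(~crc32_le(~0U, payload, length - 4));

		return memcmp(&icv, payload + length - 4, 4) == 0;
	}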
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index a61bc41b82d7..947c79532e10 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
198 int chars_sent = 0; 198 int chars_sent = 0;
199 char __user *cp; 199 char __user *cp;
200 char *init; 200 char *init;
201 size_t bytes_per_ch = unicode ? 3 : 1;
201 u16 ch; 202 u16 ch;
202 int empty; 203 int empty;
203 unsigned long flags; 204 unsigned long flags;
204 DEFINE_WAIT(wait); 205 DEFINE_WAIT(wait);
205 206
207 if (count < bytes_per_ch)
208 return -EINVAL;
209
206 spin_lock_irqsave(&speakup_info.spinlock, flags); 210 spin_lock_irqsave(&speakup_info.spinlock, flags);
207 while (1) { 211 while (1) {
208 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); 212 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
228 init = get_initstring(); 232 init = get_initstring();
229 233
230 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ 234 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
231 while (chars_sent <= count - 3) { 235 while (chars_sent <= count - bytes_per_ch) {
232 if (speakup_info.flushing) { 236 if (speakup_info.flushing) {
233 speakup_info.flushing = 0; 237 speakup_info.flushing = 0;
234 ch = '\x18'; 238 ch = '\x18';
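bytes_per_ch is 3 in unicode mode because a 16-bit character costs at most three UTF-8 bytes: one below U+0080, two below U+0800, three otherwise; hence the early -EINVAL when the caller's buffer cannot hold even one encoded character. A self-contained sketch of that encoding:

	#include <linux/types.h>

	/* UTF-8 encode a 16-bit character into at most 3 bytes,
	 * returning the byte count */
	static size_t utf8_encode16(u16 ch, u8 out[3])
	{
		if (ch < 0x80) {
			out[0] = ch;
			return 1;
		}
		if (ch < 0x800) {
			out[0] = 0xc0 | (ch >> 6);
			out[1] = 0x80 | (ch & 0x3f);
			return 2;
		}
		out[0] = 0xe0 | (ch >> 12);
		out[1] = 0x80 | ((ch >> 6) & 0x3f);
		out[2] = 0x80 | (ch & 0x3f);
		return 3;
	}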
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 514986b57c2d..25eb3891e34b 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -652,6 +652,7 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
652 struct iscsi_param *param; 652 struct iscsi_param *param;
653 u32 mrdsl, mbl; 653 u32 mrdsl, mbl;
654 u32 max_npdu, max_iso_npdu; 654 u32 max_npdu, max_iso_npdu;
655 u32 max_iso_payload;
655 656
656 if (conn->login->leading_connection) { 657 if (conn->login->leading_connection) {
657 param = iscsi_find_param_from_key(MAXBURSTLENGTH, 658 param = iscsi_find_param_from_key(MAXBURSTLENGTH,
@@ -670,8 +671,10 @@ static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
670 mrdsl = conn_ops->MaxRecvDataSegmentLength; 671 mrdsl = conn_ops->MaxRecvDataSegmentLength;
671 max_npdu = mbl / mrdsl; 672 max_npdu = mbl / mrdsl;
672 673
673 max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD / 674 max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
674 (ISCSI_HDR_LEN + mrdsl + 675
676 max_iso_npdu = max_iso_payload /
677 (ISCSI_HDR_LEN + mrdsl +
675 cxgbit_digest_len[csk->submode]); 678 cxgbit_digest_len[csk->submode]);
676 679
677 csk->max_iso_npdu = min(max_npdu, max_iso_npdu); 680 csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
@@ -741,6 +744,9 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
741 if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl) 744 if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
742 conn_ops->MaxRecvDataSegmentLength = cdev->mdsl; 745 conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
743 746
747 if (cxgbit_set_digest(csk))
748 return -1;
749
744 if (conn->login->leading_connection) { 750 if (conn->login->leading_connection) {
745 param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL, 751 param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
746 conn->param_list); 752 conn->param_list);
@@ -764,7 +770,7 @@ static int cxgbit_set_params(struct iscsi_conn *conn)
764 if (is_t5(cdev->lldi.adapter_type)) 770 if (is_t5(cdev->lldi.adapter_type))
765 goto enable_ddp; 771 goto enable_ddp;
766 else 772 else
767 goto enable_digest; 773 return 0;
768 } 774 }
769 775
770 if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) { 776 if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
@@ -781,10 +787,6 @@ enable_ddp:
781 } 787 }
782 } 788 }
783 789
784enable_digest:
785 if (cxgbit_set_digest(csk))
786 return -1;
787
788 return 0; 790 return 0;
789} 791}
790 792
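Rounding the ISO payload budget down to a whole number of TCP segments (csk->emss) before dividing keeps the adapter from having to emit a short trailing segment. A worked example with hypothetical sizes (48 is the real ISCSI_HDR_LEN, the basic header segment):

	#include <linux/kernel.h>	/* rounddown() */

	static u32 demo_max_iso_npdu(void)
	{
		/* hypothetical sizing, for illustration only */
		u32 max_payload = 65535, emss = 1448;
		u32 mrdsl = 8192, hdr = 48, digest_len = 8;
		u32 max_iso_payload = rounddown(max_payload, emss); /* 65160 */

		return max_iso_payload / (hdr + mrdsl + digest_len); /* 7 */
	}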
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 785f0ed037f7..ee34e9046f7e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
4 select EXTCON 4 select EXTCON
5 select RESET_CONTROLLER 5 select RESET_CONTROLLER
6 select USB_ULPI_BUS
6 help 7 help
7 Say Y here if your system has a dual role high speed USB 8 Say Y here if your system has a dual role high speed USB
8 controller based on ChipIdea silicon IP. It supports: 9 controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
38 help 39 help
39 Say Y here to enable host controller functionality of the 40 Say Y here to enable host controller functionality of the
40 ChipIdea driver. 41 ChipIdea driver.
41
42config USB_CHIPIDEA_ULPI
43 bool "ChipIdea ULPI PHY support"
44 depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
45 help
46 Say Y here if you have a ULPI PHY attached to your ChipIdea
47 controller.
48
49endif 42endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index e3d5e728fa53..12df94f78f72 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -1,11 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o 2obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
3 3
4ci_hdrc-y := core.o otg.o debug.o 4ci_hdrc-y := core.o otg.o debug.o ulpi.o
5ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o 5ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
6ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o 6ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
7ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o 7ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
8ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o
9 8
10# Glue/Bridge layers go here 9# Glue/Bridge layers go here
11 10
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 0bf244d50544..6a2cc5cd0281 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -240,10 +240,8 @@ struct ci_hdrc {
240 240
241 struct ci_hdrc_platform_data *platdata; 241 struct ci_hdrc_platform_data *platdata;
242 int vbus_active; 242 int vbus_active;
243#ifdef CONFIG_USB_CHIPIDEA_ULPI
244 struct ulpi *ulpi; 243 struct ulpi *ulpi;
245 struct ulpi_ops ulpi_ops; 244 struct ulpi_ops ulpi_ops;
246#endif
247 struct phy *phy; 245 struct phy *phy;
248 /* old usb_phy interface */ 246 /* old usb_phy interface */
249 struct usb_phy *usb_phy; 247 struct usb_phy *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
426#endif 424#endif
427} 425}
428 426
429#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
430int ci_ulpi_init(struct ci_hdrc *ci); 427int ci_ulpi_init(struct ci_hdrc *ci);
431void ci_ulpi_exit(struct ci_hdrc *ci); 428void ci_ulpi_exit(struct ci_hdrc *ci);
432int ci_ulpi_resume(struct ci_hdrc *ci); 429int ci_ulpi_resume(struct ci_hdrc *ci);
433#else
434static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
435static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
436static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
437#endif
438 430
439u32 hw_read_intr_enable(struct ci_hdrc *ci); 431u32 hw_read_intr_enable(struct ci_hdrc *ci);
440 432
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 6da42dcd2888..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
95{ 95{
96 int cnt = 100000; 96 int cnt = 100000;
97 97
98 if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
99 return 0;
100
98 while (cnt-- > 0) { 101 while (cnt-- > 0) {
99 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE)) 102 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
100 return 0; 103 return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 998b32d0167e..75c4623ad779 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1831,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
1831 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ 1831 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
1832 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ 1832 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
1833 }, 1833 },
1834 { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
1835 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1836 },
1834 1837
1835 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ 1838 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
1836 .driver_info = CLEAR_HALT_CONDITIONS, 1839 .driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcae521df29b..1fb266809966 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1142 1142
1143 if (!udev || udev->state == USB_STATE_NOTATTACHED) { 1143 if (!udev || udev->state == USB_STATE_NOTATTACHED) {
1144 /* Tell hub_wq to disconnect the device or 1144 /* Tell hub_wq to disconnect the device or
1145 * check for a new connection 1145 * check for a new connection or an over-current condition.
1146 * Based on USB2.0 Spec Section 11.12.5,
1147 * C_PORT_OVER_CURRENT could be set while
1148 * PORT_OVER_CURRENT is not. So check for either of them.
1146 */ 1149 */
1147 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || 1150 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
1148 (portstatus & USB_PORT_STAT_OVERCURRENT)) 1151 (portstatus & USB_PORT_STAT_OVERCURRENT) ||
1152 (portchange & USB_PORT_STAT_C_OVERCURRENT))
1149 set_bit(port1, hub->change_bits); 1153 set_bit(port1, hub->change_bits);
1150 1154
1151 } else if (portstatus & USB_PORT_STAT_ENABLE) { 1155 } else if (portstatus & USB_PORT_STAT_ENABLE) {
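wPortStatus reports the live state while wPortChange latches events, so a brief over-current spike can already have cleared from the status word and still be pending in the change word. A kernel-context sketch with hypothetical snapshot values (struct usb_hub is hub.c's private type):

	static void demo_check_oc(struct usb_hub *hub, int port1)
	{
		/* hypothetical GetPortStatus snapshot */
		u16 portstatus = USB_PORT_STAT_POWER;	      /* OC already clear */
		u16 portchange = USB_PORT_STAT_C_OVERCURRENT; /* but event latched */

		if ((portstatus & USB_PORT_STAT_OVERCURRENT) ||
		    (portchange & USB_PORT_STAT_C_OVERCURRENT))
			set_bit(port1, hub->change_bits);     /* wake hub_wq */
	}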
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index a0f82cca2d9a..cefc99ae69b2 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3430,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3430 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3430 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3431 hs_ep = hsotg->eps_in[idx]; 3431 hs_ep = hsotg->eps_in[idx];
3432 /* Proceed only unmasked ISOC EPs */ 3432 /* Proceed only unmasked ISOC EPs */
3433 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3433 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3434 continue; 3434 continue;
3435 3435
3436 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx)); 3436 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3476,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3476 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3476 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3477 hs_ep = hsotg->eps_out[idx]; 3477 hs_ep = hsotg->eps_out[idx];
3478 /* Proceed only unmasked ISOC EPs */ 3478 /* Proceed only unmasked ISOC EPs */
3479 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3479 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3480 continue; 3480 continue;
3481 3481
3482 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx)); 3482 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3650,7 +3650,7 @@ irq_retry:
3650 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3650 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3651 hs_ep = hsotg->eps_out[idx]; 3651 hs_ep = hsotg->eps_out[idx];
3652 /* Proceed only unmasked ISOC EPs */ 3652 /* Proceed only unmasked ISOC EPs */
3653 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3653 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3654 continue; 3654 continue;
3655 3655
3656 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx)); 3656 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b1104be3429c..6e2cdd7b93d4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2665,34 +2665,35 @@ static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
2665 2665
2666#define DWC2_USB_DMA_ALIGN 4 2666#define DWC2_USB_DMA_ALIGN 4
2667 2667
2668struct dma_aligned_buffer {
2669 void *kmalloc_ptr;
2670 void *old_xfer_buffer;
2671 u8 data[0];
2672};
2673
2674static void dwc2_free_dma_aligned_buffer(struct urb *urb) 2668static void dwc2_free_dma_aligned_buffer(struct urb *urb)
2675{ 2669{
2676 struct dma_aligned_buffer *temp; 2670 void *stored_xfer_buffer;
2671 size_t length;
2677 2672
2678 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) 2673 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2679 return; 2674 return;
2680 2675
2681 temp = container_of(urb->transfer_buffer, 2676 /* Restore urb->transfer_buffer from the end of the allocated area */
2682 struct dma_aligned_buffer, data); 2677 memcpy(&stored_xfer_buffer, urb->transfer_buffer +
2678 urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
2683 2679
2684 if (usb_urb_dir_in(urb)) 2680 if (usb_urb_dir_in(urb)) {
2685 memcpy(temp->old_xfer_buffer, temp->data, 2681 if (usb_pipeisoc(urb->pipe))
2686 urb->transfer_buffer_length); 2682 length = urb->transfer_buffer_length;
2687 urb->transfer_buffer = temp->old_xfer_buffer; 2683 else
2688 kfree(temp->kmalloc_ptr); 2684 length = urb->actual_length;
2685
2686 memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
2687 }
2688 kfree(urb->transfer_buffer);
2689 urb->transfer_buffer = stored_xfer_buffer;
2689 2690
2690 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; 2691 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2691} 2692}
2692 2693
2693static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) 2694static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2694{ 2695{
2695 struct dma_aligned_buffer *temp, *kmalloc_ptr; 2696 void *kmalloc_ptr;
2696 size_t kmalloc_size; 2697 size_t kmalloc_size;
2697 2698
2698 if (urb->num_sgs || urb->sg || 2699 if (urb->num_sgs || urb->sg ||
@@ -2700,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2700 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) 2701 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
2701 return 0; 2702 return 0;
2702 2703
2703 /* Allocate a buffer with enough padding for alignment */ 2704 /*
2705 * Allocate a buffer with enough padding for the original transfer_buffer
2706 * pointer. This allocation is guaranteed to be aligned properly for
2707 * DMA
2708 */
2704 kmalloc_size = urb->transfer_buffer_length + 2709 kmalloc_size = urb->transfer_buffer_length +
2705 sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; 2710 sizeof(urb->transfer_buffer);
2706 2711
2707 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); 2712 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2708 if (!kmalloc_ptr) 2713 if (!kmalloc_ptr)
2709 return -ENOMEM; 2714 return -ENOMEM;
2710 2715
2711 /* Position our struct dma_aligned_buffer such that data is aligned */ 2716 /*
2712 temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; 2717 * Store the original urb->transfer_buffer pointer at the end
2713 temp->kmalloc_ptr = kmalloc_ptr; 2718 * of the allocation for later retrieval
2714 temp->old_xfer_buffer = urb->transfer_buffer; 2719 */
2720 memcpy(kmalloc_ptr + urb->transfer_buffer_length,
2721 &urb->transfer_buffer, sizeof(urb->transfer_buffer));
2722
2715 if (usb_urb_dir_out(urb)) 2723 if (usb_urb_dir_out(urb))
2716 memcpy(temp->data, urb->transfer_buffer, 2724 memcpy(kmalloc_ptr, urb->transfer_buffer,
2717 urb->transfer_buffer_length); 2725 urb->transfer_buffer_length);
2718 urb->transfer_buffer = temp->data; 2726 urb->transfer_buffer = kmalloc_ptr;
2719 2727
2720 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; 2728 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2721 2729
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index ed7f05cf4906..8ce10caf3e19 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1231,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1231 * avoid interrupt storms we'll wait before retrying if we've got 1231 * avoid interrupt storms we'll wait before retrying if we've got
1232 * several NAKs. If we didn't do this we'd retry directly from the 1232 * several NAKs. If we didn't do this we'd retry directly from the
1233 * interrupt handler and could end up quickly getting another 1233 * interrupt handler and could end up quickly getting another
1234 * interrupt (another NAK), which we'd retry. 1234 * interrupt (another NAK), which we'd retry. Note that we do not
1235 * delay retries for IN parts of control requests, as those are expected
1236 * to complete fairly quickly, and if we delay them we risk confusing
1237 * the device and causing it to issue a STALL.
1235 * 1238 *
1236 * Note that in DMA mode software only gets involved to re-send NAKed 1239 * Note that in DMA mode software only gets involved to re-send NAKed
1237 * transfers for split transactions, so we only need to apply this 1240 * transfers for split transactions, so we only need to apply this
@@ -1244,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1244 qtd->error_count = 0; 1247 qtd->error_count = 0;
1245 qtd->complete_split = 0; 1248 qtd->complete_split = 0;
1246 qtd->num_naks++; 1249 qtd->num_naks++;
1247 qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY; 1250 qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
1251 !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
1252 chan->ep_is_in);
1248 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); 1253 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1249 goto handle_nak_done; 1254 goto handle_nak_done;
1250 } 1255 }
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c77ff50a88a2..8efde178eef4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
973 ret = dwc3_ep0_start_trans(dep); 973 ret = dwc3_ep0_start_trans(dep);
974 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) && 974 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
975 req->request.length && req->request.zero) { 975 req->request.length && req->request.zero) {
976 u32 maxpacket;
977 976
978 ret = usb_gadget_map_request_by_dev(dwc->sysdev, 977 ret = usb_gadget_map_request_by_dev(dwc->sysdev,
979 &req->request, dep->number); 978 &req->request, dep->number);
980 if (ret) 979 if (ret)
981 return; 980 return;
982 981
983 maxpacket = dep->endpoint.maxpacket;
984
985 /* prepare normal TRB */ 982 /* prepare normal TRB */
986 dwc3_ep0_prepare_one_trb(dep, req->request.dma, 983 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
987 req->request.length, 984 req->request.length,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d2fa071c21b1..b8a15840b4ff 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1819,7 +1819,6 @@ unknown:
1819 if (cdev->use_os_string && cdev->os_desc_config && 1819 if (cdev->use_os_string && cdev->os_desc_config &&
1820 (ctrl->bRequestType & USB_TYPE_VENDOR) && 1820 (ctrl->bRequestType & USB_TYPE_VENDOR) &&
1821 ctrl->bRequest == cdev->b_vendor_code) { 1821 ctrl->bRequest == cdev->b_vendor_code) {
1822 struct usb_request *req;
1823 struct usb_configuration *os_desc_cfg; 1822 struct usb_configuration *os_desc_cfg;
1824 u8 *buf; 1823 u8 *buf;
1825 int interface; 1824 int interface;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 33e2030503fa..3ada83d81bda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3263,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
3263 __ffs_event_add(ffs, FUNCTIONFS_SETUP); 3263 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3264 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); 3264 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3265 3265
3266 return USB_GADGET_DELAYED_STATUS; 3266 return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
3267} 3267}
3268 3268
3269static bool ffs_func_req_match(struct usb_function *f, 3269static bool ffs_func_req_match(struct usb_function *f,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d2dc1f00180b..d582921f7257 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
438}; 438};
439 439
440struct cntrl_cur_lay3 { 440struct cntrl_cur_lay3 {
441 __u32 dCUR; 441 __le32 dCUR;
442}; 442};
443 443
444struct cntrl_range_lay3 { 444struct cntrl_range_lay3 {
445 __u16 wNumSubRanges; 445 __le16 wNumSubRanges;
446 __u32 dMIN; 446 __le32 dMIN;
447 __u32 dMAX; 447 __le32 dMAX;
448 __u32 dRES; 448 __le32 dRES;
449} __packed; 449} __packed;
450 450
451static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, 451static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
559 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); 559 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
560 if (!agdev->out_ep) { 560 if (!agdev->out_ep) {
561 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 561 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
562 return ret; 562 return -ENODEV;
563 } 563 }
564 564
565 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); 565 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
566 if (!agdev->in_ep) { 566 if (!agdev->in_ep) {
567 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 567 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
568 return ret; 568 return -ENODEV;
569 } 569 }
570 570
571 agdev->in_ep_maxpsize = max_t(u16, 571 agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
703 memset(&c, 0, sizeof(struct cntrl_cur_lay3)); 703 memset(&c, 0, sizeof(struct cntrl_cur_lay3));
704 704
705 if (entity_id == USB_IN_CLK_ID) 705 if (entity_id == USB_IN_CLK_ID)
706 c.dCUR = p_srate; 706 c.dCUR = cpu_to_le32(p_srate);
707 else if (entity_id == USB_OUT_CLK_ID) 707 else if (entity_id == USB_OUT_CLK_ID)
708 c.dCUR = c_srate; 708 c.dCUR = cpu_to_le32(c_srate);
709 709
710 value = min_t(unsigned, w_length, sizeof c); 710 value = min_t(unsigned, w_length, sizeof c);
711 memcpy(req->buf, &c, value); 711 memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
742 742
743 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { 743 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
744 if (entity_id == USB_IN_CLK_ID) 744 if (entity_id == USB_IN_CLK_ID)
745 r.dMIN = p_srate; 745 r.dMIN = cpu_to_le32(p_srate);
746 else if (entity_id == USB_OUT_CLK_ID) 746 else if (entity_id == USB_OUT_CLK_ID)
747 r.dMIN = c_srate; 747 r.dMIN = cpu_to_le32(c_srate);
748 else 748 else
749 return -EOPNOTSUPP; 749 return -EOPNOTSUPP;
750 750
751 r.dMAX = r.dMIN; 751 r.dMAX = r.dMIN;
752 r.dRES = 0; 752 r.dRES = 0;
753 r.wNumSubRanges = 1; 753 r.wNumSubRanges = cpu_to_le16(1);
754 754
755 value = min_t(unsigned, w_length, sizeof r); 755 value = min_t(unsigned, w_length, sizeof r);
756 memcpy(req->buf, &r, value); 756 memcpy(req->buf, &r, value);
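The audio control payloads are little-endian on the wire regardless of CPU byte order, so the struct fields are retyped __le32/__le16 and filled through cpu_to_le32()/cpu_to_le16(), which are no-ops on little-endian CPUs and byte swaps on big-endian ones. A minimal sketch of the pattern (struct and function names illustrative):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_cur_lay3 {		/* wire format: little-endian */
		__le32 dCUR;
	} __packed;

	static void demo_fill_cur(struct demo_cur_lay3 *c, u32 srate_hz)
	{
		c->dCUR = cpu_to_le32(srate_hz);	/* explicit conversion */
	}

Marking the fields __le32 also lets sparse flag any assignment that skips the conversion.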
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index a72295c953bb..fb5ed97572e5 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -32,9 +32,6 @@ struct uac_req {
32struct uac_rtd_params { 32struct uac_rtd_params {
33 struct snd_uac_chip *uac; /* parent chip */ 33 struct snd_uac_chip *uac; /* parent chip */
34 bool ep_enabled; /* if the ep is enabled */ 34 bool ep_enabled; /* if the ep is enabled */
35 /* Size of the ring buffer */
36 size_t dma_bytes;
37 unsigned char *dma_area;
38 35
39 struct snd_pcm_substream *ss; 36 struct snd_pcm_substream *ss;
40 37
@@ -43,8 +40,6 @@ struct uac_rtd_params {
43 40
44 void *rbuf; 41 void *rbuf;
45 42
46 size_t period_size;
47
48 unsigned max_psize; /* MaxPacketSize of endpoint */ 43 unsigned max_psize; /* MaxPacketSize of endpoint */
49 struct uac_req *ureq; 44 struct uac_req *ureq;
50 45
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
84static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req) 79static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
85{ 80{
86 unsigned pending; 81 unsigned pending;
87 unsigned long flags; 82 unsigned long flags, flags2;
88 unsigned int hw_ptr; 83 unsigned int hw_ptr;
89 bool update_alsa = false;
90 int status = req->status; 84 int status = req->status;
91 struct uac_req *ur = req->context; 85 struct uac_req *ur = req->context;
92 struct snd_pcm_substream *substream; 86 struct snd_pcm_substream *substream;
87 struct snd_pcm_runtime *runtime;
93 struct uac_rtd_params *prm = ur->pp; 88 struct uac_rtd_params *prm = ur->pp;
94 struct snd_uac_chip *uac = prm->uac; 89 struct snd_uac_chip *uac = prm->uac;
95 90
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
111 if (!substream) 106 if (!substream)
112 goto exit; 107 goto exit;
113 108
109 snd_pcm_stream_lock_irqsave(substream, flags2);
110
111 runtime = substream->runtime;
112 if (!runtime || !snd_pcm_running(substream)) {
113 snd_pcm_stream_unlock_irqrestore(substream, flags2);
114 goto exit;
115 }
116
114 spin_lock_irqsave(&prm->lock, flags); 117 spin_lock_irqsave(&prm->lock, flags);
115 118
116 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 119 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
137 req->actual = req->length; 140 req->actual = req->length;
138 } 141 }
139 142
140 pending = prm->hw_ptr % prm->period_size;
141 pending += req->actual;
142 if (pending >= prm->period_size)
143 update_alsa = true;
144
145 hw_ptr = prm->hw_ptr; 143 hw_ptr = prm->hw_ptr;
146 prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
147 144
148 spin_unlock_irqrestore(&prm->lock, flags); 145 spin_unlock_irqrestore(&prm->lock, flags);
149 146
150 /* Pack USB load in ALSA ring buffer */ 147 /* Pack USB load in ALSA ring buffer */
151 pending = prm->dma_bytes - hw_ptr; 148 pending = runtime->dma_bytes - hw_ptr;
152 149
153 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 150 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
154 if (unlikely(pending < req->actual)) { 151 if (unlikely(pending < req->actual)) {
155 memcpy(req->buf, prm->dma_area + hw_ptr, pending); 152 memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
156 memcpy(req->buf + pending, prm->dma_area, 153 memcpy(req->buf + pending, runtime->dma_area,
157 req->actual - pending); 154 req->actual - pending);
158 } else { 155 } else {
159 memcpy(req->buf, prm->dma_area + hw_ptr, req->actual); 156 memcpy(req->buf, runtime->dma_area + hw_ptr,
157 req->actual);
160 } 158 }
161 } else { 159 } else {
162 if (unlikely(pending < req->actual)) { 160 if (unlikely(pending < req->actual)) {
163 memcpy(prm->dma_area + hw_ptr, req->buf, pending); 161 memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
164 memcpy(prm->dma_area, req->buf + pending, 162 memcpy(runtime->dma_area, req->buf + pending,
165 req->actual - pending); 163 req->actual - pending);
166 } else { 164 } else {
167 memcpy(prm->dma_area + hw_ptr, req->buf, req->actual); 165 memcpy(runtime->dma_area + hw_ptr, req->buf,
166 req->actual);
168 } 167 }
169 } 168 }
170 169
170 spin_lock_irqsave(&prm->lock, flags);
171 /* update hw_ptr after data is copied to memory */
172 prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
173 hw_ptr = prm->hw_ptr;
174 spin_unlock_irqrestore(&prm->lock, flags);
175 snd_pcm_stream_unlock_irqrestore(substream, flags2);
176
177 if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
178 snd_pcm_period_elapsed(substream);
179
171exit: 180exit:
172 if (usb_ep_queue(ep, req, GFP_ATOMIC)) 181 if (usb_ep_queue(ep, req, GFP_ATOMIC))
173 dev_err(uac->card->dev, "%d Error!\n", __LINE__); 182 dev_err(uac->card->dev, "%d Error!\n", __LINE__);
174
175 if (update_alsa)
176 snd_pcm_period_elapsed(substream);
177} 183}
178 184
179static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd) 185static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
236static int uac_pcm_hw_params(struct snd_pcm_substream *substream, 242static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
237 struct snd_pcm_hw_params *hw_params) 243 struct snd_pcm_hw_params *hw_params)
238{ 244{
239 struct snd_uac_chip *uac = snd_pcm_substream_chip(substream); 245 return snd_pcm_lib_malloc_pages(substream,
240 struct uac_rtd_params *prm;
241 int err;
242
243 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
244 prm = &uac->p_prm;
245 else
246 prm = &uac->c_prm;
247
248 err = snd_pcm_lib_malloc_pages(substream,
249 params_buffer_bytes(hw_params)); 246 params_buffer_bytes(hw_params));
250 if (err >= 0) {
251 prm->dma_bytes = substream->runtime->dma_bytes;
252 prm->dma_area = substream->runtime->dma_area;
253 prm->period_size = params_period_bytes(hw_params);
254 }
255
256 return err;
257} 247}
258 248
259static int uac_pcm_hw_free(struct snd_pcm_substream *substream) 249static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
260{ 250{
261 struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
262 struct uac_rtd_params *prm;
263
264 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
265 prm = &uac->p_prm;
266 else
267 prm = &uac->c_prm;
268
269 prm->dma_area = NULL;
270 prm->dma_bytes = 0;
271 prm->period_size = 0;
272
273 return snd_pcm_lib_free_pages(substream); 251 return snd_pcm_lib_free_pages(substream);
274} 252}
275 253
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
595 if (err < 0) 573 if (err < 0)
596 goto snd_fail; 574 goto snd_fail;
597 575
598 strcpy(pcm->name, pcm_name); 576 strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
599 pcm->private_data = uac; 577 pcm->private_data = uac;
600 uac->pcm = pcm; 578 uac->pcm = pcm;
601 579
602 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops); 580 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
603 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops); 581 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
604 582
605 strcpy(card->driver, card_name); 583 strlcpy(card->driver, card_name, sizeof(card->driver));
606 strcpy(card->shortname, card_name); 584 strlcpy(card->shortname, card_name, sizeof(card->shortname));
607 sprintf(card->longname, "%s %i", card_name, card->dev->id); 585 sprintf(card->longname, "%s %i", card_name, card->dev->id);
608 586
609 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, 587 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
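Writing straight into the ALSA runtime buffer means handling the ring wrap by hand, which is what the paired memcpy() branches above do; snd_pcm_period_elapsed() then fires whenever the advance crosses a period boundary. A self-contained sketch of the wrap-aware copy:

	#include <string.h>

	/* write n bytes into ring[0..len) at pos, wrapping at most once;
	 * returns the new position (assumes n <= len) */
	static size_t ring_write(unsigned char *ring, size_t len, size_t pos,
				 const unsigned char *src, size_t n)
	{
		size_t space = len - pos;

		if (n > space) {
			memcpy(ring + pos, src, space);
			memcpy(ring, src + space, n - space);
		} else {
			memcpy(ring + pos, src, n);
		}
		return (pos + n) % len;
	}

The period test above, (hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual, uses the same idea: if the remainder after the advance is smaller than the amount just written, a period boundary was crossed.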
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 20ffb03ff6ac..e2927fb083cf 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
108 /* Check our state, cancel pending requests if needed */ 108 /* Check our state, cancel pending requests if needed */
109 if (ep->ep0.state != ep0_state_token) { 109 if (ep->ep0.state != ep0_state_token) {
110 EPDBG(ep, "wrong state\n"); 110 EPDBG(ep, "wrong state\n");
111 ast_vhub_nuke(ep, -EIO);
112
113 /*
113 * Accept the packet regardless; this seems to happen
115 * when stalling a SETUP packet that has an OUT data
116 * phase.
117 */
111 ast_vhub_nuke(ep, 0); 118 ast_vhub_nuke(ep, 0);
112 goto stall; 119 goto stall;
113 } 120 }
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
212 if (chunk && req->req.buf) 219 if (chunk && req->req.buf)
213 memcpy(ep->buf, req->req.buf + req->req.actual, chunk); 220 memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
214 221
222 vhub_dma_workaround(ep->buf);
223
215 /* Remember chunk size and trigger send */ 224 /* Remember chunk size and trigger send */
216 reg = VHUB_EP0_SET_TX_LEN(chunk); 225 reg = VHUB_EP0_SET_TX_LEN(chunk);
217 writel(reg, ep->ep0.ctlstat); 226 writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
224 EPVDBG(ep, "rx prime\n"); 233 EPVDBG(ep, "rx prime\n");
225 234
226 /* Prime endpoint for receiving data */ 235 /* Prime endpoint for receiving data */
227 writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL); 236 writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
228} 237}
229 238
230static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req, 239static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 80c9feac5147..5939eb1e97f2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
66 if (!req->req.dma) { 66 if (!req->req.dma) {
67 67
68 /* For IN transfers, copy data over first */ 68 /* For IN transfers, copy data over first */
69 if (ep->epn.is_in) 69 if (ep->epn.is_in) {
70 memcpy(ep->buf, req->req.buf + act, chunk); 70 memcpy(ep->buf, req->req.buf + act, chunk);
71 vhub_dma_workaround(ep->buf);
72 }
71 writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE); 73 writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
72 } else 74 } else {
75 if (ep->epn.is_in)
76 vhub_dma_workaround(req->req.buf);
73 writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE); 77 writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
78 }
74 79
75 /* Start DMA */ 80 /* Start DMA */
76 req->active = true; 81 req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
161static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep, 166static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
162 struct ast_vhub_req *req) 167 struct ast_vhub_req *req)
163{ 168{
169 struct ast_vhub_desc *desc = NULL;
164 unsigned int act = req->act_count; 170 unsigned int act = req->act_count;
165 unsigned int len = req->req.length; 171 unsigned int len = req->req.length;
166 unsigned int chunk; 172 unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
177 183
178 /* While we can create descriptors */ 184 /* While we can create descriptors */
179 while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) { 185 while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
180 struct ast_vhub_desc *desc;
181 unsigned int d_num; 186 unsigned int d_num;
182 187
183 /* Grab next free descriptor */ 188 /* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
227 req->act_count = act = act + chunk; 232 req->act_count = act = act + chunk;
228 } 233 }
229 234
235 if (likely(desc))
236 vhub_dma_workaround(desc);
237
230 /* Tell HW about new descriptors */ 238 /* Tell HW about new descriptors */
231 writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next), 239 writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
232 ep->epn.regs + AST_VHUB_EP_DESC_STATUS); 240 ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 2b040257bc1f..4ed03d33a5a9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -462,6 +462,39 @@ enum std_req_rc {
462#define DDBG(d, fmt, ...) do { } while(0) 462#define DDBG(d, fmt, ...) do { } while(0)
463#endif 463#endif
464 464
465static inline void vhub_dma_workaround(void *addr)
466{
467 /*
468 * This works around a confirmed HW issue with the Aspeed chip.
469 *
470 * The core uses a different bus to memory than the AHB going to
471 * the USB device controller. Due to the latter having a higher
472 * priority than the core for arbitration on that bus, it's
473 * possible for an MMIO to the device, followed by a DMA by the
474 * device from memory to all be performed and serviced before
475 * a previous store to memory gets completed.
476 *
477 * Thus the following scenario can happen:
478 *
479 * - Driver writes to a DMA descriptor (Mbus)
480 * - Driver writes to the MMIO register to start the DMA (AHB)
481 * - The gadget sees the second write and sends a read of the
482 * descriptor to the memory controller (Mbus)
483 * - The gadget hits memory before the descriptor write
484 * causing it to read an obsolete value.
485 *
486 * Thankfully the problem is limited to the USB gadget device; other
487 * masters in the SoC all have a lower priority than the core, thus
488 * ensuring that the store by the core arrives first.
489 *
490 * The workaround consists of using a dummy read of the memory before
491 * doing the MMIO writes. This will ensure that the previous writes
492 * have been "pushed out".
493 */
494 mb();
495 (void)__raw_readl((void __iomem *)addr);
496}
497
465/* core.c */ 498/* core.c */
466void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, 499void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
467 int status); 500 int status);
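In use, vhub_dma_workaround() sits between the descriptor stores and the doorbell write, as the ep0.c and epn.c hunks above show. A kernel-context sketch of the resulting ordering (the descriptor layout and names are illustrative, not the driver's):

	/* illustrative descriptor, not the real layout */
	struct demo_desc { __le32 addr; __le32 flags; };

	static void demo_kick(struct ast_vhub_ep *ep, struct demo_desc *desc,
			      u32 dma_addr, u32 wptr)
	{
		desc->addr  = cpu_to_le32(dma_addr);	/* 1) fill descriptor */
		desc->flags = cpu_to_le32(0);
		vhub_dma_workaround(desc);	/* 2) mb() + dummy read pushes
						 *    the stores out to memory */
		writel(wptr, ep->epn.regs + AST_VHUB_EP_DESC_STATUS); /* 3) kick */
	}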
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index a3ecce62662b..11e25a3f4f1f 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
832 832
833 r8a66597_bset(r8a66597, XCKE, SYSCFG0); 833 r8a66597_bset(r8a66597, XCKE, SYSCFG0);
834 834
835 msleep(3); 835 mdelay(3);
836 836
837 r8a66597_bset(r8a66597, PLLC, SYSCFG0); 837 r8a66597_bset(r8a66597, PLLC, SYSCFG0);
838 838
839 msleep(1); 839 mdelay(1);
840 840
841 r8a66597_bset(r8a66597, SCKE, SYSCFG0); 841 r8a66597_bset(r8a66597, SCKE, SYSCFG0);
842 842
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
1190 r8a66597->ep0_req->length = 2; 1190 r8a66597->ep0_req->length = 2;
1191 /* AV: what happens if we get called again before that gets through? */ 1191 /* AV: what happens if we get called again before that gets through? */
1192 spin_unlock(&r8a66597->lock); 1192 spin_unlock(&r8a66597->lock);
1193 r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL); 1193 r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
1194 spin_lock(&r8a66597->lock); 1194 spin_lock(&r8a66597->lock);
1195} 1195}
1196 1196
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2f4850f25e82..68e6132aa8b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3051,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
3051 if (!list_empty(&ep->ring->td_list)) { 3051 if (!list_empty(&ep->ring->td_list)) {
3052 dev_err(&udev->dev, "EP not empty, refuse reset\n"); 3052 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3053 spin_unlock_irqrestore(&xhci->lock, flags); 3053 spin_unlock_irqrestore(&xhci->lock, flags);
3054 xhci_free_command(xhci, cfg_cmd);
3054 goto cleanup; 3055 goto cleanup;
3055 } 3056 }
3056 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0); 3057 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 900875f326d7..f7c96d209eda 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
861 if (pdata->init && pdata->init(pdev) != 0) 861 if (pdata->init && pdata->init(pdev) != 0)
862 return -EINVAL; 862 return -EINVAL;
863 863
864#ifdef CONFIG_PPC32
864 if (pdata->big_endian_mmio) { 865 if (pdata->big_endian_mmio) {
865 _fsl_readl = _fsl_readl_be; 866 _fsl_readl = _fsl_readl_be;
866 _fsl_writel = _fsl_writel_be; 867 _fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
868 _fsl_readl = _fsl_readl_le; 869 _fsl_readl = _fsl_readl_le;
869 _fsl_writel = _fsl_writel_le; 870 _fsl_writel = _fsl_writel_le;
870 } 871 }
872#endif
871 873
872 /* request irq */ 874 /* request irq */
873 p_otg->irq = platform_get_irq(pdev, 0); 875 p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
958/* 960/*
959 * state file in sysfs 961 * state file in sysfs
960 */ 962 */
961static int show_fsl_usb2_otg_state(struct device *dev, 963static ssize_t show_fsl_usb2_otg_state(struct device *dev,
962 struct device_attribute *attr, char *buf) 964 struct device_attribute *attr, char *buf)
963{ 965{
964 struct otg_fsm *fsm = &fsl_otg_dev->fsm; 966 struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 150f43668bec..d1d20252bad8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -2140,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
2140 * PPS APDO. Again skip the first sink PDO as this will 2140 * PPS APDO. Again skip the first sink PDO as this will
2141 * always be 5V 3A. 2141 * always be 5V 3A.
2142 */ 2142 */
2143 for (j = i; j < port->nr_snk_pdo; j++) { 2143 for (j = 1; j < port->nr_snk_pdo; j++) {
2144 pdo = port->snk_pdo[j]; 2144 pdo = port->snk_pdo[j];
2145 2145
2146 switch (pdo_type(pdo)) { 2146 switch (pdo_type(pdo)) {
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a502f1af4a21..ed3114556fda 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1560,9 +1560,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
1560 d->iotlb = niotlb; 1560 d->iotlb = niotlb;
1561 1561
1562 for (i = 0; i < d->nvqs; ++i) { 1562 for (i = 0; i < d->nvqs; ++i) {
1563 mutex_lock(&d->vqs[i]->mutex); 1563 struct vhost_virtqueue *vq = d->vqs[i];
1564 d->vqs[i]->iotlb = niotlb; 1564
1565 mutex_unlock(&d->vqs[i]->mutex); 1565 mutex_lock(&vq->mutex);
1566 vq->iotlb = niotlb;
1567 __vhost_vq_meta_reset(vq);
1568 mutex_unlock(&vq->mutex);
1566 } 1569 }
1567 1570
1568 vhost_umem_clean(oiotlb); 1571 vhost_umem_clean(oiotlb);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 46a4484e3da7..c6f78d27947b 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -20,7 +20,7 @@
 #include <drm/drm_connector.h>	/* For DRM_MODE_PANEL_ORIENTATION_* */
 
 static bool request_mem_succeeded = false;
-static bool nowc = false;
+static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
 static struct fb_var_screeninfo efifb_defined = {
 	.activate		= FB_ACTIVATE_NOW,
@@ -68,8 +68,12 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static void efifb_destroy(struct fb_info *info)
 {
-	if (info->screen_base)
-		iounmap(info->screen_base);
+	if (info->screen_base) {
+		if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+			iounmap(info->screen_base);
+		else
+			memunmap(info->screen_base);
+	}
 	if (request_mem_succeeded)
 		release_mem_region(info->apertures->ranges[0].base,
 				   info->apertures->ranges[0].size);
@@ -104,7 +108,7 @@ static int efifb_setup(char *options)
 		else if (!strncmp(this_opt, "width:", 6))
 			screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
 		else if (!strcmp(this_opt, "nowc"))
-			nowc = true;
+			mem_flags &= ~EFI_MEMORY_WC;
 		}
 	}
 
@@ -164,6 +168,7 @@ static int efifb_probe(struct platform_device *dev)
 	unsigned int size_remap;
 	unsigned int size_total;
 	char *option = NULL;
+	efi_memory_desc_t md;
 
 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
 		return -ENODEV;
@@ -272,12 +277,35 @@ static int efifb_probe(struct platform_device *dev)
 	info->apertures->ranges[0].base = efifb_fix.smem_start;
 	info->apertures->ranges[0].size = size_remap;
 
-	if (nowc)
-		info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
-	else
-		info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+	if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+		if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+		    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+			pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+			       efifb_fix.smem_start);
+			err = -EIO;
+			goto err_release_fb;
+		}
+		/*
+		 * If the UEFI memory map covers the efifb region, we may only
+		 * remap it using the attributes the memory map prescribes.
+		 */
+		mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+		mem_flags &= md.attribute;
+	}
+	if (mem_flags & EFI_MEMORY_WC)
+		info->screen_base = ioremap_wc(efifb_fix.smem_start,
+					       efifb_fix.smem_len);
+	else if (mem_flags & EFI_MEMORY_UC)
+		info->screen_base = ioremap(efifb_fix.smem_start,
+					    efifb_fix.smem_len);
+	else if (mem_flags & EFI_MEMORY_WT)
+		info->screen_base = memremap(efifb_fix.smem_start,
+					     efifb_fix.smem_len, MEMREMAP_WT);
+	else if (mem_flags & EFI_MEMORY_WB)
+		info->screen_base = memremap(efifb_fix.smem_start,
+					     efifb_fix.smem_len, MEMREMAP_WB);
 	if (!info->screen_base) {
-		pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+		pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n",
 			efifb_fix.smem_len, efifb_fix.smem_start);
 		err = -EIO;
 		goto err_release_fb;
@@ -371,7 +399,10 @@ err_fb_dealoc:
 err_groups:
 	sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
 err_unmap:
-	iounmap(info->screen_base);
+	if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+		iounmap(info->screen_base);
+	else
+		memunmap(info->screen_base);
 err_release_fb:
 	framebuffer_release(info);
 err_release_mem:
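The probe path above intersects the driver's preferred caching modes with the attributes the UEFI memory map allows, then falls through a fixed preference order (WC, UC, WT, WB). A compact sketch of that capability-mask dispatch; the MAP_* flags and pick_mapping() are illustrative stand-ins, not the EFI_MEMORY_* constants:

    #include <stdint.h>
    #include <stdio.h>

    #define MAP_WC (1u << 0) /* write-combining */
    #define MAP_UC (1u << 1) /* uncached */
    #define MAP_WT (1u << 2) /* write-through */
    #define MAP_WB (1u << 3) /* write-back */

    static const char *pick_mapping(uint32_t allowed)
    {
    	/* Prefer WC for a framebuffer, then fall back in fixed order. */
    	if (allowed & MAP_WC)
    		return "wc";
    	if (allowed & MAP_UC)
    		return "uc";
    	if (allowed & MAP_WT)
    		return "wt";
    	if (allowed & MAP_WB)
    		return "wb";
    	return NULL; /* nothing permitted: caller must fail with -EIO */
    }

    int main(void)
    {
    	uint32_t wanted = MAP_WC | MAP_UC;   /* driver default */
    	uint32_t firmware = MAP_UC | MAP_WT; /* from the memory map */

    	printf("%s\n", pick_mapping(wanted & firmware)); /* prints "uc" */
    	return 0;
    }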
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6b237e3f4983..3988c0914322 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -513,7 +513,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
 	tell_host(vb, vb->inflate_vq);
 
 	/* balloon's page migration 2nd step -- deflate "page" */
+	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
 	balloon_page_delete(page);
+	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
 	set_page_pfns(vb, vb->pfns, page);
 	tell_host(vb, vb->deflate_vq);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0dd87aaeb39a..aba25414231a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -221,7 +221,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 
 	ret = bio_iov_iter_get_pages(&bio, iter);
 	if (unlikely(ret))
-		return ret;
+		goto out;
 	ret = bio.bi_iter.bi_size;
 
 	if (iov_iter_rw(iter) == READ) {
@@ -250,12 +250,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
 		put_page(bvec->bv_page);
 	}
 
-	if (vecs != inline_vecs)
-		kfree(vecs);
-
 	if (unlikely(bio.bi_status))
 		ret = blk_status_to_errno(bio.bi_status);
 
+out:
+	if (vecs != inline_vecs)
+		kfree(vecs);
+
 	bio_uninit(&bio);
 
 	return ret;
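The fix reroutes the early bio_iov_iter_get_pages() failure through the same exit label that frees the heap-allocated vector, so no path can leak it. A generic, self-contained sketch of that single-exit cleanup idiom:

    #include <stdlib.h>
    #include <string.h>

    #define INLINE_VECS 8

    int process(size_t nvecs, const int *src)
    {
    	int inline_vecs[INLINE_VECS];
    	int *vecs = inline_vecs;
    	int ret = 0;

    	if (nvecs > INLINE_VECS) {
    		vecs = malloc(nvecs * sizeof(*vecs));
    		if (!vecs)
    			return -1;
    	}

    	if (!src) {		/* early failure: must NOT just return */
    		ret = -1;
    		goto out;	/* the bug fixed above was a bare return here */
    	}
    	memcpy(vecs, src, nvecs * sizeof(*vecs));
    	/* ... do work ... */

    out:
    	if (vecs != inline_vecs)
    		free(vecs);
    	return ret;
    }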
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index d9f001078e08..4a717d400807 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
 	       "%s",
 	       fsdef->dentry->d_sb->s_id);
 
-	fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+	fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
+			    &cache->cache);
 
 	ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
 	if (ret < 0)
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index ab0bbe93b398..af2b17b21b94 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -186,12 +186,12 @@ try_again:
 	 * need to wait for it to be destroyed */
 wait_for_old_object:
 	trace_cachefiles_wait_active(object, dentry, xobject);
+	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 
 	if (fscache_object_is_live(&xobject->fscache)) {
 		pr_err("\n");
 		pr_err("Error: Unexpected object collision\n");
 		cachefiles_printk_object(object, xobject);
-		BUG();
 	}
 	atomic_inc(&xobject->usage);
 	write_unlock(&cache->active_lock);
@@ -248,7 +248,6 @@ wait_for_old_object:
 	goto try_again;
 
 requeue:
-	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
 	cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
 	_leave(" = -ETIMEDOUT");
 	return -ETIMEDOUT;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 5082c8a49686..40f7595aad10 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 	struct cachefiles_one_read *monitor =
 		container_of(wait, struct cachefiles_one_read, monitor);
 	struct cachefiles_object *object;
+	struct fscache_retrieval *op = monitor->op;
 	struct wait_bit_key *key = _key;
 	struct page *page = wait->private;
 
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 	list_del(&wait->entry);
 
 	/* move onto the action list and queue for FS-Cache thread pool */
-	ASSERT(monitor->op);
+	ASSERT(op);
 
-	object = container_of(monitor->op->op.object,
-			      struct cachefiles_object, fscache);
+	/* We need to temporarily bump the usage count as we don't own a ref
+	 * here otherwise cachefiles_read_copier() may free the op between the
+	 * monitor being enqueued on the op->to_do list and the op getting
+	 * enqueued on the work queue.
+	 */
+	fscache_get_retrieval(op);
 
+	object = container_of(op->op.object, struct cachefiles_object, fscache);
 	spin_lock(&object->work_lock);
-	list_add_tail(&monitor->op_link, &monitor->op->to_do);
+	list_add_tail(&monitor->op_link, &op->to_do);
 	spin_unlock(&object->work_lock);
 
-	fscache_enqueue_retrieval(monitor->op);
+	fscache_enqueue_retrieval(op);
+	fscache_put_retrieval(op);
 	return 0;
 }
 
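The comment in the hunk states the fix: the waiter does not own a reference to the op, so it takes a temporary one before publishing the monitor, and drops it once the op is safely enqueued. A userspace sketch of that get/publish/put pattern, assuming a simple atomic refcount:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct op {
    	atomic_int usage;
    	/* ... payload ... */
    };

    static void op_get(struct op *op)
    {
    	atomic_fetch_add(&op->usage, 1);
    }

    static void op_put(struct op *op)
    {
    	if (atomic_fetch_sub(&op->usage, 1) == 1)
    		free(op);	/* we dropped the last reference */
    }

    static void enqueue(struct op *op)
    {
    	(void)op;	/* stub: hand the op to a worker thread */
    }

    void publish(struct op *op)
    {
    	op_get(op);	/* pin the op across the handoff window */
    	enqueue(op);	/* a consumer may drop its own ref at any time */
    	op_put(op);	/* safe: our temporary reference kept it alive */
    }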
diff --git a/fs/dcache.c b/fs/dcache.c
index 0e8e5de3c48a..ceb7b491d1b9 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -358,14 +358,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
 	__releases(dentry->d_inode->i_lock)
 {
 	struct inode *inode = dentry->d_inode;
-	bool hashed = !d_unhashed(dentry);
 
-	if (hashed)
-		raw_write_seqcount_begin(&dentry->d_seq);
+	raw_write_seqcount_begin(&dentry->d_seq);
 	__d_clear_type_and_inode(dentry);
 	hlist_del_init(&dentry->d_u.d_alias);
-	if (hashed)
-		raw_write_seqcount_end(&dentry->d_seq);
+	raw_write_seqcount_end(&dentry->d_seq);
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&inode->i_lock);
 	if (!inode->i_nlink)
@@ -1932,10 +1929,12 @@ struct dentry *d_make_root(struct inode *root_inode)
 
 	if (root_inode) {
 		res = d_alloc_anon(root_inode->i_sb);
-		if (res)
+		if (res) {
+			res->d_flags |= DCACHE_RCUACCESS;
 			d_instantiate(res, root_inode);
-		else
+		} else {
 			iput(root_inode);
+		}
 	}
 	return res;
 }
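Making the d_seq write section unconditional means lockless readers always see the counter go odd before the dentry fields change and even again afterwards. A hand-rolled C11 seqcount sketch of that discipline, assuming a single serialized writer (as d_lock provides here); this is an analogy, not the kernel's raw_write_seqcount implementation:

    #include <stdatomic.h>

    struct seq_data {
    	atomic_uint seq;	/* even = stable, odd = write in progress */
    	atomic_int a, b;	/* data, accessed with relaxed atomics */
    };

    void seq_write(struct seq_data *d, int a, int b)
    {
    	unsigned s = atomic_load_explicit(&d->seq, memory_order_relaxed);

    	atomic_store_explicit(&d->seq, s + 1, memory_order_relaxed);
    	atomic_thread_fence(memory_order_release);	/* begin: goes odd */
    	atomic_store_explicit(&d->a, a, memory_order_relaxed);
    	atomic_store_explicit(&d->b, b, memory_order_relaxed);
    	atomic_store_explicit(&d->seq, s + 2, memory_order_release); /* end */
    }

    void seq_read(struct seq_data *d, int *a, int *b)
    {
    	unsigned begin, end;

    	do {				/* retry until a stable snapshot */
    		begin = atomic_load_explicit(&d->seq, memory_order_acquire);
    		*a = atomic_load_explicit(&d->a, memory_order_relaxed);
    		*b = atomic_load_explicit(&d->b, memory_order_relaxed);
    		atomic_thread_fence(memory_order_acquire);
    		end = atomic_load_explicit(&d->seq, memory_order_relaxed);
    	} while ((begin & 1) || begin != end);
    }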
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 71fccccf317e..8c6ab6c95727 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -86,7 +86,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
 	/* length of the variable name itself: remove GUID and separator */
 	namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
 
-	uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+	err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+	if (err)
+		goto out;
 
 	if (efivar_variable_is_removable(var->var.VendorGuid,
 					 dentry->d_name.name, namelen))
diff --git a/fs/exec.c b/fs/exec.c
index 72e961a62adb..bdd0eacefdf5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -293,6 +293,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 	bprm->vma = vma = vm_area_alloc(mm);
 	if (!vma)
 		return -ENOMEM;
+	vma_set_anonymous(vma);
 
 	if (down_write_killable(&mm->mmap_sem)) {
 		err = -EINTR;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index e68cefe08261..aa52d87985aa 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -368,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
+	if (buffer_verified(bh))
+		goto verified;
 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
 						    desc, bh))) {
 		ext4_unlock_group(sb, block_group);
@@ -386,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
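Re-testing buffer_verified() after taking the group lock means a thread that lost the race skips the checksum work instead of repeating it, and corruption is reported exactly once. The same check-again-under-the-lock pattern in a self-contained sketch:

    #include <pthread.h>
    #include <stdbool.h>

    struct buf {
    	pthread_mutex_t lock;
    	bool verified;
    	bool csum_ok;	/* stands in for the real checksum check */
    };

    int validate(struct buf *b)
    {
    	int ret = 0;

    	pthread_mutex_lock(&b->lock);
    	if (b->verified)	/* another thread won the race: skip re-verify */
    		goto out;
    	if (!b->csum_ok) {	/* expensive check, reported exactly once */
    		ret = -1;
    		goto out;
    	}
    	b->verified = true;
    out:
    	pthread_mutex_unlock(&b->lock);
    	return ret;
    }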
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index fb83750c1a14..f336cbc6e932 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
 		return -EFSCORRUPTED;
 
 	ext4_lock_group(sb, block_group);
+	if (buffer_verified(bh))
+		goto verified;
 	blk = ext4_inode_bitmap(sb, desc);
 	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
 					   EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
 		return -EFSBADCRC;
 	}
 	set_buffer_verified(bh);
+verified:
 	ext4_unlock_group(sb, block_group);
 	return 0;
 }
@@ -1385,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 			    ext4_itable_unused_count(sb, gdp)),
 			    sbi->s_inodes_per_block);
 
-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+			       ext4_itable_unused_count(sb, gdp)) <
+			      EXT4_FIRST_INO(sb)))) {
 		ext4_error(sb, "Something is wrong with group %u: "
 			   "used itable blocks: %d; "
 			   "itable unused count: %u",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index e55a8bc870bd..3543fe80a3c4 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -682,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 		goto convert;
 	}
 
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out;
+
 	flags |= AOP_FLAG_NOFS;
 
 	page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -710,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
 out_up_read:
 	up_read(&EXT4_I(inode)->xattr_sem);
 out:
-	if (handle)
+	if (handle && (ret != 1))
 		ext4_journal_stop(handle);
 	brelse(iloc.bh);
 	return ret;
@@ -752,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
 
 	ext4_write_unlock_xattr(inode, &no_expand);
 	brelse(iloc.bh);
+	mark_inode_dirty(inode);
 out:
 	return copied;
 }
@@ -898,7 +903,6 @@ retry_journal:
 		goto out;
 	}
 
-
 	page = grab_cache_page_write_begin(mapping, 0, flags);
 	if (!page) {
 		ret = -ENOMEM;
@@ -916,6 +920,9 @@ retry_journal:
 		if (ret < 0)
 			goto out_release_page;
 	}
+	ret = ext4_journal_get_write_access(handle, iloc.bh);
+	if (ret)
+		goto out_release_page;
 
 	up_read(&EXT4_I(inode)->xattr_sem);
 	*pagep = page;
@@ -936,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 				  unsigned len, unsigned copied,
 				  struct page *page)
 {
-	int i_size_changed = 0;
 	int ret;
 
 	ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -954,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 	 * But it's important to update i_size while still holding page lock:
 	 * page writeout could otherwise come in and zero beyond i_size.
 	 */
-	if (pos+copied > inode->i_size) {
+	if (pos+copied > inode->i_size)
 		i_size_write(inode, pos+copied);
-		i_size_changed = 1;
-	}
 	unlock_page(page);
 	put_page(page);
 
@@ -967,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
-		mark_inode_dirty(inode);
+	mark_inode_dirty(inode);
 
 	return copied;
 }
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 7d6c10017bdf..4efe77286ecd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
 	loff_t old_size = inode->i_size;
 	int ret = 0, ret2;
 	int i_size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_ext4_write_end(inode, pos, len, copied);
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
 	 * ordering of page lock and transaction start for journaling
 	 * filesystems.
 	 */
-	if (i_size_changed)
+	if (i_size_changed || inline_data)
 		ext4_mark_inode_dirty(handle, inode);
 
 	if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
 	int partial = 0;
 	unsigned from, to;
 	int size_changed = 0;
+	int inline_data = ext4_has_inline_data(inode);
 
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
 
 	BUG_ON(!ext4_handle_valid(handle));
 
-	if (ext4_has_inline_data(inode)) {
+	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
 						 copied, page);
 		if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
 	if (old_size < pos)
 		pagecache_isize_extended(inode, old_size, pos);
 
-	if (size_changed) {
+	if (size_changed || inline_data) {
 		ret2 = ext4_mark_inode_dirty(handle, inode);
 		if (!ret)
 			ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
 	}
 
 	if (inline_data) {
-		BUFFER_TRACE(inode_bh, "get write access");
-		ret = ext4_journal_get_write_access(handle, inode_bh);
-
-		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+		ret = ext4_mark_inode_dirty(handle, inode);
 	} else {
 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
 					     do_journal_get_write_access);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 27b9a76a0dfa..638ad4743477 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -186,11 +186,8 @@ static int kmmpd(void *data)
 			goto exit_thread;
 		}
 
-		if (sb_rdonly(sb)) {
-			ext4_warning(sb, "kmmpd being stopped since filesystem "
-				     "has been remounted as readonly.");
-			goto exit_thread;
-		}
+		if (sb_rdonly(sb))
+			break;
 
 		diff = jiffies - last_update_time;
 		if (diff < mmp_update_interval * HZ)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ba2396a7bd04..b7f7922061be 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2342,7 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
 	ext4_fsblk_t last_block;
-	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
+	ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
 	ext4_fsblk_t block_bitmap;
 	ext4_fsblk_t inode_bitmap;
 	ext4_fsblk_t inode_table;
@@ -3141,14 +3141,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
 		if (!gdp)
 			continue;
 
-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
-			continue;
-		if (group != 0)
+		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
 			break;
-		ext4_error(sb, "Inode table for bg 0 marked as "
-			   "needing zeroing");
-		if (sb_rdonly(sb))
-			return ngroups;
 	}
 
 	return group;
@@ -4085,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
+	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		ret = -EFSCORRUPTED;
 		goto failed_mount2;
 	}
 
-	sbi->s_gdb_count = db_count;
-
 	timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
 
 	/* Register extent status tree shrinker */
@@ -5213,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 
 			if (sbi->s_journal)
 				ext4_mark_recovery_complete(sb, es);
+			if (sbi->s_mmp_tsk)
+				kthread_stop(sbi->s_mmp_tsk);
 		} else {
 			/* Make sure we can mount this feature set readwrite */
 			if (ext4_has_feature_readonly(sb) ||
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index c184c5a356ff..cdcb376ef8df 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
 	struct fscache_cache_tag *tag;
 
+	ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
 	BUG_ON(!cache->ops);
 	BUG_ON(!ifsdef);
 
@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
 	if (!cache->kobj)
 		goto error;
 
-	ifsdef->cookie = &fscache_fsdef_index;
 	ifsdef->cache = cache;
 	cache->fsdef = ifsdef;
 
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 97137d7ec5ee..83bfe04456b6 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 		goto error;
 	}
 
+	ASSERTCMP(object->cookie, ==, cookie);
 	fscache_stat(&fscache_n_object_alloc);
 
 	object->debug_id = atomic_inc_return(&fscache_object_debug_id);
@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 
 	_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
 
+	ASSERTCMP(object->cookie, ==, cookie);
+
 	spin_lock(&cookie->lock);
 
 	/* there may be multiple initial creations of this object, but we only
@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
 		spin_unlock(&cache->object_list_lock);
 	}
 
-	/* attach to the cookie */
-	object->cookie = cookie;
-	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
+	/* Attach to the cookie.  The object already has a ref on it. */
 	hlist_add_head(&object->cookie_link, &cookie->backing_objects);
 
 	fscache_objlist_add(object);
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 20e0d0a4dc8c..9edc920f651f 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
 	object->store_limit_l = 0;
 	object->cache = cache;
 	object->cookie = cookie;
+	fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
 	object->parent = NULL;
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
 	RB_CLEAR_NODE(&object->objlist_link);
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e30c5975ea58..8d265790374c 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 	ASSERT(op->processor != NULL);
 	ASSERT(fscache_object_is_available(op->object));
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
-	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
 
 	fscache_stat(&fscache_n_op_enqueue);
 	switch (op->flags & FSCACHE_OP_TYPE) {
@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
 	struct fscache_cache *cache;
 
 	_enter("{OBJ%x OP%x,%d}",
-	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+	       op->object ? op->object->debug_id : 0,
+	       op->debug_id, atomic_read(&op->usage));
 
 	ASSERTCMP(atomic_read(&op->usage), >, 0);
 
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d508c7844681..40d4c66c7751 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	bool truncate_op = (lend == LLONG_MAX);
 
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pseudo_vma, current->mm);
 	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pagevec_init(&pvec);
 	next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 	 * as input to create an allocation policy.
 	 */
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pseudo_vma, mm);
 	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pseudo_vma.vm_file = file;
 
diff --git a/fs/iomap.c b/fs/iomap.c
index 77397b5a96ef..0d0bd8845586 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1443,7 +1443,7 @@ iomap_bmap(struct address_space *mapping, sector_t bno,
 		const struct iomap_ops *ops)
 {
 	struct inode *inode = mapping->host;
-	loff_t pos = bno >> inode->i_blkbits;
+	loff_t pos = bno << inode->i_blkbits;
 	unsigned blocksize = i_blocksize(inode);
 
 	if (filemap_write_and_wait(mapping))
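The one-character fix is the whole bug: a block number becomes a byte offset by shifting left by i_blkbits; shifting right collapsed every small block number to offset 0. A quick standalone check with hypothetical values:

    /* Byte offset from block number: shift left by the block-size log2.
     * With 4 KiB blocks (i_blkbits == 12), block 3 starts at byte 12288. */
    #include <assert.h>

    int main(void)
    {
    	unsigned int blkbits = 12;		/* 4096-byte blocks */
    	unsigned long long bno = 3;

    	unsigned long long pos = bno << blkbits;	/* correct: 12288 */
    	assert(pos == 12288);
    	assert((bno >> blkbits) == 0);	/* the old code collapsed to 0 */
    	return 0;
    }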
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index 395c4c0d0f06..1682a87c00b2 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -115,6 +115,13 @@ struct dinode {
 			dxd_t _dxd;	/* 16: */
 			union {
 				__le32 _rdev;	/* 4: */
+				/*
+				 * The fast symlink area
+				 * is expected to overflow
+				 * into _inlineea when
+				 * needed (which will clear
+				 * INLINEEA).
+				 */
 				u8 _fastsymlink[128];
 			} _u;
 			u8 _inlineea[128];
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index 1f26d1910409..9940a1e04cbf 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -87,6 +87,7 @@ struct jfs_inode_info {
 		struct {
 			unchar _unused[16];	/* 16: */
 			dxd_t _dxd;		/* 16: */
+			/* _inline may overflow into _inline_ea when needed */
 			unchar _inline[128];	/* 128: inline symlink */
 			/* _inline_ea may overlay the last part of
 			 * file._xtroot if maxentry = XTROOTINITSLOT
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 1b9264fd54b6..f08571433aba 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -967,8 +967,7 @@ static int __init init_jfs_fs(void)
 	jfs_inode_cachep =
 	    kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info),
 			0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
-			offsetof(struct jfs_inode_info, i_inline),
-			sizeof_field(struct jfs_inode_info, i_inline),
+			offsetof(struct jfs_inode_info, i_inline), IDATASIZE,
 			init_once);
 	if (jfs_inode_cachep == NULL)
 		return -ENOMEM;
diff --git a/fs/namespace.c b/fs/namespace.c
index 8ddd14806799..bd2f4c68506a 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -659,12 +659,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
 		return 0;
 	mnt = real_mount(bastard);
 	mnt_add_count(mnt, 1);
+	smp_mb();			// see mntput_no_expire()
 	if (likely(!read_seqretry(&mount_lock, seq)))
 		return 0;
 	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
 		mnt_add_count(mnt, -1);
 		return 1;
 	}
+	lock_mount_hash();
+	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+		mnt_add_count(mnt, -1);
+		unlock_mount_hash();
+		return 1;
+	}
+	unlock_mount_hash();
+	/* caller will mntput() */
 	return -1;
 }
 
@@ -1195,12 +1204,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
 	rcu_read_lock();
-	mnt_add_count(mnt, -1);
-	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+	if (likely(READ_ONCE(mnt->mnt_ns))) {
+		/*
+		 * Since we don't do lock_mount_hash() here,
+		 * ->mnt_ns can change under us.  However, if it's
+		 * non-NULL, then there's a reference that won't
+		 * be dropped until after an RCU delay done after
+		 * turning ->mnt_ns NULL.  So if we observe it
+		 * non-NULL under rcu_read_lock(), the reference
+		 * we are dropping is not the final one.
+		 */
+		mnt_add_count(mnt, -1);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	/*
+	 * make sure that if __legitimize_mnt() has not seen us grab
+	 * mount_lock, we'll see their refcount increment here.
+	 */
+	smp_mb();
+	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
 		rcu_read_unlock();
 		unlock_mount_hash();
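The two smp_mb() calls pair with each other: __legitimize_mnt() bumps the count and then checks the seqlock, while mntput_no_expire() takes mount_lock and then re-reads the count, so at least one side must observe the other. A C11 analogue of that Dekker-style store/fence/load pairing (names hypothetical):

    #include <stdatomic.h>

    static atomic_int count_bumped;	/* stands in for the refcount bump */
    static atomic_int lock_taken;	/* stands in for grabbing mount_lock */

    int legitimize_side(void)
    {
    	atomic_store_explicit(&count_bumped, 1, memory_order_relaxed);
    	atomic_thread_fence(memory_order_seq_cst);	/* the new smp_mb() */
    	return atomic_load_explicit(&lock_taken, memory_order_relaxed);
    }

    int mntput_side(void)
    {
    	atomic_store_explicit(&lock_taken, 1, memory_order_relaxed);
    	atomic_thread_fence(memory_order_seq_cst);	/* the new smp_mb() */
    	return atomic_load_explicit(&count_bumped, memory_order_relaxed);
    }

With both fences present the two functions cannot both return 0; drop either fence and that outcome becomes possible on weakly ordered hardware, which is exactly the race the patch closes.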
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6dd146885da9..f6c4ccd693f4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6466,34 +6466,34 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 		if (data->arg.new_lock && !data->cancelled) {
 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
 			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
-				break;
+				goto out_restart;
 		}
-
 		if (data->arg.new_lock_owner != 0) {
 			nfs_confirm_seqid(&lsp->ls_seqid, 0);
 			nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
 			set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
-			goto out_done;
-		} else if (nfs4_update_lock_stateid(lsp, &data->res.stateid))
-			goto out_done;
-
+		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
+			goto out_restart;
 		break;
 	case -NFS4ERR_BAD_STATEID:
 	case -NFS4ERR_OLD_STATEID:
 	case -NFS4ERR_STALE_STATEID:
 	case -NFS4ERR_EXPIRED:
 		if (data->arg.new_lock_owner != 0) {
-			if (nfs4_stateid_match(&data->arg.open_stateid,
-					&lsp->ls_state->open_stateid))
-				goto out_done;
-		} else if (nfs4_stateid_match(&data->arg.lock_stateid,
-					&lsp->ls_stateid))
-			goto out_done;
+			if (!nfs4_stateid_match(&data->arg.open_stateid,
+					&lsp->ls_state->open_stateid))
+				goto out_restart;
+		} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
+					&lsp->ls_stateid))
+			goto out_restart;
 	}
-	if (!data->cancelled)
-		rpc_restart_call_prepare(task);
 out_done:
 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
+	return;
+out_restart:
+	if (!data->cancelled)
+		rpc_restart_call_prepare(task);
+	goto out_done;
 }
 
@@ -6502,7 +6502,7 @@ static void nfs4_lock_release(void *calldata)
 
 	dprintk("%s: begin!\n", __func__);
 	nfs_free_seqid(data->arg.open_seqid);
-	if (data->cancelled) {
+	if (data->cancelled && data->rpc_status == 0) {
 		struct rpc_task *task;
 		task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
 				data->arg.lock_seqid);
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2751476e6b6e..f098b9f1c396 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -167,6 +167,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
 	}
 
 	if (compressed) {
+		if (!msblk->stream)
+			goto read_failure;
 		length = squashfs_decompress(msblk, bh, b, offset, length,
 			output);
 		if (length < 0)
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 23813c078cc9..0839efa720b3 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
 
 	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
 
+	if (unlikely(length < 0))
+		return -EIO;
+
 	while (length) {
 		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
 		if (entry->error) {
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 13d80947bf9e..f1c1430ae721 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
 		}
 
 		for (i = 0; i < blocks; i++) {
-			int size = le32_to_cpu(blist[i]);
+			int size = squashfs_block_size(blist[i]);
+			if (size < 0) {
+				err = size;
+				goto failure;
+			}
 			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
 		}
 		n -= blocks;
@@ -367,7 +371,24 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
 			sizeof(size));
 	if (res < 0)
 		return res;
-	return le32_to_cpu(size);
+	return squashfs_block_size(size);
+}
+
+void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
+{
+	int copied;
+	void *pageaddr;
+
+	pageaddr = kmap_atomic(page);
+	copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
+	memset(pageaddr + copied, 0, PAGE_SIZE - copied);
+	kunmap_atomic(pageaddr);
+
+	flush_dcache_page(page);
+	if (copied == avail)
+		SetPageUptodate(page);
+	else
+		SetPageError(page);
 }
 
 /* Copy data into page cache */
@@ -376,7 +397,6 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
 {
 	struct inode *inode = page->mapping->host;
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-	void *pageaddr;
 	int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
 	int start_index = page->index & ~mask, end_index = start_index | mask;
 
@@ -402,12 +422,7 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
 		if (PageUptodate(push_page))
 			goto skip_page;
 
-		pageaddr = kmap_atomic(push_page);
-		squashfs_copy_data(pageaddr, buffer, offset, avail);
-		memset(pageaddr + avail, 0, PAGE_SIZE - avail);
-		kunmap_atomic(pageaddr);
-		flush_dcache_page(push_page);
-		SetPageUptodate(push_page);
+		squashfs_fill_page(push_page, buffer, offset, avail);
 skip_page:
 		unlock_page(push_page);
 		if (i != page->index)
@@ -416,10 +431,9 @@ skip_page:
 }
 
 /* Read datablock stored packed inside a fragment (tail-end packed block) */
-static int squashfs_readpage_fragment(struct page *page)
+static int squashfs_readpage_fragment(struct page *page, int expected)
 {
 	struct inode *inode = page->mapping->host;
-	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
 		squashfs_i(inode)->fragment_block,
 		squashfs_i(inode)->fragment_size);
@@ -430,23 +444,16 @@ static int squashfs_readpage_fragment(struct page *page)
 		squashfs_i(inode)->fragment_block,
 		squashfs_i(inode)->fragment_size);
 	else
-		squashfs_copy_cache(page, buffer, i_size_read(inode) &
-			(msblk->block_size - 1),
+		squashfs_copy_cache(page, buffer, expected,
 			squashfs_i(inode)->fragment_offset);
 
 	squashfs_cache_put(buffer);
 	return res;
 }
 
-static int squashfs_readpage_sparse(struct page *page, int index, int file_end)
+static int squashfs_readpage_sparse(struct page *page, int expected)
 {
-	struct inode *inode = page->mapping->host;
-	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-	int bytes = index == file_end ?
-		(i_size_read(inode) & (msblk->block_size - 1)) :
-		msblk->block_size;
-
-	squashfs_copy_cache(page, NULL, bytes, 0);
+	squashfs_copy_cache(page, NULL, expected, 0);
 	return 0;
 }
 
@@ -456,6 +463,9 @@ static int squashfs_readpage(struct file *file, struct page *page)
 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
 	int index = page->index >> (msblk->block_log - PAGE_SHIFT);
 	int file_end = i_size_read(inode) >> msblk->block_log;
+	int expected = index == file_end ?
+			(i_size_read(inode) & (msblk->block_size - 1)) :
+			 msblk->block_size;
 	int res;
 	void *pageaddr;
 
@@ -474,11 +484,11 @@ static int squashfs_readpage(struct file *file, struct page *page)
 			goto error_out;
 
 		if (bsize == 0)
-			res = squashfs_readpage_sparse(page, index, file_end);
+			res = squashfs_readpage_sparse(page, expected);
 		else
-			res = squashfs_readpage_block(page, block, bsize);
+			res = squashfs_readpage_block(page, block, bsize, expected);
 	} else
-		res = squashfs_readpage_fragment(page);
+		res = squashfs_readpage_fragment(page, expected);
 
 	if (!res)
 		return 0;
diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c
index f2310d2a2019..a9ba8d96776a 100644
--- a/fs/squashfs/file_cache.c
+++ b/fs/squashfs/file_cache.c
@@ -20,7 +20,7 @@
 #include "squashfs.h"
 
 /* Read separately compressed datablock and memcopy into page cache */
-int squashfs_readpage_block(struct page *page, u64 block, int bsize)
+int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected)
 {
 	struct inode *i = page->mapping->host;
 	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
@@ -31,7 +31,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize)
 		ERROR("Unable to read page, block %llx, size %x\n", block,
 			bsize);
 	else
-		squashfs_copy_cache(page, buffer, buffer->length, 0);
+		squashfs_copy_cache(page, buffer, expected, 0);
 
 	squashfs_cache_put(buffer);
 	return res;
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index cb485d8e0e91..80db1b86a27c 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -21,10 +21,11 @@
 #include "page_actor.h"
 
 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-	int pages, struct page **page);
+	int pages, struct page **page, int bytes);
 
 /* Read separately compressed datablock directly into page cache */
-int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
+int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
+	int expected)
 
 {
 	struct inode *inode = target_page->mapping->host;
@@ -83,7 +84,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
 	 * using an intermediate buffer.
 	 */
 	res = squashfs_read_cache(target_page, block, bsize, pages,
-							page);
+							page, expected);
 	if (res < 0)
 		goto mark_errored;
 
@@ -95,6 +96,11 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
 	if (res < 0)
 		goto mark_errored;
 
+	if (res != expected) {
+		res = -EIO;
+		goto mark_errored;
+	}
+
 	/* Last page may have trailing bytes not filled */
 	bytes = res % PAGE_SIZE;
 	if (bytes) {
@@ -138,13 +144,12 @@ out:
 
 
 static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
-	int pages, struct page **page)
+	int pages, struct page **page, int bytes)
 {
 	struct inode *i = target_page->mapping->host;
 	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
 		block, bsize);
-	int bytes = buffer->length, res = buffer->error, n, offset = 0;
-	void *pageaddr;
+	int res = buffer->error, n, offset = 0;
 
 	if (res) {
 		ERROR("Unable to read page, block %llx, size %x\n", block,
@@ -159,12 +164,7 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
 		if (page[n] == NULL)
 			continue;
 
-		pageaddr = kmap_atomic(page[n]);
-		squashfs_copy_data(pageaddr, buffer, offset, avail);
-		memset(pageaddr + avail, 0, PAGE_SIZE - avail);
-		kunmap_atomic(pageaddr);
-		flush_dcache_page(page[n]);
-		SetPageUptodate(page[n]);
+		squashfs_fill_page(page[n], buffer, offset, avail);
 		unlock_page(page[n]);
 		if (page[n] != target_page)
 			put_page(page[n]);
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 0ed6edbc5c71..0681feab4a84 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
 				u64 *fragment_block)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	int block = SQUASHFS_FRAGMENT_INDEX(fragment);
-	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
-	u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
+	int block, offset, size;
 	struct squashfs_fragment_entry fragment_entry;
-	int size;
+	u64 start_block;
+
+	if (fragment >= msblk->fragments)
+		return -EIO;
+	block = SQUASHFS_FRAGMENT_INDEX(fragment);
+	offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+
+	start_block = le64_to_cpu(msblk->fragment_index[block]);
 
 	size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
 					&offset, sizeof(fragment_entry));
@@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
 		return size;
 
 	*fragment_block = le64_to_cpu(fragment_entry.start_block);
-	size = le32_to_cpu(fragment_entry.size);
-
-	return size;
+	return squashfs_block_size(fragment_entry.size);
 }
 
 
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index 887d6d270080..f89f8a74c6ce 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -67,11 +67,12 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
 				u64, u64, unsigned int);
 
 /* file.c */
+void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int);
 void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int,
 				int);
 
 /* file_xxx.c */
-extern int squashfs_readpage_block(struct page *, u64, int);
+extern int squashfs_readpage_block(struct page *, u64, int, int);
 
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 24d12fd14177..4e6853f084d0 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -129,6 +129,12 @@
 
 #define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
 
+static inline int squashfs_block_size(__le32 raw)
+{
+	u32 size = le32_to_cpu(raw);
+	return (size >> 25) ? -EIO : size;
+}
+
 /*
  * Inode number ops.  Inodes consist of a compressed block number, and an
  * uncompressed offset within that block
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 1da565cb50c3..ef69c31947bf 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -75,6 +75,7 @@ struct squashfs_sb_info {
 	unsigned short			block_log;
 	long long			bytes_used;
 	unsigned int			inodes;
+	unsigned int			fragments;
 	int				xattr_ids;
 };
 #endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 8a73b97217c8..40e657386fa5 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -175,6 +175,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
 	msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
 	msblk->inodes = le32_to_cpu(sblk->inodes);
+	msblk->fragments = le32_to_cpu(sblk->fragments);
 	flags = le16_to_cpu(sblk->flags);
 
 	TRACE("Found valid superblock on %pg\n", sb->s_bdev);
@@ -185,7 +186,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 	TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
 	TRACE("Block size %d\n", msblk->block_size);
 	TRACE("Number of inodes %d\n", msblk->inodes);
-	TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
+	TRACE("Number of fragments %d\n", msblk->fragments);
 	TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
 	TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
 	TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
@@ -272,7 +273,7 @@ allocate_id_index_table:
 	sb->s_export_op = &squashfs_export_ops;
 
 handle_fragments:
-	fragments = le32_to_cpu(sblk->fragments);
+	fragments = msblk->fragments;
 	if (fragments == 0)
 		goto check_directory_table;
 
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 594d192b2331..bad9cea37f12 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -633,8 +633,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 		/* the various vma->vm_userfaultfd_ctx still points to it */
 		down_write(&mm->mmap_sem);
 		for (vma = mm->mmap; vma; vma = vma->vm_next)
-			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
 				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+				vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
+			}
 		up_write(&mm->mmap_sem);
 
 		userfaultfd_ctx_put(release_new_ctx);
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index eef466260d43..75dbdc14c45f 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -223,12 +223,13 @@ xfs_alloc_get_rec(
 	error = xfs_btree_get_rec(cur, &rec, stat);
 	if (error || !(*stat))
 		return error;
-	if (rec->alloc.ar_blockcount == 0)
-		goto out_bad_rec;
 
 	*bno = be32_to_cpu(rec->alloc.ar_startblock);
 	*len = be32_to_cpu(rec->alloc.ar_blockcount);
 
+	if (*len == 0)
+		goto out_bad_rec;
+
 	/* check for valid extent range, including overflow */
 	if (!xfs_verify_agbno(mp, agno, *bno))
 		goto out_bad_rec;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 33dc34655ac3..30d1d60f1d46 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -731,7 +731,8 @@ xfs_inode_validate_extsize(
 	if ((hint_flag || inherit_flag) && extsize == 0)
 		return __this_address;
 
-	if (!(hint_flag || inherit_flag) && extsize != 0)
+	/* free inodes get flags set to zero but extsize remains */
+	if (mode && !(hint_flag || inherit_flag) && extsize != 0)
 		return __this_address;
 
 	if (extsize_bytes % blocksize_bytes)
@@ -777,7 +778,8 @@ xfs_inode_validate_cowextsize(
 	if (hint_flag && cowextsize == 0)
 		return __this_address;
 
-	if (!hint_flag && cowextsize != 0)
+	/* free inodes get flags set to zero but cowextsize remains */
+	if (mode && !hint_flag && cowextsize != 0)
 		return __this_address;
 
 	if (hint_flag && rt_flag)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb74222..ca3f2c2edd85 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
 
+/**
+ * blk_mq_mark_complete() - Set request state to complete
+ * @rq: request to set to complete state
+ *
+ * Returns true if request state was successfully set to complete. If
+ * successful, the caller is responsible for seeing this request is ended, as
+ * blk_mq_complete_request will not work again.
+ */
+static inline bool blk_mq_mark_complete(struct request *rq)
+{
+	return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
+			MQ_RQ_IN_FLIGHT;
+}
+
 /*
  * Driver command data is immediately after the request. So subtract request
  * size to get back to the original request, add request size to get the PDU.
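
The kernel-doc above pins down a claim-once contract: exactly one caller wins the IN_FLIGHT to COMPLETE transition and must then finish the request itself. Below is a minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's cmpxchg(); every name in it is illustrative, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum rq_state { RQ_IN_FLIGHT, RQ_COMPLETE };

struct request {
	_Atomic enum rq_state state;
};

/* Returns true only for the single caller that wins the transition. */
static bool mark_complete(struct request *rq)
{
	enum rq_state expected = RQ_IN_FLIGHT;

	return atomic_compare_exchange_strong(&rq->state, &expected,
					      RQ_COMPLETE);
}

int main(void)
{
	struct request rq = { .state = RQ_IN_FLIGHT };

	printf("first claim: %d\n", mark_complete(&rq));   /* 1: wins */
	printf("second claim: %d\n", mark_complete(&rq));  /* 0: already complete */
	return 0;
}
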
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index 687b1760bb9f..f02cee0225d4 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -5,10 +5,10 @@
 #include <uapi/linux/bpfilter.h>
 
 struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
 			    unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
-			    int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+			    int __user *optlen);
 extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
 				       char __user *optval,
 				       unsigned int optlen, bool is_set);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a97a63eef59f..3233fbe23594 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -30,7 +30,7 @@ struct cpu {
 };
 
 extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
 extern void cpu_init(void);
 extern void trap_init(void);
 
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index e6c0448ebcc7..31c865d1842e 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
 
 static inline void delayacct_blkio_end(struct task_struct *p)
 {
-	if (current->delays)
+	if (p->delays)
 		__delayacct_blkio_end(p);
 	delayacct_clear_flag(DELAYACCT_PF_BLKIO);
 }
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 56add823f190..401e4b254e30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
 	void *flush;
 } efi_file_handle_t;
 
+typedef struct {
+	u64 revision;
+	u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+	u64 revision;
+	u64 open_volume;
+} efi_file_io_interface_64_t;
+
 typedef struct _efi_file_io_interface {
 	u64 revision;
 	int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
 extern void efi_gettimeofday (struct timespec64 *ts);
 extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if possible */
 #ifdef CONFIG_X86
-extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
 extern efi_status_t efi_query_variable_store(u32 attributes,
 					     unsigned long size,
 					     bool nonblocking);
 extern void efi_find_mirror(void);
 #else
-static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
 
 static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
 
 extern int efi_tpm_eventlog_init(void);
 
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
 #endif /* _LINUX_EFI_H */
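
The two typedefs added above differ only in the width of the open_volume slot, which holds a firmware-side function pointer in the firmware's own word size. A standalone sketch, with illustrative stand-in types rather than the kernel's, of why a mixed-mode caller must pick the right slot width:

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t revision;
	uint32_t open_volume;	/* 32-bit firmware: a 32-bit pointer */
} file_io_interface_32;

typedef struct {
	uint64_t revision;
	uint64_t open_volume;	/* 64-bit firmware: a 64-bit pointer */
} file_io_interface_64;

int main(void)
{
	/* A thunk reading the wrong width would truncate or over-read. */
	printf("32-bit slot: %zu bytes\n",
	       sizeof(((file_io_interface_32 *)0)->open_volume));
	printf("64-bit slot: %zu bytes\n",
	       sizeof(((file_io_interface_64 *)0)->open_volume));
	return 0;
}
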
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 7094718b653b..ffcc7724ca21 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -11,6 +11,7 @@
 
 #include <linux/fcntl.h>
 #include <linux/wait.h>
+#include <linux/err.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index cbb872c1b607..9d2ea3e907d0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -73,6 +73,7 @@
 #define GICD_TYPER_MBIS			(1U << 16)
 
 #define GICD_TYPER_ID_BITS(typer)	((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer)	((((typer) >> 11) & 0x1f) + 1)
 #define GICD_TYPER_IRQS(typer)		((((typer) & 0x1f) + 1) * 32)
 
 #define GICD_IROUTER_SPI_MODE_ONE	(0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
 		phys_addr_t	phys_base;
 	} __percpu		*rdist;
 	struct page		*prop_page;
-	int			id_bits;
 	u64			flags;
+	u32			gicd_typer;
 	bool			has_vlpis;
 	bool			has_direct_lpi;
 };
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80cbb7fdce4a..83957920653a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
 	struct mlx5_frag_buf	frag_buf;
 	u32			sz_m1;
 	u32			frag_sz_m1;
+	u32			strides_offset;
 	u8			log_sz;
 	u8			log_stride;
 	u8			log_frag_strides;
@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
 	return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
-				 struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+					u32 strides_offset,
+					struct mlx5_frag_buf_ctrl *fbc)
 {
 	fbc->log_stride = log_stride;
 	fbc->log_sz = log_sz;
 	fbc->sz_m1 = (1 << fbc->log_sz) - 1;
 	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
 	fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+	fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+				 struct mlx5_frag_buf_ctrl *fbc)
+{
+	mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
 }
 
 static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
 					  u32 ix)
 {
-	unsigned int frag = (ix >> fbc->log_frag_strides);
+	unsigned int frag;
+
+	ix += fbc->strides_offset;
+	frag = ix >> fbc->log_frag_strides;
 
 	return fbc->frag_buf.frags[frag].buf +
 	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
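
The new strides_offset field biases the caller-visible index before the fragment number and the in-fragment byte offset are derived, exactly as mlx5_frag_buf_get_wqe() now does. A worked, standalone rerun of that arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int log_stride = 6;			 /* 64-byte strides */
	unsigned int log_frag_strides = 12 - log_stride; /* 4K frags: 64 strides each */
	unsigned int frag_sz_m1 = (1u << log_frag_strides) - 1;
	unsigned int strides_offset = 100;		 /* first entry sits mid-buffer */
	unsigned int ix = 30;				 /* caller-visible index */

	ix += strides_offset;				 /* bias, as the helper now does */

	unsigned int frag = ix >> log_frag_strides;	 /* which page-sized fragment */
	unsigned int off = (frag_sz_m1 & ix) << log_stride; /* byte offset inside it */

	printf("frag=%u offset=%u\n", frag, off);	 /* frag=2 offset=128 */
	return 0;
}
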
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d3a3842316b8..68a5121694ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -452,6 +452,23 @@ struct vm_operations_struct {
 						unsigned long addr);
 };
 
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+	static const struct vm_operations_struct dummy_vm_ops = {};
+
+	vma->vm_mm = mm;
+	vma->vm_ops = &dummy_vm_ops;
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+	vma->vm_ops = NULL;
+}
+
+/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
+#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
+
 struct mmu_gather;
 struct inode;
 
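
vma_init() points vm_ops at an empty static table instead of leaving it NULL, so callers may dereference vma->vm_ops unconditionally and only need to test individual callbacks. A userspace sketch of that null-object pattern; all names here are illustrative:

#include <stdio.h>

struct ops {
	void (*open)(void);
};

static const struct ops dummy_ops;	/* every callback NULL, the pointer never is */

struct object {
	const struct ops *ops;
};

static void object_init(struct object *obj)
{
	obj->ops = &dummy_ops;	/* obj->ops is always safe to dereference */
}

int main(void)
{
	struct object obj;

	object_init(&obj);
	if (obj.ops->open)	/* individual callbacks may still be absent */
		obj.ops->open();
	else
		printf("no open callback\n");
	return 0;
}
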
diff --git a/include/linux/pci.h b/include/linux/pci.h
index abd5d5e17aee..c133ccfa002e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -368,7 +368,6 @@ struct pci_dev {
 	unsigned int	transparent:1;		/* Subtractive decode bridge */
 	unsigned int	multifunction:1;	/* Multi-function device */
 
-	unsigned int	is_added:1;
 	unsigned int	is_busmaster:1;		/* Is busmaster */
 	unsigned int	no_msi:1;		/* May not use MSI */
 	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1fa12887ec02..87f6db437e4a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 		   u32 max_stack, bool crosstask, bool add_mark);
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 36df6ccbc874..4786c2235b98 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @member:	the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
  */
 #define list_for_each_entry_continue_rcu(pos, head, member) 		\
 	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  *
  * Iterate over the tail of a list starting from a given position,
  * which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
  */
 #define list_for_each_entry_from_rcu(pos, head, member) 		\
 	for (; &(pos)->member != (head);				\
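
The two comment blocks above differ only in where iteration starts relative to the given position. A plain, non-RCU userspace sketch that isolates just that difference:

#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* "continue" starts after pos; "from" starts at pos itself. */
#define for_each_continue(pos) for ((pos) = (pos)->next; (pos); (pos) = (pos)->next)
#define for_each_from(pos)     for (; (pos); (pos) = (pos)->next)

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *pos = a.next;	/* the node holding 2 */

	printf("continue after 2:");
	for_each_continue(pos)
		printf(" %d", pos->val);	/* prints: 3 */

	pos = a.next;
	printf("\nfrom 2:");
	for_each_from(pos)
		printf(" %d", pos->val);	/* prints: 2 3 */
	printf("\n");
	return 0;
}
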
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 65163aa0bb04..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
 void synchronize_rcu(void);
 
 /*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
 	} while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
 			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
 	do { \
 		rcu_all_qs(); \
-		rcu_note_voluntary_context_switch_lite(t); \
+		rcu_tasks_qs(t); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t)	do { } while (0)
+#define rcu_tasks_qs(t)	do { } while (0)
 #define rcu_note_voluntary_context_switch(t)		rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	if (!cond_resched()) \
-		rcu_note_voluntary_context_switch_lite(current); \
+	rcu_tasks_qs(current); \
+	cond_resched(); \
 } while (0)
 
 /*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
  * This is simply an identity function, but it documents where a pointer
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking.  In C11, it would map to
- * kill_dependency().  It could be used as follows:
- * ``
+ * kill_dependency().  It could be used as follows::
+ *
  *	rcu_read_lock();
  *	p = rcu_dereference(gp);
  *	long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
  *		p = rcu_pointer_handoff(p);
  *	}
  *	rcu_read_unlock();
- *``
  */
 #define rcu_pointer_handoff(p) (p)
 
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
 	do { \
 		rcu_sched_qs(); \
-		rcu_note_voluntary_context_switch_lite(current); \
+		rcu_tasks_qs(current); \
 	} while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b72ebdff0b77..003d09ab308d 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
 void ring_buffer_record_off(struct ring_buffer *buffer);
 void ring_buffer_record_on(struct ring_buffer *buffer);
 int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1b92a28dd672..6fd615a0eea9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#else
 extern void rt_mutex_lock(struct rt_mutex *lock);
+#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#endif
+
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 			       struct hrtimer_sleeper *timeout);
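
Under CONFIG_DEBUG_LOCK_ALLOC the real entry point is the subclass-taking variant and the plain name becomes a macro over it; without it the mapping is reversed. A compile-time sketch of that config-dependent aliasing, using hypothetical names rather than the kernel's:

#include <stdio.h>

#define DEBUG_LOCK_ALLOC 1

static void my_lock_nested(int *lock, unsigned int subclass)
{
	printf("lock %p (subclass %u)\n", (void *)lock, subclass);
}

#if DEBUG_LOCK_ALLOC
#define my_lock(lock)			my_lock_nested(lock, 0)
#else
static void my_lock(int *lock) { printf("lock %p\n", (void *)lock); }
#define my_lock_nested(lock, subclass)	my_lock(lock)
#endif

int main(void)
{
	int lock = 0;

	my_lock(&lock);            /* expands to my_lock_nested(&lock, 0) */
	my_lock_nested(&lock, 1);  /* explicit subclass for nested locking */
	return 0;
}

Whichever way the config flips, both names stay callable with the same arguments, so call sites never need their own #ifdefs.
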
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 91494d7e8e41..3e72a291c401 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 	return retval;
 }
 
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+	int retval;
+
+	retval = __srcu_read_lock(sp);
+	return retval;
+}
+
 /**
  * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
  * @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
 	__srcu_read_unlock(sp, idx);
 }
 
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+	__srcu_read_unlock(sp, idx);
+}
+
 /**
  * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
  *
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 66272862070b..61dfd93b6ee4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -64,6 +64,8 @@ struct torture_random_state {
 	long trs_count;
 };
 #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+	DEFINE_PER_CPU(struct torture_random_state, name)
 unsigned long torture_random(struct torture_random_state *trsp);
 
 /* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
 void torture_init_end(void);
 bool torture_cleanup_begin(void);
 void torture_cleanup_end(void);
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 9324ac2d9ff2..43913ae79f64 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -64,7 +64,8 @@ struct vsock_sock {
 	struct list_head pending_links;
 	struct list_head accept_queue;
 	bool rejected;
-	struct delayed_work dwork;
+	struct delayed_work connect_work;
+	struct delayed_work pending_work;
 	struct delayed_work close_work;
 	bool close_work_scheduled;
 	u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
 
 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
 			    struct socket *sock,
 			    struct sock *parent,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5fbfe61f41c6..1beb3ead0385 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5835,10 +5835,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 /**
  * cfg80211_rx_control_port - notification about a received control port frame
  * @dev: The device the frame matched to
- * @buf: control port frame
- * @len: length of the frame data
- * @addr: The peer from which the frame was received
- * @proto: frame protocol, typically PAE or Pre-authentication
+ * @skb: The skbuf with the control port frame.  It is assumed that the skbuf
+ *	is 802.3 formatted (with 802.3 header).  The skb can be non-linear.
+ *	This function does not take ownership of the skb, so the caller is
+ *	responsible for any cleanup.  The caller must also ensure that
+ *	skb->protocol is set appropriately.
  * @unencrypted: Whether the frame was received unencrypted
  *
  * This function is used to inform userspace about a received control port
@@ -5851,8 +5852,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
  * Return: %true if the frame was passed to userspace
  */
 bool cfg80211_rx_control_port(struct net_device *dev,
-			      const u8 *buf, size_t len,
-			      const u8 *addr, u16 proto, bool unencrypted);
+			      struct sk_buff *skb, bool unencrypted);
 
 /**
  * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 71b9043aa0e7..3d4930528db0 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -281,6 +281,11 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
 	atomic_inc(&f6i->fib6_ref);
 }
 
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+	return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
 static inline void fib6_info_release(struct fib6_info *f6i)
 {
 	if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
diff --git a/include/net/llc.h b/include/net/llc.h
index dc35f25eb679..890a87318014 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
 	refcount_inc(&sap->refcnt);
 }
 
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+	return refcount_inc_not_zero(&sap->refcnt);
+}
+
 void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
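
Both fib6_info_hold_safe() and llc_sap_hold_safe() above follow the inc-not-zero idiom: take a reference only if the count has not already dropped to zero, so an object mid-teardown is never resurrected. A userspace sketch of that idiom using a C11 compare-and-swap loop; this is illustrative, not the kernel's refcount_t implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool refcount_inc_not_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		/* On failure the CAS reloads "old", so we re-check for zero. */
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;
	}
	return false;	/* object already on its way to being freed */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live: %d\n", refcount_inc_not_zero(&live));	/* 1 */
	printf("dying: %d\n", refcount_inc_not_zero(&dying));	/* 0 */
	return 0;
}
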
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 08c005ce56e9..dc417ef0a0c5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
  * @portid: netlink portID of the original message
  * @seq: netlink sequence number
  * @family: protocol family
+ * @level: depth of the chains
  * @report: notify via unicast netlink message
  */
 struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
 	u32				portid;
 	u32				seq;
 	u8				family;
+	u8				level;
 	bool				report;
 };
 
@@ -865,7 +867,6 @@ enum nft_chain_flags {
  * @table: table that this chain belongs to
  * @handle: chain handle
  * @use: number of jump references to this chain
- * @level: length of longest path to this chain
  * @flags: bitmask of enum nft_chain_flags
  * @name: name of the chain
  */
@@ -878,7 +879,6 @@ struct nft_chain {
 	struct nft_table		*table;
 	u64				handle;
 	u32				use;
-	u16				level;
 	u8				flags:6,
 					genmask:2;
 	char				*name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
 	u32				genmask:2,
 					use:30;
 	u64				handle;
-	char				*dev_name[NFT_FLOWTABLE_DEVICE_MAX];
 	/* runtime data below here */
 	struct nf_hook_ops		*ops ____cacheline_aligned;
 	struct nf_flowtable		data;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 3482d13d655b..cd3ecda9386a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 		  struct pipe_inode_info *pipe, size_t len,
 		  unsigned int flags);
 
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
 static inline void tcp_dec_quickack_mode(struct sock *sk,
 					 const unsigned int pkts)
 {
@@ -539,6 +540,7 @@ void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
 void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
@@ -839,6 +841,11 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
  */
 static inline int tcp_v6_iif(const struct sk_buff *skb)
 {
+	return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
+{
 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
 
 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5936aac357ab..a8d07feff6a0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
52 * "cpuqs": CPU passes through a quiescent state. 52 * "cpuqs": CPU passes through a quiescent state.
53 * "cpuonl": CPU comes online. 53 * "cpuonl": CPU comes online.
54 * "cpuofl": CPU goes offline. 54 * "cpuofl": CPU goes offline.
55 * "cpuofl-bgp": CPU goes offline while blocking a grace period.
55 * "reqwait": GP kthread sleeps waiting for grace-period request. 56 * "reqwait": GP kthread sleeps waiting for grace-period request.
56 * "reqwaitsig": GP kthread awakened by signal from reqwait state. 57 * "reqwaitsig": GP kthread awakened by signal from reqwait state.
57 * "fqswait": GP kthread waiting until time to force quiescent states. 58 * "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
63 */ 64 */
64TRACE_EVENT(rcu_grace_period, 65TRACE_EVENT(rcu_grace_period,
65 66
66 TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent), 67 TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
67 68
68 TP_ARGS(rcuname, gpnum, gpevent), 69 TP_ARGS(rcuname, gp_seq, gpevent),
69 70
70 TP_STRUCT__entry( 71 TP_STRUCT__entry(
71 __field(const char *, rcuname) 72 __field(const char *, rcuname)
72 __field(unsigned long, gpnum) 73 __field(unsigned long, gp_seq)
73 __field(const char *, gpevent) 74 __field(const char *, gpevent)
74 ), 75 ),
75 76
76 TP_fast_assign( 77 TP_fast_assign(
77 __entry->rcuname = rcuname; 78 __entry->rcuname = rcuname;
78 __entry->gpnum = gpnum; 79 __entry->gp_seq = gp_seq;
79 __entry->gpevent = gpevent; 80 __entry->gpevent = gpevent;
80 ), 81 ),
81 82
82 TP_printk("%s %lu %s", 83 TP_printk("%s %lu %s",
83 __entry->rcuname, __entry->gpnum, __entry->gpevent) 84 __entry->rcuname, __entry->gp_seq, __entry->gpevent)
84); 85);
85 86
86/* 87/*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
90 * 91 *
91 * "Startleaf": Request a grace period based on leaf-node data. 92 * "Startleaf": Request a grace period based on leaf-node data.
92 * "Prestarted": Someone beat us to the request 93 * "Prestarted": Someone beat us to the request
93 * "Startedleaf": Leaf-node start proved sufficient. 94 * "Startedleaf": Leaf node marked for future GP.
94 * "Startedleafroot": Leaf-node start proved sufficient after checking root. 95 * "Startedleafroot": All nodes from leaf to root marked for future GP.
95 * "Startedroot": Requested a nocb grace period based on root-node data. 96 * "Startedroot": Requested a nocb grace period based on root-node data.
96 * "NoGPkthread": The RCU grace-period kthread has not yet started. 97 * "NoGPkthread": The RCU grace-period kthread has not yet started.
97 * "StartWait": Start waiting for the requested grace period. 98 * "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
102 */ 103 */
103TRACE_EVENT(rcu_future_grace_period, 104TRACE_EVENT(rcu_future_grace_period,
104 105
105 TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed, 106 TP_PROTO(const char *rcuname, unsigned long gp_seq,
106 unsigned long c, u8 level, int grplo, int grphi, 107 unsigned long gp_seq_req, u8 level, int grplo, int grphi,
107 const char *gpevent), 108 const char *gpevent),
108 109
109 TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent), 110 TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
110 111
111 TP_STRUCT__entry( 112 TP_STRUCT__entry(
112 __field(const char *, rcuname) 113 __field(const char *, rcuname)
113 __field(unsigned long, gpnum) 114 __field(unsigned long, gp_seq)
114 __field(unsigned long, completed) 115 __field(unsigned long, gp_seq_req)
115 __field(unsigned long, c)
116 __field(u8, level) 116 __field(u8, level)
117 __field(int, grplo) 117 __field(int, grplo)
118 __field(int, grphi) 118 __field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
121 121
122 TP_fast_assign( 122 TP_fast_assign(
123 __entry->rcuname = rcuname; 123 __entry->rcuname = rcuname;
124 __entry->gpnum = gpnum; 124 __entry->gp_seq = gp_seq;
125 __entry->completed = completed; 125 __entry->gp_seq_req = gp_seq_req;
126 __entry->c = c;
127 __entry->level = level; 126 __entry->level = level;
128 __entry->grplo = grplo; 127 __entry->grplo = grplo;
129 __entry->grphi = grphi; 128 __entry->grphi = grphi;
130 __entry->gpevent = gpevent; 129 __entry->gpevent = gpevent;
131 ), 130 ),
132 131
133 TP_printk("%s %lu %lu %lu %u %d %d %s", 132 TP_printk("%s %lu %lu %u %d %d %s",
134 __entry->rcuname, __entry->gpnum, __entry->completed, 133 __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
135 __entry->c, __entry->level, __entry->grplo, __entry->grphi, 134 __entry->grplo, __entry->grphi, __entry->gpevent)
136 __entry->gpevent)
137); 135);
138 136
139/* 137/*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
145 */ 143 */
146TRACE_EVENT(rcu_grace_period_init, 144TRACE_EVENT(rcu_grace_period_init,
147 145
148 TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level, 146 TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
149 int grplo, int grphi, unsigned long qsmask), 147 int grplo, int grphi, unsigned long qsmask),
150 148
151 TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask), 149 TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
152 150
153 TP_STRUCT__entry( 151 TP_STRUCT__entry(
154 __field(const char *, rcuname) 152 __field(const char *, rcuname)
155 __field(unsigned long, gpnum) 153 __field(unsigned long, gp_seq)
156 __field(u8, level) 154 __field(u8, level)
157 __field(int, grplo) 155 __field(int, grplo)
158 __field(int, grphi) 156 __field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
161 159
162 TP_fast_assign( 160 TP_fast_assign(
163 __entry->rcuname = rcuname; 161 __entry->rcuname = rcuname;
164 __entry->gpnum = gpnum; 162 __entry->gp_seq = gp_seq;
165 __entry->level = level; 163 __entry->level = level;
166 __entry->grplo = grplo; 164 __entry->grplo = grplo;
167 __entry->grphi = grphi; 165 __entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
169 ), 167 ),
170 168
171 TP_printk("%s %lu %u %d %d %lx", 169 TP_printk("%s %lu %u %d %d %lx",
172 __entry->rcuname, __entry->gpnum, __entry->level, 170 __entry->rcuname, __entry->gp_seq, __entry->level,
173 __entry->grplo, __entry->grphi, __entry->qsmask) 171 __entry->grplo, __entry->grphi, __entry->qsmask)
174); 172);
175 173
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
301 */ 299 */
302TRACE_EVENT(rcu_preempt_task, 300TRACE_EVENT(rcu_preempt_task,
303 301
304 TP_PROTO(const char *rcuname, int pid, unsigned long gpnum), 302 TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
305 303
306 TP_ARGS(rcuname, pid, gpnum), 304 TP_ARGS(rcuname, pid, gp_seq),
307 305
308 TP_STRUCT__entry( 306 TP_STRUCT__entry(
309 __field(const char *, rcuname) 307 __field(const char *, rcuname)
310 __field(unsigned long, gpnum) 308 __field(unsigned long, gp_seq)
311 __field(int, pid) 309 __field(int, pid)
312 ), 310 ),
313 311
314 TP_fast_assign( 312 TP_fast_assign(
315 __entry->rcuname = rcuname; 313 __entry->rcuname = rcuname;
316 __entry->gpnum = gpnum; 314 __entry->gp_seq = gp_seq;
317 __entry->pid = pid; 315 __entry->pid = pid;
318 ), 316 ),
319 317
320 TP_printk("%s %lu %d", 318 TP_printk("%s %lu %d",
321 __entry->rcuname, __entry->gpnum, __entry->pid) 319 __entry->rcuname, __entry->gp_seq, __entry->pid)
322); 320);
323 321
324/* 322/*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
328 */ 326 */
329TRACE_EVENT(rcu_unlock_preempted_task, 327TRACE_EVENT(rcu_unlock_preempted_task,
330 328
331 TP_PROTO(const char *rcuname, unsigned long gpnum, int pid), 329 TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
332 330
333 TP_ARGS(rcuname, gpnum, pid), 331 TP_ARGS(rcuname, gp_seq, pid),
334 332
335 TP_STRUCT__entry( 333 TP_STRUCT__entry(
336 __field(const char *, rcuname) 334 __field(const char *, rcuname)
337 __field(unsigned long, gpnum) 335 __field(unsigned long, gp_seq)
338 __field(int, pid) 336 __field(int, pid)
339 ), 337 ),
340 338
341 TP_fast_assign( 339 TP_fast_assign(
342 __entry->rcuname = rcuname; 340 __entry->rcuname = rcuname;
343 __entry->gpnum = gpnum; 341 __entry->gp_seq = gp_seq;
344 __entry->pid = pid; 342 __entry->pid = pid;
345 ), 343 ),
346 344
347 TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid) 345 TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
348); 346);
349 347
350/* 348/*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
357 */ 355 */
358TRACE_EVENT(rcu_quiescent_state_report, 356TRACE_EVENT(rcu_quiescent_state_report,
359 357
360 TP_PROTO(const char *rcuname, unsigned long gpnum, 358 TP_PROTO(const char *rcuname, unsigned long gp_seq,
361 unsigned long mask, unsigned long qsmask, 359 unsigned long mask, unsigned long qsmask,
362 u8 level, int grplo, int grphi, int gp_tasks), 360 u8 level, int grplo, int grphi, int gp_tasks),
363 361
364 TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks), 362 TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
365 363
366 TP_STRUCT__entry( 364 TP_STRUCT__entry(
367 __field(const char *, rcuname) 365 __field(const char *, rcuname)
368 __field(unsigned long, gpnum) 366 __field(unsigned long, gp_seq)
369 __field(unsigned long, mask) 367 __field(unsigned long, mask)
370 __field(unsigned long, qsmask) 368 __field(unsigned long, qsmask)
371 __field(u8, level) 369 __field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
376 374
377 TP_fast_assign( 375 TP_fast_assign(
378 __entry->rcuname = rcuname; 376 __entry->rcuname = rcuname;
379 __entry->gpnum = gpnum; 377 __entry->gp_seq = gp_seq;
380 __entry->mask = mask; 378 __entry->mask = mask;
381 __entry->qsmask = qsmask; 379 __entry->qsmask = qsmask;
382 __entry->level = level; 380 __entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
386 ), 384 ),
387 385
388 TP_printk("%s %lu %lx>%lx %u %d %d %u", 386 TP_printk("%s %lu %lx>%lx %u %d %d %u",
389 __entry->rcuname, __entry->gpnum, 387 __entry->rcuname, __entry->gp_seq,
390 __entry->mask, __entry->qsmask, __entry->level, 388 __entry->mask, __entry->qsmask, __entry->level,
391 __entry->grplo, __entry->grphi, __entry->gp_tasks) 389 __entry->grplo, __entry->grphi, __entry->gp_tasks)
392); 390);
393 391
394/* 392/*
395 * Tracepoint for quiescent states detected by force_quiescent_state(). 393 * Tracepoint for quiescent states detected by force_quiescent_state().
396 * These trace events include the type of RCU, the grace-period number that 394 * These trace events include the type of RCU, the grace-period number
397 * was blocked by the CPU, the CPU itself, and the type of quiescent state, 395 * that was blocked by the CPU, the CPU itself, and the type of quiescent
398 * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick" 396 * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
399 * when kicking a CPU that has been in dyntick-idle mode for too long, or 397 * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
400 * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr. 398 * CPU got a quiescent state via its rcu_qs_ctr.
401 */ 399 */
402TRACE_EVENT(rcu_fqs, 400TRACE_EVENT(rcu_fqs,
403 401
404 TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent), 402 TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
405 403
406 TP_ARGS(rcuname, gpnum, cpu, qsevent), 404 TP_ARGS(rcuname, gp_seq, cpu, qsevent),
407 405
408 TP_STRUCT__entry( 406 TP_STRUCT__entry(
409 __field(const char *, rcuname) 407 __field(const char *, rcuname)
410 __field(unsigned long, gpnum) 408 __field(unsigned long, gp_seq)
411 __field(int, cpu) 409 __field(int, cpu)
412 __field(const char *, qsevent) 410 __field(const char *, qsevent)
413 ), 411 ),
414 412
415 TP_fast_assign( 413 TP_fast_assign(
416 __entry->rcuname = rcuname; 414 __entry->rcuname = rcuname;
417 __entry->gpnum = gpnum; 415 __entry->gp_seq = gp_seq;
418 __entry->cpu = cpu; 416 __entry->cpu = cpu;
419 __entry->qsevent = qsevent; 417 __entry->qsevent = qsevent;
420 ), 418 ),
421 419
422 TP_printk("%s %lu %d %s", 420 TP_printk("%s %lu %d %s",
423 __entry->rcuname, __entry->gpnum, 421 __entry->rcuname, __entry->gp_seq,
424 __entry->cpu, __entry->qsevent) 422 __entry->cpu, __entry->qsevent)
425); 423);
426 424
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
753 751
754#else /* #ifdef CONFIG_RCU_TRACE */ 752#else /* #ifdef CONFIG_RCU_TRACE */
755 753
756#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0) 754#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
757#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \ 755#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
758 level, grplo, grphi, event) \ 756 level, grplo, grphi, event) \
759 do { } while (0) 757 do { } while (0)
760#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \ 758#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
761 qsmask) do { } while (0) 759 qsmask) do { } while (0)
762#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \ 760#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
763 do { } while (0) 761 do { } while (0)
764#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \ 762#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
765 do { } while (0) 763 do { } while (0)
766#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0) 764#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
767#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0) 765#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
768#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0) 766#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
769#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \ 767#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
770 grplo, grphi, gp_tasks) do { } \ 768 grplo, grphi, gp_tasks) do { } \
771 while (0) 769 while (0)
772#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0) 770#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
773#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0) 771#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
774#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0) 772#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
775#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \ 773#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 0b5ddbe135a4..972265f32871 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -76,7 +76,7 @@ struct btf_type {
  */
 #define BTF_INT_ENCODING(VAL)	(((VAL) & 0x0f000000) >> 24)
 #define BTF_INT_OFFSET(VAL)	(((VAL & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL)	((VAL) & 0x0000ffff)
+#define BTF_INT_BITS(VAL)	((VAL) & 0x000000ff)
 
 /* Attributes stored in the BTF_INT_ENCODING */
 #define BTF_INT_SIGNED	(1 << 0)
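
A quick standalone demonstration of the three BTF_INT_* accessors, including the BTF_INT_BITS() mask now narrowed from 16 bits to 8:

#include <stdint.h>
#include <stdio.h>

#define BTF_INT_ENCODING(VAL)	(((VAL) & 0x0f000000) >> 24)
#define BTF_INT_OFFSET(VAL)	(((VAL) & 0x00ff0000) >> 16)
#define BTF_INT_BITS(VAL)	((VAL) & 0x000000ff)

int main(void)
{
	/* encoding=1 (signed), offset=8, bits=32 packed into one u32 */
	uint32_t int_data = (1u << 24) | (8u << 16) | 32u;

	printf("encoding=%u offset=%u bits=%u\n",
	       (unsigned)BTF_INT_ENCODING(int_data),
	       (unsigned)BTF_INT_OFFSET(int_data),
	       (unsigned)BTF_INT_BITS(int_data));
	return 0;
}
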
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b8e288a1f740..eeb787b1c53c 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
 	PERF_SAMPLE_PHYS_ADDR			= 1U << 19,
 
 	PERF_SAMPLE_MAX = 1U << 20,		/* non-ABI */
+
+	__PERF_SAMPLE_CALLCHAIN_EARLY		= 1ULL << 63,
 };
 
 /*
diff --git a/init/main.c b/init/main.c
index 3b4ada11ed52..5e13c544bbf4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -561,8 +561,8 @@ asmlinkage __visible void __init start_kernel(void)
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
-	boot_cpu_state_init();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
+	boot_cpu_hotplug_init();
 
 	build_all_zonelists(NULL);
 	page_alloc_init();
diff --git a/ipc/sem.c b/ipc/sem.c
index 5af1943ad782..76e95e4f3aa2 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2118,7 +2118,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
 	}
 
 	do {
-		queue.status = -EINTR;
+		WRITE_ONCE(queue.status, -EINTR);
 		queue.sleeper = current;
 
 		__set_current_state(TASK_INTERRUPTIBLE);
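
WRITE_ONCE() makes the store a single marked access: a concurrent marked reader sees either the old or the new value, never a torn mix, and the compiler cannot split or fuse the store. A rough userspace analog of that marked-access shape using C11 relaxed atomics (run sequentially here purely to show the form):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int status;

static void writer(void)
{
	/* One indivisible store, analogous to WRITE_ONCE(). */
	atomic_store_explicit(&status, -4 /* stand-in for -EINTR */,
			      memory_order_relaxed);
}

static int reader(void)
{
	/* One indivisible load, analogous to READ_ONCE(). */
	return atomic_load_explicit(&status, memory_order_relaxed);
}

int main(void)
{
	writer();
	printf("status=%d\n", reader());
	return 0;
}
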
diff --git a/ipc/shm.c b/ipc/shm.c
index 051a3e1fb8df..fefa00d310fb 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -427,6 +427,17 @@ static int shm_split(struct vm_area_struct *vma, unsigned long addr)
 	return 0;
 }
 
+static unsigned long shm_pagesize(struct vm_area_struct *vma)
+{
+	struct file *file = vma->vm_file;
+	struct shm_file_data *sfd = shm_file_data(file);
+
+	if (sfd->vm_ops->pagesize)
+		return sfd->vm_ops->pagesize(vma);
+
+	return PAGE_SIZE;
+}
+
 #ifdef CONFIG_NUMA
 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
 {
@@ -554,6 +565,7 @@ static const struct vm_operations_struct shm_vm_ops = {
 	.close	= shm_close,	/* callback for when the vm-area is released */
 	.fault	= shm_fault,
 	.split	= shm_split,
+	.pagesize = shm_pagesize,
 #if defined(CONFIG_NUMA)
 	.set_policy = shm_set_policy,
 	.get_policy = shm_get_policy,
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index ceb1c4596c51..80d672a11088 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1279,8 +1279,12 @@ static void show_special(struct audit_context *context, int *call_panic)
 		break;
 	case AUDIT_KERN_MODULE:
 		audit_log_format(ab, "name=");
-		audit_log_untrustedstring(ab, context->module.name);
-		kfree(context->module.name);
+		if (context->module.name) {
+			audit_log_untrustedstring(ab, context->module.name);
+			kfree(context->module.name);
+		} else
+			audit_log_format(ab, "(null)");
+
 		break;
 	}
 	audit_log_end(ab);
@@ -2411,8 +2415,9 @@ void __audit_log_kern_module(char *name)
 {
 	struct audit_context *context = audit_context();
 
-	context->module.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
-	strcpy(context->module.name, name);
+	context->module.name = kstrdup(name, GFP_KERNEL);
+	if (!context->module.name)
+		audit_log_lost("out of memory in __audit_log_kern_module");
 	context->type = AUDIT_KERN_MODULE;
 }
 
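
The fix folds the kmalloc-plus-strcpy pair into a single allocating copy and turns a failed allocation into an explicit, logged NULL instead of a later crash. A userspace analog using POSIX strdup(); the function and message names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *log_module_name(const char *name)
{
	char *copy = strdup(name);	/* one call: allocate and copy */

	if (!copy)
		fprintf(stderr, "out of memory copying module name\n");
	return copy;			/* callers must tolerate NULL */
}

int main(void)
{
	char *n = log_module_name("snd_hda_intel");

	printf("name=%s\n", n ? n : "(null)");	/* same fallback the patch logs */
	free(n);
	return 0;
}
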
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 544e58f5f642..2aa55d030c77 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -378,7 +378,7 @@ static int array_map_check_btf(const struct bpf_map *map, const struct btf *btf,
 		return -EINVAL;
 
 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
-	if (!value_type || value_size > map->value_size)
+	if (!value_type || value_size != map->value_size)
 		return -EINVAL;
 
 	return 0;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index e016ac3afa24..2590700237c1 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -450,7 +450,7 @@ static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
450 */ 450 */
451static bool btf_type_int_is_regular(const struct btf_type *t) 451static bool btf_type_int_is_regular(const struct btf_type *t)
452{ 452{
453 u16 nr_bits, nr_bytes; 453 u8 nr_bits, nr_bytes;
454 u32 int_data; 454 u32 int_data;
455 455
456 int_data = btf_type_int(t); 456 int_data = btf_type_int(t);
@@ -993,12 +993,16 @@ static void btf_int_bits_seq_show(const struct btf *btf,
993{ 993{
994 u16 left_shift_bits, right_shift_bits; 994 u16 left_shift_bits, right_shift_bits;
995 u32 int_data = btf_type_int(t); 995 u32 int_data = btf_type_int(t);
996 u16 nr_bits = BTF_INT_BITS(int_data); 996 u8 nr_bits = BTF_INT_BITS(int_data);
997 u16 total_bits_offset; 997 u8 total_bits_offset;
998 u16 nr_copy_bytes; 998 u8 nr_copy_bytes;
999 u16 nr_copy_bits; 999 u8 nr_copy_bits;
1000 u64 print_num; 1000 u64 print_num;
1001 1001
1002 /*
1003 * bits_offset is at most 7.
1004 * BTF_INT_OFFSET() cannot exceed 64 bits.
1005 */
1002 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 1006 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1003 data += BITS_ROUNDDOWN_BYTES(total_bits_offset); 1007 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1004 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); 1008 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
@@ -1028,7 +1032,7 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1028 u32 int_data = btf_type_int(t); 1032 u32 int_data = btf_type_int(t);
1029 u8 encoding = BTF_INT_ENCODING(int_data); 1033 u8 encoding = BTF_INT_ENCODING(int_data);
1030 bool sign = encoding & BTF_INT_SIGNED; 1034 bool sign = encoding & BTF_INT_SIGNED;
1031 u32 nr_bits = BTF_INT_BITS(int_data); 1035 u8 nr_bits = BTF_INT_BITS(int_data);
1032 1036
1033 if (bits_offset || BTF_INT_OFFSET(int_data) || 1037 if (bits_offset || BTF_INT_OFFSET(int_data) ||
1034 BITS_PER_BYTE_MASKED(nr_bits)) { 1038 BITS_PER_BYTE_MASKED(nr_bits)) {
@@ -1515,9 +1519,9 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1515{ 1519{
1516 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; 1520 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1517 const struct btf_member *member; 1521 const struct btf_member *member;
1522 u32 meta_needed, last_offset;
1518 struct btf *btf = env->btf; 1523 struct btf *btf = env->btf;
1519 u32 struct_size = t->size; 1524 u32 struct_size = t->size;
1520 u32 meta_needed;
1521 u16 i; 1525 u16 i;
1522 1526
1523 meta_needed = btf_type_vlen(t) * sizeof(*member); 1527 meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1530,6 +1534,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1530 1534
1531 btf_verifier_log_type(env, t, NULL); 1535 btf_verifier_log_type(env, t, NULL);
1532 1536
1537 last_offset = 0;
1533 for_each_member(i, t, member) { 1538 for_each_member(i, t, member) {
1534 if (!btf_name_offset_valid(btf, member->name_off)) { 1539 if (!btf_name_offset_valid(btf, member->name_off)) {
1535 btf_verifier_log_member(env, t, member, 1540 btf_verifier_log_member(env, t, member,
@@ -1551,6 +1556,16 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1551 return -EINVAL; 1556 return -EINVAL;
1552 } 1557 }
1553 1558
1559 /*
1560 * ">" instead of ">=" because the last member could be
1561 * "char a[0];"
1562 */
1563 if (last_offset > member->offset) {
1564 btf_verifier_log_member(env, t, member,
1565 "Invalid member bits_offset");
1566 return -EINVAL;
1567 }
1568
1554 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) { 1569 if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
1555 btf_verifier_log_member(env, t, member, 1570 btf_verifier_log_member(env, t, member,
1556 "Memmber bits_offset exceeds its struct size"); 1571 "Memmber bits_offset exceeds its struct size");
@@ -1558,6 +1573,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1558 } 1573 }
1559 1574
1560 btf_verifier_log_member(env, t, member, NULL); 1575 btf_verifier_log_member(env, t, member, NULL);
1576 last_offset = member->offset;
1561 } 1577 }
1562 1578
1563 return meta_needed; 1579 return meta_needed;
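
The new last_offset check enforces that member bit offsets never move backwards, while the strict ">" keeps zero-sized trailing members legal. A runnable sketch of just the comparison:

    #include <stdio.h>

    /* '>' rather than '>=': a zero-sized member such as "char a[0];" may
     * legitimately share its predecessor's offset. */
    static int member_offsets_ok(const unsigned int *off, int n)
    {
        unsigned int last_offset = 0;

        for (int i = 0; i < n; i++) {
            if (last_offset > off[i])
                return 0;          /* "Invalid member bits_offset" */
            last_offset = off[i];
        }
        return 1;
    }

    int main(void)
    {
        unsigned int ok[]  = { 0, 0, 64 }; /* equal offsets: zero-sized member */
        unsigned int bad[] = { 64, 0 };    /* offsets going backwards */

        printf("%d %d\n", member_offsets_ok(ok, 3), member_offsets_ok(bad, 2));
        return 0;
    }
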
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0918d180f08..46f5f29605d4 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
69}; 69};
70 70
71static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, 71static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
72 struct xdp_bulk_queue *bq); 72 struct xdp_bulk_queue *bq, bool in_napi_ctx);
73 73
74static u64 cpu_map_bitmap_size(const union bpf_attr *attr) 74static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
75{ 75{
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
375 struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu); 375 struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
376 376
377 /* No concurrent bq_enqueue can run at this point */ 377 /* No concurrent bq_enqueue can run at this point */
378 bq_flush_to_queue(rcpu, bq); 378 bq_flush_to_queue(rcpu, bq, false);
379 } 379 }
380 free_percpu(rcpu->bulkq); 380 free_percpu(rcpu->bulkq);
381 /* Cannot kthread_stop() here, last put free rcpu resources */ 381 /* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
558}; 558};
559 559
560static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu, 560static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
561 struct xdp_bulk_queue *bq) 561 struct xdp_bulk_queue *bq, bool in_napi_ctx)
562{ 562{
563 unsigned int processed = 0, drops = 0; 563 unsigned int processed = 0, drops = 0;
564 const int to_cpu = rcpu->cpu; 564 const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
578 err = __ptr_ring_produce(q, xdpf); 578 err = __ptr_ring_produce(q, xdpf);
579 if (err) { 579 if (err) {
580 drops++; 580 drops++;
581 xdp_return_frame_rx_napi(xdpf); 581 if (likely(in_napi_ctx))
582 xdp_return_frame_rx_napi(xdpf);
583 else
584 xdp_return_frame(xdpf);
582 } 585 }
583 processed++; 586 processed++;
584 } 587 }
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
598 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); 601 struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
599 602
600 if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) 603 if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
601 bq_flush_to_queue(rcpu, bq); 604 bq_flush_to_queue(rcpu, bq, true);
602 605
603 /* Notice, xdp_buff/page MUST be queued here, long enough for 606 /* Notice, xdp_buff/page MUST be queued here, long enough for
 604 * driver code invoking us to finish, due to driver 607 * driver code invoking us to finish, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
661 664
662 /* Flush all frames in bulkq to real queue */ 665 /* Flush all frames in bulkq to real queue */
663 bq = this_cpu_ptr(rcpu->bulkq); 666 bq = this_cpu_ptr(rcpu->bulkq);
664 bq_flush_to_queue(rcpu, bq); 667 bq_flush_to_queue(rcpu, bq, true);
665 668
 666 /* If already running, costs spin_lock_irqsave + smp_mb */ 669 /* If already running, costs spin_lock_irqsave + smp_mb */
667 wake_up_process(rcpu->kthread); 670 wake_up_process(rcpu->kthread);
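
The new in_napi_ctx flag lets the flush path pick the correct frame-return helper for its calling context; the devmap hunks below apply the same pattern. A compilable sketch, with empty stubs standing in for the real XDP helpers:

    /* Stubs standing in for the real XDP APIs, for illustration only. */
    struct xdp_frame { int dummy; };
    static void xdp_return_frame(struct xdp_frame *f)         { (void)f; } /* always safe */
    static void xdp_return_frame_rx_napi(struct xdp_frame *f) { (void)f; } /* NAPI-only fast path */

    static void drop_frame(struct xdp_frame *f, int in_napi_ctx)
    {
        if (in_napi_ctx)
            xdp_return_frame_rx_napi(f); /* flush from the driver's RX/NAPI path */
        else
            xdp_return_frame(f);         /* e.g. teardown from process context */
    }

    int main(void)
    {
        struct xdp_frame f = { 0 };
        drop_frame(&f, 0); /* the __cpu_map_entry_free() case above */
        return 0;
    }
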
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1e3bf3..750d45edae79 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
217} 217}
218 218
219static int bq_xmit_all(struct bpf_dtab_netdev *obj, 219static int bq_xmit_all(struct bpf_dtab_netdev *obj,
220 struct xdp_bulk_queue *bq, u32 flags) 220 struct xdp_bulk_queue *bq, u32 flags,
221 bool in_napi_ctx)
221{ 222{
222 struct net_device *dev = obj->dev; 223 struct net_device *dev = obj->dev;
223 int sent = 0, drops = 0, err = 0; 224 int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ error:
254 struct xdp_frame *xdpf = bq->q[i]; 255 struct xdp_frame *xdpf = bq->q[i];
255 256
256 /* RX path under NAPI protection, can return frames faster */ 257 /* RX path under NAPI protection, can return frames faster */
257 xdp_return_frame_rx_napi(xdpf); 258 if (likely(in_napi_ctx))
259 xdp_return_frame_rx_napi(xdpf);
260 else
261 xdp_return_frame(xdpf);
258 drops++; 262 drops++;
259 } 263 }
260 goto out; 264 goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
286 __clear_bit(bit, bitmap); 290 __clear_bit(bit, bitmap);
287 291
288 bq = this_cpu_ptr(dev->bulkq); 292 bq = this_cpu_ptr(dev->bulkq);
289 bq_xmit_all(dev, bq, XDP_XMIT_FLUSH); 293 bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
290 } 294 }
291} 295}
292 296
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
316 struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq); 320 struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
317 321
318 if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) 322 if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
319 bq_xmit_all(obj, bq, 0); 323 bq_xmit_all(obj, bq, 0, true);
320 324
321 /* Ingress dev_rx will be the same for all xdp_frame's in 325 /* Ingress dev_rx will be the same for all xdp_frame's in
322 * bulk_queue, because bq stored per-CPU and must be flushed 326 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
385 __clear_bit(dev->bit, bitmap); 389 __clear_bit(dev->bit, bitmap);
386 390
387 bq = per_cpu_ptr(dev->bulkq, cpu); 391 bq = per_cpu_ptr(dev->bulkq, cpu);
388 bq_xmit_all(dev, bq, XDP_XMIT_FLUSH); 392 bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
389 } 393 }
390 } 394 }
391} 395}
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 98fb7938beea..c4d75c52b4fc 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1048 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1048 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1049 1049
1050 while (msg_data_left(msg)) { 1050 while (msg_data_left(msg)) {
1051 struct sk_msg_buff *m; 1051 struct sk_msg_buff *m = NULL;
1052 bool enospc = false; 1052 bool enospc = false;
1053 int copy; 1053 int copy;
1054 1054
1055 if (sk->sk_err) { 1055 if (sk->sk_err) {
1056 err = sk->sk_err; 1056 err = -sk->sk_err;
1057 goto out_err; 1057 goto out_err;
1058 } 1058 }
1059 1059
@@ -1116,8 +1116,11 @@ wait_for_sndbuf:
1116 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1116 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1117wait_for_memory: 1117wait_for_memory:
1118 err = sk_stream_wait_memory(sk, &timeo); 1118 err = sk_stream_wait_memory(sk, &timeo);
1119 if (err) 1119 if (err) {
1120 if (m && m != psock->cork)
1121 free_start_sg(sk, m);
1120 goto out_err; 1122 goto out_err;
1123 }
1121 } 1124 }
1122out_err: 1125out_err:
1123 if (err < 0) 1126 if (err < 0)
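
Two fixes land in this sockmap hunk set. First, sk->sk_err stores a positive errno while sendmsg callers expect a negative return, hence err = -sk->sk_err. Second, a failed sk_stream_wait_memory() now frees the scatterlist unless it belongs to the cork buffer. The sign convention in miniature:

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        int sk_err = EPIPE;  /* stored positive, as in sk->sk_err */
        int err = -sk_err;   /* what bpf_tcp_sendmsg() now returns */

        printf("%d\n", err); /* -32: a proper negative errno */
        return 0;
    }
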
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 191097c45fb1..15be70aae8ac 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2015,7 +2015,7 @@ void __init boot_cpu_init(void)
2015/* 2015/*
2016 * Must be called _AFTER_ setting up the per_cpu areas 2016 * Must be called _AFTER_ setting up the per_cpu areas
2017 */ 2017 */
2018void __init boot_cpu_state_init(void) 2018void __init boot_cpu_hotplug_init(void)
2019{ 2019{
2020 per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE; 2020 per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
2021} 2021}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8f0434a9951a..eec2d5fb676b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6343,7 +6343,7 @@ static u64 perf_virt_to_phys(u64 virt)
6343 6343
6344static struct perf_callchain_entry __empty_callchain = { .nr = 0, }; 6344static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
6345 6345
6346static struct perf_callchain_entry * 6346struct perf_callchain_entry *
6347perf_callchain(struct perf_event *event, struct pt_regs *regs) 6347perf_callchain(struct perf_event *event, struct pt_regs *regs)
6348{ 6348{
6349 bool kernel = !event->attr.exclude_callchain_kernel; 6349 bool kernel = !event->attr.exclude_callchain_kernel;
@@ -6382,7 +6382,9 @@ void perf_prepare_sample(struct perf_event_header *header,
6382 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 6382 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
6383 int size = 1; 6383 int size = 1;
6384 6384
6385 data->callchain = perf_callchain(event, regs); 6385 if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
6386 data->callchain = perf_callchain(event, regs);
6387
6386 size += data->callchain->nr; 6388 size += data->callchain->nr;
6387 6389
6388 header->size += size * sizeof(u64); 6390 header->size += size * sizeof(u64);
@@ -7335,6 +7337,10 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
7335 struct file *file, unsigned long offset, 7337 struct file *file, unsigned long offset,
7336 unsigned long size) 7338 unsigned long size)
7337{ 7339{
7340 /* d_inode(NULL) won't be equal to any mapped user-space file */
7341 if (!filter->path.dentry)
7342 return false;
7343
7338 if (d_inode(filter->path.dentry) != file_inode(file)) 7344 if (d_inode(filter->path.dentry) != file_inode(file))
7339 return false; 7345 return false;
7340 7346
diff --git a/kernel/fork.c b/kernel/fork.c
index a191c05e757d..1b27babc4c78 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -312,10 +312,8 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
312{ 312{
313 struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); 313 struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
314 314
315 if (vma) { 315 if (vma)
316 vma->vm_mm = mm; 316 vma_init(vma, mm);
317 INIT_LIST_HEAD(&vma->anon_vma_chain);
318 }
319 return vma; 317 return vma;
320} 318}
321 319
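
The two hand-rolled initializations collapse into vma_init(). A sketch of what the helper presumably does, inferred only from the lines it replaces here; the authoritative definition lives in include/linux/mm.h and may initialize more fields:

    /* Hypothetical shape of vma_init(), inferred from this call site. */
    static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
    {
        vma->vm_mm = mm;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
    }
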
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index c6766f326072..5f3e2baefca9 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -134,7 +134,6 @@ config GENERIC_IRQ_DEBUGFS
134endmenu 134endmenu
135 135
136config GENERIC_IRQ_MULTI_HANDLER 136config GENERIC_IRQ_MULTI_HANDLER
137 depends on !MULTI_IRQ_HANDLER
138 bool 137 bool
139 help 138 help
 140 Allow specifying the low level IRQ handler at run time. 139 Allow specifying the low level IRQ handler at run time.
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index afc7f902d74a..578d0e5f1b5b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -443,6 +443,7 @@ static void free_desc(unsigned int irq)
443 * We free the descriptor, masks and stat fields via RCU. That 443 * We free the descriptor, masks and stat fields via RCU. That
444 * allows demultiplex interrupts to do rcu based management of 444 * allows demultiplex interrupts to do rcu based management of
445 * the child interrupts. 445 * the child interrupts.
446 * This also allows us to use rcu in kstat_irqs_usr().
446 */ 447 */
447 call_rcu(&desc->rcu, delayed_free_desc); 448 call_rcu(&desc->rcu, delayed_free_desc);
448} 449}
@@ -928,17 +929,17 @@ unsigned int kstat_irqs(unsigned int irq)
928 * kstat_irqs_usr - Get the statistics for an interrupt 929 * kstat_irqs_usr - Get the statistics for an interrupt
929 * @irq: The interrupt number 930 * @irq: The interrupt number
930 * 931 *
931 * Returns the sum of interrupt counts on all cpus since boot for 932 * Returns the sum of interrupt counts on all cpus since boot for @irq.
932 * @irq. Contrary to kstat_irqs() this can be called from any 933 * Contrary to kstat_irqs() this can be called from any context.
933 * preemptible context. It's protected against concurrent removal of 934 * It uses rcu since a concurrent removal of an interrupt descriptor is
934 * an interrupt descriptor when sparse irqs are enabled. 935 * observing an rcu grace period before delayed_free_desc()/irq_kobj_release().
935 */ 936 */
936unsigned int kstat_irqs_usr(unsigned int irq) 937unsigned int kstat_irqs_usr(unsigned int irq)
937{ 938{
938 unsigned int sum; 939 unsigned int sum;
939 940
940 irq_lock_sparse(); 941 rcu_read_lock();
941 sum = kstat_irqs(irq); 942 sum = kstat_irqs(irq);
942 irq_unlock_sparse(); 943 rcu_read_unlock();
943 return sum; 944 return sum;
944} 945}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index daeabd791d58..fb86146037a7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
790 790
791static int irq_wait_for_interrupt(struct irqaction *action) 791static int irq_wait_for_interrupt(struct irqaction *action)
792{ 792{
793 set_current_state(TASK_INTERRUPTIBLE); 793 for (;;) {
794 set_current_state(TASK_INTERRUPTIBLE);
794 795
795 while (!kthread_should_stop()) { 796 if (kthread_should_stop()) {
797 /* may need to run one last time */
798 if (test_and_clear_bit(IRQTF_RUNTHREAD,
799 &action->thread_flags)) {
800 __set_current_state(TASK_RUNNING);
801 return 0;
802 }
803 __set_current_state(TASK_RUNNING);
804 return -1;
805 }
796 806
797 if (test_and_clear_bit(IRQTF_RUNTHREAD, 807 if (test_and_clear_bit(IRQTF_RUNTHREAD,
798 &action->thread_flags)) { 808 &action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
800 return 0; 810 return 0;
801 } 811 }
802 schedule(); 812 schedule();
803 set_current_state(TASK_INTERRUPTIBLE);
804 } 813 }
805 __set_current_state(TASK_RUNNING);
806 return -1;
807} 814}
808 815
809/* 816/*
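
The restructured loop closes a race where kthread_should_stop() became true while IRQTF_RUNTHREAD was still set, so the handler must run one last time before exiting. The generic shape of the race-free wait loop, with should_wake() as a hypothetical condition:

    /* Publish the sleeping state *before* testing the wake condition, so a
     * wakeup arriving between the test and schedule() is never lost. */
    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (should_wake()) {
            __set_current_state(TASK_RUNNING);
            break;
        }
        schedule();
    }
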
@@ -1024,11 +1031,8 @@ static int irq_thread(void *data)
1024 /* 1031 /*
1025 * This is the regular exit path. __free_irq() is stopping the 1032 * This is the regular exit path. __free_irq() is stopping the
1026 * thread via kthread_stop() after calling 1033 * thread via kthread_stop() after calling
1027 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the 1034 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1028 * oneshot mask bit can be set. We cannot verify that as we 1035 * oneshot mask bit can be set.
1029 * cannot touch the oneshot mask at this point anymore as
1030 * __setup_irq() might have given out currents thread_mask
1031 * again.
1032 */ 1036 */
1033 task_work_cancel(current, irq_thread_dtor); 1037 task_work_cancel(current, irq_thread_dtor);
1034 return 0; 1038 return 0;
@@ -1068,6 +1072,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
1068 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) 1072 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1069 return 0; 1073 return 0;
1070 1074
1075 /*
1076 * No further action required for interrupts which are requested as
1077 * threaded interrupts already
1078 */
1079 if (new->handler == irq_default_primary_handler)
1080 return 0;
1081
1071 new->flags |= IRQF_ONESHOT; 1082 new->flags |= IRQF_ONESHOT;
1072 1083
1073 /* 1084 /*
@@ -1075,7 +1086,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
1075 * thread handler. We force thread them as well by creating a 1086 * thread handler. We force thread them as well by creating a
1076 * secondary action. 1087 * secondary action.
1077 */ 1088 */
1078 if (new->handler != irq_default_primary_handler && new->thread_fn) { 1089 if (new->handler && new->thread_fn) {
1079 /* Allocate the secondary action */ 1090 /* Allocate the secondary action */
1080 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 1091 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1081 if (!new->secondary) 1092 if (!new->secondary)
@@ -1244,8 +1255,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1244 1255
1245 /* 1256 /*
1246 * Protects against a concurrent __free_irq() call which might wait 1257 * Protects against a concurrent __free_irq() call which might wait
1247 * for synchronize_irq() to complete without holding the optional 1258 * for synchronize_hardirq() to complete without holding the optional
1248 * chip bus lock and desc->lock. 1259 * chip bus lock and desc->lock. Also protects against handing out
1260 * a recycled oneshot thread_mask bit while it's still in use by
1261 * its previous owner.
1249 */ 1262 */
1250 mutex_lock(&desc->request_mutex); 1263 mutex_lock(&desc->request_mutex);
1251 1264
@@ -1564,9 +1577,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1564 1577
1565 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 1578 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1566 1579
1567 if (!desc)
1568 return NULL;
1569
1570 mutex_lock(&desc->request_mutex); 1580 mutex_lock(&desc->request_mutex);
1571 chip_bus_lock(desc); 1581 chip_bus_lock(desc);
1572 raw_spin_lock_irqsave(&desc->lock, flags); 1582 raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1613,11 +1623,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1613 /* 1623 /*
1614 * Drop bus_lock here so the changes which were done in the chip 1624 * Drop bus_lock here so the changes which were done in the chip
1615 * callbacks above are synced out to the irq chips which hang 1625 * callbacks above are synced out to the irq chips which hang
1616 * behind a slow bus (I2C, SPI) before calling synchronize_irq(). 1626 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1617 * 1627 *
1618 * Aside of that the bus_lock can also be taken from the threaded 1628 * Aside of that the bus_lock can also be taken from the threaded
1619 * handler in irq_finalize_oneshot() which results in a deadlock 1629 * handler in irq_finalize_oneshot() which results in a deadlock
1620 * because synchronize_irq() would wait forever for the thread to 1630 * because kthread_stop() would wait forever for the thread to
1621 * complete, which is blocked on the bus lock. 1631 * complete, which is blocked on the bus lock.
1622 * 1632 *
1623 * The still held desc->request_mutex() protects against a 1633 * The still held desc->request_mutex() protects against a
@@ -1629,7 +1639,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1629 unregister_handler_proc(irq, action); 1639 unregister_handler_proc(irq, action);
1630 1640
1631 /* Make sure it's not being used on another CPU: */ 1641 /* Make sure it's not being used on another CPU: */
1632 synchronize_irq(irq); 1642 synchronize_hardirq(irq);
1633 1643
1634#ifdef CONFIG_DEBUG_SHIRQ 1644#ifdef CONFIG_DEBUG_SHIRQ
1635 /* 1645 /*
@@ -1638,7 +1648,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1638 * is so by doing an extra call to the handler .... 1648 * is so by doing an extra call to the handler ....
1639 * 1649 *
1640 * ( We do this after actually deregistering it, to make sure that a 1650 * ( We do this after actually deregistering it, to make sure that a
1641 * 'real' IRQ doesn't run in * parallel with our fake. ) 1651 * 'real' IRQ doesn't run in parallel with our fake. )
1642 */ 1652 */
1643 if (action->flags & IRQF_SHARED) { 1653 if (action->flags & IRQF_SHARED) {
1644 local_irq_save(flags); 1654 local_irq_save(flags);
@@ -1647,6 +1657,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1647 } 1657 }
1648#endif 1658#endif
1649 1659
1660 /*
1661 * The action has already been removed above, but the thread writes
 1662 * its oneshot mask bit when it completes. However, request_mutex is
 1663 * held across this, which prevents __setup_irq() from handing out
1664 * the same bit to a newly requested action.
1665 */
1650 if (action->thread) { 1666 if (action->thread) {
1651 kthread_stop(action->thread); 1667 kthread_stop(action->thread);
1652 put_task_struct(action->thread); 1668 put_task_struct(action->thread);
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 37eda10f5c36..da9addb8d655 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -475,22 +475,24 @@ int show_interrupts(struct seq_file *p, void *v)
475 seq_putc(p, '\n'); 475 seq_putc(p, '\n');
476 } 476 }
477 477
478 irq_lock_sparse(); 478 rcu_read_lock();
479 desc = irq_to_desc(i); 479 desc = irq_to_desc(i);
480 if (!desc) 480 if (!desc)
481 goto outsparse; 481 goto outsparse;
482 482
483 raw_spin_lock_irqsave(&desc->lock, flags); 483 if (desc->kstat_irqs)
484 for_each_online_cpu(j) 484 for_each_online_cpu(j)
485 any_count |= kstat_irqs_cpu(i, j); 485 any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
486 action = desc->action; 486
487 if ((!action || irq_desc_is_chained(desc)) && !any_count) 487 if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
488 goto out; 488 goto outsparse;
489 489
490 seq_printf(p, "%*d: ", prec, i); 490 seq_printf(p, "%*d: ", prec, i);
491 for_each_online_cpu(j) 491 for_each_online_cpu(j)
492 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 492 seq_printf(p, "%10u ", desc->kstat_irqs ?
493 *per_cpu_ptr(desc->kstat_irqs, j) : 0);
493 494
495 raw_spin_lock_irqsave(&desc->lock, flags);
494 if (desc->irq_data.chip) { 496 if (desc->irq_data.chip) {
495 if (desc->irq_data.chip->irq_print_chip) 497 if (desc->irq_data.chip->irq_print_chip)
496 desc->irq_data.chip->irq_print_chip(&desc->irq_data, p); 498 desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -511,6 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
511 if (desc->name) 513 if (desc->name)
512 seq_printf(p, "-%-8s", desc->name); 514 seq_printf(p, "-%-8s", desc->name);
513 515
516 action = desc->action;
514 if (action) { 517 if (action) {
515 seq_printf(p, " %s", action->name); 518 seq_printf(p, " %s", action->name);
516 while ((action = action->next) != NULL) 519 while ((action = action->next) != NULL)
@@ -518,10 +521,9 @@ int show_interrupts(struct seq_file *p, void *v)
518 } 521 }
519 522
520 seq_putc(p, '\n'); 523 seq_putc(p, '\n');
521out:
522 raw_spin_unlock_irqrestore(&desc->lock, flags); 524 raw_spin_unlock_irqrestore(&desc->lock, flags);
523outsparse: 525outsparse:
524 irq_unlock_sparse(); 526 rcu_read_unlock();
525 return 0; 527 return 0;
526} 528}
527#endif 529#endif
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 11b591ee51ab..087d18d771b5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -325,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
325 task = create->result; 325 task = create->result;
326 if (!IS_ERR(task)) { 326 if (!IS_ERR(task)) {
327 static const struct sched_param param = { .sched_priority = 0 }; 327 static const struct sched_param param = { .sched_priority = 0 };
328 char name[TASK_COMM_LEN];
328 329
329 vsnprintf(task->comm, sizeof(task->comm), namefmt, args); 330 /*
331 * task is already visible to other tasks, so updating
332 * COMM must be protected.
333 */
334 vsnprintf(name, sizeof(name), namefmt, args);
335 set_task_comm(task, name);
330 /* 336 /*
331 * root may have changed our (kthreadd's) priority or CPU mask. 337 * root may have changed our (kthreadd's) priority or CPU mask.
332 * The kernel thread should not inherit these properties. 338 * The kernel thread should not inherit these properties.
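
Formatting directly into task->comm raced with concurrent readers once the task became visible. The fix builds the name in a private buffer and publishes it through set_task_comm(), which in the real kernel serializes the update against ->comm readers:

    /* The pattern in miniature: private scratch first, one protected publish. */
    char name[TASK_COMM_LEN];

    vsnprintf(name, sizeof(name), namefmt, args); /* no reader can see this yet */
    set_task_comm(task, name);                    /* serialized single publish  */
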
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8402b3349dca..57bef4fbfb31 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -21,6 +21,9 @@
21 * Davidlohr Bueso <dave@stgolabs.net> 21 * Davidlohr Bueso <dave@stgolabs.net>
22 * Based on kernel/rcu/torture.c. 22 * Based on kernel/rcu/torture.c.
23 */ 23 */
24
25#define pr_fmt(fmt) fmt
26
24#include <linux/kernel.h> 27#include <linux/kernel.h>
25#include <linux/module.h> 28#include <linux/module.h>
26#include <linux/kthread.h> 29#include <linux/kthread.h>
@@ -57,7 +60,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
57torture_param(int, stat_interval, 60, 60torture_param(int, stat_interval, 60,
58 "Number of seconds between stats printk()s"); 61 "Number of seconds between stats printk()s");
59torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); 62torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
60torture_param(bool, verbose, true, 63torture_param(int, verbose, 1,
61 "Enable verbose debugging printk()s"); 64 "Enable verbose debugging printk()s");
62 65
63static char *torture_type = "spin_lock"; 66static char *torture_type = "spin_lock";
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 4f014be7a4b8..2823d4163a37 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1465,6 +1465,29 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
1465 rt_mutex_postunlock(&wake_q); 1465 rt_mutex_postunlock(&wake_q);
1466} 1466}
1467 1467
1468static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
1469{
1470 might_sleep();
1471
1472 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
1473 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
1474}
1475
1476#ifdef CONFIG_DEBUG_LOCK_ALLOC
1477/**
1478 * rt_mutex_lock_nested - lock a rt_mutex
1479 *
1480 * @lock: the rt_mutex to be locked
1481 * @subclass: the lockdep subclass
1482 */
1483void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
1484{
1485 __rt_mutex_lock(lock, subclass);
1486}
1487EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
1488#endif
1489
1490#ifndef CONFIG_DEBUG_LOCK_ALLOC
1468/** 1491/**
1469 * rt_mutex_lock - lock a rt_mutex 1492 * rt_mutex_lock - lock a rt_mutex
1470 * 1493 *
@@ -1472,12 +1495,10 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
1472 */ 1495 */
1473void __sched rt_mutex_lock(struct rt_mutex *lock) 1496void __sched rt_mutex_lock(struct rt_mutex *lock)
1474{ 1497{
1475 might_sleep(); 1498 __rt_mutex_lock(lock, 0);
1476
1477 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
1478 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
1479} 1499}
1480EXPORT_SYMBOL_GPL(rt_mutex_lock); 1500EXPORT_SYMBOL_GPL(rt_mutex_lock);
1501#endif
1481 1502
1482/** 1503/**
1483 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible 1504 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
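
rt_mutex_lock() and the new rt_mutex_lock_nested() now share __rt_mutex_lock(), with the subclass argument meaningful only under lockdep. A hedged usage sketch, assuming the usual DEFINE_RT_MUTEX() initializer:

    static DEFINE_RT_MUTEX(outer); /* assumed initializer macro */
    static DEFINE_RT_MUTEX(inner);

    /* With CONFIG_DEBUG_LOCK_ALLOC, nesting two locks of the same class
     * needs distinct lockdep subclasses to avoid a false positive. */
    rt_mutex_lock(&outer);                              /* subclass 0 */
    rt_mutex_lock_nested(&inner, SINGLE_DEPTH_NESTING); /* subclass 1 */
    /* ... critical section ... */
    rt_mutex_unlock(&inner);
    rt_mutex_unlock(&outer);
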
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5857267a4af5..38283363da06 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
176 unsigned long pfn, pgoff, order; 176 unsigned long pfn, pgoff, order;
177 pgprot_t pgprot = PAGE_KERNEL; 177 pgprot_t pgprot = PAGE_KERNEL;
178 int error, nid, is_ram; 178 int error, nid, is_ram;
179 struct dev_pagemap *conflict_pgmap;
179 180
180 align_start = res->start & ~(SECTION_SIZE - 1); 181 align_start = res->start & ~(SECTION_SIZE - 1);
181 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) 182 align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
182 - align_start; 183 - align_start;
184 align_end = align_start + align_size - 1;
185
186 conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
187 if (conflict_pgmap) {
188 dev_WARN(dev, "Conflicting mapping in same section\n");
189 put_dev_pagemap(conflict_pgmap);
190 return ERR_PTR(-ENOMEM);
191 }
192
193 conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
194 if (conflict_pgmap) {
195 dev_WARN(dev, "Conflicting mapping in same section\n");
196 put_dev_pagemap(conflict_pgmap);
197 return ERR_PTR(-ENOMEM);
198 }
199
183 is_ram = region_intersects(align_start, align_size, 200 is_ram = region_intersects(align_start, align_size,
184 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE); 201 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
185 202
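
Probing both section-aligned endpoints catches a pre-existing dev_pagemap that overlaps either end of the new range before any state is committed. The alignment arithmetic, runnable with an assumed 128 MiB section size and hypothetical resource values:

    #include <stdio.h>

    #define SECTION_SIZE (128UL << 20) /* assumed; arch-dependent in reality */

    int main(void)
    {
        unsigned long start = 0x104000000UL, size = 0x2000000UL; /* hypothetical */
        unsigned long align_start = start & ~(SECTION_SIZE - 1);
        unsigned long align_size  = ((start + size + SECTION_SIZE - 1)
                                     & ~(SECTION_SIZE - 1)) - align_start;
        unsigned long align_end   = align_start + align_size - 1;

        /* These are the two PFN probes the patch adds via get_dev_pagemap(). */
        printf("probe ends: %#lx and %#lx\n", align_start, align_end);
        return 0;
    }
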
@@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
199 216
200 mutex_lock(&pgmap_lock); 217 mutex_lock(&pgmap_lock);
201 error = 0; 218 error = 0;
202 align_end = align_start + align_size - 1;
203 219
204 foreach_order_pgoff(res, order, pgoff) { 220 foreach_order_pgoff(res, order, pgoff) {
205 error = __radix_tree_insert(&pgmap_radix, 221 error = __radix_tree_insert(&pgmap_radix,
@@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
305 321
306#ifdef CONFIG_DEV_PAGEMAP_OPS 322#ifdef CONFIG_DEV_PAGEMAP_OPS
307DEFINE_STATIC_KEY_FALSE(devmap_managed_key); 323DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
308EXPORT_SYMBOL_GPL(devmap_managed_key); 324EXPORT_SYMBOL(devmap_managed_key);
309static atomic_t devmap_enable; 325static atomic_t devmap_enable;
310 326
311/* 327/*
@@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page)
346 } else if (!count) 362 } else if (!count)
347 __put_page(page); 363 __put_page(page);
348} 364}
349EXPORT_SYMBOL_GPL(__put_devmap_managed_page); 365EXPORT_SYMBOL(__put_devmap_managed_page);
350#endif /* CONFIG_DEV_PAGEMAP_OPS */ 366#endif /* CONFIG_DEV_PAGEMAP_OPS */
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 40cea6735c2d..4d04683c31b2 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -91,7 +91,17 @@ static inline void rcu_seq_end(unsigned long *sp)
91 WRITE_ONCE(*sp, rcu_seq_endval(sp)); 91 WRITE_ONCE(*sp, rcu_seq_endval(sp));
92} 92}
93 93
94/* Take a snapshot of the update side's sequence number. */ 94/*
95 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
96 *
97 * This function returns the earliest value of the grace-period sequence number
98 * that will indicate that a full grace period has elapsed since the current
99 * time. Once the grace-period sequence number has reached this value, it will
100 * be safe to invoke all callbacks that have been registered prior to the
101 * current time. This value is the current grace-period number plus two to the
102 * power of the number of low-order bits reserved for state, then rounded up to
103 * the next value in which the state bits are all zero.
104 */
95static inline unsigned long rcu_seq_snap(unsigned long *sp) 105static inline unsigned long rcu_seq_snap(unsigned long *sp)
96{ 106{
97 unsigned long s; 107 unsigned long s;
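
The expanded rcu_seq_snap() comment is easiest to see with numbers. A sketch assuming the kernel's two low-order state bits (RCU_SEQ_CTR_SHIFT == 2) and assuming the snap arithmetic matches the description above:

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

    /* Assumed to mirror the kernel's rcu_seq_snap() computation. */
    static unsigned long rcu_seq_snap_sketch(unsigned long sp)
    {
        return (sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
    }

    int main(void)
    {
        printf("%lu\n", rcu_seq_snap_sketch(8)); /* idle: 12, end of the next GP */
        printf("%lu\n", rcu_seq_snap_sketch(9)); /* GP running: 16, one GP later */
        return 0;
    }
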
@@ -108,6 +118,15 @@ static inline unsigned long rcu_seq_current(unsigned long *sp)
108} 118}
109 119
110/* 120/*
121 * Given a snapshot from rcu_seq_snap(), determine whether or not the
122 * corresponding update-side operation has started.
123 */
124static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
125{
126 return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
127}
128
129/*
111 * Given a snapshot from rcu_seq_snap(), determine whether or not a 130 * Given a snapshot from rcu_seq_snap(), determine whether or not a
112 * full update-side operation has occurred. 131 * full update-side operation has occurred.
113 */ 132 */
@@ -117,6 +136,45 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
117} 136}
118 137
119/* 138/*
139 * Has a grace period completed since the time the old gp_seq was collected?
140 */
141static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
142{
143 return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
144}
145
146/*
147 * Has a grace period started since the time the old gp_seq was collected?
148 */
149static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
150{
151 return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
152 new);
153}
154
155/*
156 * Roughly how many full grace periods have elapsed between the collection
157 * of the two specified grace periods?
158 */
159static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
160{
161 unsigned long rnd_diff;
162
163 if (old == new)
164 return 0;
165 /*
166 * Compute the number of grace periods (still shifted up), plus
167 * one if either of new and old is not an exact grace period.
168 */
169 rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
170 ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
171 ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
172 if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
173 return 1; /* Definitely no grace period has elapsed. */
174 return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
175}
176
177/*
120 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally 178 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
121 * by call_rcu() and rcu callback execution, and are therefore not part of the 179 * by call_rcu() and rcu callback execution, and are therefore not part of the
122 * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors. 180 * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
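
rcu_seq_diff() above is self-contained enough to run in user space with concrete values, again assuming two state bits. Note the deliberate over-estimate, per the "Roughly" in its header comment:

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT  2
    #define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

    /* Transcription of rcu_seq_diff() above; plain >= replaces ULONG_CMP_GE,
     * which differs only under counter wraparound. */
    static unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
    {
        unsigned long rnd_diff;

        if (old == new)
            return 0;
        rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
                   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
                   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
        if (RCU_SEQ_STATE_MASK >= rnd_diff)
            return 1; /* definitely no full grace period */
        return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
    }

    int main(void)
    {
        printf("%lu\n", rcu_seq_diff(4, 4));  /* 0: identical snapshots        */
        printf("%lu\n", rcu_seq_diff(5, 4));  /* 1: no full GP elapsed         */
        printf("%lu\n", rcu_seq_diff(12, 0)); /* 4: three exact GPs, rounded up */
        return 0;
    }
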
@@ -276,6 +334,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
276/* Is this rcu_node a leaf? */ 334/* Is this rcu_node a leaf? */
277#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1) 335#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
278 336
337/* Is this rcu_node the last leaf? */
338#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
339
279/* 340/*
280 * Do a full breadth-first scan of the rcu_node structures for the 341 * Do a full breadth-first scan of the rcu_node structures for the
281 * specified rcu_state structure. 342 * specified rcu_state structure.
@@ -405,8 +466,7 @@ enum rcutorture_type {
405 466
406#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) 467#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
407void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 468void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
408 unsigned long *gpnum, unsigned long *completed); 469 unsigned long *gp_seq);
409void rcutorture_record_test_transition(void);
410void rcutorture_record_progress(unsigned long vernum); 470void rcutorture_record_progress(unsigned long vernum);
411void do_trace_rcu_torture_read(const char *rcutorturename, 471void do_trace_rcu_torture_read(const char *rcutorturename,
412 struct rcu_head *rhp, 472 struct rcu_head *rhp,
@@ -415,15 +475,11 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
415 unsigned long c); 475 unsigned long c);
416#else 476#else
417static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, 477static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
418 int *flags, 478 int *flags, unsigned long *gp_seq)
419 unsigned long *gpnum,
420 unsigned long *completed)
421{ 479{
422 *flags = 0; 480 *flags = 0;
423 *gpnum = 0; 481 *gp_seq = 0;
424 *completed = 0;
425} 482}
426static inline void rcutorture_record_test_transition(void) { }
427static inline void rcutorture_record_progress(unsigned long vernum) { } 483static inline void rcutorture_record_progress(unsigned long vernum) { }
428#ifdef CONFIG_RCU_TRACE 484#ifdef CONFIG_RCU_TRACE
429void do_trace_rcu_torture_read(const char *rcutorturename, 485void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -441,31 +497,26 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
441 497
442static inline void srcutorture_get_gp_data(enum rcutorture_type test_type, 498static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
443 struct srcu_struct *sp, int *flags, 499 struct srcu_struct *sp, int *flags,
444 unsigned long *gpnum, 500 unsigned long *gp_seq)
445 unsigned long *completed)
446{ 501{
447 if (test_type != SRCU_FLAVOR) 502 if (test_type != SRCU_FLAVOR)
448 return; 503 return;
449 *flags = 0; 504 *flags = 0;
450 *completed = sp->srcu_idx; 505 *gp_seq = sp->srcu_idx;
451 *gpnum = *completed;
452} 506}
453 507
454#elif defined(CONFIG_TREE_SRCU) 508#elif defined(CONFIG_TREE_SRCU)
455 509
456void srcutorture_get_gp_data(enum rcutorture_type test_type, 510void srcutorture_get_gp_data(enum rcutorture_type test_type,
457 struct srcu_struct *sp, int *flags, 511 struct srcu_struct *sp, int *flags,
458 unsigned long *gpnum, unsigned long *completed); 512 unsigned long *gp_seq);
459 513
460#endif 514#endif
461 515
462#ifdef CONFIG_TINY_RCU 516#ifdef CONFIG_TINY_RCU
463static inline unsigned long rcu_batches_started(void) { return 0; } 517static inline unsigned long rcu_get_gp_seq(void) { return 0; }
464static inline unsigned long rcu_batches_started_bh(void) { return 0; } 518static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
465static inline unsigned long rcu_batches_started_sched(void) { return 0; } 519static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
466static inline unsigned long rcu_batches_completed(void) { return 0; }
467static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
468static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
469static inline unsigned long rcu_exp_batches_completed(void) { return 0; } 520static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
470static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; } 521static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
471static inline unsigned long 522static inline unsigned long
@@ -474,19 +525,16 @@ static inline void rcu_force_quiescent_state(void) { }
474static inline void rcu_bh_force_quiescent_state(void) { } 525static inline void rcu_bh_force_quiescent_state(void) { }
475static inline void rcu_sched_force_quiescent_state(void) { } 526static inline void rcu_sched_force_quiescent_state(void) { }
476static inline void show_rcu_gp_kthreads(void) { } 527static inline void show_rcu_gp_kthreads(void) { }
528static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
477#else /* #ifdef CONFIG_TINY_RCU */ 529#else /* #ifdef CONFIG_TINY_RCU */
478extern unsigned long rcutorture_testseq; 530unsigned long rcu_get_gp_seq(void);
479extern unsigned long rcutorture_vernum; 531unsigned long rcu_bh_get_gp_seq(void);
480unsigned long rcu_batches_started(void); 532unsigned long rcu_sched_get_gp_seq(void);
481unsigned long rcu_batches_started_bh(void);
482unsigned long rcu_batches_started_sched(void);
483unsigned long rcu_batches_completed(void);
484unsigned long rcu_batches_completed_bh(void);
485unsigned long rcu_batches_completed_sched(void);
486unsigned long rcu_exp_batches_completed(void); 533unsigned long rcu_exp_batches_completed(void);
487unsigned long rcu_exp_batches_completed_sched(void); 534unsigned long rcu_exp_batches_completed_sched(void);
488unsigned long srcu_batches_completed(struct srcu_struct *sp); 535unsigned long srcu_batches_completed(struct srcu_struct *sp);
489void show_rcu_gp_kthreads(void); 536void show_rcu_gp_kthreads(void);
537int rcu_get_gp_kthreads_prio(void);
490void rcu_force_quiescent_state(void); 538void rcu_force_quiescent_state(void);
491void rcu_bh_force_quiescent_state(void); 539void rcu_bh_force_quiescent_state(void);
492void rcu_sched_force_quiescent_state(void); 540void rcu_sched_force_quiescent_state(void);
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index e232846516b3..34244523550e 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -19,6 +19,9 @@
19 * 19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 */ 21 */
22
23#define pr_fmt(fmt) fmt
24
22#include <linux/types.h> 25#include <linux/types.h>
23#include <linux/kernel.h> 26#include <linux/kernel.h>
24#include <linux/init.h> 27#include <linux/init.h>
@@ -88,7 +91,7 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
88torture_param(int, nwriters, -1, "Number of RCU updater threads"); 91torture_param(int, nwriters, -1, "Number of RCU updater threads");
89torture_param(bool, shutdown, !IS_ENABLED(MODULE), 92torture_param(bool, shutdown, !IS_ENABLED(MODULE),
90 "Shutdown at end of performance tests."); 93 "Shutdown at end of performance tests.");
91torture_param(bool, verbose, true, "Enable verbose debugging printk()s"); 94torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
92torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable"); 95torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
93 96
94static char *perf_type = "rcu"; 97static char *perf_type = "rcu";
@@ -135,8 +138,8 @@ struct rcu_perf_ops {
135 void (*cleanup)(void); 138 void (*cleanup)(void);
136 int (*readlock)(void); 139 int (*readlock)(void);
137 void (*readunlock)(int idx); 140 void (*readunlock)(int idx);
138 unsigned long (*started)(void); 141 unsigned long (*get_gp_seq)(void);
139 unsigned long (*completed)(void); 142 unsigned long (*gp_diff)(unsigned long new, unsigned long old);
140 unsigned long (*exp_completed)(void); 143 unsigned long (*exp_completed)(void);
141 void (*async)(struct rcu_head *head, rcu_callback_t func); 144 void (*async)(struct rcu_head *head, rcu_callback_t func);
142 void (*gp_barrier)(void); 145 void (*gp_barrier)(void);
@@ -176,8 +179,8 @@ static struct rcu_perf_ops rcu_ops = {
176 .init = rcu_sync_perf_init, 179 .init = rcu_sync_perf_init,
177 .readlock = rcu_perf_read_lock, 180 .readlock = rcu_perf_read_lock,
178 .readunlock = rcu_perf_read_unlock, 181 .readunlock = rcu_perf_read_unlock,
179 .started = rcu_batches_started, 182 .get_gp_seq = rcu_get_gp_seq,
180 .completed = rcu_batches_completed, 183 .gp_diff = rcu_seq_diff,
181 .exp_completed = rcu_exp_batches_completed, 184 .exp_completed = rcu_exp_batches_completed,
182 .async = call_rcu, 185 .async = call_rcu,
183 .gp_barrier = rcu_barrier, 186 .gp_barrier = rcu_barrier,
@@ -206,8 +209,8 @@ static struct rcu_perf_ops rcu_bh_ops = {
206 .init = rcu_sync_perf_init, 209 .init = rcu_sync_perf_init,
207 .readlock = rcu_bh_perf_read_lock, 210 .readlock = rcu_bh_perf_read_lock,
208 .readunlock = rcu_bh_perf_read_unlock, 211 .readunlock = rcu_bh_perf_read_unlock,
209 .started = rcu_batches_started_bh, 212 .get_gp_seq = rcu_bh_get_gp_seq,
210 .completed = rcu_batches_completed_bh, 213 .gp_diff = rcu_seq_diff,
211 .exp_completed = rcu_exp_batches_completed_sched, 214 .exp_completed = rcu_exp_batches_completed_sched,
212 .async = call_rcu_bh, 215 .async = call_rcu_bh,
213 .gp_barrier = rcu_barrier_bh, 216 .gp_barrier = rcu_barrier_bh,
@@ -263,8 +266,8 @@ static struct rcu_perf_ops srcu_ops = {
263 .init = rcu_sync_perf_init, 266 .init = rcu_sync_perf_init,
264 .readlock = srcu_perf_read_lock, 267 .readlock = srcu_perf_read_lock,
265 .readunlock = srcu_perf_read_unlock, 268 .readunlock = srcu_perf_read_unlock,
266 .started = NULL, 269 .get_gp_seq = srcu_perf_completed,
267 .completed = srcu_perf_completed, 270 .gp_diff = rcu_seq_diff,
268 .exp_completed = srcu_perf_completed, 271 .exp_completed = srcu_perf_completed,
269 .async = srcu_call_rcu, 272 .async = srcu_call_rcu,
270 .gp_barrier = srcu_rcu_barrier, 273 .gp_barrier = srcu_rcu_barrier,
@@ -292,8 +295,8 @@ static struct rcu_perf_ops srcud_ops = {
292 .cleanup = srcu_sync_perf_cleanup, 295 .cleanup = srcu_sync_perf_cleanup,
293 .readlock = srcu_perf_read_lock, 296 .readlock = srcu_perf_read_lock,
294 .readunlock = srcu_perf_read_unlock, 297 .readunlock = srcu_perf_read_unlock,
295 .started = NULL, 298 .get_gp_seq = srcu_perf_completed,
296 .completed = srcu_perf_completed, 299 .gp_diff = rcu_seq_diff,
297 .exp_completed = srcu_perf_completed, 300 .exp_completed = srcu_perf_completed,
298 .async = srcu_call_rcu, 301 .async = srcu_call_rcu,
299 .gp_barrier = srcu_rcu_barrier, 302 .gp_barrier = srcu_rcu_barrier,
@@ -322,8 +325,8 @@ static struct rcu_perf_ops sched_ops = {
322 .init = rcu_sync_perf_init, 325 .init = rcu_sync_perf_init,
323 .readlock = sched_perf_read_lock, 326 .readlock = sched_perf_read_lock,
324 .readunlock = sched_perf_read_unlock, 327 .readunlock = sched_perf_read_unlock,
325 .started = rcu_batches_started_sched, 328 .get_gp_seq = rcu_sched_get_gp_seq,
326 .completed = rcu_batches_completed_sched, 329 .gp_diff = rcu_seq_diff,
327 .exp_completed = rcu_exp_batches_completed_sched, 330 .exp_completed = rcu_exp_batches_completed_sched,
328 .async = call_rcu_sched, 331 .async = call_rcu_sched,
329 .gp_barrier = rcu_barrier_sched, 332 .gp_barrier = rcu_barrier_sched,
@@ -350,8 +353,8 @@ static struct rcu_perf_ops tasks_ops = {
350 .init = rcu_sync_perf_init, 353 .init = rcu_sync_perf_init,
351 .readlock = tasks_perf_read_lock, 354 .readlock = tasks_perf_read_lock,
352 .readunlock = tasks_perf_read_unlock, 355 .readunlock = tasks_perf_read_unlock,
353 .started = rcu_no_completed, 356 .get_gp_seq = rcu_no_completed,
354 .completed = rcu_no_completed, 357 .gp_diff = rcu_seq_diff,
355 .async = call_rcu_tasks, 358 .async = call_rcu_tasks,
356 .gp_barrier = rcu_barrier_tasks, 359 .gp_barrier = rcu_barrier_tasks,
357 .sync = synchronize_rcu_tasks, 360 .sync = synchronize_rcu_tasks,
@@ -359,9 +362,11 @@ static struct rcu_perf_ops tasks_ops = {
359 .name = "tasks" 362 .name = "tasks"
360}; 363};
361 364
362static bool __maybe_unused torturing_tasks(void) 365static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
363{ 366{
364 return cur_ops == &tasks_ops; 367 if (!cur_ops->gp_diff)
368 return new - old;
369 return cur_ops->gp_diff(new, old);
365} 370}
366 371
367/* 372/*
@@ -444,8 +449,7 @@ rcu_perf_writer(void *arg)
444 b_rcu_perf_writer_started = 449 b_rcu_perf_writer_started =
445 cur_ops->exp_completed() / 2; 450 cur_ops->exp_completed() / 2;
446 } else { 451 } else {
447 b_rcu_perf_writer_started = 452 b_rcu_perf_writer_started = cur_ops->get_gp_seq();
448 cur_ops->completed();
449 } 453 }
450 } 454 }
451 455
@@ -502,7 +506,7 @@ retry:
502 cur_ops->exp_completed() / 2; 506 cur_ops->exp_completed() / 2;
503 } else { 507 } else {
504 b_rcu_perf_writer_finished = 508 b_rcu_perf_writer_finished =
505 cur_ops->completed(); 509 cur_ops->get_gp_seq();
506 } 510 }
507 if (shutdown) { 511 if (shutdown) {
508 smp_mb(); /* Assign before wake. */ 512 smp_mb(); /* Assign before wake. */
@@ -527,7 +531,7 @@ retry:
527 return 0; 531 return 0;
528} 532}
529 533
530static inline void 534static void
531rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag) 535rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
532{ 536{
533 pr_alert("%s" PERF_FLAG 537 pr_alert("%s" PERF_FLAG
@@ -582,8 +586,8 @@ rcu_perf_cleanup(void)
582 t_rcu_perf_writer_finished - 586 t_rcu_perf_writer_finished -
583 t_rcu_perf_writer_started, 587 t_rcu_perf_writer_started,
584 ngps, 588 ngps,
585 b_rcu_perf_writer_finished - 589 rcuperf_seq_diff(b_rcu_perf_writer_finished,
586 b_rcu_perf_writer_started); 590 b_rcu_perf_writer_started));
587 for (i = 0; i < nrealwriters; i++) { 591 for (i = 0; i < nrealwriters; i++) {
588 if (!writer_durations) 592 if (!writer_durations)
589 break; 593 break;
@@ -671,12 +675,11 @@ rcu_perf_init(void)
671 break; 675 break;
672 } 676 }
673 if (i == ARRAY_SIZE(perf_ops)) { 677 if (i == ARRAY_SIZE(perf_ops)) {
674 pr_alert("rcu-perf: invalid perf type: \"%s\"\n", 678 pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
675 perf_type);
676 pr_alert("rcu-perf types:"); 679 pr_alert("rcu-perf types:");
677 for (i = 0; i < ARRAY_SIZE(perf_ops); i++) 680 for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
678 pr_alert(" %s", perf_ops[i]->name); 681 pr_cont(" %s", perf_ops[i]->name);
679 pr_alert("\n"); 682 pr_cont("\n");
680 firsterr = -EINVAL; 683 firsterr = -EINVAL;
681 goto unwind; 684 goto unwind;
682 } 685 }
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 42fcb7f05fac..c596c6f1e457 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -22,6 +22,9 @@
22 * 22 *
23 * See also: Documentation/RCU/torture.txt 23 * See also: Documentation/RCU/torture.txt
24 */ 24 */
25
26#define pr_fmt(fmt) fmt
27
25#include <linux/types.h> 28#include <linux/types.h>
26#include <linux/kernel.h> 29#include <linux/kernel.h>
27#include <linux/init.h> 30#include <linux/init.h>
@@ -52,6 +55,7 @@
52#include <linux/torture.h> 55#include <linux/torture.h>
53#include <linux/vmalloc.h> 56#include <linux/vmalloc.h>
54#include <linux/sched/debug.h> 57#include <linux/sched/debug.h>
58#include <linux/sched/sysctl.h>
55 59
56#include "rcu.h" 60#include "rcu.h"
57 61
@@ -59,6 +63,19 @@ MODULE_LICENSE("GPL");
59MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 63MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
60 64
61 65
66/* Bits for ->extendables field, extendables param, and related definitions. */
67#define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */
68#define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
69#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */
70#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */
71#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */
72#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */
73#define RCUTORTURE_RDR_NBITS 4 /* Number of bits defined above. */
74#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
75 RCUTORTURE_RDR_PREEMPT)
76#define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
77 /* Must be power of two minus one. */
78
62torture_param(int, cbflood_inter_holdoff, HZ, 79torture_param(int, cbflood_inter_holdoff, HZ,
63 "Holdoff between floods (jiffies)"); 80 "Holdoff between floods (jiffies)");
64torture_param(int, cbflood_intra_holdoff, 1, 81torture_param(int, cbflood_intra_holdoff, 1,
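
The reader state word packs the extension flags into the low byte and the SRCU read-lock index above RCUTORTURE_RDR_SHIFT. A runnable sketch of the encoding:

    #include <stdio.h>

    #define RCUTORTURE_RDR_SHIFT 8
    #define RCUTORTURE_RDR_MASK  ((1 << RCUTORTURE_RDR_SHIFT) - 1)
    #define RCUTORTURE_RDR_BH    0x1
    #define RCUTORTURE_RDR_IRQ   0x2

    int main(void)
    {
        /* Hypothetical reader state: SRCU index 1, bh disabled. */
        int state = (1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_BH;

        printf("srcu idx %d, extend flags %#x\n",
               state >> RCUTORTURE_RDR_SHIFT, state & RCUTORTURE_RDR_MASK);
        return 0;
    }
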
@@ -66,6 +83,8 @@ torture_param(int, cbflood_intra_holdoff, 1,
66torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable"); 83torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
67torture_param(int, cbflood_n_per_burst, 20000, 84torture_param(int, cbflood_n_per_burst, 20000,
68 "# callbacks per burst in flood"); 85 "# callbacks per burst in flood");
86torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
87 "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
69torture_param(int, fqs_duration, 0, 88torture_param(int, fqs_duration, 0,
70 "Duration of fqs bursts (us), 0 to disable"); 89 "Duration of fqs bursts (us), 0 to disable");
71torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 90torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
@@ -84,7 +103,7 @@ torture_param(int, object_debug, 0,
84 "Enable debug-object double call_rcu() testing"); 103 "Enable debug-object double call_rcu() testing");
85torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 104torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
86torture_param(int, onoff_interval, 0, 105torture_param(int, onoff_interval, 0,
87 "Time between CPU hotplugs (s), 0=disable"); 106 "Time between CPU hotplugs (jiffies), 0=disable");
88torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 107torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
89torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 108torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
90torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 109torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -101,7 +120,7 @@ torture_param(int, test_boost_interval, 7,
101 "Interval between boost tests, seconds."); 120 "Interval between boost tests, seconds.");
102torture_param(bool, test_no_idle_hz, true, 121torture_param(bool, test_no_idle_hz, true,
103 "Test support for tickless idle CPUs"); 122 "Test support for tickless idle CPUs");
104torture_param(bool, verbose, true, 123torture_param(int, verbose, 1,
105 "Enable verbose debugging printk()s"); 124 "Enable verbose debugging printk()s");
106 125
107static char *torture_type = "rcu"; 126static char *torture_type = "rcu";
@@ -148,9 +167,9 @@ static long n_rcu_torture_boost_ktrerror;
148static long n_rcu_torture_boost_rterror; 167static long n_rcu_torture_boost_rterror;
149static long n_rcu_torture_boost_failure; 168static long n_rcu_torture_boost_failure;
150static long n_rcu_torture_boosts; 169static long n_rcu_torture_boosts;
151static long n_rcu_torture_timers; 170static atomic_long_t n_rcu_torture_timers;
152static long n_barrier_attempts; 171static long n_barrier_attempts;
153static long n_barrier_successes; 172static long n_barrier_successes; /* did rcu_barrier test succeed? */
154static atomic_long_t n_cbfloods; 173static atomic_long_t n_cbfloods;
155static struct list_head rcu_torture_removed; 174static struct list_head rcu_torture_removed;
156 175
@@ -261,8 +280,8 @@ struct rcu_torture_ops {
261 int (*readlock)(void); 280 int (*readlock)(void);
262 void (*read_delay)(struct torture_random_state *rrsp); 281 void (*read_delay)(struct torture_random_state *rrsp);
263 void (*readunlock)(int idx); 282 void (*readunlock)(int idx);
264 unsigned long (*started)(void); 283 unsigned long (*get_gp_seq)(void);
265 unsigned long (*completed)(void); 284 unsigned long (*gp_diff)(unsigned long new, unsigned long old);
266 void (*deferred_free)(struct rcu_torture *p); 285 void (*deferred_free)(struct rcu_torture *p);
267 void (*sync)(void); 286 void (*sync)(void);
268 void (*exp_sync)(void); 287 void (*exp_sync)(void);
@@ -274,6 +293,8 @@ struct rcu_torture_ops {
274 void (*stats)(void); 293 void (*stats)(void);
275 int irq_capable; 294 int irq_capable;
276 int can_boost; 295 int can_boost;
296 int extendables;
297 int ext_irq_conflict;
277 const char *name; 298 const char *name;
278}; 299};
279 300
@@ -302,10 +323,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
302 * force_quiescent_state. */ 323 * force_quiescent_state. */
303 324
304 if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 325 if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
305 started = cur_ops->completed(); 326 started = cur_ops->get_gp_seq();
306 ts = rcu_trace_clock_local(); 327 ts = rcu_trace_clock_local();
307 mdelay(longdelay_ms); 328 mdelay(longdelay_ms);
308 completed = cur_ops->completed(); 329 completed = cur_ops->get_gp_seq();
309 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 330 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
310 started, completed); 331 started, completed);
311 } 332 }
@@ -397,8 +418,8 @@ static struct rcu_torture_ops rcu_ops = {
397 .readlock = rcu_torture_read_lock, 418 .readlock = rcu_torture_read_lock,
398 .read_delay = rcu_read_delay, 419 .read_delay = rcu_read_delay,
399 .readunlock = rcu_torture_read_unlock, 420 .readunlock = rcu_torture_read_unlock,
400 .started = rcu_batches_started, 421 .get_gp_seq = rcu_get_gp_seq,
401 .completed = rcu_batches_completed, 422 .gp_diff = rcu_seq_diff,
402 .deferred_free = rcu_torture_deferred_free, 423 .deferred_free = rcu_torture_deferred_free,
403 .sync = synchronize_rcu, 424 .sync = synchronize_rcu,
404 .exp_sync = synchronize_rcu_expedited, 425 .exp_sync = synchronize_rcu_expedited,
@@ -439,8 +460,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
439 .readlock = rcu_bh_torture_read_lock, 460 .readlock = rcu_bh_torture_read_lock,
440 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 461 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
441 .readunlock = rcu_bh_torture_read_unlock, 462 .readunlock = rcu_bh_torture_read_unlock,
442 .started = rcu_batches_started_bh, 463 .get_gp_seq = rcu_bh_get_gp_seq,
443 .completed = rcu_batches_completed_bh, 464 .gp_diff = rcu_seq_diff,
444 .deferred_free = rcu_bh_torture_deferred_free, 465 .deferred_free = rcu_bh_torture_deferred_free,
445 .sync = synchronize_rcu_bh, 466 .sync = synchronize_rcu_bh,
446 .exp_sync = synchronize_rcu_bh_expedited, 467 .exp_sync = synchronize_rcu_bh_expedited,
@@ -449,6 +470,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
449 .fqs = rcu_bh_force_quiescent_state, 470 .fqs = rcu_bh_force_quiescent_state,
450 .stats = NULL, 471 .stats = NULL,
451 .irq_capable = 1, 472 .irq_capable = 1,
473 .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
474 .ext_irq_conflict = RCUTORTURE_RDR_RCU,
452 .name = "rcu_bh" 475 .name = "rcu_bh"
453}; 476};
454 477
@@ -483,8 +506,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
483 .readlock = rcu_torture_read_lock, 506 .readlock = rcu_torture_read_lock,
484 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 507 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
485 .readunlock = rcu_torture_read_unlock, 508 .readunlock = rcu_torture_read_unlock,
486 .started = rcu_no_completed, 509 .get_gp_seq = rcu_no_completed,
487 .completed = rcu_no_completed,
488 .deferred_free = rcu_busted_torture_deferred_free, 510 .deferred_free = rcu_busted_torture_deferred_free,
489 .sync = synchronize_rcu_busted, 511 .sync = synchronize_rcu_busted,
490 .exp_sync = synchronize_rcu_busted, 512 .exp_sync = synchronize_rcu_busted,
@@ -572,8 +594,7 @@ static struct rcu_torture_ops srcu_ops = {
572 .readlock = srcu_torture_read_lock, 594 .readlock = srcu_torture_read_lock,
573 .read_delay = srcu_read_delay, 595 .read_delay = srcu_read_delay,
574 .readunlock = srcu_torture_read_unlock, 596 .readunlock = srcu_torture_read_unlock,
575 .started = NULL, 597 .get_gp_seq = srcu_torture_completed,
576 .completed = srcu_torture_completed,
577 .deferred_free = srcu_torture_deferred_free, 598 .deferred_free = srcu_torture_deferred_free,
578 .sync = srcu_torture_synchronize, 599 .sync = srcu_torture_synchronize,
579 .exp_sync = srcu_torture_synchronize_expedited, 600 .exp_sync = srcu_torture_synchronize_expedited,
@@ -610,8 +631,7 @@ static struct rcu_torture_ops srcud_ops = {
610 .readlock = srcu_torture_read_lock, 631 .readlock = srcu_torture_read_lock,
611 .read_delay = srcu_read_delay, 632 .read_delay = srcu_read_delay,
612 .readunlock = srcu_torture_read_unlock, 633 .readunlock = srcu_torture_read_unlock,
613 .started = NULL, 634 .get_gp_seq = srcu_torture_completed,
614 .completed = srcu_torture_completed,
615 .deferred_free = srcu_torture_deferred_free, 635 .deferred_free = srcu_torture_deferred_free,
616 .sync = srcu_torture_synchronize, 636 .sync = srcu_torture_synchronize,
617 .exp_sync = srcu_torture_synchronize_expedited, 637 .exp_sync = srcu_torture_synchronize_expedited,
@@ -622,6 +642,26 @@ static struct rcu_torture_ops srcud_ops = {
622 .name = "srcud" 642 .name = "srcud"
623}; 643};
624 644
645/* As above, but broken due to inappropriate reader extension. */
646static struct rcu_torture_ops busted_srcud_ops = {
647 .ttype = SRCU_FLAVOR,
648 .init = srcu_torture_init,
649 .cleanup = srcu_torture_cleanup,
650 .readlock = srcu_torture_read_lock,
651 .read_delay = rcu_read_delay,
652 .readunlock = srcu_torture_read_unlock,
653 .get_gp_seq = srcu_torture_completed,
654 .deferred_free = srcu_torture_deferred_free,
655 .sync = srcu_torture_synchronize,
656 .exp_sync = srcu_torture_synchronize_expedited,
657 .call = srcu_torture_call,
658 .cb_barrier = srcu_torture_barrier,
659 .stats = srcu_torture_stats,
660 .irq_capable = 1,
661 .extendables = RCUTORTURE_MAX_EXTEND,
662 .name = "busted_srcud"
663};
664
625/* 665/*
626 * Definitions for sched torture testing. 666 * Definitions for sched torture testing.
627 */ 667 */
@@ -648,8 +688,8 @@ static struct rcu_torture_ops sched_ops = {
648 .readlock = sched_torture_read_lock, 688 .readlock = sched_torture_read_lock,
649 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 689 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
650 .readunlock = sched_torture_read_unlock, 690 .readunlock = sched_torture_read_unlock,
651 .started = rcu_batches_started_sched, 691 .get_gp_seq = rcu_sched_get_gp_seq,
652 .completed = rcu_batches_completed_sched, 692 .gp_diff = rcu_seq_diff,
653 .deferred_free = rcu_sched_torture_deferred_free, 693 .deferred_free = rcu_sched_torture_deferred_free,
654 .sync = synchronize_sched, 694 .sync = synchronize_sched,
655 .exp_sync = synchronize_sched_expedited, 695 .exp_sync = synchronize_sched_expedited,
@@ -660,6 +700,7 @@ static struct rcu_torture_ops sched_ops = {
660 .fqs = rcu_sched_force_quiescent_state, 700 .fqs = rcu_sched_force_quiescent_state,
661 .stats = NULL, 701 .stats = NULL,
662 .irq_capable = 1, 702 .irq_capable = 1,
703 .extendables = RCUTORTURE_MAX_EXTEND,
663 .name = "sched" 704 .name = "sched"
664}; 705};
665 706
@@ -687,8 +728,7 @@ static struct rcu_torture_ops tasks_ops = {
687 .readlock = tasks_torture_read_lock, 728 .readlock = tasks_torture_read_lock,
688 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 729 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
689 .readunlock = tasks_torture_read_unlock, 730 .readunlock = tasks_torture_read_unlock,
690 .started = rcu_no_completed, 731 .get_gp_seq = rcu_no_completed,
691 .completed = rcu_no_completed,
692 .deferred_free = rcu_tasks_torture_deferred_free, 732 .deferred_free = rcu_tasks_torture_deferred_free,
693 .sync = synchronize_rcu_tasks, 733 .sync = synchronize_rcu_tasks,
694 .exp_sync = synchronize_rcu_tasks, 734 .exp_sync = synchronize_rcu_tasks,
@@ -700,6 +740,13 @@ static struct rcu_torture_ops tasks_ops = {
700 .name = "tasks" 740 .name = "tasks"
701}; 741};
702 742
743static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
744{
745 if (!cur_ops->gp_diff)
746 return new - old;
747 return cur_ops->gp_diff(new, old);
748}
749
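A note on the helper above: after this series, ->get_gp_seq() can return either a plain grace-period count (rcu_no_completed() and srcu_torture_completed() behave that way) or an rcu_seq-style value whose low-order bits encode grace-period phase, which is why plain subtraction is only the fallback for flavors that supply no ->gp_diff(). A minimal sketch of a seq-aware diff, assuming the two-bit phase field used by kernel/rcu/rcu.h:

    /* Sketch only, not part of this patch: shift out the phase bits
     * before subtracting the grace-period counters. */
    #define RCU_SEQ_CTR_SHIFT 2	/* assumed; bottom two bits hold phase */
    static unsigned long seq_diff_sketch(unsigned long new, unsigned long old)
    {
            /* e.g. old = (5 << 2) | 1, new = (7 << 2) | 0  =>  returns 2. */
            return (new >> RCU_SEQ_CTR_SHIFT) - (old >> RCU_SEQ_CTR_SHIFT);
    }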
703static bool __maybe_unused torturing_tasks(void) 750static bool __maybe_unused torturing_tasks(void)
704{ 751{
705 return cur_ops == &tasks_ops; 752 return cur_ops == &tasks_ops;
@@ -726,6 +773,44 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
726 smp_store_release(&rbip->inflight, 0); 773 smp_store_release(&rbip->inflight, 0);
727} 774}
728 775
776static int old_rt_runtime = -1;
777
778static void rcu_torture_disable_rt_throttle(void)
779{
780 /*
781 * Disable RT throttling so that rcutorture's boost threads don't get
 782	 * throttled. Only possible if rcutorture is built-in; otherwise the
 783	 * user should manually do this by setting the sched_rt_period_us and
 784	 * sched_rt_runtime_us sysctls.
785 */
786 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
787 return;
788
789 old_rt_runtime = sysctl_sched_rt_runtime;
790 sysctl_sched_rt_runtime = -1;
791}
792
793static void rcu_torture_enable_rt_throttle(void)
794{
795 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
796 return;
797
798 sysctl_sched_rt_runtime = old_rt_runtime;
799 old_rt_runtime = -1;
800}
801
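When rcutorture is instead built as a module, the helpers above are no-ops and the same effect must come from user space; assuming the usual sysctl names, that means writing -1 to kernel.sched_rt_runtime_us (for example via "sysctl -w kernel.sched_rt_runtime_us=-1") before the run and restoring the saved value afterward.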
802static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
803{
804 if (end - start > test_boost_duration * HZ - HZ / 2) {
805 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
806 n_rcu_torture_boost_failure++;
807
808 return true; /* failed */
809 }
810
811 return false; /* passed */
812}
813
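The threshold above mirrors the inline check it replaces: a callback posted at the start of a boost interval should be invoked within test_boost_duration seconds, with HZ/2 jiffies of slack for timer skew. For example, with the default test_boost_duration of 4 and HZ=1000, a gap of more than 3500 jiffies between end and start counts as a boost failure.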
729static int rcu_torture_boost(void *arg) 814static int rcu_torture_boost(void *arg)
730{ 815{
731 unsigned long call_rcu_time; 816 unsigned long call_rcu_time;
@@ -746,6 +831,21 @@ static int rcu_torture_boost(void *arg)
746 init_rcu_head_on_stack(&rbi.rcu); 831 init_rcu_head_on_stack(&rbi.rcu);
747 /* Each pass through the following loop does one boost-test cycle. */ 832 /* Each pass through the following loop does one boost-test cycle. */
748 do { 833 do {
 834	/* Track whether the test has already failed in this interval. */
835 bool failed = false;
836
837 /* Increment n_rcu_torture_boosts once per boost-test */
838 while (!kthread_should_stop()) {
839 if (mutex_trylock(&boost_mutex)) {
840 n_rcu_torture_boosts++;
841 mutex_unlock(&boost_mutex);
842 break;
843 }
844 schedule_timeout_uninterruptible(1);
845 }
846 if (kthread_should_stop())
847 goto checkwait;
848
749 /* Wait for the next test interval. */ 849 /* Wait for the next test interval. */
750 oldstarttime = boost_starttime; 850 oldstarttime = boost_starttime;
751 while (ULONG_CMP_LT(jiffies, oldstarttime)) { 851 while (ULONG_CMP_LT(jiffies, oldstarttime)) {
@@ -764,11 +864,10 @@ static int rcu_torture_boost(void *arg)
764 /* RCU core before ->inflight = 1. */ 864 /* RCU core before ->inflight = 1. */
765 smp_store_release(&rbi.inflight, 1); 865 smp_store_release(&rbi.inflight, 1);
766 call_rcu(&rbi.rcu, rcu_torture_boost_cb); 866 call_rcu(&rbi.rcu, rcu_torture_boost_cb);
767 if (jiffies - call_rcu_time > 867 /* Check if the boost test failed */
768 test_boost_duration * HZ - HZ / 2) { 868 failed = failed ||
769 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 869 rcu_torture_boost_failed(call_rcu_time,
770 n_rcu_torture_boost_failure++; 870 jiffies);
771 }
772 call_rcu_time = jiffies; 871 call_rcu_time = jiffies;
773 } 872 }
774 stutter_wait("rcu_torture_boost"); 873 stutter_wait("rcu_torture_boost");
@@ -777,6 +876,14 @@ static int rcu_torture_boost(void *arg)
777 } 876 }
778 877
779 /* 878 /*
 879	 * If boost never happened, then inflight will always be 1; in
 880	 * that case the boost check in the loop above never ran, so do
 881	 * one more check here.
882 */
883 if (!failed && smp_load_acquire(&rbi.inflight))
884 rcu_torture_boost_failed(call_rcu_time, jiffies);
885
886 /*
780 * Set the start time of the next test interval. 887 * Set the start time of the next test interval.
781 * Yes, this is vulnerable to long delays, but such 888 * Yes, this is vulnerable to long delays, but such
782 * delays simply cause a false negative for the next 889 * delays simply cause a false negative for the next
@@ -788,7 +895,6 @@ static int rcu_torture_boost(void *arg)
788 if (mutex_trylock(&boost_mutex)) { 895 if (mutex_trylock(&boost_mutex)) {
789 boost_starttime = jiffies + 896 boost_starttime = jiffies +
790 test_boost_interval * HZ; 897 test_boost_interval * HZ;
791 n_rcu_torture_boosts++;
792 mutex_unlock(&boost_mutex); 898 mutex_unlock(&boost_mutex);
793 break; 899 break;
794 } 900 }
@@ -1010,7 +1116,7 @@ rcu_torture_writer(void *arg)
1010 break; 1116 break;
1011 } 1117 }
1012 } 1118 }
1013 rcutorture_record_progress(++rcu_torture_current_version); 1119 rcu_torture_current_version++;
1014 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1120 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1015 if (can_expedite && 1121 if (can_expedite &&
1016 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1122 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
@@ -1084,27 +1190,133 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
1084} 1190}
1085 1191
1086/* 1192/*
1087 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 1193 * Do one extension of an RCU read-side critical section using the
1088 * incrementing the corresponding element of the pipeline array. The 1194 * current reader state in readstate (set to zero for initial entry
1089 * counter in the element should never be greater than 1, otherwise, the 1195 * to extended critical section), set the new state as specified by
1090 * RCU implementation is broken. 1196 * newstate (set to zero for final exit from extended critical section),
1197 * and random-number-generator state in trsp. If this is neither the
1198 * beginning nor the end of the critical section and if there was actually a
1199 * change, do a ->read_delay().
1091 */ 1200 */
1092static void rcu_torture_timer(struct timer_list *unused) 1201static void rcutorture_one_extend(int *readstate, int newstate,
1202 struct torture_random_state *trsp)
1203{
1204 int idxnew = -1;
1205 int idxold = *readstate;
1206 int statesnew = ~*readstate & newstate;
1207 int statesold = *readstate & ~newstate;
1208
1209 WARN_ON_ONCE(idxold < 0);
1210 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
1211
1212 /* First, put new protection in place to avoid critical-section gap. */
1213 if (statesnew & RCUTORTURE_RDR_BH)
1214 local_bh_disable();
1215 if (statesnew & RCUTORTURE_RDR_IRQ)
1216 local_irq_disable();
1217 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1218 preempt_disable();
1219 if (statesnew & RCUTORTURE_RDR_RCU)
1220 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1221
1222 /* Next, remove old protection, irq first due to bh conflict. */
1223 if (statesold & RCUTORTURE_RDR_IRQ)
1224 local_irq_enable();
1225 if (statesold & RCUTORTURE_RDR_BH)
1226 local_bh_enable();
1227 if (statesold & RCUTORTURE_RDR_PREEMPT)
1228 preempt_enable();
1229 if (statesold & RCUTORTURE_RDR_RCU)
1230 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1231
1232 /* Delay if neither beginning nor end and there was a change. */
1233 if ((statesnew || statesold) && *readstate && newstate)
1234 cur_ops->read_delay(trsp);
1235
1236 /* Update the reader state. */
1237 if (idxnew == -1)
1238 idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1239 WARN_ON_ONCE(idxnew < 0);
1240 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1241 *readstate = idxnew | newstate;
1242 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1243 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1244}
1245
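The readstate word manipulated above packs two things at once: the low bits are protection flags, and the value returned by ->readlock() (meaningful for SRCU) is parked above RCUTORTURE_RDR_SHIFT so it can be handed back to ->readunlock() on exit. The macros are defined earlier in the patch, outside this hunk; the sketch below is an illustrative reconstruction rather than a quote:

    /* Illustrative reconstruction of the reader-state encoding. */
    #define RCUTORTURE_RDR_SHIFT	8	/* SRCU index lives above bit 7. */
    #define RCUTORTURE_RDR_MASK	((1 << RCUTORTURE_RDR_SHIFT) - 1)
    #define RCUTORTURE_RDR_BH	0x1	/* local_bh_disable() held. */
    #define RCUTORTURE_RDR_IRQ	0x2	/* local_irq_disable() held. */
    #define RCUTORTURE_RDR_PREEMPT	0x4	/* preempt_disable() held. */
    #define RCUTORTURE_RDR_RCU	0x8	/* cur_ops->readlock() held. */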
1246/* Return the biggest extendables mask given current RCU and boot parameters. */
1247static int rcutorture_extend_mask_max(void)
1248{
1249 int mask;
1250
1251 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1252 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1253 mask = mask | RCUTORTURE_RDR_RCU;
1254 return mask;
1255}
1256
1257/* Return a random protection state mask, but with at least one bit set. */
1258static int
1259rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1260{
1261 int mask = rcutorture_extend_mask_max();
1262 unsigned long randmask1 = torture_random(trsp) >> 8;
1263 unsigned long randmask2 = randmask1 >> 1;
1264
1265 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
1266 /* Half the time lots of bits, half the time only one bit. */
1267 if (randmask1 & 0x1)
1268 mask = mask & randmask2;
1269 else
1270 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1271 if ((mask & RCUTORTURE_RDR_IRQ) &&
1272 !(mask & RCUTORTURE_RDR_BH) &&
1273 (oldmask & RCUTORTURE_RDR_BH))
1274 mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
1275 if ((mask & RCUTORTURE_RDR_IRQ) &&
1276 !(mask & cur_ops->ext_irq_conflict) &&
1277 (oldmask & cur_ops->ext_irq_conflict))
1278 mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
1279 return mask ?: RCUTORTURE_RDR_RCU;
1280}
1281
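A worked example of the two random draws above (values invented for illustration, assuming a maximum mask of BH|IRQ|PREEMPT|RCU = 0xf and RCUTORTURE_RDR_NBITS = 4):

    /* randmask1 = 0x29 (bit 0 set): dense path,
     *   randmask2 = 0x14, mask = 0xf & 0x14 = RCUTORTURE_RDR_PREEMPT.
     * randmask1 = 0x28 (bit 0 clear): single-bit path,
     *   mask = 0xf & (1 << (0x14 % 4)) = RCUTORTURE_RDR_BH.
     * A zero result falls back to RCUTORTURE_RDR_RCU either way. */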
1282/*
1283 * Do a randomly selected number of extensions of an existing RCU read-side
1284 * critical section.
1285 */
1286static void rcutorture_loop_extend(int *readstate,
1287 struct torture_random_state *trsp)
1288{
1289 int i;
1290 int mask = rcutorture_extend_mask_max();
1291
1292 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1293 if (!((mask - 1) & mask))
1294 return; /* Current RCU flavor not extendable. */
1295 i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
1296 while (i--) {
1297 mask = rcutorture_extend_mask(*readstate, trsp);
1298 rcutorture_one_extend(readstate, mask, trsp);
1299 }
1300}
1301
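Two details worth noting in the loop above: the (mask - 1) & mask test is the usual at-most-one-bit-set trick, so a flavor whose maximum mask is just RCUTORTURE_RDR_RCU is treated as non-extendable; and RCUTORTURE_RDR_MAX_LOOPS (defined earlier in the patch) is applied as a bit mask, keeping the number of extension passes small and random.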
1302/*
1303 * Do one read-side critical section, returning false if there was
1304 * no data to read. Can be invoked both from process context and
1305 * from a timer handler.
1306 */
1307static bool rcu_torture_one_read(struct torture_random_state *trsp)
1093{ 1308{
1094 int idx;
1095 unsigned long started; 1309 unsigned long started;
1096 unsigned long completed; 1310 unsigned long completed;
1097 static DEFINE_TORTURE_RANDOM(rand); 1311 int newstate;
1098 static DEFINE_SPINLOCK(rand_lock);
1099 struct rcu_torture *p; 1312 struct rcu_torture *p;
1100 int pipe_count; 1313 int pipe_count;
1314 int readstate = 0;
1101 unsigned long long ts; 1315 unsigned long long ts;
1102 1316
1103 idx = cur_ops->readlock(); 1317 newstate = rcutorture_extend_mask(readstate, trsp);
1104 if (cur_ops->started) 1318 rcutorture_one_extend(&readstate, newstate, trsp);
1105 started = cur_ops->started(); 1319 started = cur_ops->get_gp_seq();
1106 else
1107 started = cur_ops->completed();
1108 ts = rcu_trace_clock_local(); 1320 ts = rcu_trace_clock_local();
1109 p = rcu_dereference_check(rcu_torture_current, 1321 p = rcu_dereference_check(rcu_torture_current,
1110 rcu_read_lock_bh_held() || 1322 rcu_read_lock_bh_held() ||
@@ -1112,39 +1324,50 @@ static void rcu_torture_timer(struct timer_list *unused)
1112 srcu_read_lock_held(srcu_ctlp) || 1324 srcu_read_lock_held(srcu_ctlp) ||
1113 torturing_tasks()); 1325 torturing_tasks());
1114 if (p == NULL) { 1326 if (p == NULL) {
1115 /* Leave because rcu_torture_writer is not yet underway */ 1327 /* Wait for rcu_torture_writer to get underway */
1116 cur_ops->readunlock(idx); 1328 rcutorture_one_extend(&readstate, 0, trsp);
1117 return; 1329 return false;
1118 } 1330 }
1119 if (p->rtort_mbtest == 0) 1331 if (p->rtort_mbtest == 0)
1120 atomic_inc(&n_rcu_torture_mberror); 1332 atomic_inc(&n_rcu_torture_mberror);
1121 spin_lock(&rand_lock); 1333 rcutorture_loop_extend(&readstate, trsp);
1122 cur_ops->read_delay(&rand);
1123 n_rcu_torture_timers++;
1124 spin_unlock(&rand_lock);
1125 preempt_disable(); 1334 preempt_disable();
1126 pipe_count = p->rtort_pipe_count; 1335 pipe_count = p->rtort_pipe_count;
1127 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1336 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1128 /* Should not happen, but... */ 1337 /* Should not happen, but... */
1129 pipe_count = RCU_TORTURE_PIPE_LEN; 1338 pipe_count = RCU_TORTURE_PIPE_LEN;
1130 } 1339 }
1131 completed = cur_ops->completed(); 1340 completed = cur_ops->get_gp_seq();
1132 if (pipe_count > 1) { 1341 if (pipe_count > 1) {
1133 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts, 1342 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1134 started, completed); 1343 ts, started, completed);
1135 rcu_ftrace_dump(DUMP_ALL); 1344 rcu_ftrace_dump(DUMP_ALL);
1136 } 1345 }
1137 __this_cpu_inc(rcu_torture_count[pipe_count]); 1346 __this_cpu_inc(rcu_torture_count[pipe_count]);
1138 completed = completed - started; 1347 completed = rcutorture_seq_diff(completed, started);
1139 if (cur_ops->started)
1140 completed++;
1141 if (completed > RCU_TORTURE_PIPE_LEN) { 1348 if (completed > RCU_TORTURE_PIPE_LEN) {
1142 /* Should not happen, but... */ 1349 /* Should not happen, but... */
1143 completed = RCU_TORTURE_PIPE_LEN; 1350 completed = RCU_TORTURE_PIPE_LEN;
1144 } 1351 }
1145 __this_cpu_inc(rcu_torture_batch[completed]); 1352 __this_cpu_inc(rcu_torture_batch[completed]);
1146 preempt_enable(); 1353 preempt_enable();
1147 cur_ops->readunlock(idx); 1354 rcutorture_one_extend(&readstate, 0, trsp);
1355 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
1356 return true;
1357}
1358
1359static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1360
1361/*
1362 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
1363 * incrementing the corresponding element of the pipeline array. The
1364 * counter in the element should never be greater than 1, otherwise, the
1365 * RCU implementation is broken.
1366 */
1367static void rcu_torture_timer(struct timer_list *unused)
1368{
1369 atomic_long_inc(&n_rcu_torture_timers);
1370 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
1148 1371
1149 /* Test call_rcu() invocation from interrupt handler. */ 1372 /* Test call_rcu() invocation from interrupt handler. */
1150 if (cur_ops->call) { 1373 if (cur_ops->call) {
@@ -1164,14 +1387,8 @@ static void rcu_torture_timer(struct timer_list *unused)
1164static int 1387static int
1165rcu_torture_reader(void *arg) 1388rcu_torture_reader(void *arg)
1166{ 1389{
1167 unsigned long started;
1168 unsigned long completed;
1169 int idx;
1170 DEFINE_TORTURE_RANDOM(rand); 1390 DEFINE_TORTURE_RANDOM(rand);
1171 struct rcu_torture *p;
1172 int pipe_count;
1173 struct timer_list t; 1391 struct timer_list t;
1174 unsigned long long ts;
1175 1392
1176 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 1393 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1177 set_user_nice(current, MAX_NICE); 1394 set_user_nice(current, MAX_NICE);
@@ -1183,49 +1400,8 @@ rcu_torture_reader(void *arg)
1183 if (!timer_pending(&t)) 1400 if (!timer_pending(&t))
1184 mod_timer(&t, jiffies + 1); 1401 mod_timer(&t, jiffies + 1);
1185 } 1402 }
1186 idx = cur_ops->readlock(); 1403 if (!rcu_torture_one_read(&rand))
1187 if (cur_ops->started)
1188 started = cur_ops->started();
1189 else
1190 started = cur_ops->completed();
1191 ts = rcu_trace_clock_local();
1192 p = rcu_dereference_check(rcu_torture_current,
1193 rcu_read_lock_bh_held() ||
1194 rcu_read_lock_sched_held() ||
1195 srcu_read_lock_held(srcu_ctlp) ||
1196 torturing_tasks());
1197 if (p == NULL) {
1198 /* Wait for rcu_torture_writer to get underway */
1199 cur_ops->readunlock(idx);
1200 schedule_timeout_interruptible(HZ); 1404 schedule_timeout_interruptible(HZ);
1201 continue;
1202 }
1203 if (p->rtort_mbtest == 0)
1204 atomic_inc(&n_rcu_torture_mberror);
1205 cur_ops->read_delay(&rand);
1206 preempt_disable();
1207 pipe_count = p->rtort_pipe_count;
1208 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1209 /* Should not happen, but... */
1210 pipe_count = RCU_TORTURE_PIPE_LEN;
1211 }
1212 completed = cur_ops->completed();
1213 if (pipe_count > 1) {
1214 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1215 ts, started, completed);
1216 rcu_ftrace_dump(DUMP_ALL);
1217 }
1218 __this_cpu_inc(rcu_torture_count[pipe_count]);
1219 completed = completed - started;
1220 if (cur_ops->started)
1221 completed++;
1222 if (completed > RCU_TORTURE_PIPE_LEN) {
1223 /* Should not happen, but... */
1224 completed = RCU_TORTURE_PIPE_LEN;
1225 }
1226 __this_cpu_inc(rcu_torture_batch[completed]);
1227 preempt_enable();
1228 cur_ops->readunlock(idx);
1229 stutter_wait("rcu_torture_reader"); 1405 stutter_wait("rcu_torture_reader");
1230 } while (!torture_must_stop()); 1406 } while (!torture_must_stop());
1231 if (irqreader && cur_ops->irq_capable) { 1407 if (irqreader && cur_ops->irq_capable) {
@@ -1282,7 +1458,7 @@ rcu_torture_stats_print(void)
1282 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 1458 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1283 n_rcu_torture_boost_failure, 1459 n_rcu_torture_boost_failure,
1284 n_rcu_torture_boosts, 1460 n_rcu_torture_boosts,
1285 n_rcu_torture_timers); 1461 atomic_long_read(&n_rcu_torture_timers));
1286 torture_onoff_stats(); 1462 torture_onoff_stats();
1287 pr_cont("barrier: %ld/%ld:%ld ", 1463 pr_cont("barrier: %ld/%ld:%ld ",
1288 n_barrier_successes, 1464 n_barrier_successes,
@@ -1324,18 +1500,16 @@ rcu_torture_stats_print(void)
1324 if (rtcv_snap == rcu_torture_current_version && 1500 if (rtcv_snap == rcu_torture_current_version &&
1325 rcu_torture_current != NULL) { 1501 rcu_torture_current != NULL) {
1326 int __maybe_unused flags = 0; 1502 int __maybe_unused flags = 0;
1327 unsigned long __maybe_unused gpnum = 0; 1503 unsigned long __maybe_unused gp_seq = 0;
1328 unsigned long __maybe_unused completed = 0;
1329 1504
1330 rcutorture_get_gp_data(cur_ops->ttype, 1505 rcutorture_get_gp_data(cur_ops->ttype,
1331 &flags, &gpnum, &completed); 1506 &flags, &gp_seq);
1332 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1507 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1333 &flags, &gpnum, &completed); 1508 &flags, &gp_seq);
1334 wtp = READ_ONCE(writer_task); 1509 wtp = READ_ONCE(writer_task);
1335 pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n", 1510 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
1336 rcu_torture_writer_state_getname(), 1511 rcu_torture_writer_state_getname(),
1337 rcu_torture_writer_state, 1512 rcu_torture_writer_state, gp_seq, flags,
1338 gpnum, completed, flags,
1339 wtp == NULL ? ~0UL : wtp->state, 1513 wtp == NULL ? ~0UL : wtp->state,
1340 wtp == NULL ? -1 : (int)task_cpu(wtp)); 1514 wtp == NULL ? -1 : (int)task_cpu(wtp));
1341 if (!splatted && wtp) { 1515 if (!splatted && wtp) {
@@ -1365,7 +1539,7 @@ rcu_torture_stats(void *arg)
1365 return 0; 1539 return 0;
1366} 1540}
1367 1541
1368static inline void 1542static void
1369rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1543rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1370{ 1544{
1371 pr_alert("%s" TORTURE_FLAG 1545 pr_alert("%s" TORTURE_FLAG
@@ -1397,6 +1571,7 @@ static int rcutorture_booster_cleanup(unsigned int cpu)
1397 mutex_lock(&boost_mutex); 1571 mutex_lock(&boost_mutex);
1398 t = boost_tasks[cpu]; 1572 t = boost_tasks[cpu];
1399 boost_tasks[cpu] = NULL; 1573 boost_tasks[cpu] = NULL;
1574 rcu_torture_enable_rt_throttle();
1400 mutex_unlock(&boost_mutex); 1575 mutex_unlock(&boost_mutex);
1401 1576
1402 /* This must be outside of the mutex, otherwise deadlock! */ 1577 /* This must be outside of the mutex, otherwise deadlock! */
@@ -1413,6 +1588,7 @@ static int rcutorture_booster_init(unsigned int cpu)
1413 1588
1414 /* Don't allow time recalculation while creating a new task. */ 1589 /* Don't allow time recalculation while creating a new task. */
1415 mutex_lock(&boost_mutex); 1590 mutex_lock(&boost_mutex);
1591 rcu_torture_disable_rt_throttle();
1416 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 1592 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1417 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, 1593 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1418 cpu_to_node(cpu), 1594 cpu_to_node(cpu),
@@ -1446,7 +1622,7 @@ static int rcu_torture_stall(void *args)
1446 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 1622 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1447 } 1623 }
1448 if (!kthread_should_stop()) { 1624 if (!kthread_should_stop()) {
1449 stop_at = get_seconds() + stall_cpu; 1625 stop_at = ktime_get_seconds() + stall_cpu;
1450 /* RCU CPU stall is expected behavior in following code. */ 1626 /* RCU CPU stall is expected behavior in following code. */
1451 rcu_read_lock(); 1627 rcu_read_lock();
1452 if (stall_cpu_irqsoff) 1628 if (stall_cpu_irqsoff)
@@ -1455,7 +1631,8 @@ static int rcu_torture_stall(void *args)
1455 preempt_disable(); 1631 preempt_disable();
1456 pr_alert("rcu_torture_stall start on CPU %d.\n", 1632 pr_alert("rcu_torture_stall start on CPU %d.\n",
1457 smp_processor_id()); 1633 smp_processor_id());
1458 while (ULONG_CMP_LT(get_seconds(), stop_at)) 1634 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1635 stop_at))
1459 continue; /* Induce RCU CPU stall warning. */ 1636 continue; /* Induce RCU CPU stall warning. */
1460 if (stall_cpu_irqsoff) 1637 if (stall_cpu_irqsoff)
1461 local_irq_enable(); 1638 local_irq_enable();
@@ -1546,8 +1723,9 @@ static int rcu_torture_barrier(void *arg)
1546 atomic_read(&barrier_cbs_invoked), 1723 atomic_read(&barrier_cbs_invoked),
1547 n_barrier_cbs); 1724 n_barrier_cbs);
1548 WARN_ON_ONCE(1); 1725 WARN_ON_ONCE(1);
1726 } else {
1727 n_barrier_successes++;
1549 } 1728 }
1550 n_barrier_successes++;
1551 schedule_timeout_interruptible(HZ / 10); 1729 schedule_timeout_interruptible(HZ / 10);
1552 } while (!torture_must_stop()); 1730 } while (!torture_must_stop());
1553 torture_kthread_stopping("rcu_torture_barrier"); 1731 torture_kthread_stopping("rcu_torture_barrier");
@@ -1610,17 +1788,39 @@ static void rcu_torture_barrier_cleanup(void)
1610 } 1788 }
1611} 1789}
1612 1790
1791static bool rcu_torture_can_boost(void)
1792{
1793 static int boost_warn_once;
1794 int prio;
1795
1796 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
1797 return false;
1798
1799 prio = rcu_get_gp_kthreads_prio();
1800 if (!prio)
1801 return false;
1802
1803 if (prio < 2) {
1804 if (boost_warn_once == 1)
1805 return false;
1806
1807 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
1808 boost_warn_once = 1;
1809 return false;
1810 }
1811
1812 return true;
1813}
1814
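In practice boost testing now needs both an eligible configuration and sufficiently boosted grace-period kthreads: booting with something like rcutorture.test_boost=2 rcutree.kthread_prio=2 (parameter names per kernel-parameters.txt) satisfies both checks, while a kthread_prio of 0 or 1 makes rcu_torture_can_boost() return false and the boost test is skipped with the one-time warning above.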
1613static enum cpuhp_state rcutor_hp; 1815static enum cpuhp_state rcutor_hp;
1614 1816
1615static void 1817static void
1616rcu_torture_cleanup(void) 1818rcu_torture_cleanup(void)
1617{ 1819{
1618 int flags = 0; 1820 int flags = 0;
1619 unsigned long gpnum = 0; 1821 unsigned long gp_seq = 0;
1620 unsigned long completed = 0;
1621 int i; 1822 int i;
1622 1823
1623 rcutorture_record_test_transition();
1624 if (torture_cleanup_begin()) { 1824 if (torture_cleanup_begin()) {
1625 if (cur_ops->cb_barrier != NULL) 1825 if (cur_ops->cb_barrier != NULL)
1626 cur_ops->cb_barrier(); 1826 cur_ops->cb_barrier();
@@ -1648,17 +1848,15 @@ rcu_torture_cleanup(void)
1648 fakewriter_tasks = NULL; 1848 fakewriter_tasks = NULL;
1649 } 1849 }
1650 1850
1651 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed); 1851 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
1652 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1852 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
1653 &flags, &gpnum, &completed); 1853 pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
1654 pr_alert("%s: End-test grace-period state: g%lu c%lu f%#x\n", 1854 cur_ops->name, gp_seq, flags);
1655 cur_ops->name, gpnum, completed, flags);
1656 torture_stop_kthread(rcu_torture_stats, stats_task); 1855 torture_stop_kthread(rcu_torture_stats, stats_task);
1657 torture_stop_kthread(rcu_torture_fqs, fqs_task); 1856 torture_stop_kthread(rcu_torture_fqs, fqs_task);
1658 for (i = 0; i < ncbflooders; i++) 1857 for (i = 0; i < ncbflooders; i++)
1659 torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]); 1858 torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
1660 if ((test_boost == 1 && cur_ops->can_boost) || 1859 if (rcu_torture_can_boost())
1661 test_boost == 2)
1662 cpuhp_remove_state(rcutor_hp); 1860 cpuhp_remove_state(rcutor_hp);
1663 1861
1664 /* 1862 /*
@@ -1746,7 +1944,7 @@ rcu_torture_init(void)
1746 int firsterr = 0; 1944 int firsterr = 0;
1747 static struct rcu_torture_ops *torture_ops[] = { 1945 static struct rcu_torture_ops *torture_ops[] = {
1748 &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 1946 &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
1749 &sched_ops, &tasks_ops, 1947 &busted_srcud_ops, &sched_ops, &tasks_ops,
1750 }; 1948 };
1751 1949
1752 if (!torture_init_begin(torture_type, verbose)) 1950 if (!torture_init_begin(torture_type, verbose))
@@ -1763,8 +1961,8 @@ rcu_torture_init(void)
1763 torture_type); 1961 torture_type);
1764 pr_alert("rcu-torture types:"); 1962 pr_alert("rcu-torture types:");
1765 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 1963 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1766 pr_alert(" %s", torture_ops[i]->name); 1964 pr_cont(" %s", torture_ops[i]->name);
1767 pr_alert("\n"); 1965 pr_cont("\n");
1768 firsterr = -EINVAL; 1966 firsterr = -EINVAL;
1769 goto unwind; 1967 goto unwind;
1770 } 1968 }
@@ -1882,8 +2080,7 @@ rcu_torture_init(void)
1882 test_boost_interval = 1; 2080 test_boost_interval = 1;
1883 if (test_boost_duration < 2) 2081 if (test_boost_duration < 2)
1884 test_boost_duration = 2; 2082 test_boost_duration = 2;
1885 if ((test_boost == 1 && cur_ops->can_boost) || 2083 if (rcu_torture_can_boost()) {
1886 test_boost == 2) {
1887 2084
1888 boost_starttime = jiffies + test_boost_interval * HZ; 2085 boost_starttime = jiffies + test_boost_interval * HZ;
1889 2086
@@ -1897,7 +2094,7 @@ rcu_torture_init(void)
1897 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2094 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
1898 if (firsterr) 2095 if (firsterr)
1899 goto unwind; 2096 goto unwind;
1900 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ); 2097 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
1901 if (firsterr) 2098 if (firsterr)
1902 goto unwind; 2099 goto unwind;
1903 firsterr = rcu_torture_stall_init(); 2100 firsterr = rcu_torture_stall_init();
@@ -1926,7 +2123,6 @@ rcu_torture_init(void)
1926 goto unwind; 2123 goto unwind;
1927 } 2124 }
1928 } 2125 }
1929 rcutorture_record_test_transition();
1930 torture_init_end(); 2126 torture_init_end();
1931 return 0; 2127 return 0;
1932 2128
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index b4123d7a2cec..6c9866a854b1 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -26,6 +26,8 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) "rcu: " fmt
30
29#include <linux/export.h> 31#include <linux/export.h>
30#include <linux/mutex.h> 32#include <linux/mutex.h>
31#include <linux/percpu.h> 33#include <linux/percpu.h>
@@ -390,7 +392,8 @@ void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
390 } 392 }
391 if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || 393 if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
392 WARN_ON(srcu_readers_active(sp))) { 394 WARN_ON(srcu_readers_active(sp))) {
393 pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); 395 pr_info("%s: Active srcu_struct %p state: %d\n",
396 __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
394 return; /* Caller forgot to stop doing call_srcu()? */ 397 return; /* Caller forgot to stop doing call_srcu()? */
395 } 398 }
396 free_percpu(sp->sda); 399 free_percpu(sp->sda);
@@ -641,6 +644,9 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
641 * period s. Losers must either ensure that their desired grace-period 644 * period s. Losers must either ensure that their desired grace-period
642 * number is recorded on at least their leaf srcu_node structure, or they 645 * number is recorded on at least their leaf srcu_node structure, or they
643 * must take steps to invoke their own callbacks. 646 * must take steps to invoke their own callbacks.
647 *
648 * Note that this function also does the work of srcu_funnel_exp_start(),
649 * in some cases by directly invoking it.
644 */ 650 */
645static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, 651static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
646 unsigned long s, bool do_norm) 652 unsigned long s, bool do_norm)
@@ -823,17 +829,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
823 * more than one CPU, this means that when "func()" is invoked, each CPU 829 * more than one CPU, this means that when "func()" is invoked, each CPU
824 * is guaranteed to have executed a full memory barrier since the end of 830 * is guaranteed to have executed a full memory barrier since the end of
825 * its last corresponding SRCU read-side critical section whose beginning 831 * its last corresponding SRCU read-side critical section whose beginning
826 * preceded the call to call_rcu(). It also means that each CPU executing 832 * preceded the call to call_srcu(). It also means that each CPU executing
827 * an SRCU read-side critical section that continues beyond the start of 833 * an SRCU read-side critical section that continues beyond the start of
828 * "func()" must have executed a memory barrier after the call_rcu() 834 * "func()" must have executed a memory barrier after the call_srcu()
829 * but before the beginning of that SRCU read-side critical section. 835 * but before the beginning of that SRCU read-side critical section.
830 * Note that these guarantees include CPUs that are offline, idle, or 836 * Note that these guarantees include CPUs that are offline, idle, or
831 * executing in user mode, as well as CPUs that are executing in the kernel. 837 * executing in user mode, as well as CPUs that are executing in the kernel.
832 * 838 *
833 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the 839 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
834 * resulting SRCU callback function "func()", then both CPU A and CPU 840 * resulting SRCU callback function "func()", then both CPU A and CPU
835 * B are guaranteed to execute a full memory barrier during the time 841 * B are guaranteed to execute a full memory barrier during the time
836 * interval between the call to call_rcu() and the invocation of "func()". 842 * interval between the call to call_srcu() and the invocation of "func()".
837 * This guarantee applies even if CPU A and CPU B are the same CPU (but 843 * This guarantee applies even if CPU A and CPU B are the same CPU (but
838 * again only if the system has more than one CPU). 844 * again only if the system has more than one CPU).
839 * 845 *
@@ -1246,13 +1252,12 @@ static void process_srcu(struct work_struct *work)
1246 1252
1247void srcutorture_get_gp_data(enum rcutorture_type test_type, 1253void srcutorture_get_gp_data(enum rcutorture_type test_type,
1248 struct srcu_struct *sp, int *flags, 1254 struct srcu_struct *sp, int *flags,
1249 unsigned long *gpnum, unsigned long *completed) 1255 unsigned long *gp_seq)
1250{ 1256{
1251 if (test_type != SRCU_FLAVOR) 1257 if (test_type != SRCU_FLAVOR)
1252 return; 1258 return;
1253 *flags = 0; 1259 *flags = 0;
1254 *completed = rcu_seq_ctr(sp->srcu_gp_seq); 1260 *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
1255 *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
1256} 1261}
1257EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); 1262EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1258 1263
@@ -1263,16 +1268,17 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
1263 unsigned long s0 = 0, s1 = 0; 1268 unsigned long s0 = 0, s1 = 0;
1264 1269
1265 idx = sp->srcu_idx & 0x1; 1270 idx = sp->srcu_idx & 0x1;
1266 pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx); 1271 pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
1272 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
1267 for_each_possible_cpu(cpu) { 1273 for_each_possible_cpu(cpu) {
1268 unsigned long l0, l1; 1274 unsigned long l0, l1;
1269 unsigned long u0, u1; 1275 unsigned long u0, u1;
1270 long c0, c1; 1276 long c0, c1;
1271 struct srcu_data *counts; 1277 struct srcu_data *sdp;
1272 1278
1273 counts = per_cpu_ptr(sp->sda, cpu); 1279 sdp = per_cpu_ptr(sp->sda, cpu);
1274 u0 = counts->srcu_unlock_count[!idx]; 1280 u0 = sdp->srcu_unlock_count[!idx];
1275 u1 = counts->srcu_unlock_count[idx]; 1281 u1 = sdp->srcu_unlock_count[idx];
1276 1282
1277 /* 1283 /*
1278 * Make sure that a lock is always counted if the corresponding 1284 * Make sure that a lock is always counted if the corresponding
@@ -1280,12 +1286,13 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
1280 */ 1286 */
1281 smp_rmb(); 1287 smp_rmb();
1282 1288
1283 l0 = counts->srcu_lock_count[!idx]; 1289 l0 = sdp->srcu_lock_count[!idx];
1284 l1 = counts->srcu_lock_count[idx]; 1290 l1 = sdp->srcu_lock_count[idx];
1285 1291
1286 c0 = l0 - u0; 1292 c0 = l0 - u0;
1287 c1 = l1 - u1; 1293 c1 = l1 - u1;
1288 pr_cont(" %d(%ld,%ld)", cpu, c0, c1); 1294 pr_cont(" %d(%ld,%ld %1p)",
1295 cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
1289 s0 += c0; 1296 s0 += c0;
1290 s1 += c1; 1297 s1 += c1;
1291 } 1298 }
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index a64eee0db39e..befc9321a89c 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -122,10 +122,8 @@ void rcu_check_callbacks(int user)
122{ 122{
123 if (user) 123 if (user)
124 rcu_sched_qs(); 124 rcu_sched_qs();
125 else if (!in_softirq()) 125 if (user || !in_softirq())
126 rcu_bh_qs(); 126 rcu_bh_qs();
127 if (user)
128 rcu_note_voluntary_context_switch(current);
129} 127}
130 128
131/* 129/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 91f888d3b23a..0b760c1369f7 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -27,6 +27,9 @@
27 * For detailed explanation of Read-Copy Update mechanism see - 27 * For detailed explanation of Read-Copy Update mechanism see -
28 * Documentation/RCU 28 * Documentation/RCU
29 */ 29 */
30
31#define pr_fmt(fmt) "rcu: " fmt
32
30#include <linux/types.h> 33#include <linux/types.h>
31#include <linux/kernel.h> 34#include <linux/kernel.h>
32#include <linux/init.h> 35#include <linux/init.h>
@@ -95,13 +98,13 @@ struct rcu_state sname##_state = { \
95 .rda = &sname##_data, \ 98 .rda = &sname##_data, \
96 .call = cr, \ 99 .call = cr, \
97 .gp_state = RCU_GP_IDLE, \ 100 .gp_state = RCU_GP_IDLE, \
98 .gpnum = 0UL - 300UL, \ 101 .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
99 .completed = 0UL - 300UL, \
100 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ 102 .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
101 .name = RCU_STATE_NAME(sname), \ 103 .name = RCU_STATE_NAME(sname), \
102 .abbr = sabbr, \ 104 .abbr = sabbr, \
103 .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \ 105 .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
104 .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \ 106 .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
107 .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
105} 108}
106 109
107RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched); 110RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -155,6 +158,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
155 */ 158 */
156static int rcu_scheduler_fully_active __read_mostly; 159static int rcu_scheduler_fully_active __read_mostly;
157 160
161static void
162rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
163 struct rcu_node *rnp, unsigned long gps, unsigned long flags);
158static void rcu_init_new_rnp(struct rcu_node *rnp_leaf); 164static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
159static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf); 165static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
160static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 166static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -177,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
177static int gp_cleanup_delay; 183static int gp_cleanup_delay;
178module_param(gp_cleanup_delay, int, 0444); 184module_param(gp_cleanup_delay, int, 0444);
179 185
186/* Retrieve the RCU kthread priority for rcutorture */
187int rcu_get_gp_kthreads_prio(void)
188{
189 return kthread_prio;
190}
191EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
192
180/* 193/*
181 * Number of grace periods between delays, normalized by the duration of 194 * Number of grace periods between delays, normalized by the duration of
182 * the delay. The longer the delay, the more the grace periods between 195 * the delay. The longer the delay, the more the grace periods between
@@ -189,18 +202,6 @@ module_param(gp_cleanup_delay, int, 0444);
189#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */ 202#define PER_RCU_NODE_PERIOD 3 /* Number of grace periods between delays. */
190 203
191/* 204/*
192 * Track the rcutorture test sequence number and the update version
193 * number within a given test. The rcutorture_testseq is incremented
194 * on every rcutorture module load and unload, so has an odd value
195 * when a test is running. The rcutorture_vernum is set to zero
196 * when rcutorture starts and is incremented on each rcutorture update.
197 * These variables enable correlating rcutorture output with the
198 * RCU tracing information.
199 */
200unsigned long rcutorture_testseq;
201unsigned long rcutorture_vernum;
202
203/*
204 * Compute the mask of online CPUs for the specified rcu_node structure. 205 * Compute the mask of online CPUs for the specified rcu_node structure.
205 * This will not be stable unless the rcu_node structure's ->lock is 206 * This will not be stable unless the rcu_node structure's ->lock is
206 * held, but the bit corresponding to the current CPU will be stable 207 * held, but the bit corresponding to the current CPU will be stable
@@ -218,7 +219,7 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
218 */ 219 */
219static int rcu_gp_in_progress(struct rcu_state *rsp) 220static int rcu_gp_in_progress(struct rcu_state *rsp)
220{ 221{
221 return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum); 222 return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
222} 223}
223 224
224/* 225/*
@@ -233,7 +234,7 @@ void rcu_sched_qs(void)
233 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) 234 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
234 return; 235 return;
235 trace_rcu_grace_period(TPS("rcu_sched"), 236 trace_rcu_grace_period(TPS("rcu_sched"),
236 __this_cpu_read(rcu_sched_data.gpnum), 237 __this_cpu_read(rcu_sched_data.gp_seq),
237 TPS("cpuqs")); 238 TPS("cpuqs"));
238 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false); 239 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
239 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) 240 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
@@ -248,7 +249,7 @@ void rcu_bh_qs(void)
248 RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); 249 RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
249 if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { 250 if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
250 trace_rcu_grace_period(TPS("rcu_bh"), 251 trace_rcu_grace_period(TPS("rcu_bh"),
251 __this_cpu_read(rcu_bh_data.gpnum), 252 __this_cpu_read(rcu_bh_data.gp_seq),
252 TPS("cpuqs")); 253 TPS("cpuqs"));
253 __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); 254 __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
254 } 255 }
@@ -380,20 +381,6 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
380} 381}
381 382
382/* 383/*
383 * Do a double-increment of the ->dynticks counter to emulate a
384 * momentary idle-CPU quiescent state.
385 */
386static void rcu_dynticks_momentary_idle(void)
387{
388 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
389 int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
390 &rdtp->dynticks);
391
392 /* It is illegal to call this from idle state. */
393 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
394}
395
396/*
397 * Set the special (bottom) bit of the specified CPU so that it 384 * Set the special (bottom) bit of the specified CPU so that it
398 * will take special action (such as flushing its TLB) on the 385 * will take special action (such as flushing its TLB) on the
399 * next exit from an extended quiescent state. Returns true if 386 * next exit from an extended quiescent state. Returns true if
@@ -424,12 +411,17 @@ bool rcu_eqs_special_set(int cpu)
424 * 411 *
425 * We inform the RCU core by emulating a zero-duration dyntick-idle period. 412 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
426 * 413 *
427 * The caller must have disabled interrupts. 414 * The caller must have disabled interrupts and must not be idle.
428 */ 415 */
429static void rcu_momentary_dyntick_idle(void) 416static void rcu_momentary_dyntick_idle(void)
430{ 417{
418 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
419 int special;
420
431 raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false); 421 raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
432 rcu_dynticks_momentary_idle(); 422 special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
423 /* It is illegal to call this from idle state. */
424 WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
433} 425}
434 426
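The open-coded double increment works because of how ->dynticks is encoded; assuming the tree.h definitions of this era (not shown in this diff), bit 0 is a special-action flag and the counter proper advances in units of RCU_DYNTICK_CTRL_CTR, so adding 2 * RCU_DYNTICK_CTRL_CTR is two ticks: one logical entry into idle and one exit:

    /* Assumed encoding, for illustration only. */
    #define RCU_DYNTICK_CTRL_MASK	0x1	/* Special-action flag. */
    #define RCU_DYNTICK_CTRL_CTR	(RCU_DYNTICK_CTRL_MASK + 1)	/* Counter LSB. */

    static bool dynticks_in_eqs_sketch(int snap)
    {
            /* Counter bit clear means an extended quiescent state. */
            return !(snap & RCU_DYNTICK_CTRL_CTR);
    }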
435/* 427/*
@@ -451,7 +443,7 @@ void rcu_note_context_switch(bool preempt)
451 rcu_momentary_dyntick_idle(); 443 rcu_momentary_dyntick_idle();
452 this_cpu_inc(rcu_dynticks.rcu_qs_ctr); 444 this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
453 if (!preempt) 445 if (!preempt)
454 rcu_note_voluntary_context_switch_lite(current); 446 rcu_tasks_qs(current);
455out: 447out:
456 trace_rcu_utilization(TPS("End context switch")); 448 trace_rcu_utilization(TPS("End context switch"));
457 barrier(); /* Avoid RCU read-side critical sections leaking up. */ 449 barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -513,8 +505,38 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
513static ulong jiffies_till_next_fqs = ULONG_MAX; 505static ulong jiffies_till_next_fqs = ULONG_MAX;
514static bool rcu_kick_kthreads; 506static bool rcu_kick_kthreads;
515 507
516module_param(jiffies_till_first_fqs, ulong, 0644); 508static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
517module_param(jiffies_till_next_fqs, ulong, 0644); 509{
510 ulong j;
511 int ret = kstrtoul(val, 0, &j);
512
513 if (!ret)
514 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
515 return ret;
516}
517
518static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
519{
520 ulong j;
521 int ret = kstrtoul(val, 0, &j);
522
523 if (!ret)
524 WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
525 return ret;
526}
527
528static struct kernel_param_ops first_fqs_jiffies_ops = {
529 .set = param_set_first_fqs_jiffies,
530 .get = param_get_ulong,
531};
532
533static struct kernel_param_ops next_fqs_jiffies_ops = {
534 .set = param_set_next_fqs_jiffies,
535 .get = param_get_ulong,
536};
537
538module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
539module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
518module_param(rcu_kick_kthreads, bool, 0644); 540module_param(rcu_kick_kthreads, bool, 0644);
519 541
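With module_param_cb(), out-of-range writes are now clamped at store time rather than at each use: assuming the usual sysfs layout for built-in parameters, writing an over-large value to /sys/module/rcutree/parameters/jiffies_till_first_fqs stores HZ, and writing 0 to jiffies_till_next_fqs stores 1, since a zero wait between force-quiescent-state scans would amount to a busy loop.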
520/* 542/*
@@ -529,58 +551,31 @@ static void force_quiescent_state(struct rcu_state *rsp);
529static int rcu_pending(void); 551static int rcu_pending(void);
530 552
531/* 553/*
532 * Return the number of RCU batches started thus far for debug & stats. 554 * Return the number of RCU GPs completed thus far for debug & stats.
533 */ 555 */
534unsigned long rcu_batches_started(void) 556unsigned long rcu_get_gp_seq(void)
535{ 557{
536 return rcu_state_p->gpnum; 558 return READ_ONCE(rcu_state_p->gp_seq);
537} 559}
538EXPORT_SYMBOL_GPL(rcu_batches_started); 560EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
539 561
540/* 562/*
541 * Return the number of RCU-sched batches started thus far for debug & stats. 563 * Return the number of RCU-sched GPs completed thus far for debug & stats.
542 */ 564 */
543unsigned long rcu_batches_started_sched(void) 565unsigned long rcu_sched_get_gp_seq(void)
544{ 566{
545 return rcu_sched_state.gpnum; 567 return READ_ONCE(rcu_sched_state.gp_seq);
546} 568}
547EXPORT_SYMBOL_GPL(rcu_batches_started_sched); 569EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
548 570
549/* 571/*
550 * Return the number of RCU BH batches started thus far for debug & stats. 572 * Return the number of RCU-bh GPs completed thus far for debug & stats.
551 */ 573 */
552unsigned long rcu_batches_started_bh(void) 574unsigned long rcu_bh_get_gp_seq(void)
553{ 575{
554 return rcu_bh_state.gpnum; 576 return READ_ONCE(rcu_bh_state.gp_seq);
555} 577}
556EXPORT_SYMBOL_GPL(rcu_batches_started_bh); 578EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
557
558/*
559 * Return the number of RCU batches completed thus far for debug & stats.
560 */
561unsigned long rcu_batches_completed(void)
562{
563 return rcu_state_p->completed;
564}
565EXPORT_SYMBOL_GPL(rcu_batches_completed);
566
567/*
568 * Return the number of RCU-sched batches completed thus far for debug & stats.
569 */
570unsigned long rcu_batches_completed_sched(void)
571{
572 return rcu_sched_state.completed;
573}
574EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
575
576/*
577 * Return the number of RCU BH batches completed thus far for debug & stats.
578 */
579unsigned long rcu_batches_completed_bh(void)
580{
581 return rcu_bh_state.completed;
582}
583EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
584 579
585/* 580/*
586 * Return the number of RCU expedited batches completed thus far for 581 * Return the number of RCU expedited batches completed thus far for
@@ -636,35 +631,42 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
636 */ 631 */
637void show_rcu_gp_kthreads(void) 632void show_rcu_gp_kthreads(void)
638{ 633{
634 int cpu;
635 struct rcu_data *rdp;
636 struct rcu_node *rnp;
639 struct rcu_state *rsp; 637 struct rcu_state *rsp;
640 638
641 for_each_rcu_flavor(rsp) { 639 for_each_rcu_flavor(rsp) {
642 pr_info("%s: wait state: %d ->state: %#lx\n", 640 pr_info("%s: wait state: %d ->state: %#lx\n",
643 rsp->name, rsp->gp_state, rsp->gp_kthread->state); 641 rsp->name, rsp->gp_state, rsp->gp_kthread->state);
642 rcu_for_each_node_breadth_first(rsp, rnp) {
643 if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
644 continue;
645 pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
646 rnp->grplo, rnp->grphi, rnp->gp_seq,
647 rnp->gp_seq_needed);
648 if (!rcu_is_leaf_node(rnp))
649 continue;
650 for_each_leaf_node_possible_cpu(rnp, cpu) {
651 rdp = per_cpu_ptr(rsp->rda, cpu);
652 if (rdp->gpwrap ||
653 ULONG_CMP_GE(rsp->gp_seq,
654 rdp->gp_seq_needed))
655 continue;
656 pr_info("\tcpu %d ->gp_seq_needed %lu\n",
657 cpu, rdp->gp_seq_needed);
658 }
659 }
644 /* sched_show_task(rsp->gp_kthread); */ 660 /* sched_show_task(rsp->gp_kthread); */
645 } 661 }
646} 662}
647EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads); 663EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
648 664
649/* 665/*
650 * Record the number of times rcutorture tests have been initiated and
651 * terminated. This information allows the debugfs tracing stats to be
652 * correlated to the rcutorture messages, even when the rcutorture module
653 * is being repeatedly loaded and unloaded. In other words, we cannot
654 * store this state in rcutorture itself.
655 */
656void rcutorture_record_test_transition(void)
657{
658 rcutorture_testseq++;
659 rcutorture_vernum = 0;
660}
661EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
662
663/*
664 * Send along grace-period-related data for rcutorture diagnostics. 666 * Send along grace-period-related data for rcutorture diagnostics.
665 */ 667 */
666void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 668void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
667 unsigned long *gpnum, unsigned long *completed) 669 unsigned long *gp_seq)
668{ 670{
669 struct rcu_state *rsp = NULL; 671 struct rcu_state *rsp = NULL;
670 672
@@ -684,23 +686,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
684 if (rsp == NULL) 686 if (rsp == NULL)
685 return; 687 return;
686 *flags = READ_ONCE(rsp->gp_flags); 688 *flags = READ_ONCE(rsp->gp_flags);
687 *gpnum = READ_ONCE(rsp->gpnum); 689 *gp_seq = rcu_seq_current(&rsp->gp_seq);
688 *completed = READ_ONCE(rsp->completed);
689} 690}
690EXPORT_SYMBOL_GPL(rcutorture_get_gp_data); 691EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
691 692
692/* 693/*
693 * Record the number of writer passes through the current rcutorture test.
694 * This is also used to correlate debugfs tracing stats with the rcutorture
695 * messages.
696 */
697void rcutorture_record_progress(unsigned long vernum)
698{
699 rcutorture_vernum++;
700}
701EXPORT_SYMBOL_GPL(rcutorture_record_progress);
702
703/*
704 * Return the root node of the specified rcu_state structure. 694 * Return the root node of the specified rcu_state structure.
705 */ 695 */
706static struct rcu_node *rcu_get_root(struct rcu_state *rsp) 696static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
@@ -1059,41 +1049,41 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
1059#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) 1049#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1060 1050
1061/* 1051/*
1062 * Is the current CPU online? Disable preemption to avoid false positives 1052 * Is the current CPU online as far as RCU is concerned?
1063 * that could otherwise happen due to the current CPU number being sampled,
1064 * this task being preempted, its old CPU being taken offline, resuming
1065 * on some other CPU, then determining that its old CPU is now offline.
1066 * It is OK to use RCU on an offline processor during initial boot, hence
1067 * the check for rcu_scheduler_fully_active. Note also that it is OK
1068 * for a CPU coming online to use RCU for one jiffy prior to marking itself
1069 * online in the cpu_online_mask. Similarly, it is OK for a CPU going
1070 * offline to continue to use RCU for one jiffy after marking itself
1071 * offline in the cpu_online_mask. This leniency is necessary given the
1072 * non-atomic nature of the online and offline processing, for example,
1073 * the fact that a CPU enters the scheduler after completing the teardown
1074 * of the CPU.
1075 * 1053 *
1076 * This is also why RCU internally marks CPUs online during in the 1054 * Disable preemption to avoid false positives that could otherwise
1077 * preparation phase and offline after the CPU has been taken down. 1055 * happen due to the current CPU number being sampled, this task being
1056 * preempted, its old CPU being taken offline, resuming on some other CPU,
1057 * then determining that its old CPU is now offline. Because there are
1058 * multiple flavors of RCU, and because this function can be called in the
1059 * midst of updating the flavors while a given CPU is coming online or going
1060 * offline, it is necessary to check all flavors. If any of the flavors
1061 * believe that given CPU is online, it is considered to be online.
1078 * 1062 *
1079 * Disable checking if in an NMI handler because we cannot safely report 1063 * Disable checking if in an NMI handler because we cannot safely
1080 * errors from NMI handlers anyway. 1064 * report errors from NMI handlers anyway. In addition, it is OK to use
1065 * RCU on an offline processor during initial boot, hence the check for
1066 * rcu_scheduler_fully_active.
1081 */ 1067 */
1082bool rcu_lockdep_current_cpu_online(void) 1068bool rcu_lockdep_current_cpu_online(void)
1083{ 1069{
1084 struct rcu_data *rdp; 1070 struct rcu_data *rdp;
1085 struct rcu_node *rnp; 1071 struct rcu_node *rnp;
1086 bool ret; 1072 struct rcu_state *rsp;
1087 1073
1088 if (in_nmi()) 1074 if (in_nmi() || !rcu_scheduler_fully_active)
1089 return true; 1075 return true;
1090 preempt_disable(); 1076 preempt_disable();
1091 rdp = this_cpu_ptr(&rcu_sched_data); 1077 for_each_rcu_flavor(rsp) {
1092 rnp = rdp->mynode; 1078 rdp = this_cpu_ptr(rsp->rda);
1093 ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) || 1079 rnp = rdp->mynode;
1094 !rcu_scheduler_fully_active; 1080 if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
1081 preempt_enable();
1082 return true;
1083 }
1084 }
1095 preempt_enable(); 1085 preempt_enable();
1096 return ret; 1086 return false;
1097} 1087}
1098EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online); 1088EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1099 1089
@@ -1115,17 +1105,18 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 /*
  * We are reporting a quiescent state on behalf of some other CPU, so
  * it is our responsibility to check for and handle potential overflow
- * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
  * After all, the CPU might be in deep idle state, and thus executing no
  * code whatsoever.
  */
 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
-	if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+	if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
+			 rnp->gp_seq))
 		WRITE_ONCE(rdp->gpwrap, true);
-	if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
-		rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+	if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
+		rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
 }
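
rcu_gpnum_ovf() leans on ULONG_CMP_LT() staying meaningful when the sequence counter wraps. A freestanding demonstration; the macro body is quoted from include/linux/rcupdate.h as best I can tell, so treat it as illustrative:

#include <stdio.h>
#include <limits.h>

/* Wrap-safe "a < b" for free-running unsigned counters. */
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long before = ULONG_MAX - 1;	/* Counter just before wrap... */
	unsigned long after = before + 3;	/* ...and shortly after (wraps to 1). */

	/* A naive "<" inverts after the wrap; the macro does not. */
	printf("naive <:      %d\n", before < after);			/* 0 (wrong) */
	printf("ULONG_CMP_LT: %d\n", ULONG_CMP_LT(before, after));	/* 1 (right) */
	return 0;
}
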
 
 /*
@@ -1137,7 +1128,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
 	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
 	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 		rcu_gpnum_ovf(rdp->mynode, rdp);
 		return 1;
 	}
@@ -1159,7 +1150,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
 	rnp = rdp->mynode;
 	raw_spin_lock_rcu_node(rnp);
 	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-		rdp->rcu_iw_gpnum = rnp->gpnum;
+		rdp->rcu_iw_gp_seq = rnp->gp_seq;
 		rdp->rcu_iw_pending = false;
 	}
 	raw_spin_unlock_rcu_node(rnp);
@@ -1187,7 +1178,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * of the current RCU grace period.
 	 */
 	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
 		rdp->dynticks_fqs++;
 		rcu_gpnum_ovf(rnp, rdp);
 		return 1;
@@ -1203,8 +1194,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
 	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
 	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
-	    READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+	    rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
+		trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
 		rcu_gpnum_ovf(rnp, rdp);
 		return 1;
 	} else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
@@ -1212,12 +1203,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		smp_store_release(ruqp, true);
 	}
 
-	/* Check for the CPU being offline. */
-	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
-		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
-		rdp->offline_fqs++;
-		rcu_gpnum_ovf(rnp, rdp);
-		return 1;
+	/* If waiting too long on an offline CPU, complain. */
+	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
+	    time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+		bool onl;
+		struct rcu_node *rnp1;
+
+		WARN_ON(1); /* Offline CPUs are supposed to report QS! */
+		pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+			__func__, rnp->grplo, rnp->grphi, rnp->level,
+			(long)rnp->gp_seq, (long)rnp->completedqs);
+		for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+			pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
+				__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
+		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+		pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
+			__func__, rdp->cpu, ".o"[onl],
+			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+		return 1; /* Break things loose after complaining. */
 	}
 
 	/*
@@ -1256,11 +1260,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
 		resched_cpu(rdp->cpu);
 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
-		    !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 		    (rnp->ffmask & rdp->grpmask)) {
 			init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
 			rdp->rcu_iw_pending = true;
-			rdp->rcu_iw_gpnum = rnp->gpnum;
+			rdp->rcu_iw_gp_seq = rnp->gp_seq;
 			irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
 		}
 	}
@@ -1274,9 +1278,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	unsigned long j1;
 
 	rsp->gp_start = j;
-	smp_wmb(); /* Record start time before stall time. */
 	j1 = rcu_jiffies_till_stall_check();
-	WRITE_ONCE(rsp->jiffies_stall, j + j1);
+	/* Record ->gp_start before ->jiffies_stall. */
+	smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
 	rsp->jiffies_resched = j + j1 / 2;
 	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
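
The smp_store_release() above makes ->jiffies_stall the publication point for ->gp_start: any reader that sees the new stall deadline is guaranteed to see the matching start time. A C11 model of that pairing, with hypothetical writer()/reader() standing in for the GP kthread and check_cpu_stall():

#include <stdatomic.h>

static unsigned long gp_start;		/* Written before publication. */
static atomic_ulong jiffies_stall;	/* Publication point. */

static void writer(unsigned long j, unsigned long j1)
{
	gp_start = j;			/* Plain store, ordered by... */
	atomic_store_explicit(&jiffies_stall, j + j1,
			      memory_order_release);	/* ...this release. */
}

static unsigned long reader(unsigned long *gps)
{
	unsigned long js = atomic_load_explicit(&jiffies_stall,
						memory_order_acquire);

	*gps = gp_start;	/* Sees at least the gp_start written before js. */
	return js;
}
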
@@ -1302,9 +1306,9 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
 	j = jiffies;
 	gpa = READ_ONCE(rsp->gp_activity);
 	if (j - gpa > 2 * HZ) {
-		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
 		       rsp->name, j - gpa,
-		       rsp->gpnum, rsp->completed,
+		       (long)rcu_seq_current(&rsp->gp_seq),
 		       rsp->gp_flags,
 		       gp_state_getname(rsp->gp_state), rsp->gp_state,
 		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
@@ -1359,16 +1363,15 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
 	}
 }
 
-static inline void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(void)
 {
 	if (sysctl_panic_on_rcu_stall)
 		panic("RCU Stall\n");
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 {
 	int cpu;
-	long delta;
 	unsigned long flags;
 	unsigned long gpa;
 	unsigned long j;
@@ -1381,25 +1384,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	if (rcu_cpu_stall_suppress)
 		return;
 
-	/* Only let one CPU complain about others per time interval. */
-
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
-	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-		return;
-	}
-	WRITE_ONCE(rsp->jiffies_stall,
-		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
 	/*
 	 * OK, time to rat on our buddy...
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug
 	 * RCU CPU stall warnings.
 	 */
-	pr_err("INFO: %s detected stalls on CPUs/tasks:",
-	       rsp->name);
+	pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
 	print_cpu_stall_info_begin();
 	rcu_for_each_leaf_node(rsp, rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1418,17 +1408,16 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	for_each_possible_cpu(cpu)
 		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
 							    cpu)->cblist);
-	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
+	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
-	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
+	       (long)rcu_seq_current(&rsp->gp_seq), totqlen);
 	if (ndetected) {
 		rcu_dump_cpu_stacks(rsp);
 
 		/* Complain about tasks blocking the grace period. */
 		rcu_print_detail_task_stall(rsp);
 	} else {
-		if (READ_ONCE(rsp->gpnum) != gpnum ||
-		    READ_ONCE(rsp->completed) == gpnum) {
+		if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
 			pr_err("INFO: Stall ended before state dump start\n");
 		} else {
 			j = jiffies;
@@ -1441,6 +1430,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 			sched_show_task(current);
 		}
 	}
+	/* Rewrite if needed in case of slow consoles. */
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+		WRITE_ONCE(rsp->jiffies_stall,
+			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
 	rcu_check_gp_kthread_starvation(rsp);
 
@@ -1476,15 +1469,16 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	for_each_possible_cpu(cpu)
 		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
 							    cpu)->cblist);
-	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
+	pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
 		jiffies - rsp->gp_start,
-		(long)rsp->gpnum, (long)rsp->completed, totqlen);
+		(long)rcu_seq_current(&rsp->gp_seq), totqlen);
 
 	rcu_check_gp_kthread_starvation(rsp);
 
 	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	/* Rewrite if needed in case of slow consoles. */
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -1504,10 +1498,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	unsigned long completed;
-	unsigned long gpnum;
+	unsigned long gs1;
+	unsigned long gs2;
 	unsigned long gps;
 	unsigned long j;
+	unsigned long jn;
 	unsigned long js;
 	struct rcu_node *rnp;
 
@@ -1520,43 +1515,46 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	/*
 	 * Lots of memory barriers to reject false positives.
 	 *
-	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
-	 * then rsp->gp_start, and finally rsp->completed.  These values
-	 * are updated in the opposite order with memory barriers (or
-	 * equivalent) during grace-period initialization and cleanup.
-	 * Now, a false positive can occur if we get an new value of
-	 * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
-	 * the memory barriers, the only way that this can happen is if one
-	 * grace period ends and another starts between these two fetches.
-	 * Detect this by comparing rsp->completed with the previous fetch
-	 * from rsp->gpnum.
+	 * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
+	 * then rsp->gp_start, and finally another copy of rsp->gp_seq.
+	 * These values are updated in the opposite order with memory
+	 * barriers (or equivalent) during grace-period initialization
+	 * and cleanup.  Now, a false positive can occur if we get a new
+	 * value of rsp->gp_start and an old value of rsp->jiffies_stall.
+	 * But given the memory barriers, the only way that this can happen
+	 * is if one grace period ends and another starts between these
+	 * two fetches.  This is detected by comparing the second fetch
+	 * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
 	 *
 	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
 	 * and rsp->gp_start suffice to forestall false positives.
 	 */
-	gpnum = READ_ONCE(rsp->gpnum);
-	smp_rmb(); /* Pick up ->gpnum first... */
+	gs1 = READ_ONCE(rsp->gp_seq);
+	smp_rmb(); /* Pick up ->gp_seq first... */
 	js = READ_ONCE(rsp->jiffies_stall);
 	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
 	gps = READ_ONCE(rsp->gp_start);
-	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-	completed = READ_ONCE(rsp->completed);
-	if (ULONG_CMP_GE(completed, gpnum) ||
+	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+	gs2 = READ_ONCE(rsp->gp_seq);
+	if (gs1 != gs2 ||
 	    ULONG_CMP_LT(j, js) ||
 	    ULONG_CMP_GE(gps, js))
 		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
+	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 	if (rcu_gp_in_progress(rsp) &&
-	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
+	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+	    cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
 	} else if (rcu_gp_in_progress(rsp) &&
-		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
+		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+		   cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* They had a few time units to dump stack, so complain. */
-		print_other_cpu_stall(rsp, gpnum);
+		print_other_cpu_stall(rsp, gs2);
 	}
 }
 
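
The cmpxchg() calls added to check_cpu_stall() take over the one-report-per-interval duty from the locking deleted out of print_other_cpu_stall(): whichever CPU advances ->jiffies_stall from js to jn wins the right to complain. The idiom in isolation, as a C11 sketch with made-up names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong jiffies_stall;

/*
 * Returns true for exactly one caller per observed deadline value js;
 * everyone else sees the compare-and-swap fail and stays quiet.
 */
static bool stall_report_ticket(unsigned long js, unsigned long jn)
{
	unsigned long expected = js;

	return atomic_compare_exchange_strong(&jiffies_stall, &expected, jn);
}
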
@@ -1577,123 +1575,99 @@ void rcu_cpu_stall_reset(void)
 	WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
-/*
- * Determine the value that ->completed will have at the end of the
- * next subsequent grace period.  This is used to tag callbacks so that
- * a CPU can invoke callbacks in a timely fashion even if that CPU has
- * been dyntick-idle for an extended period with callbacks under the
- * influence of RCU_FAST_NO_HZ.
- *
- * The caller must hold rnp->lock with interrupts disabled.
- */
-static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
-				       struct rcu_node *rnp)
-{
-	raw_lockdep_assert_held_rcu_node(rnp);
-
-	/*
-	 * If RCU is idle, we just wait for the next grace period.
-	 * But we can only be sure that RCU is idle if we are looking
-	 * at the root rcu_node structure -- otherwise, a new grace
-	 * period might have started, but just not yet gotten around
-	 * to initializing the current non-root rcu_node structure.
-	 */
-	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
-		return rnp->completed + 1;
-
-	/*
-	 * If the current rcu_node structure believes that RCU is
-	 * idle, and if the rcu_state structure does not yet reflect
-	 * the start of a new grace period, then the next grace period
-	 * will suffice.  The memory barrier is needed to accurately
-	 * sample the rsp->gpnum, and pairs with the second lock
-	 * acquisition in rcu_gp_init(), which is augmented with
-	 * smp_mb__after_unlock_lock() for this purpose.
-	 */
-	if (rnp->gpnum == rnp->completed) {
-		smp_mb(); /* See above block comment. */
-		if (READ_ONCE(rsp->gpnum) == rnp->completed)
-			return rnp->completed + 1;
-	}
-
-	/*
-	 * Otherwise, wait for a possible partial grace period and
-	 * then the subsequent full grace period.
-	 */
-	return rnp->completed + 2;
-}
-
 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-			      unsigned long c, const char *s)
+			      unsigned long gp_seq_req, const char *s)
 {
-	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-				      rnp->completed, c, rnp->level,
-				      rnp->grplo, rnp->grphi, s);
+	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+				      rnp->level, rnp->grplo, rnp->grphi, s);
 }
 
 /*
+ * rcu_start_this_gp - Request the start of a particular grace period
+ * @rnp_start: The leaf node of the CPU from which to start.
+ * @rdp: The rcu_data corresponding to the CPU from which to start.
+ * @gp_seq_req: The gp_seq of the grace period to start.
+ *
  * Start the specified grace period, as needed to handle newly arrived
  * callbacks.  The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp[] field.  Returns true if there
+ * rcu_node structure's ->gp_seq_needed field.  Returns true if there
  * is reason to awaken the grace-period kthread.
  *
  * The caller must hold the specified rcu_node structure's ->lock, which
  * is why the caller is responsible for waking the grace-period kthread.
+ *
+ * Returns true if the GP thread needs to be awakened else false.
  */
-static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-			      unsigned long c)
+static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
+			      unsigned long gp_seq_req)
 {
 	bool ret = false;
 	struct rcu_state *rsp = rdp->rsp;
-	struct rcu_node *rnp_root;
+	struct rcu_node *rnp;
 
 	/*
 	 * Use funnel locking to either acquire the root rcu_node
 	 * structure's lock or bail out if the need for this grace period
-	 * has already been recorded -- or has already started.  If there
-	 * is already a grace period in progress in a non-leaf node, no
-	 * recording is needed because the end of the grace period will
-	 * scan the leaf rcu_node structures.  Note that rnp->lock must
-	 * not be released.
+	 * has already been recorded -- or if that grace period has in
+	 * fact already started.  If there is already a grace period in
+	 * progress in a non-leaf node, no recording is needed because the
+	 * end of the grace period will scan the leaf rcu_node structures.
+	 * Note that rnp_start->lock must not be released.
 	 */
-	raw_lockdep_assert_held_rcu_node(rnp);
-	trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
-	for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
-		if (rnp_root != rnp)
-			raw_spin_lock_rcu_node(rnp_root);
-		WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
-					  need_future_gp_mask(), c));
-		if (need_future_gp_element(rnp_root, c) ||
-		    ULONG_CMP_GE(rnp_root->gpnum, c) ||
-		    (rnp != rnp_root &&
-		     rnp_root->gpnum != rnp_root->completed)) {
-			trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
+	raw_lockdep_assert_held_rcu_node(rnp_start);
+	trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
+	for (rnp = rnp_start; 1; rnp = rnp->parent) {
+		if (rnp != rnp_start)
+			raw_spin_lock_rcu_node(rnp);
+		if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
+		    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
+		    (rnp != rnp_start &&
+		     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
+			trace_rcu_this_gp(rnp, rdp, gp_seq_req,
+					  TPS("Prestarted"));
 			goto unlock_out;
 		}
-		need_future_gp_element(rnp_root, c) = true;
-		if (rnp_root != rnp && rnp_root->parent != NULL)
-			raw_spin_unlock_rcu_node(rnp_root);
-		if (!rnp_root->parent)
+		rnp->gp_seq_needed = gp_seq_req;
+		if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
+			/*
+			 * We just marked the leaf or internal node, and a
+			 * grace period is in progress, which means that
+			 * rcu_gp_cleanup() will see the marking.  Bail to
+			 * reduce contention.
+			 */
+			trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
+					  TPS("Startedleaf"));
+			goto unlock_out;
+		}
+		if (rnp != rnp_start && rnp->parent != NULL)
+			raw_spin_unlock_rcu_node(rnp);
+		if (!rnp->parent)
 			break;  /* At root, and perhaps also leaf. */
 	}
 
 	/* If GP already in progress, just leave, otherwise start one. */
-	if (rnp_root->gpnum != rnp_root->completed) {
-		trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
+	if (rcu_gp_in_progress(rsp)) {
+		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
 		goto unlock_out;
 	}
-	trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
+	trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
 	WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+	rsp->gp_req_activity = jiffies;
 	if (!rsp->gp_kthread) {
-		trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
+		trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
 		goto unlock_out;
 	}
-	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
+	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
 	ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
-	if (rnp != rnp_root)
-		raw_spin_unlock_rcu_node(rnp_root);
+	/* Push furthest requested GP to leaf node and rcu_data structure. */
+	if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
+		rnp_start->gp_seq_needed = rnp->gp_seq_needed;
+		rdp->gp_seq_needed = rnp->gp_seq_needed;
+	}
+	if (rnp != rnp_start)
+		raw_spin_unlock_rcu_node(rnp);
 	return ret;
 }
 
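
The rewritten rcu_start_this_gp() keeps the funnel-locking shape: walk leaf to root, recording the request at each level, and bail out as soon as some node already covers it, so most requests never contend on the root lock. A skeletal pthreads rendering of just that pattern; the node layout and names are invented for the sketch:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	pthread_mutex_t lock;
	unsigned long seq_needed;	/* Furthest-future request seen here. */
	struct node *parent;		/* NULL at the root. */
};

/* Caller holds start->lock; returns true if it must wake the worker. */
static bool funnel_record(struct node *start, unsigned long req)
{
	struct node *n;

	for (n = start; ; n = n->parent) {
		if (n != start)
			pthread_mutex_lock(&n->lock);
		if (n->seq_needed >= req) {
			/* Already covered: bail before contending nearer the root. */
			if (n != start)
				pthread_mutex_unlock(&n->lock);
			return false;
		}
		n->seq_needed = req;
		if (!n->parent)
			break;		/* Recorded at the root: we own the request. */
		if (n != start)
			pthread_mutex_unlock(&n->lock);
	}
	if (n != start)
		pthread_mutex_unlock(&n->lock);
	return true;
}
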
@@ -1703,13 +1677,13 @@ unlock_out:
  */
 static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-	unsigned long c = rnp->completed;
 	bool needmore;
 	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-	need_future_gp_element(rnp, c) = false;
-	needmore = need_any_future_gp(rnp);
-	trace_rcu_this_gp(rnp, rdp, c,
+	needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
+	if (!needmore)
+		rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
+	trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
 			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
 }
@@ -1731,21 +1705,21 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 }
 
 /*
- * If there is room, assign a ->completed number to any callbacks on
- * this CPU that have not already been assigned.  Also accelerate any
- * callbacks that were previously assigned a ->completed number that has
- * since proven to be too conservative, which can happen if callbacks get
- * assigned a ->completed number while RCU is idle, but with reference to
- * a non-root rcu_node structure.  This function is idempotent, so it does
- * not hurt to call it repeatedly.  Returns an flag saying that we should
- * awaken the RCU grace-period kthread.
+ * If there is room, assign a ->gp_seq number to any callbacks on this
+ * CPU that have not already been assigned.  Also accelerate any callbacks
+ * that were previously assigned a ->gp_seq number that has since proven
+ * to be too conservative, which can happen if callbacks get assigned a
+ * ->gp_seq number while RCU is idle, but with reference to a non-root
+ * rcu_node structure.  This function is idempotent, so it does not hurt
+ * to call it repeatedly.  Returns a flag saying that we should awaken
+ * the RCU grace-period kthread.
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 			       struct rcu_data *rdp)
 {
-	unsigned long c;
+	unsigned long gp_seq_req;
 	bool ret = false;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
@@ -1764,22 +1738,50 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 	 * accelerating callback invocation to an earlier grace-period
 	 * number.
 	 */
-	c = rcu_cbs_completed(rsp, rnp);
-	if (rcu_segcblist_accelerate(&rdp->cblist, c))
-		ret = rcu_start_this_gp(rnp, rdp, c);
+	gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+	if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
+		ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
 
 	/* Trace depending on how much we were able to accelerate. */
 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
+		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
 	else
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
+		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
 	return ret;
 }
 
 /*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held.  It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+					struct rcu_node *rnp,
+					struct rcu_data *rdp)
+{
+	unsigned long c;
+	bool needwake;
+
+	lockdep_assert_irqs_disabled();
+	c = rcu_seq_snap(&rsp->gp_seq);
+	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+		/* Old request still live, so mark recent callbacks. */
+		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
+		return;
+	}
+	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+	if (needwake)
+		rcu_gp_kthread_wake(rsp);
+}
+
+/*
  * Move any callbacks whose grace period has completed to the
  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
- * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
  * sublist.  This function is idempotent, so it does not hurt to
  * invoke it repeatedly.  As long as it is not invoked -too- often...
  * Returns true if the RCU grace-period kthread needs to be awakened.
@@ -1796,10 +1798,10 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 		return false;
 
 	/*
-	 * Find all callbacks whose ->completed numbers indicate that they
+	 * Find all callbacks whose ->gp_seq numbers indicate that they
 	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
 	 */
-	rcu_segcblist_advance(&rdp->cblist, rnp->completed);
+	rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
 	/* Classify any remaining callbacks. */
 	return rcu_accelerate_cbs(rsp, rnp, rdp);
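
rcu_seq_snap(), which replaced rcu_cbs_completed() in rcu_accelerate_cbs() above, returns the ->gp_seq value by which a full grace period starting after this point must have ended. The arithmetic in the same simplified two-state-bit model as earlier (believed to match kernel/rcu/rcu.h; verify against the source):

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

/* Smallest future "idle" value covering a full grace period from now. */
static unsigned long rcu_seq_snap(unsigned long s)
{
	return (s + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

int main(void)
{
	/* Idle at ctr=2 (s=8): the next GP ends at s=12. */
	printf("snap(8) = %lu\n", rcu_seq_snap(8));	/* 12 */
	/* GP running (s=9): must wait for the one after, ending at s=16. */
	printf("snap(9) = %lu\n", rcu_seq_snap(9));	/* 16 */
	return 0;
}
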
@@ -1819,39 +1821,38 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
-	/* Handle the ends of any preceding grace periods first. */
-	if (rdp->completed == rnp->completed &&
-	    !unlikely(READ_ONCE(rdp->gpwrap))) {
-
-		/* No grace period end, so just accelerate recent callbacks. */
-		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
+	if (rdp->gp_seq == rnp->gp_seq)
+		return false; /* Nothing to do. */
 
+	/* Handle the ends of any preceding grace periods first. */
+	if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
+	    unlikely(READ_ONCE(rdp->gpwrap))) {
+		ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+		trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
 	} else {
-
-		/* Advance callbacks. */
-		ret = rcu_advance_cbs(rsp, rnp, rdp);
-
-		/* Remember that we saw this grace-period completion. */
-		rdp->completed = rnp->completed;
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
+		ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
 	}
 
-	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
+	/* Now handle the beginnings of any new-to-this-CPU grace periods. */
+	if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
+	    unlikely(READ_ONCE(rdp->gpwrap))) {
 		/*
 		 * If the current grace period is waiting for this CPU,
 		 * set up to detect a quiescent state, otherwise don't
 		 * go looking for one.
 		 */
-		rdp->gpnum = rnp->gpnum;
-		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
+		trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
 		need_gp = !!(rnp->qsmask & rdp->grpmask);
 		rdp->cpu_no_qs.b.norm = need_gp;
 		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
 		rdp->core_needs_qs = need_gp;
 		zero_cpu_stall_ticks(rdp);
-		WRITE_ONCE(rdp->gpwrap, false);
-		rcu_gpnum_ovf(rnp, rdp);
 	}
+	rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
+	if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+		rdp->gp_seq_needed = rnp->gp_seq_needed;
+	WRITE_ONCE(rdp->gpwrap, false);
+	rcu_gpnum_ovf(rnp, rdp);
 	return ret;
 }
 
@@ -1863,8 +1864,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	local_irq_save(flags);
 	rnp = rdp->mynode;
-	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
-	     rdp->completed == READ_ONCE(rnp->completed) &&
+	if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
 	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
 	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
 		local_irq_restore(flags);
@@ -1879,7 +1879,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 static void rcu_gp_slow(struct rcu_state *rsp, int delay)
 {
 	if (delay > 0 &&
-	    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+	    !(rcu_seq_ctr(rsp->gp_seq) %
+	      (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
 		schedule_timeout_uninterruptible(delay);
 }
 
@@ -1888,7 +1889,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  */
 static bool rcu_gp_init(struct rcu_state *rsp)
 {
+	unsigned long flags;
 	unsigned long oldmask;
+	unsigned long mask;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 
@@ -1912,9 +1915,9 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
 	/* Advance to a new grace period and initialize state. */
 	record_gp_stall_check_time(rsp);
-	/* Record GP times before starting GP, hence smp_store_release(). */
-	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
-	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+	/* Record GP times before starting GP, hence rcu_seq_start(). */
+	rcu_seq_start(&rsp->gp_seq);
+	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
 	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
@@ -1923,13 +1926,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	 * for subsequent online CPUs, and that quiescent-state forcing
 	 * will handle subsequent offline CPUs.
 	 */
+	rsp->gp_state = RCU_GP_ONOFF;
 	rcu_for_each_leaf_node(rsp, rnp) {
-		rcu_gp_slow(rsp, gp_preinit_delay);
+		spin_lock(&rsp->ofl_lock);
 		raw_spin_lock_irq_rcu_node(rnp);
 		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
 		    !rnp->wait_blkd_tasks) {
 			/* Nothing to do on this leaf rcu_node structure. */
 			raw_spin_unlock_irq_rcu_node(rnp);
+			spin_unlock(&rsp->ofl_lock);
 			continue;
 		}
 
@@ -1939,12 +1944,14 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
 		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
 		if (!oldmask != !rnp->qsmaskinit) {
-			if (!oldmask) /* First online CPU for this rcu_node. */
-				rcu_init_new_rnp(rnp);
-			else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
-				rnp->wait_blkd_tasks = true;
-			else /* Last offline CPU and can propagate. */
+			if (!oldmask) { /* First online CPU for rcu_node. */
+				if (!rnp->wait_blkd_tasks) /* Ever offline? */
+					rcu_init_new_rnp(rnp);
+			} else if (rcu_preempt_has_tasks(rnp)) {
+				rnp->wait_blkd_tasks = true; /* blocked tasks */
+			} else { /* Last offline CPU and can propagate. */
 				rcu_cleanup_dead_rnp(rnp);
+			}
 		}
 
 		/*
@@ -1953,18 +1960,19 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 		 * still offline, propagate up the rcu_node tree and
 		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
 		 * rcu_node structure's CPUs has since come back online,
-		 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
-		 * checks for this, so just call it unconditionally).
+		 * simply clear ->wait_blkd_tasks.
 		 */
 		if (rnp->wait_blkd_tasks &&
-		    (!rcu_preempt_has_tasks(rnp) ||
-		     rnp->qsmaskinit)) {
+		    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
 			rnp->wait_blkd_tasks = false;
-			rcu_cleanup_dead_rnp(rnp);
+			if (!rnp->qsmaskinit)
+				rcu_cleanup_dead_rnp(rnp);
 		}
 
 		raw_spin_unlock_irq_rcu_node(rnp);
+		spin_unlock(&rsp->ofl_lock);
 	}
+	rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
 
 	/*
 	 * Set the quiescent-state-needed bits in all the rcu_node
@@ -1978,22 +1986,27 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 	 * The grace period cannot complete until the initialization
 	 * process finishes, because this kthread handles both.
 	 */
+	rsp->gp_state = RCU_GP_INIT;
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		rcu_gp_slow(rsp, gp_init_delay);
-		raw_spin_lock_irq_rcu_node(rnp);
+		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rdp = this_cpu_ptr(rsp->rda);
-		rcu_preempt_check_blocked_tasks(rnp);
+		rcu_preempt_check_blocked_tasks(rsp, rnp);
 		rnp->qsmask = rnp->qsmaskinit;
-		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
-		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-			WRITE_ONCE(rnp->completed, rsp->completed);
+		WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
 		if (rnp == rdp->mynode)
 			(void)__note_gp_changes(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
-		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+		trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
 					    rnp->level, rnp->grplo,
 					    rnp->grphi, rnp->qsmask);
-		raw_spin_unlock_irq_rcu_node(rnp);
+		/* Quiescent states for tasks on any now-offline CPUs. */
+		mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+		rnp->rcu_gp_init_mask = mask;
+		if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
+			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+		else
+			raw_spin_unlock_irq_rcu_node(rnp);
 		cond_resched_tasks_rcu_qs();
 		WRITE_ONCE(rsp->gp_activity, jiffies);
 	}
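
The new mask computed at the end of rcu_gp_init() picks out CPUs that the fresh grace period would otherwise wait on even though hotplug has already taken them down: bits set in ->qsmask but clear in ->qsmaskinitnext. Worked out on made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long qsmask = 0x0f;		/* GP waits on CPUs 0-3. */
	unsigned long qsmaskinitnext = 0x0b;	/* CPU 2 has gone offline. */
	unsigned long mask = qsmask & ~qsmaskinitnext;

	/* These quiescent states are reported on the CPUs' behalf. */
	printf("mask = %#lx\n", mask);		/* 0x4 -> CPU 2 */
	return 0;
}
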
@@ -2053,6 +2066,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 {
 	unsigned long gp_duration;
 	bool needgp = false;
+	unsigned long new_gp_seq;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	struct swait_queue_head *sq;
@@ -2074,19 +2088,22 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	raw_spin_unlock_irq_rcu_node(rnp);
 
 	/*
-	 * Propagate new ->completed value to rcu_node structures so
-	 * that other CPUs don't have to wait until the start of the next
-	 * grace period to process their callbacks.  This also avoids
-	 * some nasty RCU grace-period initialization races by forcing
-	 * the end of the current grace period to be completely recorded in
-	 * all of the rcu_node structures before the beginning of the next
-	 * grace period is recorded in any of the rcu_node structures.
+	 * Propagate new ->gp_seq value to rcu_node structures so that
+	 * other CPUs don't have to wait until the start of the next grace
+	 * period to process their callbacks.  This also avoids some nasty
+	 * RCU grace-period initialization races by forcing the end of
+	 * the current grace period to be completely recorded in all of
+	 * the rcu_node structures before the beginning of the next grace
+	 * period is recorded in any of the rcu_node structures.
 	 */
+	new_gp_seq = rsp->gp_seq;
+	rcu_seq_end(&new_gp_seq);
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
-		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+			dump_blkd_tasks(rsp, rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
-		WRITE_ONCE(rnp->completed, rsp->gpnum);
+		WRITE_ONCE(rnp->gp_seq, new_gp_seq);
 		rdp = this_cpu_ptr(rsp->rda);
 		if (rnp == rdp->mynode)
 			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -2100,26 +2117,28 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 		rcu_gp_slow(rsp, gp_cleanup_delay);
 	}
 	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
+	raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
 
 	/* Declare grace period done. */
-	WRITE_ONCE(rsp->completed, rsp->gpnum);
-	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
+	rcu_seq_end(&rsp->gp_seq);
+	trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
 	rsp->gp_state = RCU_GP_IDLE;
 	/* Check for GP requests since above loop. */
 	rdp = this_cpu_ptr(rsp->rda);
-	if (need_any_future_gp(rnp)) {
-		trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+	if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
+		trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
 				  TPS("CleanupMore"));
 		needgp = true;
 	}
 	/* Advance CBs to reduce false positives below. */
 	if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
 		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
-		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
+		rsp->gp_req_activity = jiffies;
+		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
 				       TPS("newreq"));
+	} else {
+		WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
 	}
-	WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
 	raw_spin_unlock_irq_rcu_node(rnp);
 }
 
@@ -2141,7 +2160,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 	/* Handle grace-period start. */
 	for (;;) {
 		trace_rcu_grace_period(rsp->name,
-				       READ_ONCE(rsp->gpnum),
+				       READ_ONCE(rsp->gp_seq),
 				       TPS("reqwait"));
 		rsp->gp_state = RCU_GP_WAIT_GPS;
 		swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
@@ -2154,17 +2173,13 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			WRITE_ONCE(rsp->gp_activity, jiffies);
 			WARN_ON(signal_pending(current));
 			trace_rcu_grace_period(rsp->name,
-					       READ_ONCE(rsp->gpnum),
+					       READ_ONCE(rsp->gp_seq),
 					       TPS("reqwaitsig"));
 		}
 
 		/* Handle quiescent-state forcing. */
 		first_gp_fqs = true;
 		j = jiffies_till_first_fqs;
-		if (j > HZ) {
-			j = HZ;
-			jiffies_till_first_fqs = HZ;
-		}
 		ret = 0;
 		for (;;) {
 			if (!ret) {
@@ -2173,7 +2188,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
 						jiffies + 3 * j);
 			}
 			trace_rcu_grace_period(rsp->name,
-					       READ_ONCE(rsp->gpnum),
+					       READ_ONCE(rsp->gp_seq),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
 			ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
@@ -2188,31 +2203,24 @@ static int __noreturn rcu_gp_kthread(void *arg)
 			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
 			    (gf & RCU_GP_FLAG_FQS)) {
 				trace_rcu_grace_period(rsp->name,
-						       READ_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gp_seq),
 						       TPS("fqsstart"));
 				rcu_gp_fqs(rsp, first_gp_fqs);
 				first_gp_fqs = false;
 				trace_rcu_grace_period(rsp->name,
-						       READ_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gp_seq),
 						       TPS("fqsend"));
 				cond_resched_tasks_rcu_qs();
 				WRITE_ONCE(rsp->gp_activity, jiffies);
 				ret = 0; /* Force full wait till next FQS. */
 				j = jiffies_till_next_fqs;
-				if (j > HZ) {
-					j = HZ;
-					jiffies_till_next_fqs = HZ;
-				} else if (j < 1) {
-					j = 1;
-					jiffies_till_next_fqs = 1;
-				}
 			} else {
 				/* Deal with stray signal. */
 				cond_resched_tasks_rcu_qs();
 				WRITE_ONCE(rsp->gp_activity, jiffies);
 				WARN_ON(signal_pending(current));
 				trace_rcu_grace_period(rsp->name,
-						       READ_ONCE(rsp->gpnum),
+						       READ_ONCE(rsp->gp_seq),
 						       TPS("fqswaitsig"));
 				ret = 1; /* Keep old FQS timing. */
 				j = jiffies;
@@ -2256,8 +2264,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * must be represented by the same rcu_node structure (which need not be a
  * leaf rcu_node structure, though it often will be).  The gps parameter
  * is the grace-period snapshot, which means that the quiescent states
- * are valid only if rnp->gpnum is equal to gps.  That structure's lock
+ * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
  * must be held upon entry, and it is released before return.
+ *
+ * As a special case, if mask is zero, the bit-already-cleared check is
+ * disabled.  This allows propagating quiescent state due to resumed tasks
+ * during grace-period initialization.
  */
 static void
 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
@@ -2271,7 +2283,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
-		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
+		if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
 
 			/*
 			 * Our bit has already been cleared, or the
@@ -2284,7 +2296,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
 			     rcu_preempt_blocked_readers_cgp(rnp));
 		rnp->qsmask &= ~mask;
-		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+		trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
 						 mask, rnp->qsmask, rnp->level,
 						 rnp->grplo, rnp->grphi,
 						 !!rnp->gp_tasks);
@@ -2294,6 +2306,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
+		rnp->completedqs = rnp->gp_seq;
 		mask = rnp->grpmask;
 		if (rnp->parent == NULL) {
 
@@ -2323,8 +2336,9 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * irqs disabled, and this lock is released upon return, but irqs remain
  * disabled.
  */
-static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
-				      struct rcu_node *rnp, unsigned long flags)
+static void __maybe_unused
+rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+			  struct rcu_node *rnp, unsigned long flags)
 	__releases(rnp->lock)
 {
 	unsigned long gps;
@@ -2332,12 +2346,15 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2332 struct rcu_node *rnp_p; 2346 struct rcu_node *rnp_p;
2333 2347
2334 raw_lockdep_assert_held_rcu_node(rnp); 2348 raw_lockdep_assert_held_rcu_node(rnp);
2335 if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p || 2349 if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
2336 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { 2350 WARN_ON_ONCE(rsp != rcu_state_p) ||
2351 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2352 rnp->qsmask != 0) {
2337 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2353 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2338 return; /* Still need more quiescent states! */ 2354 return; /* Still need more quiescent states! */
2339 } 2355 }
2340 2356
2357 rnp->completedqs = rnp->gp_seq;
2341 rnp_p = rnp->parent; 2358 rnp_p = rnp->parent;
2342 if (rnp_p == NULL) { 2359 if (rnp_p == NULL) {
2343 /* 2360 /*
@@ -2348,8 +2365,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2348 return; 2365 return;
2349 } 2366 }
2350 2367
2351 /* Report up the rest of the hierarchy, tracking current ->gpnum. */ 2368 /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2352 gps = rnp->gpnum; 2369 gps = rnp->gp_seq;
2353 mask = rnp->grpmask; 2370 mask = rnp->grpmask;
2354 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ 2371 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2355 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */ 2372 raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
@@ -2370,8 +2387,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2370 2387
2371 rnp = rdp->mynode; 2388 rnp = rdp->mynode;
2372 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2389 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2373 if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum || 2390 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2374 rnp->completed == rnp->gpnum || rdp->gpwrap) { 2391 rdp->gpwrap) {
2375 2392
2376 /* 2393 /*
2377 * The grace period in which this quiescent state was 2394 * The grace period in which this quiescent state was
@@ -2396,7 +2413,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2396 */ 2413 */
2397 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); 2414 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2398 2415
2399 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); 2416 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
2400 /* ^^^ Released rnp->lock */ 2417 /* ^^^ Released rnp->lock */
2401 if (needwake) 2418 if (needwake)
2402 rcu_gp_kthread_wake(rsp); 2419 rcu_gp_kthread_wake(rsp);
@@ -2441,17 +2458,16 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2441 */ 2458 */
2442static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) 2459static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2443{ 2460{
2444 RCU_TRACE(unsigned long mask;) 2461 RCU_TRACE(bool blkd;)
2445 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);) 2462 RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
2446 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;) 2463 RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
2447 2464
2448 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) 2465 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2449 return; 2466 return;
2450 2467
2451 RCU_TRACE(mask = rdp->grpmask;) 2468 RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
2452 trace_rcu_grace_period(rsp->name, 2469 trace_rcu_grace_period(rsp->name, rnp->gp_seq,
2453 rnp->gpnum + 1 - !!(rnp->qsmask & mask), 2470 blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
2454 TPS("cpuofl"));
2455} 2471}
2456 2472
2457/* 2473/*
@@ -2463,7 +2479,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2463 * This function therefore goes up the tree of rcu_node structures, 2479 * This function therefore goes up the tree of rcu_node structures,
2464 * clearing the corresponding bits in the ->qsmaskinit fields. Note that 2480 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
2465 * the leaf rcu_node structure's ->qsmaskinit field has already been 2481 * the leaf rcu_node structure's ->qsmaskinit field has already been
2466 * updated 2482 * updated.
2467 * 2483 *
2468 * This function does check that the specified rcu_node structure has 2484 * This function does check that the specified rcu_node structure has
2469 * all CPUs offline and no blocked tasks, so it is OK to invoke it 2485 * all CPUs offline and no blocked tasks, so it is OK to invoke it
@@ -2476,9 +2492,10 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2476 long mask; 2492 long mask;
2477 struct rcu_node *rnp = rnp_leaf; 2493 struct rcu_node *rnp = rnp_leaf;
2478 2494
2479 raw_lockdep_assert_held_rcu_node(rnp); 2495 raw_lockdep_assert_held_rcu_node(rnp_leaf);
2480 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || 2496 if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2481 rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) 2497 WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2498 WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2482 return; 2499 return;
2483 for (;;) { 2500 for (;;) {
2484 mask = rnp->grpmask; 2501 mask = rnp->grpmask;
@@ -2487,7 +2504,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2487 break; 2504 break;
2488 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 2505 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2489 rnp->qsmaskinit &= ~mask; 2506 rnp->qsmaskinit &= ~mask;
2490 rnp->qsmask &= ~mask; 2507 /* Between grace periods, so better already be zero! */
2508 WARN_ON_ONCE(rnp->qsmask);
2491 if (rnp->qsmaskinit) { 2509 if (rnp->qsmaskinit) {
2492 raw_spin_unlock_rcu_node(rnp); 2510 raw_spin_unlock_rcu_node(rnp);
2493 /* irqs remain disabled. */ 2511 /* irqs remain disabled. */
@@ -2630,6 +2648,7 @@ void rcu_check_callbacks(int user)
2630 2648
2631 rcu_sched_qs(); 2649 rcu_sched_qs();
2632 rcu_bh_qs(); 2650 rcu_bh_qs();
2651 rcu_note_voluntary_context_switch(current);
2633 2652
2634 } else if (!in_softirq()) { 2653 } else if (!in_softirq()) {
2635 2654
@@ -2645,8 +2664,7 @@ void rcu_check_callbacks(int user)
2645 rcu_preempt_check_callbacks(); 2664 rcu_preempt_check_callbacks();
2646 if (rcu_pending()) 2665 if (rcu_pending())
2647 invoke_rcu_core(); 2666 invoke_rcu_core();
2648 if (user) 2667
2649 rcu_note_voluntary_context_switch(current);
2650 trace_rcu_utilization(TPS("End scheduler-tick")); 2668 trace_rcu_utilization(TPS("End scheduler-tick"));
2651} 2669}
2652 2670
@@ -2681,17 +2699,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
2681 /* rcu_initiate_boost() releases rnp->lock */ 2699 /* rcu_initiate_boost() releases rnp->lock */
2682 continue; 2700 continue;
2683 } 2701 }
2684 if (rnp->parent && 2702 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2685 (rnp->parent->qsmask & rnp->grpmask)) { 2703 continue;
2686 /*
2687 * Race between grace-period
2688 * initialization and task exiting RCU
2689 * read-side critical section: Report.
2690 */
2691 rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2691 rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2692 /* rcu_report_unblock_qs_rnp() releases ->lock */
2693 continue;
2694 }
2695 } 2704 }
2696 for_each_leaf_node_possible_cpu(rnp, cpu) { 2705 for_each_leaf_node_possible_cpu(rnp, cpu) {
2697 unsigned long bit = leaf_node_cpu_bit(rnp, cpu); 2706 unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
@@ -2701,8 +2710,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
2701 } 2710 }
2702 } 2711 }
2703 if (mask != 0) { 2712 if (mask != 0) {
2704 /* Idle/offline CPUs, report (releases rnp->lock. */ 2713 /* Idle/offline CPUs, report (releases rnp->lock). */
2705 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); 2714 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
2706 } else { 2715 } else {
2707 /* Nothing to do here, so just drop the lock. */ 2716 /* Nothing to do here, so just drop the lock. */
2708 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2717 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2747,6 +2756,65 @@ static void force_quiescent_state(struct rcu_state *rsp)
2747} 2756}
2748 2757
2749/* 2758/*
2759 * This function checks for grace-period requests that fail to motivate
2760 * RCU to come out of its idle mode.
2761 */
2762static void
2763rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
2764 struct rcu_data *rdp)
2765{
2766 const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
2767 unsigned long flags;
2768 unsigned long j;
2769 struct rcu_node *rnp_root = rcu_get_root(rsp);
2770 static atomic_t warned = ATOMIC_INIT(0);
2771
2772 if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
2773 ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
2774 return;
2775 j = jiffies; /* Expensive access, and in common case don't get here. */
2776 if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
2777 time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
2778 atomic_read(&warned))
2779 return;
2780
2781 raw_spin_lock_irqsave_rcu_node(rnp, flags);
2782 j = jiffies;
2783 if (rcu_gp_in_progress(rsp) ||
2784 ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
2785 time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
2786 time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
2787 atomic_read(&warned)) {
2788 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2789 return;
2790 }
2791 /* Hold onto the leaf lock to make others see warned==1. */
2792
2793 if (rnp_root != rnp)
2794 raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
2795 j = jiffies;
2796 if (rcu_gp_in_progress(rsp) ||
2797 ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
2798 time_before(j, rsp->gp_req_activity + gpssdelay) ||
2799 time_before(j, rsp->gp_activity + gpssdelay) ||
2800 atomic_xchg(&warned, 1)) {
2801 raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
2802 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2803 return;
2804 }
2805 pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
2806 __func__, (long)READ_ONCE(rsp->gp_seq),
2807 (long)READ_ONCE(rnp_root->gp_seq_needed),
2808 j - rsp->gp_req_activity, j - rsp->gp_activity,
2809 rsp->gp_flags, rsp->gp_state, rsp->name,
2810 rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
2811 WARN_ON(1);
2812 if (rnp_root != rnp)
2813 raw_spin_unlock_rcu_node(rnp_root);
2814 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2815}
2816
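
rcu_check_gp_start_stall() is built as a cheap-check / locked-recheck / warn-once sequence: a lockless bail-out covers the common case, the same conditions are retested as each lock is taken, and atomic_xchg() guarantees that exactly one caller emits the diagnostic. A stand-alone sketch of that shape, with hypothetical names standing in for the kernel's state:

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int warned;

/* Cheap lockless test first; pretend it inspects timestamps. */
static int looks_stalled(void)
{
        return 1;       /* Always "stalled" for the demo. */
}

static void check_stall(void)
{
        if (!looks_stalled() || atomic_load(&warned))
                return;                 /* Common case: no locking at all. */

        pthread_mutex_lock(&lock);
        /* Conditions may have changed while acquiring the lock: retest. */
        if (!looks_stalled() || atomic_exchange(&warned, 1)) {
                pthread_mutex_unlock(&lock);
                return;                 /* Resolved, or someone else warned. */
        }
        fprintf(stderr, "stall warning: emitted exactly once\n");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        check_stall();
        check_stall();  /* Second call is silenced by the warned flag. */
        return 0;
}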
2817/*
2750 * This does the RCU core processing work for the specified rcu_state 2818 * This does the RCU core processing work for the specified rcu_state
2751 * and rcu_data structures. This may be called only from the CPU to 2819 * and rcu_data structures. This may be called only from the CPU to
2752 * whom the rdp belongs. 2820 * whom the rdp belongs.
@@ -2755,9 +2823,8 @@ static void
2755__rcu_process_callbacks(struct rcu_state *rsp) 2823__rcu_process_callbacks(struct rcu_state *rsp)
2756{ 2824{
2757 unsigned long flags; 2825 unsigned long flags;
2758 bool needwake;
2759 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda); 2826 struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2760 struct rcu_node *rnp; 2827 struct rcu_node *rnp = rdp->mynode;
2761 2828
2762 WARN_ON_ONCE(!rdp->beenonline); 2829 WARN_ON_ONCE(!rdp->beenonline);
2763 2830
@@ -2768,18 +2835,13 @@ __rcu_process_callbacks(struct rcu_state *rsp)
2768 if (!rcu_gp_in_progress(rsp) && 2835 if (!rcu_gp_in_progress(rsp) &&
2769 rcu_segcblist_is_enabled(&rdp->cblist)) { 2836 rcu_segcblist_is_enabled(&rdp->cblist)) {
2770 local_irq_save(flags); 2837 local_irq_save(flags);
2771 if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) { 2838 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2772 local_irq_restore(flags); 2839 rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
2773 } else { 2840 local_irq_restore(flags);
2774 rnp = rdp->mynode;
2775 raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
2776 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2777 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2778 if (needwake)
2779 rcu_gp_kthread_wake(rsp);
2780 }
2781 } 2841 }
2782 2842
2843 rcu_check_gp_start_stall(rsp, rnp, rdp);
2844
2783 /* If there are callbacks ready, invoke them. */ 2845 /* If there are callbacks ready, invoke them. */
2784 if (rcu_segcblist_ready_cbs(&rdp->cblist)) 2846 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2785 invoke_rcu_callbacks(rsp, rdp); 2847 invoke_rcu_callbacks(rsp, rdp);
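
This hunk, and the __call_rcu_core() hunk further below, replace an open-coded lock/accelerate/unlock/wake sequence with rcu_accelerate_cbs_unlocked(). Judging purely from the deleted lines, a plausible shape for that helper is the old sequence factored out; this is a reconstruction, not the helper's actual body, which may well add a lockless fast path:

/*
 * Reconstructed from the lines deleted above and in __call_rcu_core():
 * a plausible, unverified shape for the new helper.
 */
static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
                                        struct rcu_node *rnp,
                                        struct rcu_data *rdp)
{
        bool needwake;

        raw_spin_lock_rcu_node(rnp);    /* irqs already disabled. */
        needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        if (needwake)
                rcu_gp_kthread_wake(rsp);
}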
@@ -2833,8 +2895,6 @@ static void invoke_rcu_core(void)
2833static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, 2895static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2834 struct rcu_head *head, unsigned long flags) 2896 struct rcu_head *head, unsigned long flags)
2835{ 2897{
2836 bool needwake;
2837
2838 /* 2898 /*
2839 * If called from an extended quiescent state, invoke the RCU 2899 * If called from an extended quiescent state, invoke the RCU
2840 * core in order to force a re-evaluation of RCU's idleness. 2900 * core in order to force a re-evaluation of RCU's idleness.
@@ -2861,13 +2921,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2861 2921
2862 /* Start a new grace period if one not already started. */ 2922 /* Start a new grace period if one not already started. */
2863 if (!rcu_gp_in_progress(rsp)) { 2923 if (!rcu_gp_in_progress(rsp)) {
2864 struct rcu_node *rnp = rdp->mynode; 2924 rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
2865
2866 raw_spin_lock_rcu_node(rnp);
2867 needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2868 raw_spin_unlock_rcu_node(rnp);
2869 if (needwake)
2870 rcu_gp_kthread_wake(rsp);
2871 } else { 2925 } else {
2872 /* Give the grace period a kick. */ 2926 /* Give the grace period a kick. */
2873 rdp->blimit = LONG_MAX; 2927 rdp->blimit = LONG_MAX;
@@ -3037,7 +3091,7 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
3037 * when there was in fact only one the whole time, as this just adds 3091 * when there was in fact only one the whole time, as this just adds
3038 * some overhead: RCU still operates correctly. 3092 * some overhead: RCU still operates correctly.
3039 */ 3093 */
3040static inline int rcu_blocking_is_gp(void) 3094static int rcu_blocking_is_gp(void)
3041{ 3095{
3042 int ret; 3096 int ret;
3043 3097
@@ -3136,16 +3190,10 @@ unsigned long get_state_synchronize_rcu(void)
3136{ 3190{
3137 /* 3191 /*
3138 * Any prior manipulation of RCU-protected data must happen 3192 * Any prior manipulation of RCU-protected data must happen
3139 * before the load from ->gpnum. 3193 * before the load from ->gp_seq.
3140 */ 3194 */
3141 smp_mb(); /* ^^^ */ 3195 smp_mb(); /* ^^^ */
3142 3196 return rcu_seq_snap(&rcu_state_p->gp_seq);
3143 /*
3144 * Make sure this load happens before the purportedly
3145 * time-consuming work between get_state_synchronize_rcu()
3146 * and cond_synchronize_rcu().
3147 */
3148 return smp_load_acquire(&rcu_state_p->gpnum);
3149} 3197}
3150EXPORT_SYMBOL_GPL(get_state_synchronize_rcu); 3198EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3151 3199
@@ -3165,15 +3213,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3165 */ 3213 */
3166void cond_synchronize_rcu(unsigned long oldstate) 3214void cond_synchronize_rcu(unsigned long oldstate)
3167{ 3215{
3168 unsigned long newstate; 3216 if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
3169
3170 /*
3171 * Ensure that this load happens before any RCU-destructive
3172 * actions the caller might carry out after we return.
3173 */
3174 newstate = smp_load_acquire(&rcu_state_p->completed);
3175 if (ULONG_CMP_GE(oldstate, newstate))
3176 synchronize_rcu(); 3217 synchronize_rcu();
3218 else
3219 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3177} 3220}
3178EXPORT_SYMBOL_GPL(cond_synchronize_rcu); 3221EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3179 3222
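
The single ->gp_seq counter that replaces the old ->gpnum/->completed pair reserves its low-order bits for grace-period state, so a snapshot (rcu_seq_snap()) plus one wrap-safe comparison (rcu_seq_done()) can stand in for the two acquire loads here and in the _sched variants below. A user-space sketch of the arithmetic, modeled on the rcu_seq_*() helpers; the shift width and the snap formula are assumptions to check against kernel/rcu/rcu.h:

#include <stdio.h>

#define SEQ_CTR_SHIFT  2                        /* Low bits hold GP state. */
#define SEQ_STATE_MASK ((1UL << SEQ_CTR_SHIFT) - 1)

/* Wrap-safe "a is at or after b" for free-running counters. */
#define ULONG_CMP_GE(a, b) (((unsigned long)-1) / 2 >= (a) - (b))

/* Earliest value of *sp marking a full grace period after now. */
static unsigned long seq_snap(const unsigned long *sp)
{
        return (*sp + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

/* Has the grace period captured in @s completed? */
static int seq_done(const unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_GE(*sp, s);
}

int main(void)
{
        unsigned long gp_seq = 0x9;     /* GP in progress: state bits set. */
        unsigned long s = seq_snap(&gp_seq);

        printf("snap=%#lx done=%d\n", s, seq_done(&gp_seq, s)); /* not yet */
        gp_seq = s;                     /* Pretend that GP has completed. */
        printf("snap=%#lx done=%d\n", s, seq_done(&gp_seq, s)); /* done */
        return 0;
}

With these helpers, cond_synchronize_rcu() reduces to: block in synchronize_rcu() if the snapshot has not yet been reached, otherwise issue a full barrier so later accesses are ordered after the completed grace period.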
@@ -3188,16 +3231,10 @@ unsigned long get_state_synchronize_sched(void)
3188{ 3231{
3189 /* 3232 /*
3190 * Any prior manipulation of RCU-protected data must happen 3233 * Any prior manipulation of RCU-protected data must happen
3191 * before the load from ->gpnum. 3234 * before the load from ->gp_seq.
3192 */ 3235 */
3193 smp_mb(); /* ^^^ */ 3236 smp_mb(); /* ^^^ */
3194 3237 return rcu_seq_snap(&rcu_sched_state.gp_seq);
3195 /*
3196 * Make sure this load happens before the purportedly
3197 * time-consuming work between get_state_synchronize_sched()
3198 * and cond_synchronize_sched().
3199 */
3200 return smp_load_acquire(&rcu_sched_state.gpnum);
3201} 3238}
3202EXPORT_SYMBOL_GPL(get_state_synchronize_sched); 3239EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
3203 3240
@@ -3217,15 +3254,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
3217 */ 3254 */
3218void cond_synchronize_sched(unsigned long oldstate) 3255void cond_synchronize_sched(unsigned long oldstate)
3219{ 3256{
3220 unsigned long newstate; 3257 if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
3221
3222 /*
3223 * Ensure that this load happens before any RCU-destructive
3224 * actions the caller might carry out after we return.
3225 */
3226 newstate = smp_load_acquire(&rcu_sched_state.completed);
3227 if (ULONG_CMP_GE(oldstate, newstate))
3228 synchronize_sched(); 3258 synchronize_sched();
3259 else
3260 smp_mb(); /* Ensure GP ends before subsequent accesses. */
3229} 3261}
3230EXPORT_SYMBOL_GPL(cond_synchronize_sched); 3262EXPORT_SYMBOL_GPL(cond_synchronize_sched);
3231 3263
@@ -3261,12 +3293,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3261 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) 3293 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3262 return 1; 3294 return 1;
3263 3295
3264 /* Has another RCU grace period completed? */ 3296 /* Have RCU grace period completed or started? */
3265 if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */ 3297 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3266 return 1;
3267
3268 /* Has a new RCU grace period started? */
3269 if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3270 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ 3298 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3271 return 1; 3299 return 1;
3272 3300
@@ -3298,7 +3326,7 @@ static int rcu_pending(void)
3298 * non-NULL, store an indication of whether all callbacks are lazy. 3326 * non-NULL, store an indication of whether all callbacks are lazy.
3299 * (If there are no callbacks, all of them are deemed to be lazy.) 3327 * (If there are no callbacks, all of them are deemed to be lazy.)
3300 */ 3328 */
3301static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy) 3329static bool rcu_cpu_has_callbacks(bool *all_lazy)
3302{ 3330{
3303 bool al = true; 3331 bool al = true;
3304 bool hc = false; 3332 bool hc = false;
@@ -3484,17 +3512,22 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3484static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) 3512static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3485{ 3513{
3486 long mask; 3514 long mask;
3515 long oldmask;
3487 struct rcu_node *rnp = rnp_leaf; 3516 struct rcu_node *rnp = rnp_leaf;
3488 3517
3489 raw_lockdep_assert_held_rcu_node(rnp); 3518 raw_lockdep_assert_held_rcu_node(rnp_leaf);
3519 WARN_ON_ONCE(rnp->wait_blkd_tasks);
3490 for (;;) { 3520 for (;;) {
3491 mask = rnp->grpmask; 3521 mask = rnp->grpmask;
3492 rnp = rnp->parent; 3522 rnp = rnp->parent;
3493 if (rnp == NULL) 3523 if (rnp == NULL)
3494 return; 3524 return;
3495 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */ 3525 raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3526 oldmask = rnp->qsmaskinit;
3496 rnp->qsmaskinit |= mask; 3527 rnp->qsmaskinit |= mask;
3497 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */ 3528 raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3529 if (oldmask)
3530 return;
3498 } 3531 }
3499} 3532}
3500 3533
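
The new oldmask logic stops the upward walk at the first level that already had a bit set: if this leaf was not the first to mark its parent, every ancestor is already initialized and continuing would be redundant. A toy version of the early-exit walk (names illustrative only):

#include <stdio.h>

struct toy_node {
        struct toy_node *parent;
        unsigned long qsmaskinit;
        unsigned long grpmask;
};

static void toy_init_new_leaf(struct toy_node *np)
{
        unsigned long mask, oldmask;

        for (;;) {
                mask = np->grpmask;
                np = np->parent;
                if (!np)
                        return;
                oldmask = np->qsmaskinit;
                np->qsmaskinit |= mask;
                if (oldmask)
                        return; /* Ancestors already initialized: stop. */
        }
}

int main(void)
{
        struct toy_node root = { 0 }, mid = { .parent = &root, .grpmask = 1 };
        struct toy_node leaf0 = { .parent = &mid, .grpmask = 1 };
        struct toy_node leaf1 = { .parent = &mid, .grpmask = 2 };

        toy_init_new_leaf(&leaf0);      /* Walks all the way to the root. */
        toy_init_new_leaf(&leaf1);      /* Stops at mid: oldmask was set. */
        printf("mid=%#lx root=%#lx\n", mid.qsmaskinit, root.qsmaskinit);
        return 0;
}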
@@ -3511,6 +3544,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3511 rdp->dynticks = &per_cpu(rcu_dynticks, cpu); 3544 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3512 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1); 3545 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
3513 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks))); 3546 WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
3547 rdp->rcu_ofl_gp_seq = rsp->gp_seq;
3548 rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
3549 rdp->rcu_onl_gp_seq = rsp->gp_seq;
3550 rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
3514 rdp->cpu = cpu; 3551 rdp->cpu = cpu;
3515 rdp->rsp = rsp; 3552 rdp->rsp = rsp;
3516 rcu_boot_init_nocb_percpu_data(rdp); 3553 rcu_boot_init_nocb_percpu_data(rdp);
@@ -3518,9 +3555,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3518 3555
3519/* 3556/*
3520 * Initialize a CPU's per-CPU RCU data. Note that only one online or 3557 * Initialize a CPU's per-CPU RCU data. Note that only one online or
3521 * offline event can be happening at a given time. Note also that we 3558 * offline event can be happening at a given time. Note also that we can
3522 * can accept some slop in the rsp->completed access due to the fact 3559 * accept some slop in the rsp->gp_seq access due to the fact that this
3523 * that this CPU cannot possibly have any RCU callbacks in flight yet. 3560 * CPU cannot possibly have any RCU callbacks in flight yet.
3524 */ 3561 */
3525static void 3562static void
3526rcu_init_percpu_data(int cpu, struct rcu_state *rsp) 3563rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
@@ -3549,14 +3586,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3549 rnp = rdp->mynode; 3586 rnp = rdp->mynode;
3550 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */ 3587 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
3551 rdp->beenonline = true; /* We have now been online. */ 3588 rdp->beenonline = true; /* We have now been online. */
3552 rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */ 3589 rdp->gp_seq = rnp->gp_seq;
3553 rdp->completed = rnp->completed; 3590 rdp->gp_seq_needed = rnp->gp_seq;
3554 rdp->cpu_no_qs.b.norm = true; 3591 rdp->cpu_no_qs.b.norm = true;
3555 rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu); 3592 rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
3556 rdp->core_needs_qs = false; 3593 rdp->core_needs_qs = false;
3557 rdp->rcu_iw_pending = false; 3594 rdp->rcu_iw_pending = false;
3558 rdp->rcu_iw_gpnum = rnp->gpnum - 1; 3595 rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
3559 trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl")); 3596 trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
3560 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3597 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3561} 3598}
3562 3599
@@ -3705,7 +3742,15 @@ void rcu_cpu_starting(unsigned int cpu)
3705 nbits = bitmap_weight(&oldmask, BITS_PER_LONG); 3742 nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
3706 /* Allow lockless access for expedited grace periods. */ 3743 /* Allow lockless access for expedited grace periods. */
3707 smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */ 3744 smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
3708 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3745 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
3746 rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
3747 rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
3748 if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
3749 /* Report QS -after- changing ->qsmaskinitnext! */
3750 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
3751 } else {
3752 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3753 }
3709 } 3754 }
3710 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */ 3755 smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
3711} 3756}
@@ -3713,7 +3758,7 @@ void rcu_cpu_starting(unsigned int cpu)
3713#ifdef CONFIG_HOTPLUG_CPU 3758#ifdef CONFIG_HOTPLUG_CPU
3714/* 3759/*
3715 * The CPU is exiting the idle loop into the arch_cpu_idle_dead() 3760 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
3716 * function. We now remove it from the rcu_node tree's ->qsmaskinit 3761 * function. We now remove it from the rcu_node tree's ->qsmaskinitnext
3717 * bit masks. 3762 * bit masks.
3718 */ 3763 */
3719static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp) 3764static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
@@ -3725,9 +3770,18 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
3725 3770
3726 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */ 3771 /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
3727 mask = rdp->grpmask; 3772 mask = rdp->grpmask;
3773 spin_lock(&rsp->ofl_lock);
3728 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */ 3774 raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
3775 rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
3776 rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
3777 if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
3778 /* Report quiescent state -before- changing ->qsmaskinitnext! */
3779 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
3780 raw_spin_lock_irqsave_rcu_node(rnp, flags);
3781 }
3729 rnp->qsmaskinitnext &= ~mask; 3782 rnp->qsmaskinitnext &= ~mask;
3730 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 3783 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3784 spin_unlock(&rsp->ofl_lock);
3731} 3785}
3732 3786
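
This offline path now mirrors the rcu_cpu_starting() hunk above: if the current grace period is still waiting on the CPU whose ->qsmaskinitnext bit is about to change, the quiescent state is reported on that CPU's behalf, and because rcu_report_qs_rnp() drops the node lock, the caller must retake it before touching the mask; the new ofl_lock then serializes the whole update against grace-period pre-initialization. The report-or-unlock idiom in isolation, as a runnable sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long qsmask = 0x1;              /* GP waiting on CPU 0. */
static unsigned long qsmaskinitnext = 0x1;

/* Reports a QS and, like rcu_report_qs_rnp(), drops the lock. */
static void report_qs(unsigned long mask)
{
        qsmask &= ~mask;
        printf("reported QS for mask %#lx\n", mask);
        pthread_mutex_unlock(&node_lock);
}

static void cpu_going_offline(unsigned long mask)
{
        pthread_mutex_lock(&node_lock);
        if (qsmask & mask) {            /* GP waiting on this CPU? */
                report_qs(mask);        /* ...releases node_lock... */
                pthread_mutex_lock(&node_lock); /* ...so retake it. */
        }
        qsmaskinitnext &= ~mask;        /* Exclude CPU from future GPs. */
        pthread_mutex_unlock(&node_lock);
}

int main(void)
{
        cpu_going_offline(0x1);
        printf("qsmask=%#lx qsmaskinitnext=%#lx\n", qsmask, qsmaskinitnext);
        return 0;
}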
3733/* 3787/*
@@ -3839,12 +3893,16 @@ static int __init rcu_spawn_gp_kthread(void)
3839 struct task_struct *t; 3893 struct task_struct *t;
3840 3894
3841 /* Force priority into range. */ 3895 /* Force priority into range. */
3842 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1) 3896 if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
3897 && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
3898 kthread_prio = 2;
3899 else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3843 kthread_prio = 1; 3900 kthread_prio = 1;
3844 else if (kthread_prio < 0) 3901 else if (kthread_prio < 0)
3845 kthread_prio = 0; 3902 kthread_prio = 0;
3846 else if (kthread_prio > 99) 3903 else if (kthread_prio > 99)
3847 kthread_prio = 99; 3904 kthread_prio = 99;
3905
3848 if (kthread_prio != kthread_prio_in) 3906 if (kthread_prio != kthread_prio_in)
3849 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n", 3907 pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3850 kthread_prio, kthread_prio_in); 3908 kthread_prio, kthread_prio_in);
@@ -3928,8 +3986,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
3928 raw_spin_lock_init(&rnp->fqslock); 3986 raw_spin_lock_init(&rnp->fqslock);
3929 lockdep_set_class_and_name(&rnp->fqslock, 3987 lockdep_set_class_and_name(&rnp->fqslock,
3930 &rcu_fqs_class[i], fqs[i]); 3988 &rcu_fqs_class[i], fqs[i]);
3931 rnp->gpnum = rsp->gpnum; 3989 rnp->gp_seq = rsp->gp_seq;
3932 rnp->completed = rsp->completed; 3990 rnp->gp_seq_needed = rsp->gp_seq;
3991 rnp->completedqs = rsp->gp_seq;
3933 rnp->qsmask = 0; 3992 rnp->qsmask = 0;
3934 rnp->qsmaskinit = 0; 3993 rnp->qsmaskinit = 0;
3935 rnp->grplo = j * cpustride; 3994 rnp->grplo = j * cpustride;
@@ -3997,7 +4056,7 @@ static void __init rcu_init_geometry(void)
3997 if (rcu_fanout_leaf == RCU_FANOUT_LEAF && 4056 if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
3998 nr_cpu_ids == NR_CPUS) 4057 nr_cpu_ids == NR_CPUS)
3999 return; 4058 return;
4000 pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n", 4059 pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4001 rcu_fanout_leaf, nr_cpu_ids); 4060 rcu_fanout_leaf, nr_cpu_ids);
4002 4061
4003 /* 4062 /*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 78e051dffc5b..4e74df768c57 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -81,18 +81,16 @@ struct rcu_node {
81 raw_spinlock_t __private lock; /* Root rcu_node's lock protects */ 81 raw_spinlock_t __private lock; /* Root rcu_node's lock protects */
82 /* some rcu_state fields as well as */ 82 /* some rcu_state fields as well as */
83 /* following. */ 83 /* following. */
84 unsigned long gpnum; /* Current grace period for this node. */ 84 unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */
85 /* This will either be equal to or one */ 85 unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
86 /* behind the root rcu_node's gpnum. */ 86 unsigned long completedqs; /* All QSes done for this node. */
87 unsigned long completed; /* Last GP completed for this node. */
88 /* This will either be equal to or one */
89 /* behind the root rcu_node's gpnum. */
90 unsigned long qsmask; /* CPUs or groups that need to switch in */ 87 unsigned long qsmask; /* CPUs or groups that need to switch in */
91 /* order for current grace period to proceed.*/ 88 /* order for current grace period to proceed.*/
92 /* In leaf rcu_node, each bit corresponds to */ 89 /* In leaf rcu_node, each bit corresponds to */
93 /* an rcu_data structure, otherwise, each */ 90 /* an rcu_data structure, otherwise, each */
94 /* bit corresponds to a child rcu_node */ 91 /* bit corresponds to a child rcu_node */
95 /* structure. */ 92 /* structure. */
93 unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
96 unsigned long qsmaskinit; 94 unsigned long qsmaskinit;
97 /* Per-GP initial value for qsmask. */ 95 /* Per-GP initial value for qsmask. */
98 /* Initialized from ->qsmaskinitnext at the */ 96 /* Initialized from ->qsmaskinitnext at the */
@@ -158,7 +156,6 @@ struct rcu_node {
158 struct swait_queue_head nocb_gp_wq[2]; 156 struct swait_queue_head nocb_gp_wq[2];
159 /* Place for rcu_nocb_kthread() to wait GP. */ 157 /* Place for rcu_nocb_kthread() to wait GP. */
160#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 158#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
161 u8 need_future_gp[4]; /* Counts of upcoming GP requests. */
162 raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp; 159 raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
163 160
164 spinlock_t exp_lock ____cacheline_internodealigned_in_smp; 161 spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
@@ -168,22 +165,6 @@ struct rcu_node {
168 bool exp_need_flush; /* Need to flush workitem? */ 165 bool exp_need_flush; /* Need to flush workitem? */
169} ____cacheline_internodealigned_in_smp; 166} ____cacheline_internodealigned_in_smp;
170 167
171/* Accessors for ->need_future_gp[] array. */
172#define need_future_gp_mask() \
173 (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
174#define need_future_gp_element(rnp, c) \
175 ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
176#define need_any_future_gp(rnp) \
177({ \
178 int __i; \
179 bool __nonzero = false; \
180 \
181 for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++) \
182 __nonzero = __nonzero || \
183 READ_ONCE((rnp)->need_future_gp[__i]); \
184 __nonzero; \
185})
186
187/* 168/*
188 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and 169 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
189 * are indexed relative to this interval rather than the global CPU ID space. 170 * are indexed relative to this interval rather than the global CPU ID space.
@@ -206,16 +187,14 @@ union rcu_noqs {
206/* Per-CPU data for read-copy update. */ 187/* Per-CPU data for read-copy update. */
207struct rcu_data { 188struct rcu_data {
208 /* 1) quiescent-state and grace-period handling : */ 189 /* 1) quiescent-state and grace-period handling : */
209 unsigned long completed; /* Track rsp->completed gp number */ 190 unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
210 /* in order to detect GP end. */ 191 unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
211 unsigned long gpnum; /* Highest gp number that this CPU */
212 /* is aware of having started. */
213 unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */ 192 unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
214 /* for rcu_all_qs() invocations. */ 193 /* for rcu_all_qs() invocations. */
215 union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */ 194 union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
216 bool core_needs_qs; /* Core waits for quiesc state. */ 195 bool core_needs_qs; /* Core waits for quiesc state. */
217 bool beenonline; /* CPU online at least once. */ 196 bool beenonline; /* CPU online at least once. */
218 bool gpwrap; /* Possible gpnum/completed wrap. */ 197 bool gpwrap; /* Possible ->gp_seq wrap. */
219 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */ 198 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
220 unsigned long grpmask; /* Mask to apply to leaf qsmask. */ 199 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
221 unsigned long ticks_this_gp; /* The number of scheduling-clock */ 200 unsigned long ticks_this_gp; /* The number of scheduling-clock */
@@ -239,7 +218,6 @@ struct rcu_data {
239 218
240 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ 219 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
241 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ 220 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
242 unsigned long offline_fqs; /* Kicked due to being offline. */
243 unsigned long cond_resched_completed; 221 unsigned long cond_resched_completed;
244 /* Grace period that needs help */ 222 /* Grace period that needs help */
245 /* from cond_resched(). */ 223 /* from cond_resched(). */
@@ -278,12 +256,16 @@ struct rcu_data {
278 /* Leader CPU takes GP-end wakeups. */ 256 /* Leader CPU takes GP-end wakeups. */
279#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 257#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
280 258
281 /* 7) RCU CPU stall data. */ 259 /* 7) Diagnostic data, including RCU CPU stall warnings. */
282 unsigned int softirq_snap; /* Snapshot of softirq activity. */ 260 unsigned int softirq_snap; /* Snapshot of softirq activity. */
283 /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */ 261 /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
284 struct irq_work rcu_iw; /* Check for non-irq activity. */ 262 struct irq_work rcu_iw; /* Check for non-irq activity. */
285 bool rcu_iw_pending; /* Is ->rcu_iw pending? */ 263 bool rcu_iw_pending; /* Is ->rcu_iw pending? */
286 unsigned long rcu_iw_gpnum; /* ->gpnum associated with ->rcu_iw. */ 264 unsigned long rcu_iw_gp_seq; /* ->gp_seq associated with ->rcu_iw. */
265 unsigned long rcu_ofl_gp_seq; /* ->gp_seq at last offline. */
266 short rcu_ofl_gp_flags; /* ->gp_flags at last offline. */
267 unsigned long rcu_onl_gp_seq; /* ->gp_seq at last online. */
268 short rcu_onl_gp_flags; /* ->gp_flags at last online. */
287 269
288 int cpu; 270 int cpu;
289 struct rcu_state *rsp; 271 struct rcu_state *rsp;
@@ -340,8 +322,7 @@ struct rcu_state {
340 322
341 u8 boost ____cacheline_internodealigned_in_smp; 323 u8 boost ____cacheline_internodealigned_in_smp;
342 /* Subject to priority boost. */ 324 /* Subject to priority boost. */
343 unsigned long gpnum; /* Current gp number. */ 325 unsigned long gp_seq; /* Grace-period sequence #. */
344 unsigned long completed; /* # of last completed gp. */
345 struct task_struct *gp_kthread; /* Task for grace periods. */ 326 struct task_struct *gp_kthread; /* Task for grace periods. */
346 struct swait_queue_head gp_wq; /* Where GP task waits. */ 327 struct swait_queue_head gp_wq; /* Where GP task waits. */
347 short gp_flags; /* Commands for GP task. */ 328 short gp_flags; /* Commands for GP task. */
@@ -373,6 +354,8 @@ struct rcu_state {
373 /* but in jiffies. */ 354 /* but in jiffies. */
374 unsigned long gp_activity; /* Time of last GP kthread */ 355 unsigned long gp_activity; /* Time of last GP kthread */
375 /* activity in jiffies. */ 356 /* activity in jiffies. */
357 unsigned long gp_req_activity; /* Time of last GP request */
358 /* in jiffies. */
376 unsigned long jiffies_stall; /* Time at which to check */ 359 unsigned long jiffies_stall; /* Time at which to check */
377 /* for CPU stalls. */ 360 /* for CPU stalls. */
378 unsigned long jiffies_resched; /* Time at which to resched */ 361 unsigned long jiffies_resched; /* Time at which to resched */
@@ -384,6 +367,10 @@ struct rcu_state {
384 const char *name; /* Name of structure. */ 367 const char *name; /* Name of structure. */
385 char abbr; /* Abbreviated name. */ 368 char abbr; /* Abbreviated name. */
386 struct list_head flavors; /* List of RCU flavors. */ 369 struct list_head flavors; /* List of RCU flavors. */
370
371 spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
372 /* Synchronize offline with */
373 /* GP pre-initialization. */
387}; 374};
388 375
389/* Values for rcu_state structure's gp_flags field. */ 376/* Values for rcu_state structure's gp_flags field. */
@@ -394,16 +381,20 @@ struct rcu_state {
394#define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */ 381#define RCU_GP_IDLE 0 /* Initial state and no GP in progress. */
395#define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */ 382#define RCU_GP_WAIT_GPS 1 /* Wait for grace-period start. */
396#define RCU_GP_DONE_GPS 2 /* Wait done for grace-period start. */ 383#define RCU_GP_DONE_GPS 2 /* Wait done for grace-period start. */
397#define RCU_GP_WAIT_FQS 3 /* Wait for force-quiescent-state time. */ 384#define RCU_GP_ONOFF 3 /* Grace-period initialization hotplug. */
398#define RCU_GP_DOING_FQS 4 /* Wait done for force-quiescent-state time. */ 385#define RCU_GP_INIT 4 /* Grace-period initialization. */
399#define RCU_GP_CLEANUP 5 /* Grace-period cleanup started. */ 386#define RCU_GP_WAIT_FQS 5 /* Wait for force-quiescent-state time. */
400#define RCU_GP_CLEANED 6 /* Grace-period cleanup complete. */ 387#define RCU_GP_DOING_FQS 6 /* Wait done for force-quiescent-state time. */
388#define RCU_GP_CLEANUP 7 /* Grace-period cleanup started. */
389#define RCU_GP_CLEANED 8 /* Grace-period cleanup complete. */
401 390
402#ifndef RCU_TREE_NONCORE 391#ifndef RCU_TREE_NONCORE
403static const char * const gp_state_names[] = { 392static const char * const gp_state_names[] = {
404 "RCU_GP_IDLE", 393 "RCU_GP_IDLE",
405 "RCU_GP_WAIT_GPS", 394 "RCU_GP_WAIT_GPS",
406 "RCU_GP_DONE_GPS", 395 "RCU_GP_DONE_GPS",
396 "RCU_GP_ONOFF",
397 "RCU_GP_INIT",
407 "RCU_GP_WAIT_FQS", 398 "RCU_GP_WAIT_FQS",
408 "RCU_GP_DOING_FQS", 399 "RCU_GP_DOING_FQS",
409 "RCU_GP_CLEANUP", 400 "RCU_GP_CLEANUP",
@@ -449,10 +440,13 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
449static void rcu_print_detail_task_stall(struct rcu_state *rsp); 440static void rcu_print_detail_task_stall(struct rcu_state *rsp);
450static int rcu_print_task_stall(struct rcu_node *rnp); 441static int rcu_print_task_stall(struct rcu_node *rnp);
451static int rcu_print_task_exp_stall(struct rcu_node *rnp); 442static int rcu_print_task_exp_stall(struct rcu_node *rnp);
452static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); 443static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
444 struct rcu_node *rnp);
453static void rcu_preempt_check_callbacks(void); 445static void rcu_preempt_check_callbacks(void);
454void call_rcu(struct rcu_head *head, rcu_callback_t func); 446void call_rcu(struct rcu_head *head, rcu_callback_t func);
455static void __init __rcu_init_preempt(void); 447static void __init __rcu_init_preempt(void);
448static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
449 int ncheck);
456static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 450static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
457static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); 451static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
458static void invoke_rcu_callbacks_kthread(void); 452static void invoke_rcu_callbacks_kthread(void);
@@ -489,7 +483,6 @@ static void __init rcu_spawn_nocb_kthreads(void);
489#ifdef CONFIG_RCU_NOCB_CPU 483#ifdef CONFIG_RCU_NOCB_CPU
490static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp); 484static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
491#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ 485#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
492static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
493static bool init_nocb_callback_list(struct rcu_data *rdp); 486static bool init_nocb_callback_list(struct rcu_data *rdp);
494static void rcu_bind_gp_kthread(void); 487static void rcu_bind_gp_kthread(void);
495static bool rcu_nohz_full_cpu(struct rcu_state *rsp); 488static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d428cc1064c8..0b2c2ad69629 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,6 +472,7 @@ retry_ipi:
472static void sync_rcu_exp_select_cpus(struct rcu_state *rsp, 472static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
473 smp_call_func_t func) 473 smp_call_func_t func)
474{ 474{
475 int cpu;
475 struct rcu_node *rnp; 476 struct rcu_node *rnp;
476 477
477 trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset")); 478 trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -486,13 +487,20 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
486 rnp->rew.rew_func = func; 487 rnp->rew.rew_func = func;
487 rnp->rew.rew_rsp = rsp; 488 rnp->rew.rew_rsp = rsp;
488 if (!READ_ONCE(rcu_par_gp_wq) || 489 if (!READ_ONCE(rcu_par_gp_wq) ||
489 rcu_scheduler_active != RCU_SCHEDULER_RUNNING) { 490 rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
490 /* No workqueues yet. */ 491 rcu_is_last_leaf_node(rsp, rnp)) {
492 /* No workqueues yet or last leaf, do direct call. */
491 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work); 493 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
492 continue; 494 continue;
493 } 495 }
494 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus); 496 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
495 queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work); 497 preempt_disable();
498 cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
499 /* If all offline, queue the work on an unbound CPU. */
500 if (unlikely(cpu > rnp->grphi))
501 cpu = WORK_CPU_UNBOUND;
502 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
503 preempt_enable();
496 rnp->exp_need_flush = true; 504 rnp->exp_need_flush = true;
497 } 505 }
498 506
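
The replaced queue_work_on(rnp->grplo, ...) could name an offline CPU; the new code instead picks the first online CPU in the node's [grplo, grphi] span and falls back to an unbound workqueue when the entire span is offline. The selection logic on its own, with a toy online mask in place of cpu_online_mask:

#include <stdio.h>

#define WORK_CPU_UNBOUND -1

/* Toy stand-in for cpumask_next(): first set bit at or above @start. */
static int next_online(unsigned long online_mask, int start)
{
        for (int cpu = start; cpu < 64; cpu++)
                if (online_mask & (1UL << cpu))
                        return cpu;
        return 64;      /* Past the end: nothing online. */
}

static int pick_worker_cpu(unsigned long online_mask, int grplo, int grphi)
{
        int cpu = next_online(online_mask, grplo);

        if (cpu > grphi)        /* Whole span offline: any CPU will do. */
                return WORK_CPU_UNBOUND;
        return cpu;
}

int main(void)
{
        printf("%d\n", pick_worker_cpu(0x0c, 2, 3));    /* -> 2 */
        printf("%d\n", pick_worker_cpu(0x30, 2, 3));    /* -> unbound */
        return 0;
}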
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index ad53d133f709..a97c20ea9bce 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -74,8 +74,8 @@ static void __init rcu_bootup_announce_oddness(void)
74 pr_info("\tRCU event tracing is enabled.\n"); 74 pr_info("\tRCU event tracing is enabled.\n");
75 if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) || 75 if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
76 (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32)) 76 (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
77 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n", 77 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
78 RCU_FANOUT); 78 RCU_FANOUT);
79 if (rcu_fanout_exact) 79 if (rcu_fanout_exact)
80 pr_info("\tHierarchical RCU autobalancing is disabled.\n"); 80 pr_info("\tHierarchical RCU autobalancing is disabled.\n");
81 if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ)) 81 if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
@@ -88,11 +88,13 @@ static void __init rcu_bootup_announce_oddness(void)
88 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n", 88 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
89 RCU_FANOUT_LEAF); 89 RCU_FANOUT_LEAF);
90 if (rcu_fanout_leaf != RCU_FANOUT_LEAF) 90 if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
91 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf); 91 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
92 rcu_fanout_leaf);
92 if (nr_cpu_ids != NR_CPUS) 93 if (nr_cpu_ids != NR_CPUS)
93 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids); 94 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
94#ifdef CONFIG_RCU_BOOST 95#ifdef CONFIG_RCU_BOOST
95 pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY); 96 pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
97 kthread_prio, CONFIG_RCU_BOOST_DELAY);
96#endif 98#endif
97 if (blimit != DEFAULT_RCU_BLIMIT) 99 if (blimit != DEFAULT_RCU_BLIMIT)
98 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit); 100 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
@@ -127,6 +129,7 @@ static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
127 129
128static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, 130static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
129 bool wake); 131 bool wake);
132static void rcu_read_unlock_special(struct task_struct *t);
130 133
131/* 134/*
132 * Tell them what RCU they are running. 135 * Tell them what RCU they are running.
@@ -183,6 +186,9 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
183 raw_lockdep_assert_held_rcu_node(rnp); 186 raw_lockdep_assert_held_rcu_node(rnp);
184 WARN_ON_ONCE(rdp->mynode != rnp); 187 WARN_ON_ONCE(rdp->mynode != rnp);
185 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); 188 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
189 /* RCU better not be waiting on newly onlined CPUs! */
190 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
191 rdp->grpmask);
186 192
187 /* 193 /*
188 * Decide where to queue the newly blocked task. In theory, 194 * Decide where to queue the newly blocked task. In theory,
@@ -260,8 +266,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
260 * ->exp_tasks pointers, respectively, to reference the newly 266 * ->exp_tasks pointers, respectively, to reference the newly
261 * blocked tasks. 267 * blocked tasks.
262 */ 268 */
263 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) 269 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
264 rnp->gp_tasks = &t->rcu_node_entry; 270 rnp->gp_tasks = &t->rcu_node_entry;
271 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
272 }
265 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) 273 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
266 rnp->exp_tasks = &t->rcu_node_entry; 274 rnp->exp_tasks = &t->rcu_node_entry;
267 WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) != 275 WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
@@ -286,20 +294,24 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
286} 294}
287 295
288/* 296/*
289 * Record a preemptible-RCU quiescent state for the specified CPU. Note 297 * Record a preemptible-RCU quiescent state for the specified CPU.
290 * that this just means that the task currently running on the CPU is 298 * Note that this does not necessarily mean that the task currently running
291 * not in a quiescent state. There might be any number of tasks blocked 299 * on the CPU is in a quiescent state: Instead, it means that the current
292 * while in an RCU read-side critical section. 300 * grace period need not wait on any RCU read-side critical section that
301 * starts later on this CPU. It also means that if the current task is
302 * in an RCU read-side critical section, it has already added itself to
303 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
304 * current task, there might be any number of other tasks blocked while
305 * in an RCU read-side critical section.
293 * 306 *
294 * As with the other rcu_*_qs() functions, callers to this function 307 * Callers to this function must disable preemption.
295 * must disable preemption.
296 */ 308 */
297static void rcu_preempt_qs(void) 309static void rcu_preempt_qs(void)
298{ 310{
299 RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n"); 311 RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
300 if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) { 312 if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
301 trace_rcu_grace_period(TPS("rcu_preempt"), 313 trace_rcu_grace_period(TPS("rcu_preempt"),
302 __this_cpu_read(rcu_data_p->gpnum), 314 __this_cpu_read(rcu_data_p->gp_seq),
303 TPS("cpuqs")); 315 TPS("cpuqs"));
304 __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false); 316 __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
305 barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */ 317 barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
@@ -348,8 +360,8 @@ static void rcu_preempt_note_context_switch(bool preempt)
348 trace_rcu_preempt_task(rdp->rsp->name, 360 trace_rcu_preempt_task(rdp->rsp->name,
349 t->pid, 361 t->pid,
350 (rnp->qsmask & rdp->grpmask) 362 (rnp->qsmask & rdp->grpmask)
351 ? rnp->gpnum 363 ? rnp->gp_seq
352 : rnp->gpnum + 1); 364 : rcu_seq_snap(&rnp->gp_seq));
353 rcu_preempt_ctxt_queue(rnp, rdp); 365 rcu_preempt_ctxt_queue(rnp, rdp);
354 } else if (t->rcu_read_lock_nesting < 0 && 366 } else if (t->rcu_read_lock_nesting < 0 &&
355 t->rcu_read_unlock_special.s) { 367 t->rcu_read_unlock_special.s) {
@@ -456,7 +468,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
456 * notify RCU core processing or task having blocked during the RCU 468 * notify RCU core processing or task having blocked during the RCU
457 * read-side critical section. 469 * read-side critical section.
458 */ 470 */
459void rcu_read_unlock_special(struct task_struct *t) 471static void rcu_read_unlock_special(struct task_struct *t)
460{ 472{
461 bool empty_exp; 473 bool empty_exp;
462 bool empty_norm; 474 bool empty_norm;
@@ -535,13 +547,15 @@ void rcu_read_unlock_special(struct task_struct *t)
535 WARN_ON_ONCE(rnp != t->rcu_blocked_node); 547 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
536 WARN_ON_ONCE(!rcu_is_leaf_node(rnp)); 548 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
537 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); 549 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
550 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
551 (!empty_norm || rnp->qsmask));
538 empty_exp = sync_rcu_preempt_exp_done(rnp); 552 empty_exp = sync_rcu_preempt_exp_done(rnp);
539 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 553 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
540 np = rcu_next_node_entry(t, rnp); 554 np = rcu_next_node_entry(t, rnp);
541 list_del_init(&t->rcu_node_entry); 555 list_del_init(&t->rcu_node_entry);
542 t->rcu_blocked_node = NULL; 556 t->rcu_blocked_node = NULL;
543 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"), 557 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
544 rnp->gpnum, t->pid); 558 rnp->gp_seq, t->pid);
545 if (&t->rcu_node_entry == rnp->gp_tasks) 559 if (&t->rcu_node_entry == rnp->gp_tasks)
546 rnp->gp_tasks = np; 560 rnp->gp_tasks = np;
547 if (&t->rcu_node_entry == rnp->exp_tasks) 561 if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -562,7 +576,7 @@ void rcu_read_unlock_special(struct task_struct *t)
562 empty_exp_now = sync_rcu_preempt_exp_done(rnp); 576 empty_exp_now = sync_rcu_preempt_exp_done(rnp);
563 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { 577 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
564 trace_rcu_quiescent_state_report(TPS("preempt_rcu"), 578 trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
565 rnp->gpnum, 579 rnp->gp_seq,
566 0, rnp->qsmask, 580 0, rnp->qsmask,
567 rnp->level, 581 rnp->level,
568 rnp->grplo, 582 rnp->grplo,
@@ -686,24 +700,27 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
686 * Check that the list of blocked tasks for the newly completed grace 700 * Check that the list of blocked tasks for the newly completed grace
687 * period is in fact empty. It is a serious bug to complete a grace 701 * period is in fact empty. It is a serious bug to complete a grace
688 * period that still has RCU readers blocked! This function must be 702 * period that still has RCU readers blocked! This function must be
689 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock 703 * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
690 * must be held by the caller. 704 * must be held by the caller.
691 * 705 *
692 * Also, if there are blocked tasks on the list, they automatically 706 * Also, if there are blocked tasks on the list, they automatically
693 * block the newly created grace period, so set up ->gp_tasks accordingly. 707 * block the newly created grace period, so set up ->gp_tasks accordingly.
694 */ 708 */
695static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 709static void
710rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
696{ 711{
697 struct task_struct *t; 712 struct task_struct *t;
698 713
699 RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n"); 714 RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
700 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); 715 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
701 if (rcu_preempt_has_tasks(rnp)) { 716 dump_blkd_tasks(rsp, rnp, 10);
717 if (rcu_preempt_has_tasks(rnp) &&
718 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
702 rnp->gp_tasks = rnp->blkd_tasks.next; 719 rnp->gp_tasks = rnp->blkd_tasks.next;
703 t = container_of(rnp->gp_tasks, struct task_struct, 720 t = container_of(rnp->gp_tasks, struct task_struct,
704 rcu_node_entry); 721 rcu_node_entry);
705 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"), 722 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
706 rnp->gpnum, t->pid); 723 rnp->gp_seq, t->pid);
707 } 724 }
708 WARN_ON_ONCE(rnp->qsmask); 725 WARN_ON_ONCE(rnp->qsmask);
709} 726}
@@ -717,6 +734,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
717 */ 734 */
718static void rcu_preempt_check_callbacks(void) 735static void rcu_preempt_check_callbacks(void)
719{ 736{
737 struct rcu_state *rsp = &rcu_preempt_state;
720 struct task_struct *t = current; 738 struct task_struct *t = current;
721 739
722 if (t->rcu_read_lock_nesting == 0) { 740 if (t->rcu_read_lock_nesting == 0) {
@@ -725,7 +743,9 @@ static void rcu_preempt_check_callbacks(void)
725 } 743 }
726 if (t->rcu_read_lock_nesting > 0 && 744 if (t->rcu_read_lock_nesting > 0 &&
727 __this_cpu_read(rcu_data_p->core_needs_qs) && 745 __this_cpu_read(rcu_data_p->core_needs_qs) &&
728 __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm)) 746 __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
747 !t->rcu_read_unlock_special.b.need_qs &&
748 time_after(jiffies, rsp->gp_start + HZ))
729 t->rcu_read_unlock_special.b.need_qs = true; 749 t->rcu_read_unlock_special.b.need_qs = true;
730} 750}
731 751
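
The extra conditions make the need_qs nudge both idempotent (never re-set a flag the reader already carries) and patient: the outermost rcu_read_unlock() is only asked to do extra work once the grace period is at least one second old. The jiffies test relies on the kernel's wrap-safe time_after() comparison, sketched here:

#include <stdio.h>

#define HZ 100
#define time_after(a, b) ((long)((b) - (a)) < 0)        /* wrap-safe */

int main(void)
{
        unsigned long gp_start = 1000, jiffies = 1050;

        /* Young grace period: don't burden the reader yet. */
        printf("%d\n", time_after(jiffies, gp_start + HZ));     /* 0 */
        jiffies = 1150;
        printf("%d\n", time_after(jiffies, gp_start + HZ));     /* 1 */
        return 0;
}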
@@ -841,6 +861,47 @@ void exit_rcu(void)
841 __rcu_read_unlock(); 861 __rcu_read_unlock();
842} 862}
843 863
864/*
865 * Dump the blocked-tasks state, but limit the list dump to the
866 * specified number of elements.
867 */
868static void
869dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
870{
871 int cpu;
872 int i;
873 struct list_head *lhp;
874 bool onl;
875 struct rcu_data *rdp;
876 struct rcu_node *rnp1;
877
878 raw_lockdep_assert_held_rcu_node(rnp);
879 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
880 __func__, rnp->grplo, rnp->grphi, rnp->level,
881 (long)rnp->gp_seq, (long)rnp->completedqs);
882 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
883 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
884 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
885 pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
886 __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
887 pr_info("%s: ->blkd_tasks", __func__);
888 i = 0;
889 list_for_each(lhp, &rnp->blkd_tasks) {
890 pr_cont(" %p", lhp);
891 if (++i >= 10)
892 break;
893 }
894 pr_cont("\n");
895 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
896 rdp = per_cpu_ptr(rsp->rda, cpu);
897 onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
898 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
899 cpu, ".o"[onl],
900 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
901 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
902 }
903}
904
844#else /* #ifdef CONFIG_PREEMPT_RCU */ 905#else /* #ifdef CONFIG_PREEMPT_RCU */
845 906
846static struct rcu_state *const rcu_state_p = &rcu_sched_state; 907static struct rcu_state *const rcu_state_p = &rcu_sched_state;
@@ -911,7 +972,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
911 * so there is no need to check for blocked tasks. So check only for 972 * so there is no need to check for blocked tasks. So check only for
912 * bogus qsmask values. 973 * bogus qsmask values.
913 */ 974 */
914static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 975static void
976rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
915{ 977{
916 WARN_ON_ONCE(rnp->qsmask); 978 WARN_ON_ONCE(rnp->qsmask);
917} 979}
@@ -949,6 +1011,15 @@ void exit_rcu(void)
949{ 1011{
950} 1012}
951 1013
1014/*
1015 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
1016 */
1017static void
1018dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
1019{
1020 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
1021}
1022
952#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ 1023#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
953 1024
954#ifdef CONFIG_RCU_BOOST 1025#ifdef CONFIG_RCU_BOOST
@@ -1433,7 +1504,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
1433 * completed since we last checked and there are 1504 * completed since we last checked and there are
1434 * callbacks not yet ready to invoke. 1505 * callbacks not yet ready to invoke.
1435 */ 1506 */
1436 if ((rdp->completed != rnp->completed || 1507 if ((rcu_seq_completed_gp(rdp->gp_seq,
1508 rcu_seq_current(&rnp->gp_seq)) ||
1437 unlikely(READ_ONCE(rdp->gpwrap))) && 1509 unlikely(READ_ONCE(rdp->gpwrap))) &&
1438 rcu_segcblist_pend_cbs(&rdp->cblist)) 1510 rcu_segcblist_pend_cbs(&rdp->cblist))
1439 note_gp_changes(rsp, rdp); 1511 note_gp_changes(rsp, rdp);
@@ -1720,16 +1792,16 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1720 */ 1792 */
1721 touch_nmi_watchdog(); 1793 touch_nmi_watchdog();
1722 1794
1723 if (rsp->gpnum == rdp->gpnum) { 1795 ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
1796 if (ticks_value) {
1797 ticks_title = "GPs behind";
1798 } else {
1724 ticks_title = "ticks this GP"; 1799 ticks_title = "ticks this GP";
1725 ticks_value = rdp->ticks_this_gp; 1800 ticks_value = rdp->ticks_this_gp;
1726 } else {
1727 ticks_title = "GPs behind";
1728 ticks_value = rsp->gpnum - rdp->gpnum;
1729 } 1801 }
1730 print_cpu_stall_fast_no_hz(fast_no_hz, cpu); 1802 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1731 delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum; 1803 delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
1732 pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n", 1804 pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
1733 cpu, 1805 cpu,
1734 "O."[!!cpu_online(cpu)], 1806 "O."[!!cpu_online(cpu)],
1735 "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)], 1807 "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1817,7 +1889,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1817 1889
1818static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) 1890static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1819{ 1891{
1820 return &rnp->nocb_gp_wq[rnp->completed & 0x1]; 1892 return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
1821} 1893}
1822 1894
1823static void rcu_init_one_nocb(struct rcu_node *rnp) 1895static void rcu_init_one_nocb(struct rcu_node *rnp)
@@ -2069,12 +2141,17 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2069 bool needwake; 2141 bool needwake;
2070 struct rcu_node *rnp = rdp->mynode; 2142 struct rcu_node *rnp = rdp->mynode;
2071 2143
2072 raw_spin_lock_irqsave_rcu_node(rnp, flags); 2144 local_irq_save(flags);
2073 c = rcu_cbs_completed(rdp->rsp, rnp); 2145 c = rcu_seq_snap(&rdp->rsp->gp_seq);
2074 needwake = rcu_start_this_gp(rnp, rdp, c); 2146 if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
2075 raw_spin_unlock_irqrestore_rcu_node(rnp, flags); 2147 local_irq_restore(flags);
2076 if (needwake) 2148 } else {
2077 rcu_gp_kthread_wake(rdp->rsp); 2149 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2150 needwake = rcu_start_this_gp(rnp, rdp, c);
2151 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2152 if (needwake)
2153 rcu_gp_kthread_wake(rdp->rsp);
2154 }
2078 2155
2079 /* 2156 /*
2080 * Wait for the grace period. Do so interruptibly to avoid messing 2157 * Wait for the grace period. Do so interruptibly to avoid messing
@@ -2083,8 +2160,8 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2083 trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait")); 2160 trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
2084 for (;;) { 2161 for (;;) {
2085 swait_event_interruptible_exclusive( 2162 swait_event_interruptible_exclusive(
2086 rnp->nocb_gp_wq[c & 0x1], 2163 rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
2087 (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); 2164 (d = rcu_seq_done(&rnp->gp_seq, c)));
2088 if (likely(d)) 2165 if (likely(d))
2089 break; 2166 break;
2090 WARN_ON(signal_pending(current)); 2167 WARN_ON(signal_pending(current));
@@ -2569,23 +2646,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
2569#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ 2646#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2570 2647
2571/* 2648/*
2572 * An adaptive-ticks CPU can potentially execute in kernel mode for an
2573 * arbitrarily long period of time with the scheduling-clock tick turned
2574 * off. RCU will be paying attention to this CPU because it is in the
2575 * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2576 * machine because the scheduling-clock tick has been disabled. Therefore,
2577 * if an adaptive-ticks CPU is failing to respond to the current grace
2578 * period and has not been idle from an RCU perspective, kick it.
2579 */
2580static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2581{
2582#ifdef CONFIG_NO_HZ_FULL
2583 if (tick_nohz_full_cpu(cpu))
2584 smp_send_reschedule(cpu);
2585#endif /* #ifdef CONFIG_NO_HZ_FULL */
2586}
2587
2588/*
2589 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the 2649 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
2590 * grace-period kthread will do force_quiescent_state() processing? 2650 * grace-period kthread will do force_quiescent_state() processing?
2591 * The idea is to avoid waking up RCU core processing on such a 2651 * The idea is to avoid waking up RCU core processing on such a
@@ -2610,8 +2670,6 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
2610 */ 2670 */
2611static void rcu_bind_gp_kthread(void) 2671static void rcu_bind_gp_kthread(void)
2612{ 2672{
2613 int __maybe_unused cpu;
2614
2615 if (!tick_nohz_full_enabled()) 2673 if (!tick_nohz_full_enabled())
2616 return; 2674 return;
2617 housekeeping_affine(current, HK_FLAG_RCU); 2675 housekeeping_affine(current, HK_FLAG_RCU);
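
The tree_plugin.h hunks above belong to the ->gpnum/->completed to ->gp_seq conversion: a single sequence number whose low bits carry grace-period state and whose upper bits count grace periods. A stand-alone sketch, assuming the two-bit state layout of kernel/rcu/rcu.h, shows how rcu_seq_ctr() and the even/odd nocb wait-queue index used by rcu_nocb_gp_get() fall out:

    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT  2                      /* assumed layout from rcu.h */
    #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)

    static unsigned long rcu_seq_ctr(unsigned long s)
    {
            return s >> RCU_SEQ_CTR_SHIFT;             /* strip the state bits */
    }

    int main(void)
    {
            unsigned long gp_seq = (7UL << RCU_SEQ_CTR_SHIFT) | 1; /* GP #7, in progress */

            printf("ctr=%lu state=%lu waitq=%lu\n",
                   rcu_seq_ctr(gp_seq),
                   gp_seq & RCU_SEQ_STATE_MASK,
                   rcu_seq_ctr(gp_seq) & 0x1);         /* rcu_nocb_gp_get() index */
            return 0;
    }
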
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 4c230a60ece4..39cb23d22109 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
507#ifdef CONFIG_TASKS_RCU 507#ifdef CONFIG_TASKS_RCU
508 508
509/* 509/*
510 * Simple variant of RCU whose quiescent states are voluntary context switch, 510 * Simple variant of RCU whose quiescent states are voluntary context
511 * user-space execution, and idle. As such, grace periods can take one good 511 * switch, cond_resched_rcu_qs(), user-space execution, and idle.
512 * long time. There are no read-side primitives similar to rcu_read_lock() 512 * As such, grace periods can take one good long time. There are no
513 * and rcu_read_unlock() because this implementation is intended to get 513 * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
514 * the system into a safe state for some of the manipulations involved in 514 * because this implementation is intended to get the system into a safe
515 * tracing and the like. Finally, this implementation does not support 515 * state for some of the manipulations involved in tracing and the like.
516 * high call_rcu_tasks() rates from multiple CPUs. If this is required, 516 * Finally, this implementation does not support high call_rcu_tasks()
517 * per-CPU callback lists will be needed. 517 * rates from multiple CPUs. If this is required, per-CPU callback lists
518 * will be needed.
518 */ 519 */
519 520
520/* Global list of callbacks and associated lock. */ 521/* Global list of callbacks and associated lock. */
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
542 * period elapses, in other words after all currently executing RCU 543 * period elapses, in other words after all currently executing RCU
543 * read-side critical sections have completed. call_rcu_tasks() assumes 544 * read-side critical sections have completed. call_rcu_tasks() assumes
544 * that the read-side critical sections end at a voluntary context 545 * that the read-side critical sections end at a voluntary context
545 * switch (not a preemption!), entry into idle, or transition to usermode 546 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
546 * execution. As such, there are no read-side primitives analogous to 547 * or transition to usermode execution. As such, there are no read-side
547 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended 548 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
548 * to determine that all tasks have passed through a safe state, not so 549 * this primitive is intended to determine that all tasks have passed
549 much for data-structure synchronization. 550 through a safe state, not so much for data-structure synchronization.
550 * 551 *
551 * See the description of call_rcu() for more detailed information on 552 * See the description of call_rcu() for more detailed information on
552 * memory ordering guarantees. 553 * memory ordering guarantees.
@@ -667,6 +668,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
667 struct rcu_head *list; 668 struct rcu_head *list;
668 struct rcu_head *next; 669 struct rcu_head *next;
669 LIST_HEAD(rcu_tasks_holdouts); 670 LIST_HEAD(rcu_tasks_holdouts);
671 int fract;
670 672
671 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ 673 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
672 housekeeping_affine(current, HK_FLAG_RCU); 674 housekeeping_affine(current, HK_FLAG_RCU);
@@ -748,13 +750,25 @@ static int __noreturn rcu_tasks_kthread(void *arg)
748 * holdouts. When the list is empty, we are done. 750 * holdouts. When the list is empty, we are done.
749 */ 751 */
750 lastreport = jiffies; 752 lastreport = jiffies;
751 while (!list_empty(&rcu_tasks_holdouts)) { 753
754 /* Start off with HZ/10 wait and slowly back off to 1 HZ wait */
755 fract = 10;
756
757 for (;;) {
752 bool firstreport; 758 bool firstreport;
753 bool needreport; 759 bool needreport;
754 int rtst; 760 int rtst;
755 struct task_struct *t1; 761 struct task_struct *t1;
756 762
757 schedule_timeout_interruptible(HZ); 763 if (list_empty(&rcu_tasks_holdouts))
764 break;
765
766 /* Slowly back off waiting for holdouts */
767 schedule_timeout_interruptible(HZ/fract);
768
769 if (fract > 1)
770 fract--;
771
758 rtst = READ_ONCE(rcu_task_stall_timeout); 772 rtst = READ_ONCE(rcu_task_stall_timeout);
759 needreport = rtst > 0 && 773 needreport = rtst > 0 &&
760 time_after(jiffies, lastreport + rtst); 774 time_after(jiffies, lastreport + rtst);
@@ -800,6 +814,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
800 list = next; 814 list = next;
801 cond_resched(); 815 cond_resched();
802 } 816 }
817 /* Paranoid sleep to keep this from entering a tight loop */
803 schedule_timeout_uninterruptible(HZ/10); 818 schedule_timeout_uninterruptible(HZ/10);
804 } 819 }
805} 820}
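
The reworked holdout loop above polls quickly at first and then stretches its waits. A stand-alone sketch of that arithmetic (HZ assumed to be 1000 here) prints the resulting wait series:

    #include <stdio.h>

    int main(void)
    {
            const int HZ = 1000;            /* assumption: 1000 jiffies/second */
            int fract = 10;                 /* first wait is HZ/10 */

            for (int pass = 0; pass < 12; pass++) {
                    printf("pass %2d: wait %4d jiffies\n", pass, HZ / fract);
                    if (fract > 1)
                            fract--;        /* HZ/10, HZ/9, ..., HZ/1, then steady */
            }
            return 0;
    }
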
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 75ffc1d1a2e0..6f584861d329 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -390,7 +390,7 @@ static inline void tick_irq_exit(void)
390 390
391 /* Make sure that timer wheel updates are propagated */ 391 /* Make sure that timer wheel updates are propagated */
392 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) { 392 if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
393 if (!in_interrupt()) 393 if (!in_irq())
394 tick_nohz_irq_exit(); 394 tick_nohz_irq_exit();
395 } 395 }
396#endif 396#endif
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 34b6652e8677..067cb83f37ea 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -81,6 +81,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
81 unsigned long flags; 81 unsigned long flags;
82 bool enabled; 82 bool enabled;
83 83
84 preempt_disable();
84 raw_spin_lock_irqsave(&stopper->lock, flags); 85 raw_spin_lock_irqsave(&stopper->lock, flags);
85 enabled = stopper->enabled; 86 enabled = stopper->enabled;
86 if (enabled) 87 if (enabled)
@@ -90,6 +91,7 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
90 raw_spin_unlock_irqrestore(&stopper->lock, flags); 91 raw_spin_unlock_irqrestore(&stopper->lock, flags);
91 92
92 wake_up_q(&wakeq); 93 wake_up_q(&wakeq);
94 preempt_enable();
93 95
94 return enabled; 96 return enabled;
95} 97}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index da9455a6b42b..5b33e2f5c0ed 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,7 +642,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
642 642
643static inline bool local_timer_softirq_pending(void) 643static inline bool local_timer_softirq_pending(void)
644{ 644{
645 return local_softirq_pending() & TIMER_SOFTIRQ; 645 return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
646} 646}
647 647
648static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) 648static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
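
The tick-sched change is the classic bit-index-versus-bit-mask bug: TIMER_SOFTIRQ is an index into the pending word, not a mask. A minimal sketch, assuming the usual softirq numbering (HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1), shows why the old test checked the wrong bit:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))
    enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1 };     /* assumed numbering */

    int main(void)
    {
            unsigned long pending = BIT(TIMER_SOFTIRQ);  /* only the timer bit set */

            /* Old test: TIMER_SOFTIRQ is the bit index (1), so "& 1" really
             * checks bit 0, i.e. HI_SOFTIRQ -- prints 0 despite the pending
             * timer softirq. */
            printf("pending & TIMER_SOFTIRQ      = %lu\n",
                   pending & TIMER_SOFTIRQ);

            /* Fixed test: convert the index to a mask first -- prints 2. */
            printf("pending & BIT(TIMER_SOFTIRQ) = %lu\n",
                   pending & BIT(TIMER_SOFTIRQ));
            return 0;
    }
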
diff --git a/kernel/torture.c b/kernel/torture.c
index 3de1efbecd6a..1ac24a826589 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -20,6 +20,9 @@
20 * Author: Paul E. McKenney <paulmck@us.ibm.com> 20 * Author: Paul E. McKenney <paulmck@us.ibm.com>
21 * Based on kernel/rcu/torture.c. 21 * Based on kernel/rcu/torture.c.
22 */ 22 */
23
24#define pr_fmt(fmt) fmt
25
23#include <linux/types.h> 26#include <linux/types.h>
24#include <linux/kernel.h> 27#include <linux/kernel.h>
25#include <linux/init.h> 28#include <linux/init.h>
@@ -53,7 +56,7 @@ MODULE_LICENSE("GPL");
53MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 56MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
54 57
55static char *torture_type; 58static char *torture_type;
56static bool verbose; 59static int verbose;
57 60
58/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ 61/* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */
59#define FULLSTOP_DONTSTOP 0 /* Normal operation. */ 62#define FULLSTOP_DONTSTOP 0 /* Normal operation. */
@@ -98,7 +101,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
98 if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu)) 101 if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
99 return false; 102 return false;
100 103
101 if (verbose) 104 if (verbose > 1)
102 pr_alert("%s" TORTURE_FLAG 105 pr_alert("%s" TORTURE_FLAG
103 "torture_onoff task: offlining %d\n", 106 "torture_onoff task: offlining %d\n",
104 torture_type, cpu); 107 torture_type, cpu);
@@ -111,7 +114,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
111 "torture_onoff task: offline %d failed: errno %d\n", 114 "torture_onoff task: offline %d failed: errno %d\n",
112 torture_type, cpu, ret); 115 torture_type, cpu, ret);
113 } else { 116 } else {
114 if (verbose) 117 if (verbose > 1)
115 pr_alert("%s" TORTURE_FLAG 118 pr_alert("%s" TORTURE_FLAG
116 "torture_onoff task: offlined %d\n", 119 "torture_onoff task: offlined %d\n",
117 torture_type, cpu); 120 torture_type, cpu);
@@ -147,7 +150,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
147 if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu)) 150 if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
148 return false; 151 return false;
149 152
150 if (verbose) 153 if (verbose > 1)
151 pr_alert("%s" TORTURE_FLAG 154 pr_alert("%s" TORTURE_FLAG
152 "torture_onoff task: onlining %d\n", 155 "torture_onoff task: onlining %d\n",
153 torture_type, cpu); 156 torture_type, cpu);
@@ -160,7 +163,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
160 "torture_onoff task: online %d failed: errno %d\n", 163 "torture_onoff task: online %d failed: errno %d\n",
161 torture_type, cpu, ret); 164 torture_type, cpu, ret);
162 } else { 165 } else {
163 if (verbose) 166 if (verbose > 1)
164 pr_alert("%s" TORTURE_FLAG 167 pr_alert("%s" TORTURE_FLAG
165 "torture_onoff task: onlined %d\n", 168 "torture_onoff task: onlined %d\n",
166 torture_type, cpu); 169 torture_type, cpu);
@@ -647,7 +650,7 @@ static void torture_stutter_cleanup(void)
647 * The runnable parameter points to a flag that controls whether or not 650 * The runnable parameter points to a flag that controls whether or not
648 * the test is currently runnable. If there is no such flag, pass in NULL. 651 * the test is currently runnable. If there is no such flag, pass in NULL.
649 */ 652 */
650bool torture_init_begin(char *ttype, bool v) 653bool torture_init_begin(char *ttype, int v)
651{ 654{
652 mutex_lock(&fullstop_mutex); 655 mutex_lock(&fullstop_mutex);
653 if (torture_type != NULL) { 656 if (torture_type != NULL) {
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6a46af21765c..0b0b688ea166 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3227,6 +3227,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
3227} 3227}
3228 3228
3229/** 3229/**
3230 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3231 * @buffer: The ring buffer to see if write is set enabled
3232 *
3233 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3234 * Note that this does NOT mean it is in a writable state.
3235 *
3236 * It may return true when the ring buffer has been disabled by
3237 * ring_buffer_record_disable(), as that is a temporary disabling of
3238 * the ring buffer.
3239 */
3240int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
3241{
3242 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3243}
3244
3245/**
3230 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 3246 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3231 * @buffer: The ring buffer to stop writes to. 3247 * @buffer: The ring buffer to stop writes to.
3232 * @cpu: The CPU buffer to stop 3248 * @cpu: The CPU buffer to stop
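
The new ring_buffer_record_is_set_on() distinguishes the user-visible on/off switch from temporary disables, which the trace.c hunk below relies on when update_max_tr() inherits the setting. A sketch of the counter-plus-flag scheme, assuming the switch lives in a high flag bit (RB_BUFFER_OFF) above the disable counter:

    #include <stdio.h>

    #define RB_BUFFER_OFF (1 << 20)    /* assumption: flag bit above the counter */

    int main(void)
    {
            int record_disabled = 0;

            record_disabled++;         /* ring_buffer_record_disable(): temporary */
            printf("is_on=%d is_set_on=%d\n",
                   !record_disabled,                     /* 0: not writable now   */
                   !(record_disabled & RB_BUFFER_OFF));  /* 1: still set writable */

            record_disabled |= RB_BUFFER_OFF;            /* ring_buffer_record_off() */
            printf("is_on=%d is_set_on=%d\n",
                   !record_disabled,                     /* 0 */
                   !(record_disabled & RB_BUFFER_OFF));  /* 0: explicitly off */
            return 0;
    }
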
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 87cf25171fb8..823687997b01 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1373,6 +1373,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1373 1373
1374 arch_spin_lock(&tr->max_lock); 1374 arch_spin_lock(&tr->max_lock);
1375 1375
1376 /* Inherit the recordable setting from trace_buffer */
1377 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1378 ring_buffer_record_on(tr->max_buffer.buffer);
1379 else
1380 ring_buffer_record_off(tr->max_buffer.buffer);
1381
1376 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer); 1382 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1377 1383
1378 __update_max_tr(tr, tsk, cpu); 1384 __update_max_tr(tr, tsk, cpu);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index d18249683682..5dea177cef53 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
679 goto out_free; 679 goto out_free;
680 680
681 out_reg: 681 out_reg:
682 /* Up the trigger_data count to make sure reg doesn't free it on failure */
683 event_trigger_init(trigger_ops, trigger_data);
682 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); 684 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
683 /* 685 /*
684 * The above returns on success the # of functions enabled, 686 * The above returns on success the # of functions enabled,
@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
686 * Consider no functions a failure too. 688 * Consider no functions a failure too.
687 */ 689 */
688 if (!ret) { 690 if (!ret) {
691 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
689 ret = -ENOENT; 692 ret = -ENOENT;
690 goto out_free; 693 } else if (ret > 0)
691 } else if (ret < 0) 694 ret = 0;
692 goto out_free; 695
693 ret = 0; 696 /* Down the counter of trigger_data or free it if not used anymore */
697 event_trigger_free(trigger_ops, trigger_data);
694 out: 698 out:
695 return ret; 699 return ret;
696 700
@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
1416 goto out; 1420 goto out;
1417 } 1421 }
1418 1422
1423 /* Up the trigger_data count to make sure nothing frees it on failure */
1424 event_trigger_init(trigger_ops, trigger_data);
1425
1419 if (trigger) { 1426 if (trigger) {
1420 number = strsep(&trigger, ":"); 1427 number = strsep(&trigger, ":");
1421 1428
@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
1466 goto out_disable; 1473 goto out_disable;
1467 /* Just return zero, not the number of enabled functions */ 1474 /* Just return zero, not the number of enabled functions */
1468 ret = 0; 1475 ret = 0;
1476 event_trigger_free(trigger_ops, trigger_data);
1469 out: 1477 out:
1470 return ret; 1478 return ret;
1471 1479
@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
1476 out_free: 1484 out_free:
1477 if (cmd_ops->set_filter) 1485 if (cmd_ops->set_filter)
1478 cmd_ops->set_filter(NULL, trigger_data, NULL); 1486 cmd_ops->set_filter(NULL, trigger_data, NULL);
1479 kfree(trigger_data); 1487 event_trigger_free(trigger_ops, trigger_data);
1480 kfree(enable_data); 1488 kfree(enable_data);
1481 goto out; 1489 goto out;
1482} 1490}
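
The event_trigger_callback()/event_enable_trigger_func() changes apply a pin-before-callback refcounting pattern: take a reference before reg(), which may drop or free the data on failure, then release it once done so the last user frees it exactly once. A stand-alone sketch with illustrative names (data_get()/data_put() stand in for event_trigger_init()/event_trigger_free()):

    #include <stdio.h>
    #include <stdlib.h>

    struct trigger_data { int ref; };

    static void data_get(struct trigger_data *d) { d->ref++; }

    static void data_put(struct trigger_data *d)
    {
            if (--d->ref == 0)
                    free(d);
    }

    /* Stand-in for cmd_ops->reg(): on failure it drops the reference it
     * would otherwise have kept, which freed the data in the buggy code. */
    static int reg(struct trigger_data *d, int fail)
    {
            if (fail) {
                    data_put(d);
                    return 0;
            }
            data_get(d);
            return 1;
    }

    int main(void)
    {
            struct trigger_data *d = calloc(1, sizeof(*d));

            d->ref = 1;               /* creator's reference */
            data_get(d);              /* the fix: pin across reg() */
            if (!reg(d, 1))
                    printf("reg failed, data still live: ref=%d\n", d->ref);
            data_put(d);              /* unpin; frees now that nobody uses it */
            return 0;
    }
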
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 21f718472942..6b71860f3998 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
400static int 400static int
401enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) 401enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
402{ 402{
403 struct event_file_link *link = NULL;
403 int ret = 0; 404 int ret = 0;
404 405
405 if (file) { 406 if (file) {
406 struct event_file_link *link;
407
408 link = kmalloc(sizeof(*link), GFP_KERNEL); 407 link = kmalloc(sizeof(*link), GFP_KERNEL);
409 if (!link) { 408 if (!link) {
410 ret = -ENOMEM; 409 ret = -ENOMEM;
@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
424 else 423 else
425 ret = enable_kprobe(&tk->rp.kp); 424 ret = enable_kprobe(&tk->rp.kp);
426 } 425 }
426
427 if (ret) {
428 if (file) {
429 /* Note: the condition is true when WARN_ON_ONCE() does not fire */
430 if (!WARN_ON_ONCE(!link))
431 list_del_rcu(&link->list);
432 kfree(link);
433 tk->tp.flags &= ~TP_FLAG_TRACE;
434 } else {
435 tk->tp.flags &= ~TP_FLAG_PROFILE;
436 }
437 }
427 out: 438 out:
428 return ret; 439 return ret;
429} 440}
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index c253c1b46c6b..befb127507c0 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,7 +5,7 @@ if HAVE_ARCH_KASAN
5 5
6config KASAN 6config KASAN
7 bool "KASan: runtime memory debugger" 7 bool "KASan: runtime memory debugger"
8 depends on SLUB || (SLAB && !DEBUG_SLAB) 8 depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
9 select SLUB_DEBUG if SLUB 9 select SLUB_DEBUG if SLUB
10 select CONSTRUCTORS 10 select CONSTRUCTORS
11 select STACKDEPOT 11 select STACKDEPOT
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 19d42ea75ec2..98fa559ebd80 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,6 @@
1config ARCH_HAS_UBSAN_SANITIZE_ALL 1config ARCH_HAS_UBSAN_SANITIZE_ALL
2 bool 2 bool
3 3
4config ARCH_WANTS_UBSAN_NO_NULL
5 def_bool n
6
7config UBSAN 4config UBSAN
8 bool "Undefined behaviour sanity checker" 5 bool "Undefined behaviour sanity checker"
9 help 6 help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
39 Enabling this option on architectures that support unaligned 36 Enabling this option on architectures that support unaligned
40 accesses may produce a lot of false positives. 37 accesses may produce a lot of false positives.
41 38
42config UBSAN_NULL
43 bool "Enable checking of null pointers"
44 depends on UBSAN
45 default y if !ARCH_WANTS_UBSAN_NO_NULL
46 help
47 This option enables detection of memory accesses via a
48 null pointer.
49
50config TEST_UBSAN 39config TEST_UBSAN
51 tristate "Module for testing for undefined behavior detection" 40 tristate "Module for testing for undefined behavior detection"
52 depends on m && UBSAN 41 depends on m && UBSAN
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 994be4805cec..70935ed91125 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
360 360
361 limit++; 361 limit++;
362 if (is_on_stack) 362 if (is_on_stack)
363 pr_warn("object is on stack, but not annotated\n"); 363 pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
364 task_stack_page(current));
364 else 365 else
365 pr_warn("object is not on stack, but annotated\n"); 366 pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
367 task_stack_page(current));
368
366 WARN_ON(1); 369 WARN_ON(1);
367} 370}
368 371
@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)
1185 1188
1186 if (!obj_cache || debug_objects_replace_static_objects()) { 1189 if (!obj_cache || debug_objects_replace_static_objects()) {
1187 debug_objects_enabled = 0; 1190 debug_objects_enabled = 0;
1188 if (obj_cache) 1191 kmem_cache_destroy(obj_cache);
1189 kmem_cache_destroy(obj_cache);
1190 pr_warn("out of memory.\n"); 1192 pr_warn("out of memory.\n");
1191 } else 1193 } else
1192 debug_objects_selftest(); 1194 debug_objects_selftest();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 039ddbc574e9..3103099f64fd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3167,6 +3167,13 @@ static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3167 return 0; 3167 return 0;
3168} 3168}
3169 3169
3170/*
3171 * When a new function is introduced to vm_operations_struct and added
3172 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3173 * This is because, under the System V memory model, mappings created via
3174 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3175 * and their original vm_ops are overwritten with shm_vm_ops.
3176 */
3170const struct vm_operations_struct hugetlb_vm_ops = { 3177const struct vm_operations_struct hugetlb_vm_ops = {
3171 .fault = hugetlb_vm_op_fault, 3178 .fault = hugetlb_vm_op_fault,
3172 .open = hugetlb_vm_op_open, 3179 .open = hugetlb_vm_op_open,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8c0280b3143e..b2173f7e5164 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4037,6 +4037,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
4037 4037
4038static DEFINE_IDR(mem_cgroup_idr); 4038static DEFINE_IDR(mem_cgroup_idr);
4039 4039
4040static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4041{
4042 if (memcg->id.id > 0) {
4043 idr_remove(&mem_cgroup_idr, memcg->id.id);
4044 memcg->id.id = 0;
4045 }
4046}
4047
4040static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) 4048static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4041{ 4049{
4042 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0); 4050 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
@@ -4047,8 +4055,7 @@ static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4047{ 4055{
4048 VM_BUG_ON(atomic_read(&memcg->id.ref) < n); 4056 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4049 if (atomic_sub_and_test(n, &memcg->id.ref)) { 4057 if (atomic_sub_and_test(n, &memcg->id.ref)) {
4050 idr_remove(&mem_cgroup_idr, memcg->id.id); 4058 mem_cgroup_id_remove(memcg);
4051 memcg->id.id = 0;
4052 4059
4053 /* Memcg ID pins CSS */ 4060 /* Memcg ID pins CSS */
4054 css_put(&memcg->css); 4061 css_put(&memcg->css);
@@ -4185,8 +4192,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
4185 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); 4192 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4186 return memcg; 4193 return memcg;
4187fail: 4194fail:
4188 if (memcg->id.id > 0) 4195 mem_cgroup_id_remove(memcg);
4189 idr_remove(&mem_cgroup_idr, memcg->id.id);
4190 __mem_cgroup_free(memcg); 4196 __mem_cgroup_free(memcg);
4191 return NULL; 4197 return NULL;
4192} 4198}
@@ -4245,6 +4251,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4245 4251
4246 return &memcg->css; 4252 return &memcg->css;
4247fail: 4253fail:
4254 mem_cgroup_id_remove(memcg);
4248 mem_cgroup_free(memcg); 4255 mem_cgroup_free(memcg);
4249 return ERR_PTR(-ENOMEM); 4256 return ERR_PTR(-ENOMEM);
4250} 4257}
diff --git a/mm/memory.c b/mm/memory.c
index 7206a634270b..c5e87a3a82ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1417,11 +1417,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1417 do { 1417 do {
1418 next = pmd_addr_end(addr, end); 1418 next = pmd_addr_end(addr, end);
1419 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 1419 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1420 if (next - addr != HPAGE_PMD_SIZE) { 1420 if (next - addr != HPAGE_PMD_SIZE)
1421 VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
1422 !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1423 __split_huge_pmd(vma, pmd, addr, false, NULL); 1421 __split_huge_pmd(vma, pmd, addr, false, NULL);
1424 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) 1422 else if (zap_huge_pmd(tlb, vma, pmd, addr))
1425 goto next; 1423 goto next;
1426 /* fall through */ 1424 /* fall through */
1427 } 1425 }
@@ -4397,6 +4395,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4397 return -EINVAL; 4395 return -EINVAL;
4398 4396
4399 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 4397 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
4398 if (!maddr)
4399 return -ENOMEM;
4400
4400 if (write) 4401 if (write)
4401 memcpy_toio(maddr + offset, buf, len); 4402 memcpy_toio(maddr + offset, buf, len);
4402 else 4403 else
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9ac49ef17b4e..01f1a14facc4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2505 2505
2506 /* Create pseudo-vma that contains just the policy */ 2506 /* Create pseudo-vma that contains just the policy */
2507 memset(&pvma, 0, sizeof(struct vm_area_struct)); 2507 memset(&pvma, 0, sizeof(struct vm_area_struct));
2508 vma_init(&pvma, NULL);
2508 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ 2509 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2509 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */ 2510 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2510 2511
diff --git a/mm/mmap.c b/mm/mmap.c
index ff1944d8d458..17bbf4d3e24f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1778,6 +1778,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
1778 error = shmem_zero_setup(vma); 1778 error = shmem_zero_setup(vma);
1779 if (error) 1779 if (error)
1780 goto free_vma; 1780 goto free_vma;
1781 } else {
1782 vma_set_anonymous(vma);
1781 } 1783 }
1782 1784
1783 vma_link(mm, vma, prev, rb_link, rb_parent); 1785 vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -2983,6 +2985,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
2983 return -ENOMEM; 2985 return -ENOMEM;
2984 } 2986 }
2985 2987
2988 vma_set_anonymous(vma);
2986 vma->vm_start = addr; 2989 vma->vm_start = addr;
2987 vma->vm_end = addr + len; 2990 vma->vm_end = addr + len;
2988 vma->vm_pgoff = pgoff; 2991 vma->vm_pgoff = pgoff;
diff --git a/mm/nommu.c b/mm/nommu.c
index 1d22fdbf7d7c..9fc9e43335b6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1145,6 +1145,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
1145 if (ret < len) 1145 if (ret < len)
1146 memset(base + ret, 0, len - ret); 1146 memset(base + ret, 0, len - ret);
1147 1147
1148 } else {
1149 vma_set_anonymous(vma);
1148 } 1150 }
1149 1151
1150 return 0; 1152 return 0;
diff --git a/mm/shmem.c b/mm/shmem.c
index 2cab84403055..41b9bbf24e16 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1421{ 1421{
1422 /* Create a pseudo vma that just contains the policy */ 1422 /* Create a pseudo vma that just contains the policy */
1423 memset(vma, 0, sizeof(*vma)); 1423 memset(vma, 0, sizeof(*vma));
1424 vma_init(vma, NULL);
1424 /* Bias interleave by inode number to distribute better across nodes */ 1425 /* Bias interleave by inode number to distribute better across nodes */
1425 vma->vm_pgoff = index + info->vfs_inode.i_ino; 1426 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1426 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1427 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
diff --git a/mm/zswap.c b/mm/zswap.c
index 7d34e69507e3..cd91fd9d96b8 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1026,6 +1026,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
1026 ret = -ENOMEM; 1026 ret = -ENOMEM;
1027 goto reject; 1027 goto reject;
1028 } 1028 }
1029
1030 /* A second zswap_is_full() check after
1031 * zswap_shrink() to make sure the pool
1032 * is now under max_pool_percent
1033 */
1034 if (zswap_is_full()) {
1035 ret = -ENOMEM;
1036 goto reject;
1037 }
1029 } 1038 }
1030 1039
1031 /* allocate entry */ 1040 /* allocate entry */
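
zswap_shrink() writes back only one entry, so a single shrink pass does not guarantee the pool dropped below the limit; hence the second zswap_is_full() check. A toy model of the check-shrink-recheck flow:

    #include <stdio.h>

    static int pool_pages = 100, pool_limit = 90;   /* assumed example values */

    static int pool_is_full(void) { return pool_pages >= pool_limit; }
    static void shrink_one(void)  { pool_pages -= 1; }

    int main(void)
    {
            if (pool_is_full()) {
                    shrink_one();
                    if (pool_is_full()) {           /* the added second check */
                            printf("still full, reject store\n");
                            return 1;
                    }
            }
            printf("store accepted\n");
            return 0;
    }
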
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e0adcd123f48..711d7156efd8 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
131 caifd = caif_get(skb->dev); 131 caifd = caif_get(skb->dev);
132 132
133 WARN_ON(caifd == NULL); 133 WARN_ON(caifd == NULL);
134 if (caifd == NULL) 134 if (!caifd) {
135 rcu_read_unlock();
135 return; 136 return;
137 }
136 138
137 caifd_hold(caifd); 139 caifd_hold(caifd);
138 rcu_read_unlock(); 140 rcu_read_unlock();
diff --git a/net/core/dev.c b/net/core/dev.c
index a5aa1c7444e6..559a91271f82 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -7149,16 +7149,19 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7149 dev->tx_queue_len = new_len; 7149 dev->tx_queue_len = new_len;
7150 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev); 7150 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7151 res = notifier_to_errno(res); 7151 res = notifier_to_errno(res);
7152 if (res) { 7152 if (res)
7153 netdev_err(dev, 7153 goto err_rollback;
7154 "refused to change device tx_queue_len\n"); 7154 res = dev_qdisc_change_tx_queue_len(dev);
7155 dev->tx_queue_len = orig_len; 7155 if (res)
7156 return res; 7156 goto err_rollback;
7157 }
7158 return dev_qdisc_change_tx_queue_len(dev);
7159 } 7157 }
7160 7158
7161 return 0; 7159 return 0;
7160
7161err_rollback:
7162 netdev_err(dev, "refused to change device tx_queue_len\n");
7163 dev->tx_queue_len = orig_len;
7164 return res;
7162} 7165}
7163 7166
7164/** 7167/**
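
The dev_change_tx_queue_len() rework converts two open-coded error paths into the single-exit rollback idiom, so both the notifier and qdisc failures restore the original length in one place. A stand-alone sketch of the idiom (apply_len() and fail_step are illustrative only):

    #include <stdio.h>

    static int apply_len(long new_len, long *cur, int fail_step)
    {
            long orig = *cur;
            int res;

            *cur = new_len;
            res = (fail_step == 1) ? -1 : 0;        /* notifier step */
            if (res)
                    goto err_rollback;
            res = (fail_step == 2) ? -1 : 0;        /* qdisc step */
            if (res)
                    goto err_rollback;
            return 0;

    err_rollback:
            *cur = orig;                            /* one place undoes the change */
            return res;
    }

    int main(void)
    {
            long len = 1000;

            printf("res=%d len=%ld\n", apply_len(2000, &len, 2), len);
            return 0;
    }
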
diff --git a/net/core/filter.c b/net/core/filter.c
index 06da770f543f..9dfd145eedcc 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1712,24 +1712,26 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1712BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, 1712BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1713 u32, offset, void *, to, u32, len, u32, start_header) 1713 u32, offset, void *, to, u32, len, u32, start_header)
1714{ 1714{
1715 u8 *end = skb_tail_pointer(skb);
1716 u8 *net = skb_network_header(skb);
1717 u8 *mac = skb_mac_header(skb);
1715 u8 *ptr; 1718 u8 *ptr;
1716 1719
1717 if (unlikely(offset > 0xffff || len > skb_headlen(skb))) 1720 if (unlikely(offset > 0xffff || len > (end - mac)))
1718 goto err_clear; 1721 goto err_clear;
1719 1722
1720 switch (start_header) { 1723 switch (start_header) {
1721 case BPF_HDR_START_MAC: 1724 case BPF_HDR_START_MAC:
1722 ptr = skb_mac_header(skb) + offset; 1725 ptr = mac + offset;
1723 break; 1726 break;
1724 case BPF_HDR_START_NET: 1727 case BPF_HDR_START_NET:
1725 ptr = skb_network_header(skb) + offset; 1728 ptr = net + offset;
1726 break; 1729 break;
1727 default: 1730 default:
1728 goto err_clear; 1731 goto err_clear;
1729 } 1732 }
1730 1733
1731 if (likely(ptr >= skb_mac_header(skb) && 1734 if (likely(ptr >= mac && ptr + len <= end)) {
1732 ptr + len <= skb_tail_pointer(skb))) {
1733 memcpy(to, ptr, len); 1735 memcpy(to, ptr, len);
1734 return 0; 1736 return 0;
1735 } 1737 }
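
The bpf_skb_load_bytes_relative() fix bounds the copy against the real window [mac, end) rather than skb_headlen(), since the offset is relative to the MAC or network header. A userspace sketch of the window check, with assumed header offsets:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char buf[64] = { 0 };
            unsigned char *mac = buf;               /* assumed: MAC header first */
            unsigned char *net = buf + 14;          /* assumed: 14-byte Ethernet */
            unsigned char *end = buf + sizeof(buf); /* skb_tail_pointer() analog */
            unsigned int offset = 10, len = 8;
            unsigned char out[8];
            unsigned char *ptr = net + offset;      /* BPF_HDR_START_NET case */

            if (len > (size_t)(end - mac))          /* new upfront length check */
                    return 1;
            if (ptr >= mac && ptr + len <= end) {   /* window check from the fix */
                    memcpy(out, ptr, len);
                    printf("copied %u bytes at net offset %u\n", len, offset);
            }
            return 0;
    }
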
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index e7e626fb87bb..e45098593dc0 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -217,7 +217,7 @@ static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
217 if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) 217 if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
218 return -EINVAL; 218 return -EINVAL;
219 219
220 prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_KERNEL); 220 prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
221 if (!prog->name) 221 if (!prog->name)
222 return -ENOMEM; 222 return -ENOMEM;
223 223
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 68bf07206744..43a932cb609b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
269 struct page *page; 269 struct page *page;
270 270
271 /* Empty recycle ring */ 271 /* Empty recycle ring */
272 while ((page = ptr_ring_consume(&pool->ring))) { 272 while ((page = ptr_ring_consume_bh(&pool->ring))) {
273 /* Verify the refcnt invariant of cached pages */ 273 /* Verify the refcnt invariant of cached pages */
274 if (!(page_ref_count(page) == 1)) 274 if (!(page_ref_count(page) == 1))
275 pr_crit("%s() page_pool refcnt %d violation\n", 275 pr_crit("%s() page_pool refcnt %d violation\n",
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5ef61222fdef..e3f743c141b3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2759 return err; 2759 return err;
2760 } 2760 }
2761 2761
2762 dev->rtnl_link_state = RTNL_LINK_INITIALIZED; 2762 if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2763 2763 __dev_notify_flags(dev, old_flags, 0U);
2764 __dev_notify_flags(dev, old_flags, ~0U); 2764 } else {
2765 dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2766 __dev_notify_flags(dev, old_flags, ~0U);
2767 }
2765 return 0; 2768 return 0;
2766} 2769}
2767EXPORT_SYMBOL(rtnl_configure_link); 2770EXPORT_SYMBOL(rtnl_configure_link);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8e51f8555e11..fb35b62af272 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3720,6 +3720,7 @@ normal:
3720 net_warn_ratelimited( 3720 net_warn_ratelimited(
3721 "skb_segment: too many frags: %u %u\n", 3721 "skb_segment: too many frags: %u %u\n",
3722 pos, mss); 3722 pos, mss);
3723 err = -EINVAL;
3723 goto err; 3724 goto err;
3724 } 3725 }
3725 3726
@@ -3753,11 +3754,10 @@ skip_fraglist:
3753 3754
3754perform_csum_check: 3755perform_csum_check:
3755 if (!csum) { 3756 if (!csum) {
3756 if (skb_has_shared_frag(nskb)) { 3757 if (skb_has_shared_frag(nskb) &&
3757 err = __skb_linearize(nskb); 3758 __skb_linearize(nskb))
3758 if (err) 3759 goto err;
3759 goto err; 3760
3760 }
3761 if (!nskb->remcsum_offload) 3761 if (!nskb->remcsum_offload)
3762 nskb->ip_summed = CHECKSUM_NONE; 3762 nskb->ip_summed = CHECKSUM_NONE;
3763 SKB_GSO_CB(nskb)->csum = 3763 SKB_GSO_CB(nskb)->csum =
diff --git a/net/core/sock.c b/net/core/sock.c
index 9e8f65585b81..bc2d7a37297f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
2277 pfrag->offset += use; 2277 pfrag->offset += use;
2278 2278
2279 sge = sg + sg_curr - 1; 2279 sge = sg + sg_curr - 1;
2280 if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page && 2280 if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
2281 sg->offset + sg->length == orig_offset) { 2281 sge->offset + sge->length == orig_offset) {
2282 sg->length += use; 2282 sge->length += use;
2283 } else { 2283 } else {
2284 sge = sg + sg_curr; 2284 sge = sg + sg_curr;
2285 sg_unmark_end(sge); 2285 sg_unmark_end(sge);
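
The sk_alloc_sg() fix is a plain aliasing slip: the coalesce test must read the last scatterlist entry (sge = sg + sg_curr - 1), not the base entry (sg). A toy illustration with made-up offsets:

    #include <stdio.h>

    struct frag { int offset, length; };

    int main(void)
    {
            struct frag sg[4] = { { 0, 100 }, { 100, 50 } };
            int sg_curr = 2;                        /* two entries filled */
            struct frag *sge = sg + sg_curr - 1;    /* last filled entry */
            int orig_offset = 150;                  /* next chunk starts here */

            /* Buggy test read the base entry: 0 + 100 == 150 is false, so a
             * perfectly mergeable chunk was not coalesced.  The fixed test
             * reads the last entry: 100 + 50 == 150, so it merges. */
            printf("buggy: %d  fixed: %d\n",
                   sg[0].offset + sg[0].length == orig_offset,
                   sge->offset + sge->length == orig_offset);
            return 0;
    }
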
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 9d1f22072d5d..6771f1855b96 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -345,7 +345,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
345 rcu_read_lock(); 345 rcu_read_lock();
346 /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */ 346 /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
347 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); 347 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
348 xa->zc_alloc->free(xa->zc_alloc, handle); 348 if (!WARN_ON_ONCE(!xa))
349 xa->zc_alloc->free(xa->zc_alloc, handle);
349 rcu_read_unlock(); 350 rcu_read_unlock();
350 default: 351 default:
351 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */ 352 /* Not possible, checked in xdp_rxq_info_reg_mem_model() */
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 2b75df469220..842a9c7c73a3 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
229 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); 229 struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
230 u32 cwnd = hc->tx_cwnd, restart_cwnd, 230 u32 cwnd = hc->tx_cwnd, restart_cwnd,
231 iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache); 231 iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
232 s32 delta = now - hc->tx_lsndtime;
232 233
233 hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2)); 234 hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
234 235
235 /* don't reduce cwnd below the initial window (IW) */ 236 /* don't reduce cwnd below the initial window (IW) */
236 restart_cwnd = min(cwnd, iwnd); 237 restart_cwnd = min(cwnd, iwnd);
237 cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
238 hc->tx_cwnd = max(cwnd, restart_cwnd);
239 238
239 while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
240 cwnd >>= 1;
241 hc->tx_cwnd = max(cwnd, restart_cwnd);
240 hc->tx_cwnd_stamp = now; 242 hc->tx_cwnd_stamp = now;
241 hc->tx_cwnd_used = 0; 243 hc->tx_cwnd_used = 0;
242 244
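
The old ccid2_cwnd_restart() shifted cwnd by delta/rto in one step, which can shift far below the restart window (or by more than the word width); the new loop halves once per elapsed RTO and clamps. A worked stand-alone example with assumed values (cwnd 64, restart window 10, 5 RTOs idle):

    #include <stdio.h>

    int main(void)
    {
            unsigned int cwnd = 64, restart_cwnd = 10;
            int rto = 200, delta = 1000;            /* 1000 ms idle = 5 RTOs */

            /* Halve once per elapsed RTO, stopping at the restart window. */
            while ((delta -= rto) >= 0 && cwnd > restart_cwnd)
                    cwnd >>= 1;                     /* 64 -> 32 -> 16 -> 8 */
            if (cwnd < restart_cwnd)
                    cwnd = restart_cwnd;            /* max(cwnd, restart_cwnd) */
            printf("cwnd after idle restart: %u\n", cwnd);   /* prints 10 */
            return 0;
    }
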
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 1e3b6a6d8a40..9864bcd3d317 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
639 int ret; 639 int ret;
640 640
641 /* Port's PHY and MAC both need to be EEE capable */ 641 /* Port's PHY and MAC both need to be EEE capable */
642 if (!dev->phydev) 642 if (!dev->phydev && !dp->pl)
643 return -ENODEV; 643 return -ENODEV;
644 644
645 if (!ds->ops->set_mac_eee) 645 if (!ds->ops->set_mac_eee)
@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
659 int ret; 659 int ret;
660 660
661 /* Port's PHY and MAC both need to be EEE capable */ 661 /* Port's PHY and MAC both need to be EEE capable */
662 if (!dev->phydev) 662 if (!dev->phydev && !dp->pl)
663 return -ENODEV; 663 return -ENODEV;
664 664
665 if (!ds->ops->get_mac_eee) 665 if (!ds->ops->get_mac_eee)
@@ -1248,6 +1248,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
1248{ 1248{
1249 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 1249 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1250 1250
1251 if (!netif_running(slave_dev))
1252 return 0;
1253
1251 netif_device_detach(slave_dev); 1254 netif_device_detach(slave_dev);
1252 1255
1253 rtnl_lock(); 1256 rtnl_lock();
@@ -1261,6 +1264,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
1261{ 1264{
1262 struct dsa_port *dp = dsa_slave_to_port(slave_dev); 1265 struct dsa_port *dp = dsa_slave_to_port(slave_dev);
1263 1266
1267 if (!netif_running(slave_dev))
1268 return 0;
1269
1264 netif_device_attach(slave_dev); 1270 netif_device_attach(slave_dev);
1265 1271
1266 rtnl_lock(); 1272 rtnl_lock();
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index e46cdd310e5f..2998b0e47d4b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -292,19 +292,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
292 return ip_hdr(skb)->daddr; 292 return ip_hdr(skb)->daddr;
293 293
294 in_dev = __in_dev_get_rcu(dev); 294 in_dev = __in_dev_get_rcu(dev);
295 BUG_ON(!in_dev);
296 295
297 net = dev_net(dev); 296 net = dev_net(dev);
298 297
299 scope = RT_SCOPE_UNIVERSE; 298 scope = RT_SCOPE_UNIVERSE;
300 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { 299 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
300 bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
301 struct flowi4 fl4 = { 301 struct flowi4 fl4 = {
302 .flowi4_iif = LOOPBACK_IFINDEX, 302 .flowi4_iif = LOOPBACK_IFINDEX,
303 .flowi4_oif = l3mdev_master_ifindex_rcu(dev), 303 .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
304 .daddr = ip_hdr(skb)->saddr, 304 .daddr = ip_hdr(skb)->saddr,
305 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 305 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
306 .flowi4_scope = scope, 306 .flowi4_scope = scope,
307 .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, 307 .flowi4_mark = vmark ? skb->mark : 0,
308 }; 308 };
309 if (!fib_lookup(net, &fl4, &res, 0)) 309 if (!fib_lookup(net, &fl4, &res, 0))
310 return FIB_RES_PREFSRC(net, res); 310 return FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index b3c899a630a0..75151be21413 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1200,8 +1200,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1200 spin_lock_bh(&im->lock); 1200 spin_lock_bh(&im->lock);
1201 if (pmc) { 1201 if (pmc) {
1202 im->interface = pmc->interface; 1202 im->interface = pmc->interface;
1203 im->sfmode = pmc->sfmode; 1203 if (im->sfmode == MCAST_INCLUDE) {
1204 if (pmc->sfmode == MCAST_INCLUDE) {
1205 im->tomb = pmc->tomb; 1204 im->tomb = pmc->tomb;
1206 im->sources = pmc->sources; 1205 im->sources = pmc->sources;
1207 for (psf = im->sources; psf; psf = psf->sf_next) 1206 for (psf = im->sources; psf; psf = psf->sf_next)
@@ -1388,7 +1387,8 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
1388/* 1387/*
1389 * A socket has joined a multicast group on device dev. 1388 * A socket has joined a multicast group on device dev.
1390 */ 1389 */
1391void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode) 1390static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
1391 unsigned int mode)
1392{ 1392{
1393 struct ip_mc_list *im; 1393 struct ip_mc_list *im;
1394#ifdef CONFIG_IP_MULTICAST 1394#ifdef CONFIG_IP_MULTICAST
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 1e4cf3ab560f..0d70608cc2e1 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -157,9 +157,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
157{ 157{
158 struct inet_frag_queue *q; 158 struct inet_frag_queue *q;
159 159
160 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
161 return NULL;
162
163 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); 160 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
164 if (!q) 161 if (!q)
165 return NULL; 162 return NULL;
@@ -204,6 +201,9 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
204{ 201{
205 struct inet_frag_queue *fq; 202 struct inet_frag_queue *fq;
206 203
204 if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
205 return NULL;
206
207 rcu_read_lock(); 207 rcu_read_lock();
208 208
209 fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params); 209 fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 8e9528ebaa8e..d14d741fb05e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -383,11 +383,16 @@ found:
383 int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */ 383 int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
384 384
385 if (i < next->len) { 385 if (i < next->len) {
386 int delta = -next->truesize;
387
386 /* Eat head of the next overlapped fragment 388 /* Eat head of the next overlapped fragment
387 * and leave the loop. The next ones cannot overlap. 389 * and leave the loop. The next ones cannot overlap.
388 */ 390 */
389 if (!pskb_pull(next, i)) 391 if (!pskb_pull(next, i))
390 goto err; 392 goto err;
393 delta += next->truesize;
394 if (delta)
395 add_frag_mem_limit(qp->q.net, delta);
391 next->ip_defrag_offset += i; 396 next->ip_defrag_offset += i;
392 qp->q.meat -= i; 397 qp->q.meat -= i;
393 if (next->ip_summed != CHECKSUM_UNNECESSARY) 398 if (next->ip_summed != CHECKSUM_UNNECESSARY)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b3308e9d9762..0e3edd25f881 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
523 to->dev = from->dev; 523 to->dev = from->dev;
524 to->mark = from->mark; 524 to->mark = from->mark;
525 525
526 skb_copy_hash(to, from);
527
526 /* Copy the flags to each fragment. */ 528 /* Copy the flags to each fragment. */
527 IPCB(to)->flags = IPCB(from)->flags; 529 IPCB(to)->flags = IPCB(from)->flags;
528 530
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64c76dcf7386..c0fe5ad996f2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150{ 150{
151 struct sockaddr_in sin; 151 struct sockaddr_in sin;
152 const struct iphdr *iph = ip_hdr(skb); 152 const struct iphdr *iph = ip_hdr(skb);
153 __be16 *ports = (__be16 *)skb_transport_header(skb); 153 __be16 *ports;
154 int end;
154 155
155 if (skb_transport_offset(skb) + 4 > (int)skb->len) 156 end = skb_transport_offset(skb) + 4;
157 if (end > 0 && !pskb_may_pull(skb, end))
156 return; 158 return;
157 159
158 /* All current transport protocols have the port numbers in the 160 /* All current transport protocols have the port numbers in the
159 * first four bytes of the transport header and this function is 161 * first four bytes of the transport header and this function is
160 * written with this assumption in mind. 162 * written with this assumption in mind.
161 */ 163 */
164 ports = (__be16 *)skb_transport_header(skb);
162 165
163 sin.sin_family = AF_INET; 166 sin.sin_family = AF_INET;
164 sin.sin_addr.s_addr = iph->daddr; 167 sin.sin_addr.s_addr = iph->daddr;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 58e2f479ffb4..4bfff3c87e8e 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -354,6 +354,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
354 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */ 354 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
355 cwnd = (cwnd + 1) & ~1U; 355 cwnd = (cwnd + 1) & ~1U;
356 356
357 /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
358 if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
359 cwnd += 2;
360
357 return cwnd; 361 return cwnd;
358} 362}
359 363
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 5869f89ca656..8b637f9f23a2 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -129,24 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
129 struct dctcp *ca = inet_csk_ca(sk); 129 struct dctcp *ca = inet_csk_ca(sk);
130 struct tcp_sock *tp = tcp_sk(sk); 130 struct tcp_sock *tp = tcp_sk(sk);
131 131
132 /* State has changed from CE=0 to CE=1 and delayed 132 if (!ca->ce_state) {
133 * ACK has not sent yet. 133 /* State has changed from CE=0 to CE=1, force an immediate
134 */ 134 * ACK to reflect the new CE state. If an ACK was delayed,
135 if (!ca->ce_state && 135 * send that first to reflect the prior CE state.
136 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { 136 */
137 u32 tmp_rcv_nxt; 137 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
138 138 __tcp_send_ack(sk, ca->prior_rcv_nxt);
139 /* Save current rcv_nxt. */ 139 tcp_enter_quickack_mode(sk, 1);
140 tmp_rcv_nxt = tp->rcv_nxt;
141
142 /* Generate previous ack with CE=0. */
143 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
144 tp->rcv_nxt = ca->prior_rcv_nxt;
145
146 tcp_send_ack(sk);
147
148 /* Recover current rcv_nxt. */
149 tp->rcv_nxt = tmp_rcv_nxt;
150 } 140 }
151 141
152 ca->prior_rcv_nxt = tp->rcv_nxt; 142 ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -160,24 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
160 struct dctcp *ca = inet_csk_ca(sk); 150 struct dctcp *ca = inet_csk_ca(sk);
161 struct tcp_sock *tp = tcp_sk(sk); 151 struct tcp_sock *tp = tcp_sk(sk);
162 152
163 /* State has changed from CE=1 to CE=0 and delayed 153 if (ca->ce_state) {
164 * ACK has not sent yet. 154 /* State has changed from CE=1 to CE=0, force an immediate
165 */ 155 * ACK to reflect the new CE state. If an ACK was delayed,
166 if (ca->ce_state && 156 * send that first to reflect the prior CE state.
167 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { 157 */
168 u32 tmp_rcv_nxt; 158 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
169 159 __tcp_send_ack(sk, ca->prior_rcv_nxt);
170 /* Save current rcv_nxt. */ 160 tcp_enter_quickack_mode(sk, 1);
171 tmp_rcv_nxt = tp->rcv_nxt;
172
173 /* Generate previous ack with CE=1. */
174 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
175 tp->rcv_nxt = ca->prior_rcv_nxt;
176
177 tcp_send_ack(sk);
178
179 /* Recover current rcv_nxt. */
180 tp->rcv_nxt = tmp_rcv_nxt;
181 } 161 }
182 162
183 ca->prior_rcv_nxt = tp->rcv_nxt; 163 ca->prior_rcv_nxt = tp->rcv_nxt;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8e5522c6833a..f9dcb29be12d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
215 icsk->icsk_ack.quick = quickacks; 215 icsk->icsk_ack.quick = quickacks;
216} 216}
217 217
218static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) 218void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
219{ 219{
220 struct inet_connection_sock *icsk = inet_csk(sk); 220 struct inet_connection_sock *icsk = inet_csk(sk);
221 221
@@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
223 icsk->icsk_ack.pingpong = 0; 223 icsk->icsk_ack.pingpong = 0;
224 icsk->icsk_ack.ato = TCP_ATO_MIN; 224 icsk->icsk_ack.ato = TCP_ATO_MIN;
225} 225}
226EXPORT_SYMBOL(tcp_enter_quickack_mode);
226 227
227/* Send ACKs quickly, if "quick" count is not exhausted 228/* Send ACKs quickly, if "quick" count is not exhausted
228 * and the session is not interactive. 229 * and the session is not interactive.
@@ -245,8 +246,15 @@ static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
245 246
246static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) 247static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
247{ 248{
248 if (tcp_hdr(skb)->cwr) 249 if (tcp_hdr(skb)->cwr) {
249 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 250 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
251
252 /* If the sender is telling us it has entered CWR, then its
253 * cwnd may be very low (even just 1 packet), so we should ACK
254 * immediately.
255 */
256 tcp_enter_quickack_mode((struct sock *)tp, 2);
257 }
250} 258}
251 259
252static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) 260static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
@@ -4357,6 +4365,23 @@ static bool tcp_try_coalesce(struct sock *sk,
4357 return true; 4365 return true;
4358} 4366}
4359 4367
4368static bool tcp_ooo_try_coalesce(struct sock *sk,
4369 struct sk_buff *to,
4370 struct sk_buff *from,
4371 bool *fragstolen)
4372{
4373 bool res = tcp_try_coalesce(sk, to, from, fragstolen);
4374
4375 /* In case tcp_drop() is called later, update to->gso_segs */
4376 if (res) {
4377 u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
4378 max_t(u16, 1, skb_shinfo(from)->gso_segs);
4379
4380 skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
4381 }
4382 return res;
4383}
4384
4360static void tcp_drop(struct sock *sk, struct sk_buff *skb) 4385static void tcp_drop(struct sock *sk, struct sk_buff *skb)
4361{ 4386{
4362 sk_drops_add(sk, skb); 4387 sk_drops_add(sk, skb);
@@ -4480,8 +4505,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4480 /* In the typical case, we are adding an skb to the end of the list. 4505 /* In the typical case, we are adding an skb to the end of the list.
4481 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 4506 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
4482 */ 4507 */
4483 if (tcp_try_coalesce(sk, tp->ooo_last_skb, 4508 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
4484 skb, &fragstolen)) { 4509 skb, &fragstolen)) {
4485coalesce_done: 4510coalesce_done:
4486 tcp_grow_window(sk, skb); 4511 tcp_grow_window(sk, skb);
4487 kfree_skb_partial(skb, fragstolen); 4512 kfree_skb_partial(skb, fragstolen);
@@ -4509,7 +4534,7 @@ coalesce_done:
4509 /* All the bits are present. Drop. */ 4534 /* All the bits are present. Drop. */
4510 NET_INC_STATS(sock_net(sk), 4535 NET_INC_STATS(sock_net(sk),
4511 LINUX_MIB_TCPOFOMERGE); 4536 LINUX_MIB_TCPOFOMERGE);
4512 __kfree_skb(skb); 4537 tcp_drop(sk, skb);
4513 skb = NULL; 4538 skb = NULL;
4514 tcp_dsack_set(sk, seq, end_seq); 4539 tcp_dsack_set(sk, seq, end_seq);
4515 goto add_sack; 4540 goto add_sack;
@@ -4528,11 +4553,11 @@ coalesce_done:
4528 TCP_SKB_CB(skb1)->end_seq); 4553 TCP_SKB_CB(skb1)->end_seq);
4529 NET_INC_STATS(sock_net(sk), 4554 NET_INC_STATS(sock_net(sk),
4530 LINUX_MIB_TCPOFOMERGE); 4555 LINUX_MIB_TCPOFOMERGE);
4531 __kfree_skb(skb1); 4556 tcp_drop(sk, skb1);
4532 goto merge_right; 4557 goto merge_right;
4533 } 4558 }
4534 } else if (tcp_try_coalesce(sk, skb1, 4559 } else if (tcp_ooo_try_coalesce(sk, skb1,
4535 skb, &fragstolen)) { 4560 skb, &fragstolen)) {
4536 goto coalesce_done; 4561 goto coalesce_done;
4537 } 4562 }
4538 p = &parent->rb_right; 4563 p = &parent->rb_right;
@@ -4901,6 +4926,7 @@ end:
4901static void tcp_collapse_ofo_queue(struct sock *sk) 4926static void tcp_collapse_ofo_queue(struct sock *sk)
4902{ 4927{
4903 struct tcp_sock *tp = tcp_sk(sk); 4928 struct tcp_sock *tp = tcp_sk(sk);
4929 u32 range_truesize, sum_tiny = 0;
4904 struct sk_buff *skb, *head; 4930 struct sk_buff *skb, *head;
4905 u32 start, end; 4931 u32 start, end;
4906 4932
@@ -4912,6 +4938,7 @@ new_range:
4912 } 4938 }
4913 start = TCP_SKB_CB(skb)->seq; 4939 start = TCP_SKB_CB(skb)->seq;
4914 end = TCP_SKB_CB(skb)->end_seq; 4940 end = TCP_SKB_CB(skb)->end_seq;
4941 range_truesize = skb->truesize;
4915 4942
4916 for (head = skb;;) { 4943 for (head = skb;;) {
4917 skb = skb_rb_next(skb); 4944 skb = skb_rb_next(skb);
@@ -4922,11 +4949,20 @@ new_range:
4922 if (!skb || 4949 if (!skb ||
4923 after(TCP_SKB_CB(skb)->seq, end) || 4950 after(TCP_SKB_CB(skb)->seq, end) ||
4924 before(TCP_SKB_CB(skb)->end_seq, start)) { 4951 before(TCP_SKB_CB(skb)->end_seq, start)) {
4925 tcp_collapse(sk, NULL, &tp->out_of_order_queue, 4952 /* Do not attempt collapsing tiny skbs */
4926 head, skb, start, end); 4953 if (range_truesize != head->truesize ||
4954 end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
4955 tcp_collapse(sk, NULL, &tp->out_of_order_queue,
4956 head, skb, start, end);
4957 } else {
4958 sum_tiny += range_truesize;
4959 if (sum_tiny > sk->sk_rcvbuf >> 3)
4960 return;
4961 }
4927 goto new_range; 4962 goto new_range;
4928 } 4963 }
4929 4964
4965 range_truesize += skb->truesize;
4930 if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) 4966 if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
4931 start = TCP_SKB_CB(skb)->seq; 4967 start = TCP_SKB_CB(skb)->seq;
4932 if (after(TCP_SKB_CB(skb)->end_seq, end)) 4968 if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4941,6 +4977,7 @@ new_range:
4941 * 2) not add too big latencies if thousands of packets sit there. 4977 * 2) not add too big latencies if thousands of packets sit there.
4942 * (But if application shrinks SO_RCVBUF, we could still end up 4978 * (But if application shrinks SO_RCVBUF, we could still end up
4943 * freeing whole queue here) 4979 * freeing whole queue here)
4980 * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
4944 * 4981 *
4945 * Return true if queue has shrunk. 4982 * Return true if queue has shrunk.
4946 */ 4983 */
@@ -4948,20 +4985,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
4948{ 4985{
4949 struct tcp_sock *tp = tcp_sk(sk); 4986 struct tcp_sock *tp = tcp_sk(sk);
4950 struct rb_node *node, *prev; 4987 struct rb_node *node, *prev;
4988 int goal;
4951 4989
4952 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 4990 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
4953 return false; 4991 return false;
4954 4992
4955 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); 4993 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
4994 goal = sk->sk_rcvbuf >> 3;
4956 node = &tp->ooo_last_skb->rbnode; 4995 node = &tp->ooo_last_skb->rbnode;
4957 do { 4996 do {
4958 prev = rb_prev(node); 4997 prev = rb_prev(node);
4959 rb_erase(node, &tp->out_of_order_queue); 4998 rb_erase(node, &tp->out_of_order_queue);
4999 goal -= rb_to_skb(node)->truesize;
4960 tcp_drop(sk, rb_to_skb(node)); 5000 tcp_drop(sk, rb_to_skb(node));
4961 sk_mem_reclaim(sk); 5001 if (!prev || goal <= 0) {
4962 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 5002 sk_mem_reclaim(sk);
4963 !tcp_under_memory_pressure(sk)) 5003 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
4964 break; 5004 !tcp_under_memory_pressure(sk))
5005 break;
5006 goal = sk->sk_rcvbuf >> 3;
5007 }
4965 node = prev; 5008 node = prev;
4966 } while (node); 5009 } while (node);
4967 tp->ooo_last_skb = rb_to_skb(prev); 5010 tp->ooo_last_skb = rb_to_skb(prev);
@@ -4996,6 +5039,9 @@ static int tcp_prune_queue(struct sock *sk)
4996 else if (tcp_under_memory_pressure(sk)) 5039 else if (tcp_under_memory_pressure(sk))
4997 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 5040 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4998 5041
5042 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5043 return 0;
5044
4999 tcp_collapse_ofo_queue(sk); 5045 tcp_collapse_ofo_queue(sk);
5000 if (!skb_queue_empty(&sk->sk_receive_queue)) 5046 if (!skb_queue_empty(&sk->sk_receive_queue))
5001 tcp_collapse(sk, &sk->sk_receive_queue, NULL, 5047 tcp_collapse(sk, &sk->sk_receive_queue, NULL,
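
The tcp_input.c changes above are receive-path hardening: coalesced out-of-order skbs keep an aggregate gso_segs count so tcp_drop() accounting stays truthful, the collapser skips ranges of tiny skbs once their total exceeds an eighth of sk_rcvbuf, and tcp_prune_ofo_queue() frees in batches against a goal of sk_rcvbuf >> 3 instead of calling sk_mem_reclaim() after every skb. A standalone sketch of the batched-goal loop (a plain list instead of the rbtree; all names invented):

    #include <stdio.h>

    /* Toy stand-in for an out-of-order queue entry. */
    struct node {
        int truesize;       /* memory charged for this entry */
        struct node *prev;  /* toward the oldest entry       */
    };

    /* Walk from the newest entry backwards, dropping entries, and do
     * the (expensive) reclaim bookkeeping only once per "goal" worth
     * of freed bytes -- mirroring goal = sk->sk_rcvbuf >> 3. */
    static int prune(struct node *newest, int rcvbuf, int *rmem_alloc)
    {
        int goal = rcvbuf >> 3; /* drop at least 12.5% per batch */
        int dropped = 0;
        struct node *n = newest;

        while (n) {
            struct node *prev = n->prev;

            goal -= n->truesize;
            *rmem_alloc -= n->truesize; /* tcp_drop() + free */
            dropped++;
            if (!prev || goal <= 0) {
                /* sk_mem_reclaim() would run here */
                if (*rmem_alloc <= rcvbuf)
                    break;
                goal = rcvbuf >> 3; /* start the next batch */
            }
            n = prev;
        }
        return dropped;
    }

    int main(void)
    {
        /* A 4-entry queue of 1000-byte entries, newest last. */
        struct node q[4] = {
            { 1000, NULL }, { 1000, &q[0] }, { 1000, &q[1] }, { 1000, &q[2] },
        };
        int rmem = 4000;
        int dropped = prune(&q[3], 16000, &rmem);

        printf("dropped %d entries, rmem now %d\n", dropped, rmem);
        return 0;
    }
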
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 00e5a300ddb9..c4172c1fb198 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
160} 160}
161 161
162/* Account for an ACK we sent. */ 162/* Account for an ACK we sent. */
163static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 163static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
164 u32 rcv_nxt)
164{ 165{
165 struct tcp_sock *tp = tcp_sk(sk); 166 struct tcp_sock *tp = tcp_sk(sk);
166 167
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
171 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) 172 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
172 __sock_put(sk); 173 __sock_put(sk);
173 } 174 }
175
176 if (unlikely(rcv_nxt != tp->rcv_nxt))
177 return; /* Special ACK sent by DCTCP to reflect ECN */
174 tcp_dec_quickack_mode(sk, pkts); 178 tcp_dec_quickack_mode(sk, pkts);
175 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 179 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
176} 180}
@@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
1023 * We are working here with either a clone of the original 1027 * We are working here with either a clone of the original
1024 * SKB, or a fresh unique copy made by the retransmit engine. 1028 * SKB, or a fresh unique copy made by the retransmit engine.
1025 */ 1029 */
1026static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 1030static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1027 gfp_t gfp_mask) 1031 int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1028{ 1032{
1029 const struct inet_connection_sock *icsk = inet_csk(sk); 1033 const struct inet_connection_sock *icsk = inet_csk(sk);
1030 struct inet_sock *inet; 1034 struct inet_sock *inet;
@@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1100 th->source = inet->inet_sport; 1104 th->source = inet->inet_sport;
1101 th->dest = inet->inet_dport; 1105 th->dest = inet->inet_dport;
1102 th->seq = htonl(tcb->seq); 1106 th->seq = htonl(tcb->seq);
1103 th->ack_seq = htonl(tp->rcv_nxt); 1107 th->ack_seq = htonl(rcv_nxt);
1104 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 1108 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
1105 tcb->tcp_flags); 1109 tcb->tcp_flags);
1106 1110
@@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1141 icsk->icsk_af_ops->send_check(sk, skb); 1145 icsk->icsk_af_ops->send_check(sk, skb);
1142 1146
1143 if (likely(tcb->tcp_flags & TCPHDR_ACK)) 1147 if (likely(tcb->tcp_flags & TCPHDR_ACK))
1144 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 1148 tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
1145 1149
1146 if (skb->len != tcp_header_size) { 1150 if (skb->len != tcp_header_size) {
1147 tcp_event_data_sent(tp, sk); 1151 tcp_event_data_sent(tp, sk);
@@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1178 return err; 1182 return err;
1179} 1183}
1180 1184
1185static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1186 gfp_t gfp_mask)
1187{
1188 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1189 tcp_sk(sk)->rcv_nxt);
1190}
1191
1181/* This routine just queues the buffer for sending. 1192/* This routine just queues the buffer for sending.
1182 * 1193 *
1183 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 1194 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3571,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk)
3571} 3582}
3572 3583
3573/* This routine sends an ack and also updates the window. */ 3584/* This routine sends an ack and also updates the window. */
3574void tcp_send_ack(struct sock *sk) 3585void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
3575{ 3586{
3576 struct sk_buff *buff; 3587 struct sk_buff *buff;
3577 3588
@@ -3604,9 +3615,14 @@ void tcp_send_ack(struct sock *sk)
3604 skb_set_tcp_pure_ack(buff); 3615 skb_set_tcp_pure_ack(buff);
3605 3616
3606 /* Send it off, this clears delayed acks for us. */ 3617 /* Send it off, this clears delayed acks for us. */
3607 tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); 3618 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
3619}
3620EXPORT_SYMBOL_GPL(__tcp_send_ack);
3621
3622void tcp_send_ack(struct sock *sk)
3623{
3624 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
3608} 3625}
3609EXPORT_SYMBOL_GPL(tcp_send_ack);
3610 3626
3611/* This routine sends a packet with an out of date sequence 3627/* This routine sends a packet with an out of date sequence
3612 * number. It assumes the other end will try to ack it. 3628 * number. It assumes the other end will try to ack it.
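
The tcp_output.c part threads an explicit ACK sequence through the transmit path: __tcp_transmit_skb() and __tcp_send_ack() take rcv_nxt as a parameter, the old entry points become thin wrappers passing tp->rcv_nxt, and tcp_event_ack_sent() leaves the delayed-ACK state alone when the value differs from the current rcv_nxt (the DCTCP "prior state" ACK must not cancel a still-needed delayed ACK). A sketch of the wrapper-with-default-argument pattern (placeholder types, not the kernel structures):

    #include <stdint.h>
    #include <stdio.h>

    struct sock_model { uint32_t rcv_nxt; };

    /* Extended primitive: the caller chooses the ACK sequence. */
    static int transmit_ack(struct sock_model *sk, uint32_t rcv_nxt)
    {
        printf("ack_seq=%u\n", rcv_nxt);
        /* Only ordinary ACKs (current rcv_nxt) clear delayed-ACK state;
         * a special ACK for an older value must leave it armed. */
        if (rcv_nxt == sk->rcv_nxt)
            puts("clearing delayed-ACK timer");
        return 0;
    }

    /* Old entry point kept as a wrapper with the default argument. */
    static int send_ack(struct sock_model *sk)
    {
        return transmit_ack(sk, sk->rcv_nxt);
    }

    int main(void)
    {
        struct sock_model sk = { .rcv_nxt = 5000 };

        send_ack(&sk);           /* normal ACK                  */
        transmit_ack(&sk, 4000); /* DCTCP-style prior-state ACK */
        return 0;
    }
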
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 91580c62bb86..f66a1cae3366 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2374 continue; 2374 continue;
2375 if ((rt->fib6_flags & noflags) != 0) 2375 if ((rt->fib6_flags & noflags) != 0)
2376 continue; 2376 continue;
2377 fib6_info_hold(rt); 2377 if (!fib6_info_hold_safe(rt))
2378 continue;
2378 break; 2379 break;
2379 } 2380 }
2380out: 2381out:
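
addrconf_get_prefix_route() above, and several route.c and llc_core.c sites below, replace an unconditional hold with a *_hold_safe() variant: under RCU a lookup can race with the final put, and incrementing a refcount that already reached zero would resurrect an object another CPU is freeing. The underlying idiom is refcount_inc_not_zero(); a userspace sketch with C11 atomics (no RCU machinery, invented names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj { atomic_int refcnt; };

    /* Unconditional hold: only valid if the caller already owns a ref. */
    static void hold(struct obj *o)
    {
        atomic_fetch_add(&o->refcnt, 1);
    }

    /* Hold only while the object is still live; this CAS loop is the
     * essence of refcount_inc_not_zero(). */
    static bool hold_safe(struct obj *o)
    {
        int v = atomic_load(&o->refcnt);

        do {
            if (v == 0)
                return false; /* last put already ran */
        } while (!atomic_compare_exchange_weak(&o->refcnt, &v, v + 1));
        return true;
    }

    int main(void)
    {
        struct obj live = { 1 }, dying = { 0 };

        hold(&live);                              /* fine: ref owned   */
        printf("live:  %d\n", hold_safe(&live));  /* 1                 */
        printf("dying: %d\n", hold_safe(&dying)); /* 0: lookup skips it */
        return 0;
    }

A failed hold_safe() in the lookup loops above simply falls through to `continue`, treating the dying entry as if it were already gone.
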
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2ee08b6a86a4..1a1f876f8e28 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
700 } 700 }
701 if (np->rxopt.bits.rxorigdstaddr) { 701 if (np->rxopt.bits.rxorigdstaddr) {
702 struct sockaddr_in6 sin6; 702 struct sockaddr_in6 sin6;
703 __be16 *ports = (__be16 *) skb_transport_header(skb); 703 __be16 *ports;
704 int end;
704 705
705 if (skb_transport_offset(skb) + 4 <= (int)skb->len) { 706 end = skb_transport_offset(skb) + 4;
707 if (end <= 0 || pskb_may_pull(skb, end)) {
706 /* All current transport protocols have the port numbers in the 708 /* All current transport protocols have the port numbers in the
707 * first four bytes of the transport header and this function is 709 * first four bytes of the transport header and this function is
708 * written with this assumption in mind. 710 * written with this assumption in mind.
709 */ 711 */
712 ports = (__be16 *)skb_transport_header(skb);
710 713
711 sin6.sin6_family = AF_INET6; 714 sin6.sin6_family = AF_INET6;
712 sin6.sin6_addr = ipv6_hdr(skb)->daddr; 715 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
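
The datagram.c fix swaps a raw length comparison for pskb_may_pull(): the four port bytes may be present in the packet yet live in paged fragments, so they must be pulled into the linear area before skb_transport_header() is dereferenced. A toy illustration of why total length is not the same as directly addressable length (invented struct, nothing like the real sk_buff):

    #include <stdio.h>
    #include <string.h>

    /* Toy packet: only the first linear_len bytes are directly
     * addressable; the rest sits in (simulated) fragments. */
    struct pkt {
        unsigned char linear[64];
        int linear_len;
        int total_len;
    };

    /* Ensure at least n bytes are in the linear area, in the spirit of
     * pskb_may_pull(); here we just copy in from a fragment buffer. */
    static int may_pull(struct pkt *p, int n, const unsigned char *frags)
    {
        if (n > p->total_len)
            return 0;                /* packet simply too short */
        if (n > p->linear_len) {     /* pull the missing bytes  */
            memcpy(p->linear + p->linear_len, frags,
                   (size_t)(n - p->linear_len));
            p->linear_len = n;
        }
        return 1;
    }

    int main(void)
    {
        unsigned char frags[] = { 0x00, 0x50 };  /* rest of the ports */
        struct pkt p = { { 0x1f, 0x90 }, 2, 8 }; /* 2 linear, 8 total */

        /* An old-style "4 <= total_len" check would pass here, yet
         * bytes 2..3 are not in the linear area: reading them is a bug. */
        if (may_pull(&p, 4, frags))
            printf("ports: %02x%02x %02x%02x\n",
                   p.linear[0], p.linear[1], p.linear[2], p.linear[3]);
        return 0;
    }
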
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 97513f35bcc5..88a7579c23bd 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -669,8 +669,10 @@ skip_cow:
669 669
670 sg_init_table(sg, nfrags); 670 sg_init_table(sg, nfrags);
671 ret = skb_to_sgvec(skb, sg, 0, skb->len); 671 ret = skb_to_sgvec(skb, sg, 0, skb->len);
672 if (unlikely(ret < 0)) 672 if (unlikely(ret < 0)) {
673 kfree(tmp);
673 goto out; 674 goto out;
675 }
674 676
675 skb->ip_summed = CHECKSUM_NONE; 677 skb->ip_summed = CHECKSUM_NONE;
676 678
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index be491bf6ab6e..ef2505aefc15 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
402 402
403 /* for local traffic to local address, skb dev is the loopback 403 /* for local traffic to local address, skb dev is the loopback
404 * device. Check if there is a dst attached to the skb and if so 404 * device. Check if there is a dst attached to the skb and if so
405 * get the real device index. 405 * get the real device index. Same is needed for replies to a link
406 * local address on a device enslaved to an L3 master device
406 */ 407 */
407 if (unlikely(iif == LOOPBACK_IFINDEX)) { 408 if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
408 const struct rt6_info *rt6 = skb_rt6_info(skb); 409 const struct rt6_info *rt6 = skb_rt6_info(skb);
409 410
410 if (rt6) 411 if (rt6)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a14fb4fcdf18..3168847c30d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
570 to->dev = from->dev; 570 to->dev = from->dev;
571 to->mark = from->mark; 571 to->mark = from->mark;
572 572
573 skb_copy_hash(to, from);
574
573#ifdef CONFIG_NET_SCHED 575#ifdef CONFIG_NET_SCHED
574 to->tc_index = from->tc_index; 576 to->tc_index = from->tc_index;
575#endif 577#endif
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 00e138a44cbb..1cc9650af9fb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1133,12 +1133,8 @@ route_lookup:
1133 max_headroom += 8; 1133 max_headroom += 8;
1134 mtu -= 8; 1134 mtu -= 8;
1135 } 1135 }
1136 if (skb->protocol == htons(ETH_P_IPV6)) { 1136 mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
1137 if (mtu < IPV6_MIN_MTU) 1137 IPV6_MIN_MTU : IPV4_MIN_MTU);
1138 mtu = IPV6_MIN_MTU;
1139 } else if (mtu < 576) {
1140 mtu = 576;
1141 }
1142 1138
1143 skb_dst_update_pmtu(skb, mtu); 1139 skb_dst_update_pmtu(skb, mtu);
1144 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { 1140 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
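
The ip6_tunnel.c hunk folds the per-family minimum-MTU branches into a single clamp and fixes the IPv4 floor on the way: the old code used the literal 576 where the defined minimum is IPV4_MIN_MTU (68, the RFC 791 minimum a module must forward unfragmented). A compilable restatement of the clamp:

    #include <stdio.h>

    #define IPV4_MIN_MTU 68    /* RFC 791 minimum           */
    #define IPV6_MIN_MTU 1280  /* RFC 8200 minimum link MTU */

    static int max_int(int a, int b) { return a > b ? a : b; }

    /* Clamp a tunnel path MTU to the family's legal minimum, mirroring:
     * mtu = max(mtu, v6 ? IPV6_MIN_MTU : IPV4_MIN_MTU); */
    static int clamp_mtu(int mtu, int is_ipv6)
    {
        return max_int(mtu, is_ipv6 ? IPV6_MIN_MTU : IPV4_MIN_MTU);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               clamp_mtu(100, 1),   /* -> 1280                    */
               clamp_mtu(100, 0),   /* -> 100 (68 floor, not 576) */
               clamp_mtu(1500, 1)); /* -> 1500                    */
        return 0;
    }
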
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b7f28deddaea..c72ae3a4fe09 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -480,10 +480,6 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
480 goto tx_err_dst_release; 480 goto tx_err_dst_release;
481 } 481 }
482 482
483 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
484 skb_dst_set(skb, dst);
485 skb->dev = skb_dst(skb)->dev;
486
487 mtu = dst_mtu(dst); 483 mtu = dst_mtu(dst);
488 if (!skb->ignore_df && skb->len > mtu) { 484 if (!skb->ignore_df && skb->len > mtu) {
489 skb_dst_update_pmtu(skb, mtu); 485 skb_dst_update_pmtu(skb, mtu);
@@ -498,9 +494,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
498 htonl(mtu)); 494 htonl(mtu));
499 } 495 }
500 496
501 return -EMSGSIZE; 497 err = -EMSGSIZE;
498 goto tx_err_dst_release;
502 } 499 }
503 500
501 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
502 skb_dst_set(skb, dst);
503 skb->dev = skb_dst(skb)->dev;
504
504 err = dst_output(t->net, skb->sk, skb); 505 err = dst_output(t->net, skb->sk, skb);
505 if (net_xmit_eval(err) == 0) { 506 if (net_xmit_eval(err) == 0) {
506 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); 507 struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
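
The vti6_xmit() reorder is a check-then-commit fix: the old code scrubbed the skb and attached the new dst before the MTU check, so the -EMSGSIZE path both leaked the dst reference and left a half-mutated packet. All mutations now happen after the last failure check, and the error path releases the dst. A shape sketch of the pattern (hypothetical resource names):

    #include <stdio.h>

    struct res { int refs; };

    static void put(struct res *r) { r->refs--; }

    /* Validate everything first; only then commit state changes, so
     * each error exit needs exactly one cleanup (release the ref). */
    static int xmit(struct res *dst, int len, int mtu)
    {
        int err;

        if (len > mtu) {
            err = -90; /* -EMSGSIZE */
            goto err_release;
        }

        /* Point of no return: mutate state only after all checks. */
        printf("committed: len=%d\n", len);
        return 0;

    err_release:
        put(dst); /* the old code leaked this on the MTU error path */
        return err;
    }

    int main(void)
    {
        struct res dst = { .refs = 1 };

        xmit(&dst, 2000, 1500);
        printf("refs after error: %d\n", dst.refs); /* 0: no leak */
        return 0;
    }
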
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 2699be7202be..f60f310785fd 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -790,8 +790,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
790 spin_lock_bh(&im->mca_lock); 790 spin_lock_bh(&im->mca_lock);
791 if (pmc) { 791 if (pmc) {
792 im->idev = pmc->idev; 792 im->idev = pmc->idev;
793 im->mca_sfmode = pmc->mca_sfmode; 793 if (im->mca_sfmode == MCAST_INCLUDE) {
794 if (pmc->mca_sfmode == MCAST_INCLUDE) {
795 im->mca_tomb = pmc->mca_tomb; 794 im->mca_tomb = pmc->mca_tomb;
796 im->mca_sources = pmc->mca_sources; 795 im->mca_sources = pmc->mca_sources;
797 for (psf = im->mca_sources; psf; psf = psf->sf_next) 796 for (psf = im->mca_sources; psf; psf = psf->sf_next)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 2ce0bd17de4f..7208c16302f6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -972,18 +972,15 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
972 rt->dst.lastuse = jiffies; 972 rt->dst.lastuse = jiffies;
973} 973}
974 974
975/* Caller must already hold reference to @from */
975static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) 976static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
976{ 977{
977 rt->rt6i_flags &= ~RTF_EXPIRES; 978 rt->rt6i_flags &= ~RTF_EXPIRES;
978 fib6_info_hold(from);
979 rcu_assign_pointer(rt->from, from); 979 rcu_assign_pointer(rt->from, from);
980 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); 980 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
981 if (from->fib6_metrics != &dst_default_metrics) {
982 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
983 refcount_inc(&from->fib6_metrics->refcnt);
984 }
985} 981}
986 982
983/* Caller must already hold reference to @ort */
987static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort) 984static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
988{ 985{
989 struct net_device *dev = fib6_info_nh_dev(ort); 986 struct net_device *dev = fib6_info_nh_dev(ort);
@@ -1044,9 +1041,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
1044 struct net_device *dev = rt->fib6_nh.nh_dev; 1041 struct net_device *dev = rt->fib6_nh.nh_dev;
1045 struct rt6_info *nrt; 1042 struct rt6_info *nrt;
1046 1043
1044 if (!fib6_info_hold_safe(rt))
1045 return NULL;
1046
1047 nrt = ip6_dst_alloc(dev_net(dev), dev, flags); 1047 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1048 if (nrt) 1048 if (nrt)
1049 ip6_rt_copy_init(nrt, rt); 1049 ip6_rt_copy_init(nrt, rt);
1050 else
1051 fib6_info_release(rt);
1050 1052
1051 return nrt; 1053 return nrt;
1052} 1054}
@@ -1178,10 +1180,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
1178 * Clone the route. 1180 * Clone the route.
1179 */ 1181 */
1180 1182
1183 if (!fib6_info_hold_safe(ort))
1184 return NULL;
1185
1181 dev = ip6_rt_get_dev_rcu(ort); 1186 dev = ip6_rt_get_dev_rcu(ort);
1182 rt = ip6_dst_alloc(dev_net(dev), dev, 0); 1187 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1183 if (!rt) 1188 if (!rt) {
1189 fib6_info_release(ort);
1184 return NULL; 1190 return NULL;
1191 }
1185 1192
1186 ip6_rt_copy_init(rt, ort); 1193 ip6_rt_copy_init(rt, ort);
1187 rt->rt6i_flags |= RTF_CACHE; 1194 rt->rt6i_flags |= RTF_CACHE;
@@ -1210,12 +1217,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
1210 struct net_device *dev; 1217 struct net_device *dev;
1211 struct rt6_info *pcpu_rt; 1218 struct rt6_info *pcpu_rt;
1212 1219
1220 if (!fib6_info_hold_safe(rt))
1221 return NULL;
1222
1213 rcu_read_lock(); 1223 rcu_read_lock();
1214 dev = ip6_rt_get_dev_rcu(rt); 1224 dev = ip6_rt_get_dev_rcu(rt);
1215 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags); 1225 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1216 rcu_read_unlock(); 1226 rcu_read_unlock();
1217 if (!pcpu_rt) 1227 if (!pcpu_rt) {
1228 fib6_info_release(rt);
1218 return NULL; 1229 return NULL;
1230 }
1219 ip6_rt_copy_init(pcpu_rt, rt); 1231 ip6_rt_copy_init(pcpu_rt, rt);
1220 pcpu_rt->rt6i_flags |= RTF_PCPU; 1232 pcpu_rt->rt6i_flags |= RTF_PCPU;
1221 return pcpu_rt; 1233 return pcpu_rt;
@@ -2486,7 +2498,7 @@ restart:
2486 2498
2487out: 2499out:
2488 if (ret) 2500 if (ret)
2489 dst_hold(&ret->dst); 2501 ip6_hold_safe(net, &ret, true);
2490 else 2502 else
2491 ret = ip6_create_rt_rcu(rt); 2503 ret = ip6_create_rt_rcu(rt);
2492 2504
@@ -3303,7 +3315,8 @@ static int ip6_route_del(struct fib6_config *cfg,
3303 continue; 3315 continue;
3304 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol) 3316 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3305 continue; 3317 continue;
3306 fib6_info_hold(rt); 3318 if (!fib6_info_hold_safe(rt))
3319 continue;
3307 rcu_read_unlock(); 3320 rcu_read_unlock();
3308 3321
3309 /* if gateway was specified only delete the one hop */ 3322 /* if gateway was specified only delete the one hop */
@@ -3409,6 +3422,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
3409 3422
3410 rcu_read_lock(); 3423 rcu_read_lock();
3411 from = rcu_dereference(rt->from); 3424 from = rcu_dereference(rt->from);
3425 /* This fib6_info_hold() is safe here because we hold reference to rt
3426 * and rt already holds reference to fib6_info.
3427 */
3412 fib6_info_hold(from); 3428 fib6_info_hold(from);
3413 rcu_read_unlock(); 3429 rcu_read_unlock();
3414 3430
@@ -3470,7 +3486,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
3470 continue; 3486 continue;
3471 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr)) 3487 if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
3472 continue; 3488 continue;
3473 fib6_info_hold(rt); 3489 if (!fib6_info_hold_safe(rt))
3490 continue;
3474 break; 3491 break;
3475 } 3492 }
3476out: 3493out:
@@ -3530,8 +3547,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
3530 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr)) 3547 ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
3531 break; 3548 break;
3532 } 3549 }
3533 if (rt) 3550 if (rt && !fib6_info_hold_safe(rt))
3534 fib6_info_hold(rt); 3551 rt = NULL;
3535 rcu_read_unlock(); 3552 rcu_read_unlock();
3536 return rt; 3553 return rt;
3537} 3554}
@@ -3579,8 +3596,8 @@ restart:
3579 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL; 3596 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
3580 3597
3581 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) && 3598 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3582 (!idev || idev->cnf.accept_ra != 2)) { 3599 (!idev || idev->cnf.accept_ra != 2) &&
3583 fib6_info_hold(rt); 3600 fib6_info_hold_safe(rt)) {
3584 rcu_read_unlock(); 3601 rcu_read_unlock();
3585 ip6_del_rt(net, rt); 3602 ip6_del_rt(net, rt);
3586 goto restart; 3603 goto restart;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7efa9fd7e109..03e6b7a2bc53 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
938 &tcp_hashinfo, NULL, 0, 938 &tcp_hashinfo, NULL, 0,
939 &ipv6h->saddr, 939 &ipv6h->saddr,
940 th->source, &ipv6h->daddr, 940 th->source, &ipv6h->daddr,
941 ntohs(th->source), tcp_v6_iif(skb), 941 ntohs(th->source),
942 tcp_v6_iif_l3_slave(skb),
942 tcp_v6_sdif(skb)); 943 tcp_v6_sdif(skb));
943 if (!sk1) 944 if (!sk1)
944 goto out; 945 goto out;
@@ -1609,7 +1610,8 @@ do_time_wait:
1609 skb, __tcp_hdrlen(th), 1610 skb, __tcp_hdrlen(th),
1610 &ipv6_hdr(skb)->saddr, th->source, 1611 &ipv6_hdr(skb)->saddr, th->source,
1611 &ipv6_hdr(skb)->daddr, 1612 &ipv6_hdr(skb)->daddr,
1612 ntohs(th->dest), tcp_v6_iif(skb), 1613 ntohs(th->dest),
1614 tcp_v6_iif_l3_slave(skb),
1613 sdif); 1615 sdif);
1614 if (sk2) { 1616 if (sk2) {
1615 struct inet_timewait_sock *tw = inet_twsk(sk); 1617 struct inet_timewait_sock *tw = inet_twsk(sk);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index e398797878a9..cf6cca260e7b 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1201,13 +1201,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1201 l2tp_session_get(sock_net(sk), tunnel, 1201 l2tp_session_get(sock_net(sk), tunnel,
1202 stats.session_id); 1202 stats.session_id);
1203 1203
1204 if (session && session->pwtype == L2TP_PWTYPE_PPP) { 1204 if (!session) {
1205 err = pppol2tp_session_ioctl(session, cmd, 1205 err = -EBADR;
1206 arg); 1206 break;
1207 }
1208 if (session->pwtype != L2TP_PWTYPE_PPP) {
1207 l2tp_session_dec_refcount(session); 1209 l2tp_session_dec_refcount(session);
1208 } else {
1209 err = -EBADR; 1210 err = -EBADR;
1211 break;
1210 } 1212 }
1213
1214 err = pppol2tp_session_ioctl(session, cmd, arg);
1215 l2tp_session_dec_refcount(session);
1211 break; 1216 break;
1212 } 1217 }
1213#ifdef CONFIG_XFRM 1218#ifdef CONFIG_XFRM
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 89041260784c..260b3dc1b4a2 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
73 73
74 rcu_read_lock_bh(); 74 rcu_read_lock_bh();
75 sap = __llc_sap_find(sap_value); 75 sap = __llc_sap_find(sap_value);
76 if (sap) 76 if (!sap || !llc_sap_hold_safe(sap))
77 llc_sap_hold(sap); 77 sap = NULL;
78 rcu_read_unlock_bh(); 78 rcu_read_unlock_bh();
79 return sap; 79 return sap;
80} 80}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0a38cc1cbebc..932985ca4e66 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2254 sdata->control_port_over_nl80211)) { 2254 sdata->control_port_over_nl80211)) {
2255 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2255 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2256 bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2256 bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
2257 struct ethhdr *ehdr = eth_hdr(skb);
2258 2257
2259 cfg80211_rx_control_port(dev, skb->data, skb->len, 2258 cfg80211_rx_control_port(dev, skb, noencrypt);
2260 ehdr->h_source,
2261 be16_to_cpu(skb->protocol), noencrypt);
2262 dev_kfree_skb(skb); 2259 dev_kfree_skb(skb);
2263 } else { 2260 } else {
2264 /* deliver to local stack */ 2261 /* deliver to local stack */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5e2e511c4a6f..d02fbfec3783 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2111 if (!sta->uploaded) 2111 if (!sta->uploaded)
2112 continue; 2112 continue;
2113 2113
2114 if (sta->sdata->vif.type != NL80211_IFTYPE_AP) 2114 if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
2115 sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
2115 continue; 2116 continue;
2116 2117
2117 for (state = IEEE80211_STA_NOTEXIST; 2118 for (state = IEEE80211_STA_NOTEXIST;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index abe647d5b8c6..9ce6336d1e55 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
243 * We currently ignore Sync packets 243 * We currently ignore Sync packets
244 * 244 *
245 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 245 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
246 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 246 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
247 }, 247 },
248 [DCCP_PKT_SYNCACK] = { 248 [DCCP_PKT_SYNCACK] = {
249 /* 249 /*
250 * We currently ignore SyncAck packets 250 * We currently ignore SyncAck packets
251 * 251 *
252 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 252 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
253 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 253 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
254 }, 254 },
255 }, 255 },
256 [CT_DCCP_ROLE_SERVER] = { 256 [CT_DCCP_ROLE_SERVER] = {
@@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
371 * We currently ignore Sync packets 371 * We currently ignore Sync packets
372 * 372 *
373 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 373 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
374 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 374 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
375 }, 375 },
376 [DCCP_PKT_SYNCACK] = { 376 [DCCP_PKT_SYNCACK] = {
377 /* 377 /*
378 * We currently ignore SyncAck packets 378 * We currently ignore SyncAck packets
379 * 379 *
380 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */ 380 * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
381 sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG, 381 sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
382 }, 382 },
383 }, 383 },
384}; 384};
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 896d4a36081d..f5745e4c6513 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
75{ 75{
76 ctx->net = net; 76 ctx->net = net;
77 ctx->family = family; 77 ctx->family = family;
78 ctx->level = 0;
78 ctx->table = table; 79 ctx->table = table;
79 ctx->chain = chain; 80 ctx->chain = chain;
80 ctx->nla = nla; 81 ctx->nla = nla;
@@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1597 struct nft_base_chain *basechain; 1598 struct nft_base_chain *basechain;
1598 struct nft_stats *stats = NULL; 1599 struct nft_stats *stats = NULL;
1599 struct nft_chain_hook hook; 1600 struct nft_chain_hook hook;
1600 const struct nlattr *name;
1601 struct nf_hook_ops *ops; 1601 struct nf_hook_ops *ops;
1602 struct nft_trans *trans; 1602 struct nft_trans *trans;
1603 int err; 1603 int err;
@@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1645 return PTR_ERR(stats); 1645 return PTR_ERR(stats);
1646 } 1646 }
1647 1647
1648 err = -ENOMEM;
1648 trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, 1649 trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
1649 sizeof(struct nft_trans_chain)); 1650 sizeof(struct nft_trans_chain));
1650 if (trans == NULL) { 1651 if (trans == NULL)
1651 free_percpu(stats); 1652 goto err;
1652 return -ENOMEM;
1653 }
1654 1653
1655 nft_trans_chain_stats(trans) = stats; 1654 nft_trans_chain_stats(trans) = stats;
1656 nft_trans_chain_update(trans) = true; 1655 nft_trans_chain_update(trans) = true;
@@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
1660 else 1659 else
1661 nft_trans_chain_policy(trans) = -1; 1660 nft_trans_chain_policy(trans) = -1;
1662 1661
1663 name = nla[NFTA_CHAIN_NAME]; 1662 if (nla[NFTA_CHAIN_HANDLE] &&
1664 if (nla[NFTA_CHAIN_HANDLE] && name) { 1663 nla[NFTA_CHAIN_NAME]) {
1665 nft_trans_chain_name(trans) = 1664 struct nft_trans *tmp;
1666 nla_strdup(name, GFP_KERNEL); 1665 char *name;
1667 if (!nft_trans_chain_name(trans)) { 1666
1668 kfree(trans); 1667 err = -ENOMEM;
1669 free_percpu(stats); 1668 name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
1670 return -ENOMEM; 1669 if (!name)
1670 goto err;
1671
1672 err = -EEXIST;
1673 list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
1674 if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
1675 tmp->ctx.table == table &&
1676 nft_trans_chain_update(tmp) &&
1677 nft_trans_chain_name(tmp) &&
1678 strcmp(name, nft_trans_chain_name(tmp)) == 0) {
1679 kfree(name);
1680 goto err;
1681 }
1671 } 1682 }
1683
1684 nft_trans_chain_name(trans) = name;
1672 } 1685 }
1673 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 1686 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
1674 1687
1675 return 0; 1688 return 0;
1689err:
1690 free_percpu(stats);
1691 kfree(trans);
1692 return err;
1676} 1693}
1677 1694
1678static int nf_tables_newchain(struct net *net, struct sock *nlsk, 1695static int nf_tables_newchain(struct net *net, struct sock *nlsk,
@@ -2254,6 +2271,39 @@ done:
2254 return skb->len; 2271 return skb->len;
2255} 2272}
2256 2273
2274static int nf_tables_dump_rules_start(struct netlink_callback *cb)
2275{
2276 const struct nlattr * const *nla = cb->data;
2277 struct nft_rule_dump_ctx *ctx = NULL;
2278
2279 if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
2280 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2281 if (!ctx)
2282 return -ENOMEM;
2283
2284 if (nla[NFTA_RULE_TABLE]) {
2285 ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
2286 GFP_ATOMIC);
2287 if (!ctx->table) {
2288 kfree(ctx);
2289 return -ENOMEM;
2290 }
2291 }
2292 if (nla[NFTA_RULE_CHAIN]) {
2293 ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
2294 GFP_ATOMIC);
2295 if (!ctx->chain) {
2296 kfree(ctx->table);
2297 kfree(ctx);
2298 return -ENOMEM;
2299 }
2300 }
2301 }
2302
2303 cb->data = ctx;
2304 return 0;
2305}
2306
2257static int nf_tables_dump_rules_done(struct netlink_callback *cb) 2307static int nf_tables_dump_rules_done(struct netlink_callback *cb)
2258{ 2308{
2259 struct nft_rule_dump_ctx *ctx = cb->data; 2309 struct nft_rule_dump_ctx *ctx = cb->data;
@@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
2283 2333
2284 if (nlh->nlmsg_flags & NLM_F_DUMP) { 2334 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2285 struct netlink_dump_control c = { 2335 struct netlink_dump_control c = {
 2336 .start = nf_tables_dump_rules_start,
2286 .dump = nf_tables_dump_rules, 2337 .dump = nf_tables_dump_rules,
2287 .done = nf_tables_dump_rules_done, 2338 .done = nf_tables_dump_rules_done,
2288 .module = THIS_MODULE, 2339 .module = THIS_MODULE,
2340 .data = (void *)nla,
2289 }; 2341 };
2290 2342
2291 if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
2292 struct nft_rule_dump_ctx *ctx;
2293
2294 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
2295 if (!ctx)
2296 return -ENOMEM;
2297
2298 if (nla[NFTA_RULE_TABLE]) {
2299 ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
2300 GFP_ATOMIC);
2301 if (!ctx->table) {
2302 kfree(ctx);
2303 return -ENOMEM;
2304 }
2305 }
2306 if (nla[NFTA_RULE_CHAIN]) {
2307 ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
2308 GFP_ATOMIC);
2309 if (!ctx->chain) {
2310 kfree(ctx->table);
2311 kfree(ctx);
2312 return -ENOMEM;
2313 }
2314 }
2315 c.data = ctx;
2316 }
2317
2318 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 2343 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
2319 } 2344 }
2320 2345
@@ -2384,6 +2409,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
2384 struct nft_rule *rule; 2409 struct nft_rule *rule;
2385 int err; 2410 int err;
2386 2411
2412 if (ctx->level == NFT_JUMP_STACK_SIZE)
2413 return -EMLINK;
2414
2387 list_for_each_entry(rule, &chain->rules, list) { 2415 list_for_each_entry(rule, &chain->rules, list) {
2388 if (!nft_is_active_next(ctx->net, rule)) 2416 if (!nft_is_active_next(ctx->net, rule))
2389 continue; 2417 continue;
@@ -3161,6 +3189,18 @@ done:
3161 return skb->len; 3189 return skb->len;
3162} 3190}
3163 3191
3192static int nf_tables_dump_sets_start(struct netlink_callback *cb)
3193{
3194 struct nft_ctx *ctx_dump = NULL;
3195
3196 ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
3197 if (ctx_dump == NULL)
3198 return -ENOMEM;
3199
3200 cb->data = ctx_dump;
3201 return 0;
3202}
3203
3164static int nf_tables_dump_sets_done(struct netlink_callback *cb) 3204static int nf_tables_dump_sets_done(struct netlink_callback *cb)
3165{ 3205{
3166 kfree(cb->data); 3206 kfree(cb->data);
@@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
3188 3228
3189 if (nlh->nlmsg_flags & NLM_F_DUMP) { 3229 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3190 struct netlink_dump_control c = { 3230 struct netlink_dump_control c = {
3231 .start = nf_tables_dump_sets_start,
3191 .dump = nf_tables_dump_sets, 3232 .dump = nf_tables_dump_sets,
3192 .done = nf_tables_dump_sets_done, 3233 .done = nf_tables_dump_sets_done,
3234 .data = &ctx,
3193 .module = THIS_MODULE, 3235 .module = THIS_MODULE,
3194 }; 3236 };
3195 struct nft_ctx *ctx_dump;
3196
3197 ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC);
3198 if (ctx_dump == NULL)
3199 return -ENOMEM;
3200
3201 *ctx_dump = ctx;
3202 c.data = ctx_dump;
3203 3237
3204 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 3238 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
3205 } 3239 }
@@ -3849,6 +3883,15 @@ nla_put_failure:
3849 return -ENOSPC; 3883 return -ENOSPC;
3850} 3884}
3851 3885
3886static int nf_tables_dump_set_start(struct netlink_callback *cb)
3887{
3888 struct nft_set_dump_ctx *dump_ctx = cb->data;
3889
3890 cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
3891
3892 return cb->data ? 0 : -ENOMEM;
3893}
3894
3852static int nf_tables_dump_set_done(struct netlink_callback *cb) 3895static int nf_tables_dump_set_done(struct netlink_callback *cb)
3853{ 3896{
3854 kfree(cb->data); 3897 kfree(cb->data);
@@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
4002 4045
4003 if (nlh->nlmsg_flags & NLM_F_DUMP) { 4046 if (nlh->nlmsg_flags & NLM_F_DUMP) {
4004 struct netlink_dump_control c = { 4047 struct netlink_dump_control c = {
4048 .start = nf_tables_dump_set_start,
4005 .dump = nf_tables_dump_set, 4049 .dump = nf_tables_dump_set,
4006 .done = nf_tables_dump_set_done, 4050 .done = nf_tables_dump_set_done,
4007 .module = THIS_MODULE, 4051 .module = THIS_MODULE,
4008 }; 4052 };
4009 struct nft_set_dump_ctx *dump_ctx; 4053 struct nft_set_dump_ctx dump_ctx = {
4010 4054 .set = set,
4011 dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC); 4055 .ctx = ctx,
4012 if (!dump_ctx) 4056 };
4013 return -ENOMEM;
4014
4015 dump_ctx->set = set;
4016 dump_ctx->ctx = ctx;
4017 4057
4018 c.data = dump_ctx; 4058 c.data = &dump_ctx;
4019 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 4059 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
4020 } 4060 }
4021 4061
@@ -4975,38 +5015,42 @@ done:
4975 return skb->len; 5015 return skb->len;
4976} 5016}
4977 5017
4978static int nf_tables_dump_obj_done(struct netlink_callback *cb) 5018static int nf_tables_dump_obj_start(struct netlink_callback *cb)
4979{ 5019{
4980 struct nft_obj_filter *filter = cb->data; 5020 const struct nlattr * const *nla = cb->data;
5021 struct nft_obj_filter *filter = NULL;
4981 5022
4982 if (filter) { 5023 if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
4983 kfree(filter->table); 5024 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
4984 kfree(filter); 5025 if (!filter)
5026 return -ENOMEM;
5027
5028 if (nla[NFTA_OBJ_TABLE]) {
5029 filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
5030 if (!filter->table) {
5031 kfree(filter);
5032 return -ENOMEM;
5033 }
5034 }
5035
5036 if (nla[NFTA_OBJ_TYPE])
5037 filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
4985 } 5038 }
4986 5039
5040 cb->data = filter;
4987 return 0; 5041 return 0;
4988} 5042}
4989 5043
4990static struct nft_obj_filter * 5044static int nf_tables_dump_obj_done(struct netlink_callback *cb)
4991nft_obj_filter_alloc(const struct nlattr * const nla[])
4992{ 5045{
4993 struct nft_obj_filter *filter; 5046 struct nft_obj_filter *filter = cb->data;
4994
4995 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
4996 if (!filter)
4997 return ERR_PTR(-ENOMEM);
4998 5047
4999 if (nla[NFTA_OBJ_TABLE]) { 5048 if (filter) {
5000 filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC); 5049 kfree(filter->table);
5001 if (!filter->table) { 5050 kfree(filter);
5002 kfree(filter);
5003 return ERR_PTR(-ENOMEM);
5004 }
5005 } 5051 }
5006 if (nla[NFTA_OBJ_TYPE])
5007 filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
5008 5052
5009 return filter; 5053 return 0;
5010} 5054}
5011 5055
5012/* called with rcu_read_lock held */ 5056/* called with rcu_read_lock held */
@@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
5027 5071
5028 if (nlh->nlmsg_flags & NLM_F_DUMP) { 5072 if (nlh->nlmsg_flags & NLM_F_DUMP) {
5029 struct netlink_dump_control c = { 5073 struct netlink_dump_control c = {
5074 .start = nf_tables_dump_obj_start,
5030 .dump = nf_tables_dump_obj, 5075 .dump = nf_tables_dump_obj,
5031 .done = nf_tables_dump_obj_done, 5076 .done = nf_tables_dump_obj_done,
5032 .module = THIS_MODULE, 5077 .module = THIS_MODULE,
5078 .data = (void *)nla,
5033 }; 5079 };
5034 5080
5035 if (nla[NFTA_OBJ_TABLE] ||
5036 nla[NFTA_OBJ_TYPE]) {
5037 struct nft_obj_filter *filter;
5038
5039 filter = nft_obj_filter_alloc(nla);
5040 if (IS_ERR(filter))
5041 return -ENOMEM;
5042
5043 c.data = filter;
5044 }
5045 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 5081 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
5046 } 5082 }
5047 5083
@@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
5320 flowtable->ops[i].priv = &flowtable->data; 5356 flowtable->ops[i].priv = &flowtable->data;
5321 flowtable->ops[i].hook = flowtable->data.type->hook; 5357 flowtable->ops[i].hook = flowtable->data.type->hook;
5322 flowtable->ops[i].dev = dev_array[i]; 5358 flowtable->ops[i].dev = dev_array[i];
5323 flowtable->dev_name[i] = kstrdup(dev_array[i]->name,
5324 GFP_KERNEL);
5325 } 5359 }
5326 5360
5327 return err; 5361 return err;
@@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
5479err6: 5513err6:
5480 i = flowtable->ops_len; 5514 i = flowtable->ops_len;
5481err5: 5515err5:
5482 for (k = i - 1; k >= 0; k--) { 5516 for (k = i - 1; k >= 0; k--)
5483 kfree(flowtable->dev_name[k]);
5484 nf_unregister_net_hook(net, &flowtable->ops[k]); 5517 nf_unregister_net_hook(net, &flowtable->ops[k]);
5485 }
5486 5518
5487 kfree(flowtable->ops); 5519 kfree(flowtable->ops);
5488err4: 5520err4:
@@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
5581 goto nla_put_failure; 5613 goto nla_put_failure;
5582 5614
5583 for (i = 0; i < flowtable->ops_len; i++) { 5615 for (i = 0; i < flowtable->ops_len; i++) {
5584 if (flowtable->dev_name[i][0] && 5616 const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev);
5585 nla_put_string(skb, NFTA_DEVICE_NAME, 5617
5586 flowtable->dev_name[i])) 5618 if (dev &&
5619 nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
5587 goto nla_put_failure; 5620 goto nla_put_failure;
5588 } 5621 }
5589 nla_nest_end(skb, nest_devs); 5622 nla_nest_end(skb, nest_devs);
@@ -5650,37 +5683,39 @@ done:
5650 return skb->len; 5683 return skb->len;
5651} 5684}
5652 5685
5653static int nf_tables_dump_flowtable_done(struct netlink_callback *cb) 5686static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
5654{ 5687{
5655 struct nft_flowtable_filter *filter = cb->data; 5688 const struct nlattr * const *nla = cb->data;
5689 struct nft_flowtable_filter *filter = NULL;
5656 5690
5657 if (!filter) 5691 if (nla[NFTA_FLOWTABLE_TABLE]) {
5658 return 0; 5692 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
5693 if (!filter)
5694 return -ENOMEM;
5659 5695
5660 kfree(filter->table); 5696 filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
5661 kfree(filter); 5697 GFP_ATOMIC);
5698 if (!filter->table) {
5699 kfree(filter);
5700 return -ENOMEM;
5701 }
5702 }
5662 5703
5704 cb->data = filter;
5663 return 0; 5705 return 0;
5664} 5706}
5665 5707
5666static struct nft_flowtable_filter * 5708static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
5667nft_flowtable_filter_alloc(const struct nlattr * const nla[])
5668{ 5709{
5669 struct nft_flowtable_filter *filter; 5710 struct nft_flowtable_filter *filter = cb->data;
5670 5711
5671 filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
5672 if (!filter) 5712 if (!filter)
5673 return ERR_PTR(-ENOMEM); 5713 return 0;
5674 5714
5675 if (nla[NFTA_FLOWTABLE_TABLE]) { 5715 kfree(filter->table);
5676 filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE], 5716 kfree(filter);
5677 GFP_ATOMIC); 5717
5678 if (!filter->table) { 5718 return 0;
5679 kfree(filter);
5680 return ERR_PTR(-ENOMEM);
5681 }
5682 }
5683 return filter;
5684} 5719}
5685 5720
5686/* called with rcu_read_lock held */ 5721/* called with rcu_read_lock held */
@@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
5700 5735
5701 if (nlh->nlmsg_flags & NLM_F_DUMP) { 5736 if (nlh->nlmsg_flags & NLM_F_DUMP) {
5702 struct netlink_dump_control c = { 5737 struct netlink_dump_control c = {
5738 .start = nf_tables_dump_flowtable_start,
5703 .dump = nf_tables_dump_flowtable, 5739 .dump = nf_tables_dump_flowtable,
5704 .done = nf_tables_dump_flowtable_done, 5740 .done = nf_tables_dump_flowtable_done,
5705 .module = THIS_MODULE, 5741 .module = THIS_MODULE,
5742 .data = (void *)nla,
5706 }; 5743 };
5707 5744
5708 if (nla[NFTA_FLOWTABLE_TABLE]) {
5709 struct nft_flowtable_filter *filter;
5710
5711 filter = nft_flowtable_filter_alloc(nla);
5712 if (IS_ERR(filter))
5713 return -ENOMEM;
5714
5715 c.data = filter;
5716 }
5717 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c); 5745 return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
5718 } 5746 }
5719 5747
@@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
5783 kfree(flowtable->name); 5811 kfree(flowtable->name);
5784 flowtable->data.type->free(&flowtable->data); 5812 flowtable->data.type->free(&flowtable->data);
5785 module_put(flowtable->data.type->owner); 5813 module_put(flowtable->data.type->owner);
5814 kfree(flowtable);
5786} 5815}
5787 5816
5788static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, 5817static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
5825 continue; 5854 continue;
5826 5855
5827 nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); 5856 nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
5828 flowtable->dev_name[i][0] = '\0';
5829 flowtable->ops[i].dev = NULL; 5857 flowtable->ops[i].dev = NULL;
5830 break; 5858 break;
5831 } 5859 }
@@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans)
6086 case NFT_MSG_DELTABLE: 6114 case NFT_MSG_DELTABLE:
6087 nf_tables_table_destroy(&trans->ctx); 6115 nf_tables_table_destroy(&trans->ctx);
6088 break; 6116 break;
6117 case NFT_MSG_NEWCHAIN:
6118 kfree(nft_trans_chain_name(trans));
6119 break;
6089 case NFT_MSG_DELCHAIN: 6120 case NFT_MSG_DELCHAIN:
6090 nf_tables_chain_destroy(&trans->ctx); 6121 nf_tables_chain_destroy(&trans->ctx);
6091 break; 6122 break;
@@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
6315 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); 6346 nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
6316 break; 6347 break;
6317 case NFT_MSG_NEWCHAIN: 6348 case NFT_MSG_NEWCHAIN:
6318 if (nft_trans_chain_update(trans)) 6349 if (nft_trans_chain_update(trans)) {
6319 nft_chain_commit_update(trans); 6350 nft_chain_commit_update(trans);
6320 else 6351 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
6352 /* trans destroyed after rcu grace period */
6353 } else {
6321 nft_clear(net, trans->ctx.chain); 6354 nft_clear(net, trans->ctx.chain);
6322 6355 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
6323 nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); 6356 nft_trans_destroy(trans);
6324 nft_trans_destroy(trans); 6357 }
6325 break; 6358 break;
6326 case NFT_MSG_DELCHAIN: 6359 case NFT_MSG_DELCHAIN:
6327 nft_chain_del(trans->ctx.chain); 6360 nft_chain_del(trans->ctx.chain);
@@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net)
6471 case NFT_MSG_NEWCHAIN: 6504 case NFT_MSG_NEWCHAIN:
6472 if (nft_trans_chain_update(trans)) { 6505 if (nft_trans_chain_update(trans)) {
6473 free_percpu(nft_trans_chain_stats(trans)); 6506 free_percpu(nft_trans_chain_stats(trans));
6474 6507 kfree(nft_trans_chain_name(trans));
6475 nft_trans_destroy(trans); 6508 nft_trans_destroy(trans);
6476 } else { 6509 } else {
6477 trans->ctx.table->use--; 6510 trans->ctx.table->use--;
@@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
6837 err = nf_tables_check_loops(ctx, data->verdict.chain); 6870 err = nf_tables_check_loops(ctx, data->verdict.chain);
6838 if (err < 0) 6871 if (err < 0)
6839 return err; 6872 return err;
6840
6841 if (ctx->chain->level + 1 >
6842 data->verdict.chain->level) {
6843 if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
6844 return -EMLINK;
6845 data->verdict.chain->level = ctx->chain->level + 1;
6846 }
6847 } 6873 }
6848 6874
6849 return 0; 6875 return 0;
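
The nf_tables_api.c rework above moves every dump-context allocation into a netlink_dump_control .start callback: .start runs once under the dump machinery's own synchronization, so by the time .dump or .done sees cb->data it is either a fully built private context or the dump never started, closing races where .done could free caller-owned attribute pointers. A function-pointer sketch of the start/dump/done contract (simplified types, invented names):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct cb { void *data; };

    struct dump_ctl {
        int (*start)(struct cb *); /* once, before the first dump */
        int (*dump)(struct cb *);  /* repeatedly, may be resumed  */
        int (*done)(struct cb *);  /* once, always paired w/start */
    };

    /* .start owns turning the raw request into a private, heap-held
     * context; after it returns 0, cb->data is safe for dump/done. */
    static int my_start(struct cb *cb)
    {
        const char *req = cb->data;  /* raw request attribute */
        char *ctx = malloc(strlen(req) + 1);

        if (!ctx)
            return -12;              /* -ENOMEM: dump never runs */
        strcpy(ctx, req);
        cb->data = ctx;
        return 0;
    }

    static int my_dump(struct cb *cb)
    {
        printf("dump: %s\n", (char *)cb->data);
        return 0;
    }

    static int my_done(struct cb *cb)
    {
        free(cb->data);
        return 0;
    }

    static void run(const struct dump_ctl *c, char *req)
    {
        struct cb cb = { .data = req };

        if (c->start(&cb) < 0)
            return; /* nothing allocated, nothing to undo */
        c->dump(&cb);
        c->done(&cb);
    }

    int main(void)
    {
        struct dump_ctl c = { my_start, my_dump, my_done };
        char req[] = "table0";

        run(&c, req);
        return 0;
    }
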
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 15adf8ca82c3..0777a93211e2 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
98 const struct nft_data **d) 98 const struct nft_data **d)
99{ 99{
100 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 100 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
101 struct nft_ctx *pctx = (struct nft_ctx *)ctx;
101 const struct nft_data *data; 102 const struct nft_data *data;
102 int err; 103 int err;
103 104
@@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
109 switch (data->verdict.code) { 110 switch (data->verdict.code) {
110 case NFT_JUMP: 111 case NFT_JUMP:
111 case NFT_GOTO: 112 case NFT_GOTO:
113 pctx->level++;
112 err = nft_chain_validate(ctx, data->verdict.chain); 114 err = nft_chain_validate(ctx, data->verdict.chain);
113 if (err < 0) 115 if (err < 0)
114 return err; 116 return err;
117 pctx->level--;
115 break; 118 break;
116 default: 119 default:
117 break; 120 break;
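
The jump-depth rework replaces the stored per-chain level (which went stale as rules changed) with validation-time accounting: nft_ctx carries a level counter, nft_chain_validate() bails out with -EMLINK at NFT_JUMP_STACK_SIZE, and each jump/goto expression (here and in nft_lookup.c below) increments the counter around its recursive validation. A standalone recursive sketch (toy chain type; the real stack size is 16):

    #include <stdio.h>

    #define JUMP_STACK_SIZE 16
    #define EMLINK 31

    struct chain { const char *name; struct chain *jump_target; };

    /* Depth-checked recursive validation: the counter lives in the walk
     * context, is bumped before recursing and restored afterwards. */
    static int validate(struct chain *c, int *level)
    {
        if (*level == JUMP_STACK_SIZE)
            return -EMLINK;

        if (c->jump_target) {
            int err;

            (*level)++;
            err = validate(c->jump_target, level);
            if (err < 0)
                return err;
            (*level)--;
        }
        return 0;
    }

    int main(void)
    {
        struct chain a = { "a", NULL }, b = { "b", &a };
        struct chain self = { "self", NULL };
        int level = 0;

        printf("b->a:  %d\n", validate(&b, &level));   /* 0       */
        self.jump_target = &self;                      /* a cycle */
        level = 0;
        printf("cycle: %d\n", validate(&self, &level)); /* -EMLINK */
        return 0;
    }

Counting depth per walk also catches loops through chains that were rewired after their level field would have been computed, which the old scheme could miss.
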
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 42e6fadf1417..c2a1d84cdfc4 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
155 struct nft_set_elem *elem) 155 struct nft_set_elem *elem)
156{ 156{
157 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); 157 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
158 struct nft_ctx *pctx = (struct nft_ctx *)ctx;
158 const struct nft_data *data; 159 const struct nft_data *data;
160 int err;
159 161
160 if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) && 162 if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
161 *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END) 163 *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
@@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
165 switch (data->verdict.code) { 167 switch (data->verdict.code) {
166 case NFT_JUMP: 168 case NFT_JUMP:
167 case NFT_GOTO: 169 case NFT_GOTO:
168 return nft_chain_validate(ctx, data->verdict.chain); 170 pctx->level++;
171 err = nft_chain_validate(ctx, data->verdict.chain);
172 if (err < 0)
173 return err;
174 pctx->level--;
175 break;
169 default: 176 default:
170 return 0; 177 break;
171 } 178 }
179
180 return 0;
172} 181}
173 182
174static int nft_lookup_validate(const struct nft_ctx *ctx, 183static int nft_lookup_validate(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 72ef35b51cac..90c3e7e6cacb 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
387 struct nft_rhash *priv = nft_set_priv(set); 387 struct nft_rhash *priv = nft_set_priv(set);
388 388
389 cancel_delayed_work_sync(&priv->gc_work); 389 cancel_delayed_work_sync(&priv->gc_work);
390 rcu_barrier();
390 rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy, 391 rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
391 (void *)set); 392 (void *)set);
392} 393}
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 1f8f257cb518..9873d734b494 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work)
381 381
382 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC); 382 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
383 if (!gcb) 383 if (!gcb)
384 goto out; 384 break;
385 385
386 atomic_dec(&set->nelems); 386 atomic_dec(&set->nelems);
387 nft_set_gc_batch_add(gcb, rbe); 387 nft_set_gc_batch_add(gcb, rbe);
@@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work)
390 rbe = rb_entry(prev, struct nft_rbtree_elem, node); 390 rbe = rb_entry(prev, struct nft_rbtree_elem, node);
391 atomic_dec(&set->nelems); 391 atomic_dec(&set->nelems);
392 nft_set_gc_batch_add(gcb, rbe); 392 nft_set_gc_batch_add(gcb, rbe);
393 prev = NULL;
393 } 394 }
394 node = rb_next(node); 395 node = rb_next(node);
396 if (!node)
397 break;
395 } 398 }
396out:
397 if (gcb) { 399 if (gcb) {
398 for (i = 0; i < gcb->head.cnt; i++) { 400 for (i = 0; i < gcb->head.cnt; i++) {
399 rbe = gcb->elems[i]; 401 rbe = gcb->elems[i];
@@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
440 struct rb_node *node; 442 struct rb_node *node;
441 443
442 cancel_delayed_work_sync(&priv->gc_work); 444 cancel_delayed_work_sync(&priv->gc_work);
445 rcu_barrier();
443 while ((node = priv->root.rb_node) != NULL) { 446 while ((node = priv->root.rb_node) != NULL) {
444 rb_erase(node, &priv->root); 447 rb_erase(node, &priv->root);
445 rbe = rb_entry(node, struct nft_rbtree_elem, node); 448 rbe = rb_entry(node, struct nft_rbtree_elem, node);
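
nft_rhash_destroy() and nft_rbtree_destroy() above gain an rcu_barrier() after cancelling the GC work: garbage collection frees elements through call_rcu(), so callbacks can still be queued when the work is cancelled, and the set's backing table must outlive them. A single-threaded toy model of the deferred-free/barrier pairing (not real RCU; everything here is invented):

    #include <stdio.h>

    #define MAX_PENDING 16

    typedef void (*cb_t)(const void *);

    /* Toy deferred-free queue standing in for call_rcu()'s callbacks. */
    static struct { cb_t fn; const void *arg; } pending[MAX_PENDING];
    static int npending;

    static void call_deferred(cb_t fn, const void *arg) /* ~ call_rcu()    */
    {
        pending[npending].fn = fn;
        pending[npending].arg = arg;
        npending++;
    }

    static void barrier(void)                           /* ~ rcu_barrier() */
    {
        for (int i = 0; i < npending; i++)
            pending[i].fn(pending[i].arg);
        npending = 0;
    }

    static void elem_free(const void *arg)
    {
        printf("freeing %s\n", (const char *)arg);
    }

    int main(void)
    {
        call_deferred(elem_free, "elem1"); /* queued by a GC pass */
        call_deferred(elem_free, "elem2");

        /* Teardown: without draining first, the table these callbacks
         * still reference would be freed out from under them. */
        barrier();
        puts("now safe to free the backing table");
        return 0;
    }
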
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 393573a99a5a..56704d95f82d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -63,6 +63,7 @@
63#include <linux/hash.h> 63#include <linux/hash.h>
64#include <linux/genetlink.h> 64#include <linux/genetlink.h>
65#include <linux/net_namespace.h> 65#include <linux/net_namespace.h>
66#include <linux/nospec.h>
66 67
67#include <net/net_namespace.h> 68#include <net/net_namespace.h>
68#include <net/netns/generic.h> 69#include <net/netns/generic.h>
@@ -679,6 +680,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
679 680
680 if (protocol < 0 || protocol >= MAX_LINKS) 681 if (protocol < 0 || protocol >= MAX_LINKS)
681 return -EPROTONOSUPPORT; 682 return -EPROTONOSUPPORT;
683 protocol = array_index_nospec(protocol, MAX_LINKS);
682 684
683 netlink_lock_table(); 685 netlink_lock_table();
684#ifdef CONFIG_MODULES 686#ifdef CONFIG_MODULES
@@ -1009,6 +1011,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1009 return err; 1011 return err;
1010 } 1012 }
1011 1013
1014 if (nlk->ngroups == 0)
1015 groups = 0;
1016 else if (nlk->ngroups < 8*sizeof(groups))
1017 groups &= (1UL << nlk->ngroups) - 1;
1018
1012 bound = nlk->bound; 1019 bound = nlk->bound;
1013 if (bound) { 1020 if (bound) {
1014 /* Ensure nlk->portid is up-to-date. */ 1021 /* Ensure nlk->portid is up-to-date. */
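
The af_netlink.c changes are Spectre-v1 hardening plus a bind fix: after the bounds check, the protocol index is clamped with array_index_nospec() so a mispredicted branch cannot speculatively index past MAX_LINKS, and netlink_bind() masks the requested multicast groups down to the ngroups the family actually supports. The generic fallback mask can be written in plain C roughly as below (a sketch assuming values fit in a signed long and arithmetic right shift, as the kernel's fallback also assumes):

    #include <stdio.h>

    /* All-ones when 0 <= index < size, all-zeros otherwise, computed
     * without a branch so it stays correct under speculation. Mirrors
     * the shape of the kernel's generic array_index_mask_nospec(). */
    static unsigned long index_mask(unsigned long index, unsigned long size)
    {
        return ~(long)(index | (size - 1UL - index)) >> (8 * sizeof(long) - 1);
    }

    static unsigned long index_nospec(unsigned long index, unsigned long size)
    {
        return index & index_mask(index, size);
    }

    int main(void)
    {
        enum { MAX_LINKS = 32 };

        /* Usage pattern: bounds-check first, then clamp the index that
         * will actually be used for the array access. */
        printf("7  -> %lu\n", index_nospec(7, MAX_LINKS));  /* 7 */
        printf("40 -> %lu\n", index_nospec(40, MAX_LINKS)); /* 0 */
        return 0;
    }

When index is in range, size - 1 - index cannot borrow, the OR's sign bit is clear, and the negated arithmetic shift yields all ones; out of range, the subtraction wraps, the sign bit is set, and the mask collapses to zero.
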
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index b891a91577f8..c038e021a591 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -211,6 +211,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
211 if (!meter) 211 if (!meter)
212 return ERR_PTR(-ENOMEM); 212 return ERR_PTR(-ENOMEM);
213 213
214 meter->id = nla_get_u32(a[OVS_METER_ATTR_ID]);
214 meter->used = div_u64(ktime_get_ns(), 1000 * 1000); 215 meter->used = div_u64(ktime_get_ns(), 1000 * 1000);
215 meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0; 216 meter->kbps = a[OVS_METER_ATTR_KBPS] ? 1 : 0;
216 meter->keep_stats = !a[OVS_METER_ATTR_CLEAR]; 217 meter->keep_stats = !a[OVS_METER_ATTR_CLEAR];
@@ -280,6 +281,10 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
280 u32 meter_id; 281 u32 meter_id;
281 bool failed; 282 bool failed;
282 283
284 if (!a[OVS_METER_ATTR_ID]) {
285 return -ENODEV;
286 }
287
283 meter = dp_meter_create(a); 288 meter = dp_meter_create(a);
284 if (IS_ERR_OR_NULL(meter)) 289 if (IS_ERR_OR_NULL(meter))
285 return PTR_ERR(meter); 290 return PTR_ERR(meter);
@@ -298,11 +303,6 @@ static int ovs_meter_cmd_set(struct sk_buff *skb, struct genl_info *info)
298 goto exit_unlock; 303 goto exit_unlock;
299 } 304 }
300 305
301 if (!a[OVS_METER_ATTR_ID]) {
302 err = -ENODEV;
303 goto exit_unlock;
304 }
305
306 meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]); 306 meter_id = nla_get_u32(a[OVS_METER_ATTR_ID]);
307 307
308 /* Cannot fail after this. */ 308 /* Cannot fail after this. */
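
dp_meter_create() now reads OVS_METER_ATTR_ID itself, so ovs_meter_cmd_set() has to reject a missing ID attribute before calling it; the old check sat after the allocation, too late to prevent the dereference. A small sketch of the validate-before-construct ordering (the attribute is a plain pointer here for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct meter { unsigned int id; };

/* Constructor that unconditionally dereferences its input, like
 * dp_meter_create() reading the ID attribute. */
static struct meter *meter_create(const unsigned int *id_attr)
{
        struct meter *m = malloc(sizeof(*m));
        if (!m)
                return NULL;
        m->id = *id_attr;               /* would crash if id_attr were NULL */
        return m;
}

static int cmd_set(const unsigned int *id_attr)
{
        if (!id_attr)                   /* validate BEFORE the constructor runs */
                return -ENODEV;
        struct meter *m = meter_create(id_attr);
        if (!m)
                return -ENOMEM;
        printf("meter %u configured\n", m->id);
        free(m);
        return 0;
}

int main(void)
{
        unsigned int id = 7;
        cmd_set(&id);                   /* ok */
        return cmd_set(NULL) == -ENODEV ? 0 : 1;   /* rejected up front */
}
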
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9b27d0cd766d..e6445d8f3f57 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4226,6 +4226,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4226 } 4226 }
4227 4227
4228 if (req->tp_block_nr) { 4228 if (req->tp_block_nr) {
4229 unsigned int min_frame_size;
4230
4229 /* Sanity tests and some calculations */ 4231 /* Sanity tests and some calculations */
4230 err = -EBUSY; 4232 err = -EBUSY;
4231 if (unlikely(rb->pg_vec)) 4233 if (unlikely(rb->pg_vec))
@@ -4248,12 +4250,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4248 goto out; 4250 goto out;
4249 if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) 4251 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4250 goto out; 4252 goto out;
4253 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4251 if (po->tp_version >= TPACKET_V3 && 4254 if (po->tp_version >= TPACKET_V3 &&
4252 req->tp_block_size <= 4255 req->tp_block_size <
4253 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr)) 4256 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4254 goto out; 4257 goto out;
4255 if (unlikely(req->tp_frame_size < po->tp_hdrlen + 4258 if (unlikely(req->tp_frame_size < min_frame_size))
4256 po->tp_reserve))
4257 goto out; 4259 goto out;
4258 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1))) 4260 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4259 goto out; 4261 goto out;
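
Both TPACKET_V3 sanity checks now derive from one min_frame_size = tp_hdrlen + tp_reserve, and the block-size comparison became strict, so a block that holds exactly its private area plus one minimal frame is accepted. A sketch of the size arithmetic (alignment and the BLK_PLUS_PRIV() rounding are omitted; field names only mirror the hunk):

#include <stdbool.h>
#include <stdio.h>

struct ring_req {
        unsigned int block_size;        /* tp_block_size */
        unsigned int frame_size;        /* tp_frame_size */
        unsigned int priv;              /* per-block private area */
};

static bool ring_sizes_ok(const struct ring_req *r,
                          unsigned int hdrlen, unsigned int reserve)
{
        unsigned int min_frame_size = hdrlen + reserve;

        /* block must fit its private area plus at least one frame */
        if (r->block_size < r->priv + min_frame_size)
                return false;
        /* and each frame must hold the header plus reserved space */
        if (r->frame_size < min_frame_size)
                return false;
        return true;
}

int main(void)
{
        /* exactly one frame per block: the old '<=' rejected this */
        struct ring_req exact = { .block_size = 64, .frame_size = 48, .priv = 16 };

        printf("%s\n", ring_sizes_ok(&exact, 32, 16) ? "accepted" : "rejected");
        return 0;
}
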
diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
index 48332a6ed738..d152e48ea371 100644
--- a/net/rds/ib_frmr.c
+++ b/net/rds/ib_frmr.c
@@ -344,6 +344,11 @@ struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
344 struct rds_ib_frmr *frmr; 344 struct rds_ib_frmr *frmr;
345 int ret; 345 int ret;
346 346
347 if (!ic) {
348 /* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/
349 return ERR_PTR(-EOPNOTSUPP);
350 }
351
347 do { 352 do {
348 if (ibmr) 353 if (ibmr)
349 rds_ib_free_frmr(ibmr, true); 354 rds_ib_free_frmr(ibmr, true);
diff --git a/net/rds/ib_mr.h b/net/rds/ib_mr.h
index 0ea4ab017a8c..655f01d427fe 100644
--- a/net/rds/ib_mr.h
+++ b/net/rds/ib_mr.h
@@ -115,7 +115,8 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
115 struct rds_info_rdma_connection *iinfo); 115 struct rds_info_rdma_connection *iinfo);
116void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *); 116void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
117void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 117void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
118 struct rds_sock *rs, u32 *key_ret); 118 struct rds_sock *rs, u32 *key_ret,
119 struct rds_connection *conn);
119void rds_ib_sync_mr(void *trans_private, int dir); 120void rds_ib_sync_mr(void *trans_private, int dir);
120void rds_ib_free_mr(void *trans_private, int invalidate); 121void rds_ib_free_mr(void *trans_private, int invalidate);
121void rds_ib_flush_mrs(void); 122void rds_ib_flush_mrs(void);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index e678699268a2..2e49a40a5e11 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -537,11 +537,12 @@ void rds_ib_flush_mrs(void)
537} 537}
538 538
539void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, 539void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
540 struct rds_sock *rs, u32 *key_ret) 540 struct rds_sock *rs, u32 *key_ret,
541 struct rds_connection *conn)
541{ 542{
542 struct rds_ib_device *rds_ibdev; 543 struct rds_ib_device *rds_ibdev;
543 struct rds_ib_mr *ibmr = NULL; 544 struct rds_ib_mr *ibmr = NULL;
544 struct rds_ib_connection *ic = rs->rs_conn->c_transport_data; 545 struct rds_ib_connection *ic = NULL;
545 int ret; 546 int ret;
546 547
547 rds_ibdev = rds_ib_get_device(rs->rs_bound_addr); 548 rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
@@ -550,6 +551,9 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
550 goto out; 551 goto out;
551 } 552 }
552 553
554 if (conn)
555 ic = conn->c_transport_data;
556
553 if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) { 557 if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
554 ret = -ENODEV; 558 ret = -ENODEV;
555 goto out; 559 goto out;
@@ -559,17 +563,18 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
559 ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); 563 ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
560 else 564 else
561 ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret); 565 ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
562 if (ibmr) 566 if (IS_ERR(ibmr)) {
563 rds_ibdev = NULL; 567 ret = PTR_ERR(ibmr);
564
565 out:
566 if (!ibmr)
567 pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret); 568 pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
569 } else {
570 return ibmr;
571 }
568 572
573 out:
569 if (rds_ibdev) 574 if (rds_ibdev)
570 rds_ib_dev_put(rds_ibdev); 575 rds_ib_dev_put(rds_ibdev);
571 576
572 return ibmr; 577 return ERR_PTR(ret);
573} 578}
574 579
575void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) 580void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
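
rds_ib_get_mr() previously mixed conventions: the FRMR path hands back IS_ERR-style pointers while the old tail tested for NULL, so errors could slip through unlogged and the device reference handling was tangled up with it. The rewrite commits to error pointers end to end. The encoding itself is ordinary pointer arithmetic; a userspace imitation of the kernel's helpers (MAX_ERRNO and the threshold mirror include/linux/err.h):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *get_mr(int want_error)
{
        if (want_error)
                return ERR_PTR(-ENODEV);   /* error travels inside the pointer */
        return malloc(16);
}

int main(void)
{
        void *mr = get_mr(1);
        if (IS_ERR(mr))
                fprintf(stderr, "get_mr failed (errno=%ld)\n", -PTR_ERR(mr));
        mr = get_mr(0);
        if (!IS_ERR(mr))
                free(mr);
        return 0;
}
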
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 634cfcb7bba6..80920e47f2c7 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -170,7 +170,8 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
170} 170}
171 171
172static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, 172static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
173 u64 *cookie_ret, struct rds_mr **mr_ret) 173 u64 *cookie_ret, struct rds_mr **mr_ret,
174 struct rds_conn_path *cp)
174{ 175{
175 struct rds_mr *mr = NULL, *found; 176 struct rds_mr *mr = NULL, *found;
176 unsigned int nr_pages; 177 unsigned int nr_pages;
@@ -269,7 +270,8 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
269 * Note that dma_map() implies that pending writes are 270 * Note that dma_map() implies that pending writes are
270 * flushed to RAM, so no dma_sync is needed here. */ 271 * flushed to RAM, so no dma_sync is needed here. */
271 trans_private = rs->rs_transport->get_mr(sg, nents, rs, 272 trans_private = rs->rs_transport->get_mr(sg, nents, rs,
272 &mr->r_key); 273 &mr->r_key,
274 cp ? cp->cp_conn : NULL);
273 275
274 if (IS_ERR(trans_private)) { 276 if (IS_ERR(trans_private)) {
275 for (i = 0 ; i < nents; i++) 277 for (i = 0 ; i < nents; i++)
@@ -330,7 +332,7 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
330 sizeof(struct rds_get_mr_args))) 332 sizeof(struct rds_get_mr_args)))
331 return -EFAULT; 333 return -EFAULT;
332 334
333 return __rds_rdma_map(rs, &args, NULL, NULL); 335 return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
334} 336}
335 337
336int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) 338int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
@@ -354,7 +356,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
354 new_args.cookie_addr = args.cookie_addr; 356 new_args.cookie_addr = args.cookie_addr;
355 new_args.flags = args.flags; 357 new_args.flags = args.flags;
356 358
357 return __rds_rdma_map(rs, &new_args, NULL, NULL); 359 return __rds_rdma_map(rs, &new_args, NULL, NULL, NULL);
358} 360}
359 361
360/* 362/*
@@ -782,7 +784,8 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
782 rm->m_rdma_cookie != 0) 784 rm->m_rdma_cookie != 0)
783 return -EINVAL; 785 return -EINVAL;
784 786
785 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr); 787 return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie,
788 &rm->rdma.op_rdma_mr, rm->m_conn_path);
786} 789}
787 790
788/* 791/*
diff --git a/net/rds/rds.h b/net/rds/rds.h
index f2272fb8cd45..60b3b787fbdb 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -464,6 +464,8 @@ struct rds_message {
464 struct scatterlist *op_sg; 464 struct scatterlist *op_sg;
465 } data; 465 } data;
466 }; 466 };
467
468 struct rds_conn_path *m_conn_path;
467}; 469};
468 470
469/* 471/*
@@ -544,7 +546,8 @@ struct rds_transport {
544 unsigned int avail); 546 unsigned int avail);
545 void (*exit)(void); 547 void (*exit)(void);
546 void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg, 548 void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
547 struct rds_sock *rs, u32 *key_ret); 549 struct rds_sock *rs, u32 *key_ret,
550 struct rds_connection *conn);
548 void (*sync_mr)(void *trans_private, int direction); 551 void (*sync_mr)(void *trans_private, int direction);
549 void (*free_mr)(void *trans_private, int invalidate); 552 void (*free_mr)(void *trans_private, int invalidate);
550 void (*flush_mrs)(void); 553 void (*flush_mrs)(void);
diff --git a/net/rds/send.c b/net/rds/send.c
index 94c7f74909be..59f17a2335f4 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1169,6 +1169,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1169 rs->rs_conn = conn; 1169 rs->rs_conn = conn;
1170 } 1170 }
1171 1171
1172 if (conn->c_trans->t_mp_capable)
1173 cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
1174 else
1175 cpath = &conn->c_path[0];
1176
1177 rm->m_conn_path = cpath;
1178
1172 /* Parse any control messages the user may have included. */ 1179 /* Parse any control messages the user may have included. */
1173 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); 1180 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1174 if (ret) { 1181 if (ret) {
@@ -1192,11 +1199,6 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1192 goto out; 1199 goto out;
1193 } 1200 }
1194 1201
1195 if (conn->c_trans->t_mp_capable)
1196 cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
1197 else
1198 cpath = &conn->c_path[0];
1199
1200 if (rds_destroy_pending(conn)) { 1202 if (rds_destroy_pending(conn)) {
1201 ret = -EAGAIN; 1203 ret = -EAGAIN;
1202 goto out; 1204 goto out;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 5fb7d3254d9e..707630ab4713 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -104,9 +104,9 @@ struct rxrpc_net {
104 104
105#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */ 105#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
106 u8 peer_keepalive_cursor; 106 u8 peer_keepalive_cursor;
107 ktime_t peer_keepalive_base; 107 time64_t peer_keepalive_base;
108 struct hlist_head peer_keepalive[RXRPC_KEEPALIVE_TIME + 1]; 108 struct list_head peer_keepalive[32];
109 struct hlist_head peer_keepalive_new; 109 struct list_head peer_keepalive_new;
110 struct timer_list peer_keepalive_timer; 110 struct timer_list peer_keepalive_timer;
111 struct work_struct peer_keepalive_work; 111 struct work_struct peer_keepalive_work;
112}; 112};
@@ -295,7 +295,7 @@ struct rxrpc_peer {
295 struct hlist_head error_targets; /* targets for net error distribution */ 295 struct hlist_head error_targets; /* targets for net error distribution */
296 struct work_struct error_distributor; 296 struct work_struct error_distributor;
297 struct rb_root service_conns; /* Service connections */ 297 struct rb_root service_conns; /* Service connections */
298 struct hlist_node keepalive_link; /* Link in net->peer_keepalive[] */ 298 struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
299 time64_t last_tx_at; /* Last time packet sent here */ 299 time64_t last_tx_at; /* Last time packet sent here */
300 seqlock_t service_conn_lock; 300 seqlock_t service_conn_lock;
301 spinlock_t lock; /* access lock */ 301 spinlock_t lock; /* access lock */
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index a9a9be5519b9..9d1e298b784c 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -116,9 +116,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
116 while (*pp) { 116 while (*pp) {
117 parent = *pp; 117 parent = *pp;
118 xcall = rb_entry(parent, struct rxrpc_call, sock_node); 118 xcall = rb_entry(parent, struct rxrpc_call, sock_node);
119 if (user_call_ID < call->user_call_ID) 119 if (user_call_ID < xcall->user_call_ID)
120 pp = &(*pp)->rb_left; 120 pp = &(*pp)->rb_left;
121 else if (user_call_ID > call->user_call_ID) 121 else if (user_call_ID > xcall->user_call_ID)
122 pp = &(*pp)->rb_right; 122 pp = &(*pp)->rb_right;
123 else 123 else
124 goto id_in_use; 124 goto id_in_use;
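
The call_accept fix is a classic insertion-walk bug: the comparisons read the ID of the call being inserted instead of xcall, the node under the cursor, so the walk never actually compared against the tree contents. A generic sketch of the correct loop shape (plain binary tree, no rebalancing):

#include <stdio.h>
#include <stdlib.h>

struct node { long key; struct node *left, *right; };

/* The new key must be compared against the node currently being
 * examined (*pp), never against the node being inserted. */
static int insert(struct node **root, struct node *new)
{
        struct node **pp = root;

        while (*pp) {
                struct node *cur = *pp;         /* the 'xcall' of the hunk */
                if (new->key < cur->key)
                        pp = &cur->left;
                else if (new->key > cur->key)
                        pp = &cur->right;
                else
                        return -1;              /* key already in use */
        }
        *pp = new;
        return 0;
}

int main(void)
{
        struct node *root = NULL;
        long keys[] = { 5, 2, 8, 2 };
        for (int i = 0; i < 4; i++) {
                struct node *n = calloc(1, sizeof(*n));
                n->key = keys[i];
                if (insert(&root, n)) {
                        printf("%ld: id in use\n", n->key);
                        free(n);
                }
        }
        return 0;
}
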
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 8229a52c2acd..3fde001fcc39 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
136 } 136 }
137 137
138 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len); 138 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
139 conn->params.peer->last_tx_at = ktime_get_real(); 139 conn->params.peer->last_tx_at = ktime_get_seconds();
140 if (ret < 0) 140 if (ret < 0)
141 trace_rxrpc_tx_fail(conn->debug_id, serial, ret, 141 trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
142 rxrpc_tx_fail_call_final_resend); 142 rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
245 return -EAGAIN; 245 return -EAGAIN;
246 } 246 }
247 247
248 conn->params.peer->last_tx_at = ktime_get_real(); 248 conn->params.peer->last_tx_at = ktime_get_seconds();
249 249
250 _leave(" = 0"); 250 _leave(" = 0");
251 return 0; 251 return 0;
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 5d6a773db973..417d80867c4f 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
85 hash_init(rxnet->peer_hash); 85 hash_init(rxnet->peer_hash);
86 spin_lock_init(&rxnet->peer_hash_lock); 86 spin_lock_init(&rxnet->peer_hash_lock);
87 for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++) 87 for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
88 INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]); 88 INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
89 INIT_HLIST_HEAD(&rxnet->peer_keepalive_new); 89 INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
90 timer_setup(&rxnet->peer_keepalive_timer, 90 timer_setup(&rxnet->peer_keepalive_timer,
91 rxrpc_peer_keepalive_timeout, 0); 91 rxrpc_peer_keepalive_timeout, 0);
92 INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker); 92 INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
93 rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC); 93 rxnet->peer_keepalive_base = ktime_get_seconds();
94 94
95 ret = -ENOMEM; 95 ret = -ENOMEM;
96 rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net); 96 rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index f03de1c59ba3..4774c8f5634d 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
209 now = ktime_get_real(); 209 now = ktime_get_real();
210 if (ping) 210 if (ping)
211 call->ping_time = now; 211 call->ping_time = now;
212 conn->params.peer->last_tx_at = ktime_get_real(); 212 conn->params.peer->last_tx_at = ktime_get_seconds();
213 if (ret < 0) 213 if (ret < 0)
214 trace_rxrpc_tx_fail(call->debug_id, serial, ret, 214 trace_rxrpc_tx_fail(call->debug_id, serial, ret,
215 rxrpc_tx_fail_call_ack); 215 rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
296 296
297 ret = kernel_sendmsg(conn->params.local->socket, 297 ret = kernel_sendmsg(conn->params.local->socket,
298 &msg, iov, 1, sizeof(pkt)); 298 &msg, iov, 1, sizeof(pkt));
299 conn->params.peer->last_tx_at = ktime_get_real(); 299 conn->params.peer->last_tx_at = ktime_get_seconds();
300 if (ret < 0) 300 if (ret < 0)
301 trace_rxrpc_tx_fail(call->debug_id, serial, ret, 301 trace_rxrpc_tx_fail(call->debug_id, serial, ret,
302 rxrpc_tx_fail_call_abort); 302 rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
391 * message and update the peer record 391 * message and update the peer record
392 */ 392 */
393 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len); 393 ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
394 conn->params.peer->last_tx_at = ktime_get_real(); 394 conn->params.peer->last_tx_at = ktime_get_seconds();
395 395
396 up_read(&conn->params.local->defrag_sem); 396 up_read(&conn->params.local->defrag_sem);
397 if (ret < 0) 397 if (ret < 0)
@@ -457,7 +457,7 @@ send_fragmentable:
457 if (ret == 0) { 457 if (ret == 0) {
458 ret = kernel_sendmsg(conn->params.local->socket, &msg, 458 ret = kernel_sendmsg(conn->params.local->socket, &msg,
459 iov, 2, len); 459 iov, 2, len);
460 conn->params.peer->last_tx_at = ktime_get_real(); 460 conn->params.peer->last_tx_at = ktime_get_seconds();
461 461
462 opt = IP_PMTUDISC_DO; 462 opt = IP_PMTUDISC_DO;
463 kernel_setsockopt(conn->params.local->socket, SOL_IP, 463 kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ send_fragmentable:
475 if (ret == 0) { 475 if (ret == 0) {
476 ret = kernel_sendmsg(conn->params.local->socket, &msg, 476 ret = kernel_sendmsg(conn->params.local->socket, &msg,
477 iov, 2, len); 477 iov, 2, len);
478 conn->params.peer->last_tx_at = ktime_get_real(); 478 conn->params.peer->last_tx_at = ktime_get_seconds();
479 479
480 opt = IPV6_PMTUDISC_DO; 480 opt = IPV6_PMTUDISC_DO;
481 kernel_setsockopt(conn->params.local->socket, 481 kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
599 trace_rxrpc_tx_fail(peer->debug_id, 0, ret, 599 trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
600 rxrpc_tx_fail_version_keepalive); 600 rxrpc_tx_fail_version_keepalive);
601 601
602 peer->last_tx_at = ktime_get_real(); 602 peer->last_tx_at = ktime_get_seconds();
603 _leave(""); 603 _leave("");
604} 604}
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 0ed8b651cec2..4f9da2f51c69 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
350} 350}
351 351
352/* 352/*
353 * Perform keep-alive pings with VERSION packets to keep any NAT alive. 353 * Perform keep-alive pings.
354 */ 354 */
355void rxrpc_peer_keepalive_worker(struct work_struct *work) 355static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
356 struct list_head *collector,
357 time64_t base,
358 u8 cursor)
356{ 359{
357 struct rxrpc_net *rxnet =
358 container_of(work, struct rxrpc_net, peer_keepalive_work);
359 struct rxrpc_peer *peer; 360 struct rxrpc_peer *peer;
360 unsigned long delay; 361 const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
361 ktime_t base, now = ktime_get_real(); 362 time64_t keepalive_at;
362 s64 diff; 363 int slot;
363 u8 cursor, slot;
364 364
365 base = rxnet->peer_keepalive_base; 365 spin_lock_bh(&rxnet->peer_hash_lock);
366 cursor = rxnet->peer_keepalive_cursor;
367 366
368 _enter("%u,%lld", cursor, ktime_sub(now, base)); 367 while (!list_empty(collector)) {
368 peer = list_entry(collector->next,
369 struct rxrpc_peer, keepalive_link);
369 370
370next_bucket: 371 list_del_init(&peer->keepalive_link);
371 diff = ktime_to_ns(ktime_sub(now, base)); 372 if (!rxrpc_get_peer_maybe(peer))
372 if (diff < 0) 373 continue;
373 goto resched;
374 374
375 _debug("at %u", cursor);
376 spin_lock_bh(&rxnet->peer_hash_lock);
377next_peer:
378 if (!rxnet->live) {
379 spin_unlock_bh(&rxnet->peer_hash_lock); 375 spin_unlock_bh(&rxnet->peer_hash_lock);
380 goto out;
381 }
382 376
383 /* Everything in the bucket at the cursor is processed this second; the 377 keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
384 * bucket at cursor + 1 goes now + 1s and so on... 378 slot = keepalive_at - base;
385 */ 379 _debug("%02x peer %u t=%d {%pISp}",
386 if (hlist_empty(&rxnet->peer_keepalive[cursor])) { 380 cursor, peer->debug_id, slot, &peer->srx.transport);
387 if (hlist_empty(&rxnet->peer_keepalive_new)) { 381
388 spin_unlock_bh(&rxnet->peer_hash_lock); 382 if (keepalive_at <= base ||
389 goto emptied_bucket; 383 keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
384 rxrpc_send_keepalive(peer);
385 slot = RXRPC_KEEPALIVE_TIME;
390 } 386 }
391 387
392 hlist_move_list(&rxnet->peer_keepalive_new, 388 /* A transmission to this peer occurred since last we examined
393 &rxnet->peer_keepalive[cursor]); 389 * it so put it into the appropriate future bucket.
390 */
391 slot += cursor;
392 slot &= mask;
393 spin_lock_bh(&rxnet->peer_hash_lock);
394 list_add_tail(&peer->keepalive_link,
395 &rxnet->peer_keepalive[slot & mask]);
396 rxrpc_put_peer(peer);
394 } 397 }
395 398
396 peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
397 struct rxrpc_peer, keepalive_link);
398 hlist_del_init(&peer->keepalive_link);
399 if (!rxrpc_get_peer_maybe(peer))
400 goto next_peer;
401
402 spin_unlock_bh(&rxnet->peer_hash_lock); 399 spin_unlock_bh(&rxnet->peer_hash_lock);
400}
403 401
404 _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport); 402/*
403 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
404 */
405void rxrpc_peer_keepalive_worker(struct work_struct *work)
406{
407 struct rxrpc_net *rxnet =
408 container_of(work, struct rxrpc_net, peer_keepalive_work);
409 const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
410 time64_t base, now, delay;
411 u8 cursor, stop;
412 LIST_HEAD(collector);
405 413
406recalc: 414 now = ktime_get_seconds();
407 diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC); 415 base = rxnet->peer_keepalive_base;
408 if (diff < -30 || diff > 30) 416 cursor = rxnet->peer_keepalive_cursor;
409 goto send; /* LSW of 64-bit time probably wrapped on 32-bit */ 417 _enter("%lld,%u", base - now, cursor);
410 diff += RXRPC_KEEPALIVE_TIME - 1;
411 if (diff < 0)
412 goto send;
413 418
414 slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff; 419 if (!rxnet->live)
415 if (slot == 0) 420 return;
416 goto send;
417 421
418 /* A transmission to this peer occurred since last we examined it so 422 /* Remove to a temporary list all the peers that are currently lodged
419 * put it into the appropriate future bucket. 423 * in expired buckets plus all new peers.
424 *
425 * Everything in the bucket at the cursor is processed this
426 * second; the bucket at cursor + 1 goes at now + 1s and so
427 * on...
420 */ 428 */
421 slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
422 spin_lock_bh(&rxnet->peer_hash_lock); 429 spin_lock_bh(&rxnet->peer_hash_lock);
423 hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]); 430 list_splice_init(&rxnet->peer_keepalive_new, &collector);
424 rxrpc_put_peer(peer); 431
425 goto next_peer; 432 stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
426 433 while (base <= now && (s8)(cursor - stop) < 0) {
427send: 434 list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
428 rxrpc_send_keepalive(peer); 435 &collector);
429 now = ktime_get_real(); 436 base++;
430 goto recalc; 437 cursor++;
438 }
431 439
432emptied_bucket: 440 base = now;
433 cursor++; 441 spin_unlock_bh(&rxnet->peer_hash_lock);
434 if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
435 cursor = 0;
436 base = ktime_add_ns(base, NSEC_PER_SEC);
437 goto next_bucket;
438 442
439resched:
440 rxnet->peer_keepalive_base = base; 443 rxnet->peer_keepalive_base = base;
441 rxnet->peer_keepalive_cursor = cursor; 444 rxnet->peer_keepalive_cursor = cursor;
442 delay = nsecs_to_jiffies(-diff) + 1; 445 rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
443 timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay); 446 ASSERT(list_empty(&collector));
444out: 447
448 /* Schedule the timer for the next occupied timeslot. */
449 cursor = rxnet->peer_keepalive_cursor;
450 stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
451 for (; (s8)(cursor - stop) < 0; cursor++) {
452 if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
453 break;
454 base++;
455 }
456
457 now = ktime_get_seconds();
458 delay = base - now;
459 if (delay < 1)
460 delay = 1;
461 delay *= HZ;
462 if (rxnet->live)
463 timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
464
445 _leave(""); 465 _leave("");
446} 466}
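
The keepalive rework swaps per-peer ktime arithmetic for a power-of-two timing wheel kept in whole seconds: each bucket holds the peers due in one particular second, the cursor marks the bucket for "now", and a peer that transmitted recently is re-filed into a bucket further round the wheel instead of being pinged. A minimal wheel sketch (the bucket count and singly linked chaining are illustrative):

#include <stdio.h>

#define WHEEL_SIZE 32                   /* must be a power of two */
#define WHEEL_MASK (WHEEL_SIZE - 1)
#define KEEPALIVE  20                   /* seconds between pings */

struct peer { long last_tx; struct peer *next; };

static struct peer *wheel[WHEEL_SIZE];
static long base;                       /* second the cursor maps to */
static unsigned int cursor;

/* File a peer into the bucket for the second its next ping is due. */
static void wheel_file(struct peer *p)
{
        long slot = p->last_tx + KEEPALIVE - base;  /* seconds past cursor */

        if (slot < 0)
                slot = 0;               /* overdue: handle on this tick */
        if (slot > KEEPALIVE)
                slot = KEEPALIVE;
        unsigned int b = (cursor + (unsigned int)slot) & WHEEL_MASK;
        p->next = wheel[b];
        wheel[b] = p;
}

/* Advance the cursor one second, returning the peers now due. */
static struct peer *wheel_tick(void)
{
        struct peer *due = wheel[cursor & WHEEL_MASK];
        wheel[cursor & WHEEL_MASK] = NULL;
        cursor++;
        base++;
        return due;
}

int main(void)
{
        struct peer p = { .last_tx = 0 };

        wheel_file(&p);
        for (int s = 0; s <= KEEPALIVE; s++)
                if (wheel_tick())
                        printf("ping due at t=%d\n", s);
        return 0;
}
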
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1b7e8107b3ae..24ec7cdcf332 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
322 if (!peer) { 322 if (!peer) {
323 peer = prealloc; 323 peer = prealloc;
324 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); 324 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
325 hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new); 325 list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
326 } 326 }
327 327
328 spin_unlock(&rxnet->peer_hash_lock); 328 spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
367 if (!peer) { 367 if (!peer) {
368 hash_add_rcu(rxnet->peer_hash, 368 hash_add_rcu(rxnet->peer_hash,
369 &candidate->hash_link, hash_key); 369 &candidate->hash_link, hash_key);
370 hlist_add_head(&candidate->keepalive_link, 370 list_add_tail(&candidate->keepalive_link,
371 &rxnet->peer_keepalive_new); 371 &rxnet->peer_keepalive_new);
372 } 372 }
373 373
374 spin_unlock_bh(&rxnet->peer_hash_lock); 374 spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
441 441
442 spin_lock_bh(&rxnet->peer_hash_lock); 442 spin_lock_bh(&rxnet->peer_hash_lock);
443 hash_del_rcu(&peer->hash_link); 443 hash_del_rcu(&peer->hash_link);
444 hlist_del_init(&peer->keepalive_link); 444 list_del_init(&peer->keepalive_link);
445 spin_unlock_bh(&rxnet->peer_hash_lock); 445 spin_unlock_bh(&rxnet->peer_hash_lock);
446 446
447 kfree_rcu(peer, rcu); 447 kfree_rcu(peer, rcu);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 278ac0807a60..47cb019c521a 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
669 return -EAGAIN; 669 return -EAGAIN;
670 } 670 }
671 671
672 conn->params.peer->last_tx_at = ktime_get_real(); 672 conn->params.peer->last_tx_at = ktime_get_seconds();
673 _leave(" = 0"); 673 _leave(" = 0");
674 return 0; 674 return 0;
675} 675}
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
725 return -EAGAIN; 725 return -EAGAIN;
726 } 726 }
727 727
728 conn->params.peer->last_tx_at = ktime_get_real(); 728 conn->params.peer->last_tx_at = ktime_get_seconds();
729 _leave(" = 0"); 729 _leave(" = 0");
730 return 0; 730 return 0;
731} 731}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 05e4ffe5aabd..e7de5f282722 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1122,6 +1122,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
1122 sock_hold(lsk); /* sock_put in smc_listen_work */ 1122 sock_hold(lsk); /* sock_put in smc_listen_work */
1123 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work); 1123 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
1124 smc_copy_sock_settings_to_smc(new_smc); 1124 smc_copy_sock_settings_to_smc(new_smc);
1125 new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
1126 new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
1125 sock_hold(&new_smc->sk); /* sock_put in passive closing */ 1127 sock_hold(&new_smc->sk); /* sock_put in passive closing */
1126 if (!schedule_work(&new_smc->smc_listen_work)) 1128 if (!schedule_work(&new_smc->smc_listen_work))
1127 sock_put(&new_smc->sk); 1129 sock_put(&new_smc->sk);
@@ -1397,8 +1399,7 @@ static int smc_shutdown(struct socket *sock, int how)
1397 lock_sock(sk); 1399 lock_sock(sk);
1398 1400
1399 rc = -ENOTCONN; 1401 rc = -ENOTCONN;
1400 if ((sk->sk_state != SMC_LISTEN) && 1402 if ((sk->sk_state != SMC_ACTIVE) &&
1401 (sk->sk_state != SMC_ACTIVE) &&
1402 (sk->sk_state != SMC_PEERCLOSEWAIT1) && 1403 (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
1403 (sk->sk_state != SMC_PEERCLOSEWAIT2) && 1404 (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
1404 (sk->sk_state != SMC_APPCLOSEWAIT1) && 1405 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
@@ -1521,12 +1522,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
1521 1522
1522 smc = smc_sk(sock->sk); 1523 smc = smc_sk(sock->sk);
1523 conn = &smc->conn; 1524 conn = &smc->conn;
1525 lock_sock(&smc->sk);
1524 if (smc->use_fallback) { 1526 if (smc->use_fallback) {
1525 if (!smc->clcsock) 1527 if (!smc->clcsock) {
1528 release_sock(&smc->sk);
1526 return -EBADF; 1529 return -EBADF;
1527 return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); 1530 }
1531 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
1532 release_sock(&smc->sk);
1533 return answ;
1528 } 1534 }
1529 lock_sock(&smc->sk);
1530 switch (cmd) { 1535 switch (cmd) {
1531 case SIOCINQ: /* same as FIONREAD */ 1536 case SIOCINQ: /* same as FIONREAD */
1532 if (smc->sk.sk_state == SMC_LISTEN) { 1537 if (smc->sk.sk_state == SMC_LISTEN) {
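
The ioctl hunk takes the socket lock before the use_fallback test, so the fallback branch no longer runs unlocked, and every early return now drops the lock. A sketch of the release-on-every-path discipline, with a pthread mutex standing in for lock_sock():

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

static int smc_like_ioctl(int use_fallback, int have_clcsock)
{
        int answ;

        pthread_mutex_lock(&sk_lock);           /* taken before any branch */
        if (use_fallback) {
                if (!have_clcsock) {
                        pthread_mutex_unlock(&sk_lock); /* pairs with early return */
                        return -EBADF;
                }
                answ = 42;                      /* stand-in for the fallback ioctl */
                pthread_mutex_unlock(&sk_lock);
                return answ;
        }
        answ = 0;                               /* native SMC path */
        pthread_mutex_unlock(&sk_lock);
        return answ;
}

int main(void)
{
        printf("%d %d %d\n",
               smc_like_ioctl(1, 0),            /* -EBADF, lock released */
               smc_like_ioctl(1, 1),            /* fallback result */
               smc_like_ioctl(0, 0));           /* native path */
        return 0;
}
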
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index a7e8d63fc8ae..9bde1e4ca288 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -233,7 +233,8 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
233 /* force immediate tx of current consumer cursor, but 233 /* force immediate tx of current consumer cursor, but
234 * under send_lock to guarantee arrival in seqno-order 234 * under send_lock to guarantee arrival in seqno-order
235 */ 235 */
236 smc_tx_sndbuf_nonempty(conn); 236 if (smc->sk.sk_state != SMC_INIT)
237 smc_tx_sndbuf_nonempty(conn);
237 } 238 }
238 } 239 }
239 240
diff --git a/net/socket.c b/net/socket.c
index 85633622c94d..8c24d5dc4bc8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -89,6 +89,7 @@
89#include <linux/magic.h> 89#include <linux/magic.h>
90#include <linux/slab.h> 90#include <linux/slab.h>
91#include <linux/xattr.h> 91#include <linux/xattr.h>
92#include <linux/nospec.h>
92 93
93#include <linux/uaccess.h> 94#include <linux/uaccess.h>
94#include <asm/unistd.h> 95#include <asm/unistd.h>
@@ -2522,6 +2523,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2522 2523
2523 if (call < 1 || call > SYS_SENDMMSG) 2524 if (call < 1 || call > SYS_SENDMMSG)
2524 return -EINVAL; 2525 return -EINVAL;
2526 call = array_index_nospec(call, SYS_SENDMMSG + 1);
2525 2527
2526 len = nargs[call]; 2528 len = nargs[call];
2527 if (len > sizeof(a)) 2529 if (len > sizeof(a))
@@ -2688,7 +2690,8 @@ EXPORT_SYMBOL(sock_unregister);
2688 2690
2689bool sock_is_registered(int family) 2691bool sock_is_registered(int family)
2690{ 2692{
2691 return family < NPROTO && rcu_access_pointer(net_families[family]); 2693 return family < NPROTO &&
2694 rcu_access_pointer(net_families[array_index_nospec(family, NPROTO)]);
2692} 2695}
2693 2696
2694static int __init sock_init(void) 2697static int __init sock_init(void)
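
Both hunks apply the Spectre-v1 pattern: after the bounds check, the index is clamped with array_index_nospec() so a mispredicted branch cannot carry an out-of-range value into the dependent array load. The generic fallback computes an all-ones or all-zero mask without any branch; a userspace rendering of that mask (it mirrors the kernel's generic helper minus its compiler barrier, and relies on arithmetic right shift of a negative long, as the kernel does):

#include <assert.h>
#include <stdio.h>

/* All ones when idx < size, all zeroes otherwise - with no branch the
 * CPU could mispredict. */
static unsigned long index_mask_nospec(unsigned long idx, unsigned long size)
{
        return ~(long)(idx | (size - 1 - idx)) >> (sizeof(long) * 8 - 1);
}

static unsigned long index_nospec(unsigned long idx, unsigned long size)
{
        return idx & index_mask_nospec(idx, size);
}

int main(void)
{
        assert(index_nospec(3, 8) == 3);   /* in range: unchanged          */
        assert(index_nospec(9, 8) == 0);   /* out of range: forced to zero */
        puts("ok");
        return 0;
}
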
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a7f6964c3a4b..62199cf5a56c 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -123,15 +123,13 @@ void tipc_net_finalize(struct net *net, u32 addr)
123{ 123{
124 struct tipc_net *tn = tipc_net(net); 124 struct tipc_net *tn = tipc_net(net);
125 125
126 spin_lock_bh(&tn->node_list_lock); 126 if (!cmpxchg(&tn->node_addr, 0, addr)) {
127 if (!tipc_own_addr(net)) {
128 tipc_set_node_addr(net, addr); 127 tipc_set_node_addr(net, addr);
129 tipc_named_reinit(net); 128 tipc_named_reinit(net);
130 tipc_sk_reinit(net); 129 tipc_sk_reinit(net);
131 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr, 130 tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
132 TIPC_CLUSTER_SCOPE, 0, addr); 131 TIPC_CLUSTER_SCOPE, 0, addr);
133 } 132 }
134 spin_unlock_bh(&tn->node_list_lock);
135} 133}
136 134
137void tipc_net_stop(struct net *net) 135void tipc_net_stop(struct net *net)
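
tipc_net_finalize() drops node_list_lock and instead gates the one-time initialization on cmpxchg(&tn->node_addr, 0, addr): exactly one caller observes the 0-to-addr transition and runs the setup, every other caller sees a non-zero old value and skips it. The C11 equivalent:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int node_addr;

/* Only the caller that wins the 0 -> addr race performs the setup. */
static void net_finalize(unsigned int addr)
{
        unsigned int expected = 0;

        if (atomic_compare_exchange_strong(&node_addr, &expected, addr))
                printf("initialized address to %u\n", addr);
        else
                printf("already initialized to %u\n", expected);
}

int main(void)
{
        net_finalize(4711);     /* wins the exchange */
        net_finalize(42);       /* loses: sees 4711 */
        return 0;
}
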
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 4618f1c31137..1f3d9789af30 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
646 return NULL; 646 return NULL;
647 } 647 }
648 648
649 if (sk->sk_shutdown & RCV_SHUTDOWN)
650 return NULL;
651
649 if (sock_flag(sk, SOCK_DONE)) 652 if (sock_flag(sk, SOCK_DONE))
650 return NULL; 653 return NULL;
651 654
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index c1076c19b858..ab27a2872935 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
451 return transport->shutdown(vsock_sk(sk), mode); 451 return transport->shutdown(vsock_sk(sk), mode);
452} 452}
453 453
454void vsock_pending_work(struct work_struct *work) 454static void vsock_pending_work(struct work_struct *work)
455{ 455{
456 struct sock *sk; 456 struct sock *sk;
457 struct sock *listener; 457 struct sock *listener;
458 struct vsock_sock *vsk; 458 struct vsock_sock *vsk;
459 bool cleanup; 459 bool cleanup;
460 460
461 vsk = container_of(work, struct vsock_sock, dwork.work); 461 vsk = container_of(work, struct vsock_sock, pending_work.work);
462 sk = sk_vsock(vsk); 462 sk = sk_vsock(vsk);
463 listener = vsk->listener; 463 listener = vsk->listener;
464 cleanup = true; 464 cleanup = true;
@@ -498,7 +498,6 @@ out:
498 sock_put(sk); 498 sock_put(sk);
499 sock_put(listener); 499 sock_put(listener);
500} 500}
501EXPORT_SYMBOL_GPL(vsock_pending_work);
502 501
503/**** SOCKET OPERATIONS ****/ 502/**** SOCKET OPERATIONS ****/
504 503
@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
597 return retval; 596 return retval;
598} 597}
599 598
599static void vsock_connect_timeout(struct work_struct *work);
600
600struct sock *__vsock_create(struct net *net, 601struct sock *__vsock_create(struct net *net,
601 struct socket *sock, 602 struct socket *sock,
602 struct sock *parent, 603 struct sock *parent,
@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
638 vsk->sent_request = false; 639 vsk->sent_request = false;
639 vsk->ignore_connecting_rst = false; 640 vsk->ignore_connecting_rst = false;
640 vsk->peer_shutdown = 0; 641 vsk->peer_shutdown = 0;
642 INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
643 INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
641 644
642 psk = parent ? vsock_sk(parent) : NULL; 645 psk = parent ? vsock_sk(parent) : NULL;
643 if (parent) { 646 if (parent) {
@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
1117 struct vsock_sock *vsk; 1120 struct vsock_sock *vsk;
1118 int cancel = 0; 1121 int cancel = 0;
1119 1122
1120 vsk = container_of(work, struct vsock_sock, dwork.work); 1123 vsk = container_of(work, struct vsock_sock, connect_work.work);
1121 sk = sk_vsock(vsk); 1124 sk = sk_vsock(vsk);
1122 1125
1123 lock_sock(sk); 1126 lock_sock(sk);
@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1221 * timeout fires. 1224 * timeout fires.
1222 */ 1225 */
1223 sock_hold(sk); 1226 sock_hold(sk);
1224 INIT_DELAYED_WORK(&vsk->dwork, 1227 schedule_delayed_work(&vsk->connect_work, timeout);
1225 vsock_connect_timeout);
1226 schedule_delayed_work(&vsk->dwork, timeout);
1227 1228
1228 /* Skip ahead to preserve error code set above. */ 1229 /* Skip ahead to preserve error code set above. */
1229 goto out_wait; 1230 goto out_wait;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index a7a73ffe675b..cb332adb84cd 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
1094 vpending->listener = sk; 1094 vpending->listener = sk;
1095 sock_hold(sk); 1095 sock_hold(sk);
1096 sock_hold(pending); 1096 sock_hold(pending);
1097 INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work); 1097 schedule_delayed_work(&vpending->pending_work, HZ);
1098 schedule_delayed_work(&vpending->dwork, HZ);
1099 1098
1100out: 1099out:
1101 return err; 1100 return err;
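
The vsock bug came from two handlers sharing a single dwork field and each recovering the socket with container_of() on it; giving connect_work and pending_work their own members, initialized once in __vsock_create(), makes each container_of() land on the right object and removes the racy late INIT_DELAYED_WORK() calls. container_of() is in essence offset arithmetic, shown here in userspace (struct names are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*fn)(struct work *); };

struct vsock_like {
        int id;
        struct work connect_work;       /* one work item per purpose... */
        struct work pending_work;       /* ...not one shared 'dwork'    */
};

static void connect_timeout(struct work *w)
{
        struct vsock_like *v = container_of(w, struct vsock_like, connect_work);
        printf("connect timeout on socket %d\n", v->id);
}

static void pending_done(struct work *w)
{
        struct vsock_like *v = container_of(w, struct vsock_like, pending_work);
        printf("pending work on socket %d\n", v->id);
}

int main(void)
{
        struct vsock_like v = { .id = 3 };

        v.connect_work.fn = connect_timeout;    /* set up once, at creation */
        v.pending_work.fn = pending_done;
        v.connect_work.fn(&v.connect_work);
        v.pending_work.fn(&v.pending_work);
        return 0;
}
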
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4eece06be1e7..80bc986c79e5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info,
4409 params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) | 4409 params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
4410 BIT(NL80211_STA_FLAG_MFP) | 4410 BIT(NL80211_STA_FLAG_MFP) |
4411 BIT(NL80211_STA_FLAG_AUTHORIZED); 4411 BIT(NL80211_STA_FLAG_AUTHORIZED);
4412 break;
4412 default: 4413 default:
4413 return -EINVAL; 4414 return -EINVAL;
4414 } 4415 }
@@ -14923,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
14923EXPORT_SYMBOL(cfg80211_mgmt_tx_status); 14924EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
14924 14925
14925static int __nl80211_rx_control_port(struct net_device *dev, 14926static int __nl80211_rx_control_port(struct net_device *dev,
14926 const u8 *buf, size_t len, 14927 struct sk_buff *skb,
14927 const u8 *addr, u16 proto,
14928 bool unencrypted, gfp_t gfp) 14928 bool unencrypted, gfp_t gfp)
14929{ 14929{
14930 struct wireless_dev *wdev = dev->ieee80211_ptr; 14930 struct wireless_dev *wdev = dev->ieee80211_ptr;
14931 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); 14931 struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
14932 struct ethhdr *ehdr = eth_hdr(skb);
14933 const u8 *addr = ehdr->h_source;
14934 u16 proto = be16_to_cpu(skb->protocol);
14932 struct sk_buff *msg; 14935 struct sk_buff *msg;
14933 void *hdr; 14936 void *hdr;
14937 struct nlattr *frame;
14938
14934 u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid); 14939 u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
14935 14940
14936 if (!nlportid) 14941 if (!nlportid)
14937 return -ENOENT; 14942 return -ENOENT;
14938 14943
14939 msg = nlmsg_new(100 + len, gfp); 14944 msg = nlmsg_new(100 + skb->len, gfp);
14940 if (!msg) 14945 if (!msg)
14941 return -ENOMEM; 14946 return -ENOMEM;
14942 14947
@@ -14950,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev,
14950 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) || 14955 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
14951 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev), 14956 nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
14952 NL80211_ATTR_PAD) || 14957 NL80211_ATTR_PAD) ||
14953 nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
14954 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) || 14958 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
14955 nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) || 14959 nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
14956 (unencrypted && nla_put_flag(msg, 14960 (unencrypted && nla_put_flag(msg,
14957 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT))) 14961 NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
14958 goto nla_put_failure; 14962 goto nla_put_failure;
14959 14963
14964 frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
14965 if (!frame)
14966 goto nla_put_failure;
14967
14968 skb_copy_bits(skb, 0, nla_data(frame), skb->len);
14960 genlmsg_end(msg, hdr); 14969 genlmsg_end(msg, hdr);
14961 14970
14962 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid); 14971 return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
@@ -14967,14 +14976,12 @@ static int __nl80211_rx_control_port(struct net_device *dev,
14967} 14976}
14968 14977
14969bool cfg80211_rx_control_port(struct net_device *dev, 14978bool cfg80211_rx_control_port(struct net_device *dev,
14970 const u8 *buf, size_t len, 14979 struct sk_buff *skb, bool unencrypted)
14971 const u8 *addr, u16 proto, bool unencrypted)
14972{ 14980{
14973 int ret; 14981 int ret;
14974 14982
14975 trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted); 14983 trace_cfg80211_rx_control_port(dev, skb, unencrypted);
14976 ret = __nl80211_rx_control_port(dev, buf, len, addr, proto, 14984 ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
14977 unencrypted, GFP_ATOMIC);
14978 trace_cfg80211_return_bool(ret == 0); 14985 trace_cfg80211_return_bool(ret == 0);
14979 return ret == 0; 14986 return ret == 0;
14980} 14987}
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bbe6298e4bb9..4fc66a117b7d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
2240 * as some drivers used this to restore its orig_* reg domain. 2240 * as some drivers used this to restore its orig_* reg domain.
2241 */ 2241 */
2242 if (initiator == NL80211_REGDOM_SET_BY_CORE && 2242 if (initiator == NL80211_REGDOM_SET_BY_CORE &&
2243 wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) 2243 wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
2244 !(wiphy->regulatory_flags &
2245 REGULATORY_WIPHY_SELF_MANAGED))
2244 reg_call_notifier(wiphy, lr); 2246 reg_call_notifier(wiphy, lr);
2245 return; 2247 return;
2246 } 2248 }
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
2787 } 2789 }
2788} 2790}
2789 2791
2790static bool reg_only_self_managed_wiphys(void)
2791{
2792 struct cfg80211_registered_device *rdev;
2793 struct wiphy *wiphy;
2794 bool self_managed_found = false;
2795
2796 ASSERT_RTNL();
2797
2798 list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
2799 wiphy = &rdev->wiphy;
2800 if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
2801 self_managed_found = true;
2802 else
2803 return false;
2804 }
2805
2806 /* make sure at least one self-managed wiphy exists */
2807 return self_managed_found;
2808}
2809
2810/* 2792/*
2811 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* 2793 * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
2812 * Regulatory hints come on a first come first serve basis and we 2794 * Regulatory hints come on a first come first serve basis and we
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void)
2839 spin_unlock(&reg_requests_lock); 2821 spin_unlock(&reg_requests_lock);
2840 2822
2841 notify_self_managed_wiphys(reg_request); 2823 notify_self_managed_wiphys(reg_request);
2842 if (reg_only_self_managed_wiphys()) {
2843 reg_free_request(reg_request);
2844 return;
2845 }
2846 2824
2847 reg_process_hint(reg_request); 2825 reg_process_hint(reg_request);
2848 2826
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 2b417a2fe63f..7c73510b161f 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
2627); 2627);
2628 2628
2629TRACE_EVENT(cfg80211_rx_control_port, 2629TRACE_EVENT(cfg80211_rx_control_port,
2630 TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len, 2630 TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
2631 const u8 *addr, u16 proto, bool unencrypted), 2631 bool unencrypted),
2632 TP_ARGS(netdev, buf, len, addr, proto, unencrypted), 2632 TP_ARGS(netdev, skb, unencrypted),
2633 TP_STRUCT__entry( 2633 TP_STRUCT__entry(
2634 NETDEV_ENTRY 2634 NETDEV_ENTRY
2635 MAC_ENTRY(addr) 2635 __field(int, len)
2636 MAC_ENTRY(from)
2636 __field(u16, proto) 2637 __field(u16, proto)
2637 __field(bool, unencrypted) 2638 __field(bool, unencrypted)
2638 ), 2639 ),
2639 TP_fast_assign( 2640 TP_fast_assign(
2640 NETDEV_ASSIGN; 2641 NETDEV_ASSIGN;
2641 MAC_ASSIGN(addr, addr); 2642 __entry->len = skb->len;
2642 __entry->proto = proto; 2643 MAC_ASSIGN(from, eth_hdr(skb)->h_source);
2644 __entry->proto = be16_to_cpu(skb->protocol);
2643 __entry->unencrypted = unencrypted; 2645 __entry->unencrypted = unencrypted;
2644 ), 2646 ),
2645 TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s", 2647 TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
2646 NETDEV_PR_ARG, MAC_PR_ARG(addr), 2648 NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
2647 __entry->proto, BOOL_TO_STR(__entry->unencrypted)) 2649 __entry->proto, BOOL_TO_STR(__entry->unencrypted))
2648); 2650);
2649 2651
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 72335c2e8108..4e937cd7c17d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -84,10 +84,8 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
84{ 84{
85 int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len); 85 int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
86 86
87 if (err) { 87 if (err)
88 xdp_return_buff(xdp);
89 xs->rx_dropped++; 88 xs->rx_dropped++;
90 }
91 89
92 return err; 90 return err;
93} 91}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 52ecaf770642..8a64b150be54 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -250,7 +250,7 @@ static inline bool xskq_full_desc(struct xsk_queue *q)
250 250
251static inline bool xskq_empty_desc(struct xsk_queue *q) 251static inline bool xskq_empty_desc(struct xsk_queue *q)
252{ 252{
253 return xskq_nb_free(q, q->prod_tail, 1) == q->nentries; 253 return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
254} 254}
255 255
256void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props); 256void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
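
xskq_nb_free() caches the consumer pointer and refreshes it only when the cached value cannot satisfy the request, so asking whether at least one slot is free can return a stale count that never equals nentries; asking for nentries forces the refresh unless the ring genuinely is empty. A sketch of that cached free-count behaviour (field names loosely mirror the header):

#include <stdio.h>

struct ring {
        unsigned int nentries;          /* power of two */
        unsigned int prod_tail;         /* producer position */
        unsigned int consumer;          /* shared consumer position */
        unsigned int cons_cache;        /* producer's cached consumer */
};

/* Free slots, refreshing the cached consumer only when the cached
 * value cannot satisfy 'want'. */
static unsigned int nb_free(struct ring *q, unsigned int want)
{
        unsigned int free = q->nentries - (q->prod_tail - q->cons_cache);

        if (free >= want)
                return free;            /* possibly stale, but good enough */
        q->cons_cache = q->consumer;
        return q->nentries - (q->prod_tail - q->cons_cache);
}

static int ring_empty(struct ring *q)
{
        /* Ask for the full capacity so a stale cache can't satisfy us. */
        return nb_free(q, q->nentries) == q->nentries;
}

int main(void)
{
        /* everything produced has been consumed, but the cache is cold */
        struct ring q = { .nentries = 8, .prod_tail = 4, .consumer = 4 };

        printf("empty via want=1: %s\n",
               nb_free(&q, 1) == q.nentries ? "yes" : "no (stale cache)");
        printf("empty via want=n: %s\n", ring_empty(&q) ? "yes" : "no");
        return 0;
}
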
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5f48251c1319..7c5e8978aeaa 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2286,6 +2286,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2286 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) 2286 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2287 return make_blackhole(net, dst_orig->ops->family, dst_orig); 2287 return make_blackhole(net, dst_orig->ops->family, dst_orig);
2288 2288
2289 if (IS_ERR(dst))
2290 dst_release(dst_orig);
2291
2289 return dst; 2292 return dst;
2290} 2293}
2291EXPORT_SYMBOL(xfrm_lookup_route); 2294EXPORT_SYMBOL(xfrm_lookup_route);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 080035f056d9..33878e6e0d0a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1025,10 +1025,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
1025{ 1025{
1026 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); 1026 struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
1027 1027
1028 if (nlsk) 1028 if (!nlsk) {
1029 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); 1029 kfree_skb(skb);
1030 else 1030 return -EPIPE;
1031 return -1; 1031 }
1032
1033 return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
1032} 1034}
1033 1035
1034static inline unsigned int xfrm_spdinfo_msgsize(void) 1036static inline unsigned int xfrm_spdinfo_msgsize(void)
@@ -1671,9 +1673,11 @@ static inline unsigned int userpolicy_type_attrsize(void)
1671#ifdef CONFIG_XFRM_SUB_POLICY 1673#ifdef CONFIG_XFRM_SUB_POLICY
1672static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) 1674static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
1673{ 1675{
1674 struct xfrm_userpolicy_type upt = { 1676 struct xfrm_userpolicy_type upt;
1675 .type = type, 1677
1676 }; 1678 /* Sadly there are two holes in struct xfrm_userpolicy_type */
1679 memset(&upt, 0, sizeof(upt));
1680 upt.type = type;
1677 1681
1678 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); 1682 return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
1679} 1683}
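
As the added comment says, struct xfrm_userpolicy_type has padding holes, and an initializer is not guaranteed to zero padding bytes, so the old code could copy stale stack bytes to userspace; memset() before assigning the members makes every byte deterministic. A userspace demonstration (the layout here is illustrative, not the real xfrm_userpolicy_type):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct upt_like {
        uint8_t  type;          /* 3 padding bytes follow */
        uint32_t reserved1;
        uint8_t  reserved2;     /* 3 more padding bytes at the tail */
};

static void fill_upt(struct upt_like *upt, uint8_t type)
{
        /* memset() covers the padding holes that an initializer is
         * not guaranteed to clear. */
        memset(upt, 0, sizeof(*upt));
        upt->type = type;
}

int main(void)
{
        struct upt_like upt;

        memset(&upt, 0xAA, sizeof(upt));        /* simulate a dirty stack */
        fill_upt(&upt, 1);

        const unsigned char *p = (const unsigned char *)&upt;
        for (size_t i = 0; i < sizeof(upt); i++)
                printf("%02x ", p[i]);          /* every byte deterministic */
        printf("\n");
        return 0;
}
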
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 303e9e7161f3..4938dcbaecbf 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
14#include <uapi/linux/bpf.h> 14#include <uapi/linux/bpf.h>
15#include "bpf_helpers.h" 15#include "bpf_helpers.h"
16 16
17#define MAX_CPUS 12 /* WARNING - sync with _user.c */ 17#define MAX_CPUS 64 /* WARNING - sync with _user.c */
18 18
19/* Special map type that can XDP_REDIRECT frames to another CPU */ 19/* Special map type that can XDP_REDIRECT frames to another CPU */
20struct bpf_map_def SEC("maps") cpu_map = { 20struct bpf_map_def SEC("maps") cpu_map = {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index f6efaefd485b..4b4d78fffe30 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
19#include <arpa/inet.h> 19#include <arpa/inet.h>
20#include <linux/if_link.h> 20#include <linux/if_link.h>
21 21
22#define MAX_CPUS 12 /* WARNING - sync with _kern.c */ 22#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
23 23
24/* How many xdp_progs are defined in _kern.c */ 24/* How many xdp_progs are defined in _kern.c */
25#define MAX_PROG 5 25#define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
527 * procedure. 527 * procedure.
528 */ 528 */
529 create_cpu_entry(1, 1024, 0, false); 529 create_cpu_entry(1, 1024, 0, false);
530 create_cpu_entry(1, 128, 0, false); 530 create_cpu_entry(1, 8, 0, false);
531 create_cpu_entry(1, 16000, 0, false); 531 create_cpu_entry(1, 16000, 0, false);
532} 532}
533 533
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index b593b36ccff8..38b2b4818e8e 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
14 CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment) 14 CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
15endif 15endif
16 16
17ifdef CONFIG_UBSAN_NULL
18 CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
19endif
20
21 # -fsanitize=* options makes GCC less smart than usual and 17 # -fsanitize=* options makes GCC less smart than usual and
22 # increase number of 'maybe-uninitialized false-positives 18 # increase number of 'maybe-uninitialized false-positives
23 CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized) 19 CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h
index ac5ba55066dd..985534d0b448 100644
--- a/tools/arch/powerpc/include/uapi/asm/unistd.h
+++ b/tools/arch/powerpc/include/uapi/asm/unistd.h
@@ -399,5 +399,6 @@
399#define __NR_pkey_free 385 399#define __NR_pkey_free 385
400#define __NR_pkey_mprotect 386 400#define __NR_pkey_mprotect 386
401#define __NR_rseq 387 401#define __NR_rseq 387
402#define __NR_io_pgetevents 388
402 403
403#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 404#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/tools/arch/x86/include/asm/mcsafe_test.h b/tools/arch/x86/include/asm/mcsafe_test.h
new file mode 100644
index 000000000000..2ccd588fbad4
--- /dev/null
+++ b/tools/arch/x86/include/asm/mcsafe_test.h
@@ -0,0 +1,13 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _MCSAFE_TEST_H_
3#define _MCSAFE_TEST_H_
4
5.macro MCSAFE_TEST_CTL
6.endm
7
8.macro MCSAFE_TEST_SRC reg count target
9.endm
10
11.macro MCSAFE_TEST_DST reg count target
12.endm
13#endif /* _MCSAFE_TEST_H_ */
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..298ef1479240 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -3,6 +3,7 @@
3#include <linux/linkage.h> 3#include <linux/linkage.h>
4#include <asm/errno.h> 4#include <asm/errno.h>
5#include <asm/cpufeatures.h> 5#include <asm/cpufeatures.h>
6#include <asm/mcsafe_test.h>
6#include <asm/alternative-asm.h> 7#include <asm/alternative-asm.h>
7#include <asm/export.h> 8#include <asm/export.h>
8 9
@@ -183,12 +184,15 @@ ENTRY(memcpy_orig)
183ENDPROC(memcpy_orig) 184ENDPROC(memcpy_orig)
184 185
185#ifndef CONFIG_UML 186#ifndef CONFIG_UML
187
188MCSAFE_TEST_CTL
189
186/* 190/*
187 * memcpy_mcsafe_unrolled - memory copy with machine check exception handling 191 * __memcpy_mcsafe - memory copy with machine check exception handling
188 * Note that we only catch machine checks when reading the source addresses. 192 * Note that we only catch machine checks when reading the source addresses.
189 * Writes to target are posted and don't generate machine checks. 193 * Writes to target are posted and don't generate machine checks.
190 */ 194 */
191ENTRY(memcpy_mcsafe_unrolled) 195ENTRY(__memcpy_mcsafe)
192 cmpl $8, %edx 196 cmpl $8, %edx
193 /* Less than 8 bytes? Go to byte copy loop */ 197 /* Less than 8 bytes? Go to byte copy loop */
194 jb .L_no_whole_words 198 jb .L_no_whole_words
@@ -204,58 +208,33 @@ ENTRY(memcpy_mcsafe_unrolled)
204 subl $8, %ecx 208 subl $8, %ecx
205 negl %ecx 209 negl %ecx
206 subl %ecx, %edx 210 subl %ecx, %edx
207.L_copy_leading_bytes: 211.L_read_leading_bytes:
208 movb (%rsi), %al 212 movb (%rsi), %al
213 MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
214 MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
215.L_write_leading_bytes:
209 movb %al, (%rdi) 216 movb %al, (%rdi)
210 incq %rsi 217 incq %rsi
211 incq %rdi 218 incq %rdi
212 decl %ecx 219 decl %ecx
213 jnz .L_copy_leading_bytes 220 jnz .L_read_leading_bytes
214 221
215.L_8byte_aligned: 222.L_8byte_aligned:
216 /* Figure out how many whole cache lines (64-bytes) to copy */
217 movl %edx, %ecx
218 andl $63, %edx
219 shrl $6, %ecx
220 jz .L_no_whole_cache_lines
221
222 /* Loop copying whole cache lines */
223.L_cache_w0: movq (%rsi), %r8
224.L_cache_w1: movq 1*8(%rsi), %r9
225.L_cache_w2: movq 2*8(%rsi), %r10
226.L_cache_w3: movq 3*8(%rsi), %r11
227 movq %r8, (%rdi)
228 movq %r9, 1*8(%rdi)
229 movq %r10, 2*8(%rdi)
230 movq %r11, 3*8(%rdi)
231.L_cache_w4: movq 4*8(%rsi), %r8
232.L_cache_w5: movq 5*8(%rsi), %r9
233.L_cache_w6: movq 6*8(%rsi), %r10
234.L_cache_w7: movq 7*8(%rsi), %r11
235 movq %r8, 4*8(%rdi)
236 movq %r9, 5*8(%rdi)
237 movq %r10, 6*8(%rdi)
238 movq %r11, 7*8(%rdi)
239 leaq 64(%rsi), %rsi
240 leaq 64(%rdi), %rdi
241 decl %ecx
242 jnz .L_cache_w0
243
244 /* Are there any trailing 8-byte words? */
245.L_no_whole_cache_lines:
246 movl %edx, %ecx 223 movl %edx, %ecx
247 andl $7, %edx 224 andl $7, %edx
248 shrl $3, %ecx 225 shrl $3, %ecx
249 jz .L_no_whole_words 226 jz .L_no_whole_words
250 227
251 /* Copy trailing words */ 228.L_read_words:
252.L_copy_trailing_words:
253 movq (%rsi), %r8 229 movq (%rsi), %r8
254 mov %r8, (%rdi) 230 MCSAFE_TEST_SRC %rsi 8 .E_read_words
255 leaq 8(%rsi), %rsi 231 MCSAFE_TEST_DST %rdi 8 .E_write_words
256 leaq 8(%rdi), %rdi 232.L_write_words:
233 movq %r8, (%rdi)
234 addq $8, %rsi
235 addq $8, %rdi
257 decl %ecx 236 decl %ecx
258 jnz .L_copy_trailing_words 237 jnz .L_read_words
259 238
260 /* Any trailing bytes? */ 239 /* Any trailing bytes? */
261.L_no_whole_words: 240.L_no_whole_words:
@@ -264,38 +243,55 @@ ENTRY(memcpy_mcsafe_unrolled)
264 243
265 /* Copy trailing bytes */ 244 /* Copy trailing bytes */
266 movl %edx, %ecx 245 movl %edx, %ecx
267.L_copy_trailing_bytes: 246.L_read_trailing_bytes:
268 movb (%rsi), %al 247 movb (%rsi), %al
248 MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
249 MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
250.L_write_trailing_bytes:
269 movb %al, (%rdi) 251 movb %al, (%rdi)
270 incq %rsi 252 incq %rsi
271 incq %rdi 253 incq %rdi
272 decl %ecx 254 decl %ecx
273 jnz .L_copy_trailing_bytes 255 jnz .L_read_trailing_bytes
274 256
275 /* Copy successful. Return zero */ 257 /* Copy successful. Return zero */
276.L_done_memcpy_trap: 258.L_done_memcpy_trap:
277 xorq %rax, %rax 259 xorq %rax, %rax
278 ret 260 ret
279ENDPROC(memcpy_mcsafe_unrolled) 261ENDPROC(__memcpy_mcsafe)
280EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled) 262EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
281 263
282 .section .fixup, "ax" 264 .section .fixup, "ax"
283 /* Return -EFAULT for any failure */ 265 /*
284.L_memcpy_mcsafe_fail: 266 * Return number of bytes not copied for any failure. Note that
285 mov $-EFAULT, %rax 267 * there is no "tail" handling since the source buffer is 8-byte
268 * aligned and poison is cacheline aligned.
269 */
270.E_read_words:
271 shll $3, %ecx
272.E_leading_bytes:
273 addl %edx, %ecx
274.E_trailing_bytes:
275 mov %ecx, %eax
286 ret 276 ret
287 277
278 /*
279 * For write fault handling, given the destination is unaligned,
280 * we handle faults on multi-byte writes with a byte-by-byte
281 * copy up to the write-protected page.
282 */
283.E_write_words:
284 shll $3, %ecx
285 addl %edx, %ecx
286 movl %ecx, %edx
287 jmp mcsafe_handle_tail
288
288 .previous 289 .previous
289 290
290 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) 291 _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
291 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) 292 _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
292 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) 293 _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
293 _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) 294 _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
294 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) 295 _ASM_EXTABLE(.L_write_words, .E_write_words)
295 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) 296 _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
296 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
297 _ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
298 _ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
299 _ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
300 _ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
301#endif 297#endif
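
The rework above also changes the failure contract: instead of a blanket -EFAULT, __memcpy_mcsafe now reports how many bytes were left uncopied. Read faults are resolved entirely in the .fixup stubs (the source is 8-byte aligned and poison is cacheline aligned, so no tail handling is needed), while write faults on multi-byte stores jump to mcsafe_handle_tail for a byte-wise retry up to the faulting page. A minimal caller sketch, assuming only the declaration shown in the perf bench stub further down (copy_chunk is a hypothetical name):

    /* Sketch: consuming the bytes-not-copied return convention. */
    #include <stddef.h>

    unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);

    static size_t copy_chunk(void *dst, const void *src, size_t len)
    {
        /* rem == 0 means complete success; otherwise the first
         * len - rem bytes of dst are valid and rem were not copied. */
        unsigned long rem = __memcpy_mcsafe(dst, src, len);

        return len - rem;
    }
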
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 32f9e397a6c0..3f140eff039f 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -217,6 +217,14 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
217 int err; 217 int err;
218 int fd; 218 int fd;
219 219
220 if (argc < 3) {
221 p_err("too few arguments, id ID and FILE path is required");
222 return -1;
223 } else if (argc > 3) {
224 p_err("too many arguments");
225 return -1;
226 }
227
220 if (!is_prefix(*argv, "id")) { 228 if (!is_prefix(*argv, "id")) {
221 p_err("expected 'id' got %s", *argv); 229 p_err("expected 'id' got %s", *argv);
222 return -1; 230 return -1;
@@ -230,9 +238,6 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
230 } 238 }
231 NEXT_ARG(); 239 NEXT_ARG();
232 240
233 if (argc != 1)
234 usage();
235
236 fd = get_fd_by_id(id); 241 fd = get_fd_by_id(id);
237 if (fd < 0) { 242 if (fd < 0) {
238 p_err("can't get prog by id (%u): %s", id, strerror(errno)); 243 p_err("can't get prog by id (%u): %s", id, strerror(errno));
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 097b1a5e046b..f74a8bcbda87 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -36,6 +36,7 @@
36#include <assert.h> 36#include <assert.h>
37#include <errno.h> 37#include <errno.h>
38#include <fcntl.h> 38#include <fcntl.h>
39#include <linux/kernel.h>
39#include <stdbool.h> 40#include <stdbool.h>
40#include <stdio.h> 41#include <stdio.h>
41#include <stdlib.h> 42#include <stdlib.h>
@@ -90,7 +91,8 @@ static bool map_is_map_of_progs(__u32 type)
90static void *alloc_value(struct bpf_map_info *info) 91static void *alloc_value(struct bpf_map_info *info)
91{ 92{
92 if (map_is_per_cpu(info->type)) 93 if (map_is_per_cpu(info->type))
93 return malloc(info->value_size * get_possible_cpus()); 94 return malloc(round_up(info->value_size, 8) *
95 get_possible_cpus());
94 else 96 else
95 return malloc(info->value_size); 97 return malloc(info->value_size);
96} 98}
@@ -161,9 +163,10 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
161 jsonw_name(json_wtr, "value"); 163 jsonw_name(json_wtr, "value");
162 print_hex_data_json(value, info->value_size); 164 print_hex_data_json(value, info->value_size);
163 } else { 165 } else {
164 unsigned int i, n; 166 unsigned int i, n, step;
165 167
166 n = get_possible_cpus(); 168 n = get_possible_cpus();
169 step = round_up(info->value_size, 8);
167 170
168 jsonw_name(json_wtr, "key"); 171 jsonw_name(json_wtr, "key");
169 print_hex_data_json(key, info->key_size); 172 print_hex_data_json(key, info->key_size);
@@ -176,7 +179,7 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
176 jsonw_int_field(json_wtr, "cpu", i); 179 jsonw_int_field(json_wtr, "cpu", i);
177 180
178 jsonw_name(json_wtr, "value"); 181 jsonw_name(json_wtr, "value");
179 print_hex_data_json(value + i * info->value_size, 182 print_hex_data_json(value + i * step,
180 info->value_size); 183 info->value_size);
181 184
182 jsonw_end_object(json_wtr); 185 jsonw_end_object(json_wtr);
@@ -207,9 +210,10 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
207 210
208 printf("\n"); 211 printf("\n");
209 } else { 212 } else {
210 unsigned int i, n; 213 unsigned int i, n, step;
211 214
212 n = get_possible_cpus(); 215 n = get_possible_cpus();
216 step = round_up(info->value_size, 8);
213 217
214 printf("key:\n"); 218 printf("key:\n");
215 fprint_hex(stdout, key, info->key_size, " "); 219 fprint_hex(stdout, key, info->key_size, " ");
@@ -217,7 +221,7 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
217 for (i = 0; i < n; i++) { 221 for (i = 0; i < n; i++) {
218 printf("value (CPU %02d):%c", 222 printf("value (CPU %02d):%c",
219 i, info->value_size > 16 ? '\n' : ' '); 223 i, info->value_size > 16 ? '\n' : ' ');
220 fprint_hex(stdout, value + i * info->value_size, 224 fprint_hex(stdout, value + i * step,
221 info->value_size, " "); 225 info->value_size, " ");
222 printf("\n"); 226 printf("\n");
223 } 227 }
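
The map.c hunks above track a kernel ABI detail: per-CPU map values are handed to user space in 8-byte-aligned slots, so both the allocation and the per-CPU stride must use round_up(value_size, 8). A self-contained sketch of the layout math (round_up8 is a local stand-in for the round_up() pulled in from linux/kernel.h):

    /* Sketch: with value_size not a multiple of 8, indexing by
     * i * value_size (the old code) misreads every CPU after the
     * first; the slot stride is round_up(value_size, 8). */
    static inline unsigned int round_up8(unsigned int x)
    {
        return (x + 7) & ~7u;
    }

    static void *percpu_value(void *buf, unsigned int cpu,
                              unsigned int value_size)
    {
        unsigned int step = round_up8(value_size);

        return (unsigned char *)buf + cpu * step;
    }
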
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 59b19b6a40d7..b7db3261c62d 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1857,7 +1857,8 @@ union bpf_attr {
1857 * is resolved), the nexthop address is returned in ipv4_dst 1857 * is resolved), the nexthop address is returned in ipv4_dst
1858 * or ipv6_dst based on family, smac is set to mac address of 1858 * or ipv6_dst based on family, smac is set to mac address of
1859 * egress device, dmac is set to nexthop mac address, rt_metric 1859 * egress device, dmac is set to nexthop mac address, rt_metric
1860 * is set to metric from route (IPv4/IPv6 only). 1860 * is set to metric from route (IPv4/IPv6 only), and ifindex
1861 * is set to the device index of the nexthop from the FIB lookup.
1861 * 1862 *
1862 * *plen* argument is the size of the passed in struct. 1863 * *plen* argument is the size of the passed in struct.
1863 * *flags* argument can be a combination of one or more of the 1864 * *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
1873 * *ctx* is either **struct xdp_md** for XDP programs or 1874 * *ctx* is either **struct xdp_md** for XDP programs or
1874 * **struct sk_buff** tc cls_act programs. 1875 * **struct sk_buff** tc cls_act programs.
1875 * Return 1876 * Return
1876 * Egress device index on success, 0 if packet needs to continue 1877 * * < 0 if any input argument is invalid
1877 * up the stack for further processing or a negative error in case 1878 * * 0 on success (packet is forwarded, nexthop neighbor exists)
1878 * of failure. 1879 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
1880 * * packet is not forwarded or needs assist from full stack
1879 * 1881 *
1880 * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) 1882 * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
1881 * Description 1883 * Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
2612#define BPF_FIB_LOOKUP_DIRECT BIT(0) 2614#define BPF_FIB_LOOKUP_DIRECT BIT(0)
2613#define BPF_FIB_LOOKUP_OUTPUT BIT(1) 2615#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
2614 2616
2617enum {
2618 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
2619 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
2620 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
2621 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
2622 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
2623 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
2624 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
2625 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
2626 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
2627};
2628
2615struct bpf_fib_lookup { 2629struct bpf_fib_lookup {
2616 /* input: network family for lookup (AF_INET, AF_INET6) 2630 /* input: network family for lookup (AF_INET, AF_INET6)
2617 * output: network family of egress nexthop 2631 * output: network family of egress nexthop
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {
2625 2639
2626 /* total length of packet from network header - used for MTU check */ 2640 /* total length of packet from network header - used for MTU check */
2627 __u16 tot_len; 2641 __u16 tot_len;
2628 __u32 ifindex; /* L3 device index for lookup */ 2642
2643 /* input: L3 device index for lookup
2644 * output: device index from FIB lookup
2645 */
2646 __u32 ifindex;
2629 2647
2630 union { 2648 union {
2631 /* inputs to lookup */ 2649 /* inputs to lookup */
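
With the revised contract documented above, callers branch on three classes of return value instead of treating the result as an egress ifindex. A hedged BPF C sketch (helper declarations assumed to come from the selftests' bpf_helpers.h; a real program would also write params->smac/dmac into the frame before redirecting):

    /* Sketch: interpreting bpf_fib_lookup()'s new return values in XDP. */
    #include <linux/bpf.h>

    static int fib_forward(struct xdp_md *ctx, struct bpf_fib_lookup *params)
    {
        int rc = bpf_fib_lookup(ctx, params, sizeof(*params), 0);

        if (rc < 0)
            return XDP_ABORTED;                  /* invalid input argument */
        if (rc == BPF_FIB_LKUP_RET_SUCCESS)      /* == 0: nexthop resolved */
            return bpf_redirect(params->ifindex, 0); /* ifindex is output */
        switch (rc) {                            /* > 0: not forwarded */
        case BPF_FIB_LKUP_RET_BLACKHOLE:
        case BPF_FIB_LKUP_RET_UNREACHABLE:
        case BPF_FIB_LKUP_RET_PROHIBIT:
            return XDP_DROP;                     /* safe to drop */
        default:
            return XDP_PASS;                     /* needs full-stack assist */
        }
    }
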
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 0b5ddbe135a4..972265f32871 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -76,7 +76,7 @@ struct btf_type {
76 */ 76 */
77#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 77#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
78#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 78#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
79#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff) 79#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
80 80
81/* Attributes stored in the BTF_INT_ENCODING */ 81/* Attributes stored in the BTF_INT_ENCODING */
82#define BTF_INT_SIGNED (1 << 0) 82#define BTF_INT_SIGNED (1 << 0)
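
The one-byte mask matters because nbits shares its __u32 with the offset and encoding fields; bits 8-15 are reserved, so the old 0x0000ffff mask let reserved bits corrupt the decoded width. A tiny decode sketch using the corrected layout:

    /* Sketch: decoding the __u32 that follows a BTF_KIND_INT btf_type. */
    #include <stdint.h>

    static uint32_t btf_int_bits(uint32_t val)
    {
        return val & 0x000000ff;        /* e.g. 32 for a plain int */
    }

    static uint32_t btf_int_offset(uint32_t val)
    {
        return (val & 0x00ff0000) >> 16;
    }
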
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index b8e288a1f740..eeb787b1c53c 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
143 PERF_SAMPLE_PHYS_ADDR = 1U << 19, 143 PERF_SAMPLE_PHYS_ADDR = 1U << 19,
144 144
145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ 145 PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
146
147 __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
146}; 148};
147 149
148/* 150/*
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 8c54a4b6f187..c36a3a76986a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,8 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1// SPDX-License-Identifier: LGPL-2.1
2/* Copyright (c) 2018 Facebook */ 2/* Copyright (c) 2018 Facebook */
3 3
4#include <stdlib.h> 4#include <stdlib.h>
5#include <stdint.h>
6#include <string.h> 5#include <string.h>
7#include <unistd.h> 6#include <unistd.h>
8#include <errno.h> 7#include <errno.h>
@@ -27,13 +26,13 @@ struct btf {
27 struct btf_type **types; 26 struct btf_type **types;
28 const char *strings; 27 const char *strings;
29 void *nohdr_data; 28 void *nohdr_data;
30 uint32_t nr_types; 29 __u32 nr_types;
31 uint32_t types_size; 30 __u32 types_size;
32 uint32_t data_size; 31 __u32 data_size;
33 int fd; 32 int fd;
34}; 33};
35 34
36static const char *btf_name_by_offset(const struct btf *btf, uint32_t offset) 35static const char *btf_name_by_offset(const struct btf *btf, __u32 offset)
37{ 36{
38 if (offset < btf->hdr->str_len) 37 if (offset < btf->hdr->str_len)
39 return &btf->strings[offset]; 38 return &btf->strings[offset];
@@ -45,7 +44,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
45{ 44{
46 if (btf->types_size - btf->nr_types < 2) { 45 if (btf->types_size - btf->nr_types < 2) {
47 struct btf_type **new_types; 46 struct btf_type **new_types;
48 u32 expand_by, new_size; 47 __u32 expand_by, new_size;
49 48
50 if (btf->types_size == BTF_MAX_NR_TYPES) 49 if (btf->types_size == BTF_MAX_NR_TYPES)
51 return -E2BIG; 50 return -E2BIG;
@@ -72,7 +71,7 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
72static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log) 71static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
73{ 72{
74 const struct btf_header *hdr = btf->hdr; 73 const struct btf_header *hdr = btf->hdr;
75 u32 meta_left; 74 __u32 meta_left;
76 75
77 if (btf->data_size < sizeof(struct btf_header)) { 76 if (btf->data_size < sizeof(struct btf_header)) {
78 elog("BTF header not found\n"); 77 elog("BTF header not found\n");
@@ -151,7 +150,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
151 150
152 while (next_type < end_type) { 151 while (next_type < end_type) {
153 struct btf_type *t = next_type; 152 struct btf_type *t = next_type;
154 uint16_t vlen = BTF_INFO_VLEN(t->info); 153 __u16 vlen = BTF_INFO_VLEN(t->info);
155 int err; 154 int err;
156 155
157 next_type += sizeof(*t); 156 next_type += sizeof(*t);
@@ -190,8 +189,7 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
190 return 0; 189 return 0;
191} 190}
192 191
193static const struct btf_type *btf_type_by_id(const struct btf *btf, 192const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
194 uint32_t type_id)
195{ 193{
196 if (type_id > btf->nr_types) 194 if (type_id > btf->nr_types)
197 return NULL; 195 return NULL;
@@ -209,7 +207,7 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
209 return !t || btf_type_is_void(t); 207 return !t || btf_type_is_void(t);
210} 208}
211 209
212static int64_t btf_type_size(const struct btf_type *t) 210static __s64 btf_type_size(const struct btf_type *t)
213{ 211{
214 switch (BTF_INFO_KIND(t->info)) { 212 switch (BTF_INFO_KIND(t->info)) {
215 case BTF_KIND_INT: 213 case BTF_KIND_INT:
@@ -226,15 +224,15 @@ static int64_t btf_type_size(const struct btf_type *t)
226 224
227#define MAX_RESOLVE_DEPTH 32 225#define MAX_RESOLVE_DEPTH 32
228 226
229int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id) 227__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
230{ 228{
231 const struct btf_array *array; 229 const struct btf_array *array;
232 const struct btf_type *t; 230 const struct btf_type *t;
233 uint32_t nelems = 1; 231 __u32 nelems = 1;
234 int64_t size = -1; 232 __s64 size = -1;
235 int i; 233 int i;
236 234
237 t = btf_type_by_id(btf, type_id); 235 t = btf__type_by_id(btf, type_id);
238 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); 236 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
239 i++) { 237 i++) {
240 size = btf_type_size(t); 238 size = btf_type_size(t);
@@ -259,7 +257,7 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
259 return -EINVAL; 257 return -EINVAL;
260 } 258 }
261 259
262 t = btf_type_by_id(btf, type_id); 260 t = btf__type_by_id(btf, type_id);
263 } 261 }
264 262
265 if (size < 0) 263 if (size < 0)
@@ -271,9 +269,9 @@ int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id)
271 return nelems * size; 269 return nelems * size;
272} 270}
273 271
274int32_t btf__find_by_name(const struct btf *btf, const char *type_name) 272__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
275{ 273{
276 uint32_t i; 274 __u32 i;
277 275
278 if (!strcmp(type_name, "void")) 276 if (!strcmp(type_name, "void"))
279 return 0; 277 return 0;
@@ -302,10 +300,9 @@ void btf__free(struct btf *btf)
302 free(btf); 300 free(btf);
303} 301}
304 302
305struct btf *btf__new(uint8_t *data, uint32_t size, 303struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
306 btf_print_fn_t err_log)
307{ 304{
308 uint32_t log_buf_size = 0; 305 __u32 log_buf_size = 0;
309 char *log_buf = NULL; 306 char *log_buf = NULL;
310 struct btf *btf; 307 struct btf *btf;
311 int err; 308 int err;
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 74bb344035bb..caac3a404dc5 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -1,22 +1,24 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: LGPL-2.1 */
2/* Copyright (c) 2018 Facebook */ 2/* Copyright (c) 2018 Facebook */
3 3
4#ifndef __BPF_BTF_H 4#ifndef __BPF_BTF_H
5#define __BPF_BTF_H 5#define __BPF_BTF_H
6 6
7#include <stdint.h> 7#include <linux/types.h>
8 8
9#define BTF_ELF_SEC ".BTF" 9#define BTF_ELF_SEC ".BTF"
10 10
11struct btf; 11struct btf;
12struct btf_type;
12 13
13typedef int (*btf_print_fn_t)(const char *, ...) 14typedef int (*btf_print_fn_t)(const char *, ...)
14 __attribute__((format(printf, 1, 2))); 15 __attribute__((format(printf, 1, 2)));
15 16
16void btf__free(struct btf *btf); 17void btf__free(struct btf *btf);
17struct btf *btf__new(uint8_t *data, uint32_t size, btf_print_fn_t err_log); 18struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
18int32_t btf__find_by_name(const struct btf *btf, const char *type_name); 19__s32 btf__find_by_name(const struct btf *btf, const char *type_name);
19int64_t btf__resolve_size(const struct btf *btf, uint32_t type_id); 20const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id);
21__s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
20int btf__fd(const struct btf *btf); 22int btf__fd(const struct btf *btf);
21 23
22#endif 24#endif
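
With btf__type_by_id() exported and the API moved to kernel __u/__s types, a consumer can resolve a named type and its size using only the declarations above. A hedged sketch (size_of_type is a hypothetical helper; construction and teardown of the struct btf are omitted, since these hunks don't show btf__new()'s failure convention):

    /* Sketch: name -> type id -> size, with the reworked signatures. */
    static __s64 size_of_type(const struct btf *btf, const char *name)
    {
        __s32 id = btf__find_by_name(btf, name);    /* < 0 on error */
        const struct btf_type *t;

        if (id < 0)
            return id;
        t = btf__type_by_id(btf, id);   /* NULL if id is out of range */
        if (!t)
            return -1;
        return btf__resolve_size(btf, id);          /* < 0 on error */
    }
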
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a1e96b5de5ff..1aafdbe827fe 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -36,6 +36,7 @@
36#include <linux/err.h> 36#include <linux/err.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/bpf.h> 38#include <linux/bpf.h>
39#include <linux/btf.h>
39#include <linux/list.h> 40#include <linux/list.h>
40#include <linux/limits.h> 41#include <linux/limits.h>
41#include <sys/stat.h> 42#include <sys/stat.h>
@@ -216,8 +217,8 @@ struct bpf_map {
216 size_t offset; 217 size_t offset;
217 int map_ifindex; 218 int map_ifindex;
218 struct bpf_map_def def; 219 struct bpf_map_def def;
219 uint32_t btf_key_type_id; 220 __u32 btf_key_type_id;
220 uint32_t btf_value_type_id; 221 __u32 btf_value_type_id;
221 void *priv; 222 void *priv;
222 bpf_map_clear_priv_t clear_priv; 223 bpf_map_clear_priv_t clear_priv;
223}; 224};
@@ -1014,68 +1015,72 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1014 1015
1015static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 1016static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1016{ 1017{
1018 const struct btf_type *container_type;
1019 const struct btf_member *key, *value;
1017 struct bpf_map_def *def = &map->def; 1020 struct bpf_map_def *def = &map->def;
1018 const size_t max_name = 256; 1021 const size_t max_name = 256;
1019 int64_t key_size, value_size; 1022 char container_name[max_name];
1020 int32_t key_id, value_id; 1023 __s64 key_size, value_size;
1021 char name[max_name]; 1024 __s32 container_id;
1022 1025
1023 /* Find key type by name from BTF */ 1026 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1024 if (snprintf(name, max_name, "%s_key", map->name) == max_name) { 1027 max_name) {
1025 pr_warning("map:%s length of BTF key_type:%s_key is too long\n", 1028 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
1026 map->name, map->name); 1029 map->name, map->name);
1027 return -EINVAL; 1030 return -EINVAL;
1028 } 1031 }
1029 1032
1030 key_id = btf__find_by_name(btf, name); 1033 container_id = btf__find_by_name(btf, container_name);
1031 if (key_id < 0) { 1034 if (container_id < 0) {
1032 pr_debug("map:%s key_type:%s cannot be found in BTF\n", 1035 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1033 map->name, name); 1036 map->name, container_name);
1034 return key_id; 1037 return container_id;
1035 } 1038 }
1036 1039
1037 key_size = btf__resolve_size(btf, key_id); 1040 container_type = btf__type_by_id(btf, container_id);
1038 if (key_size < 0) { 1041 if (!container_type) {
1039 pr_warning("map:%s key_type:%s cannot get the BTF type_size\n", 1042 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1040 map->name, name); 1043 map->name, container_id);
1041 return key_size; 1044 return -EINVAL;
1042 } 1045 }
1043 1046
1044 if (def->key_size != key_size) { 1047 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1045 pr_warning("map:%s key_type:%s has BTF type_size:%u != key_size:%u\n", 1048 BTF_INFO_VLEN(container_type->info) < 2) {
1046 map->name, name, (unsigned int)key_size, def->key_size); 1049 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1050 map->name, container_name);
1047 return -EINVAL; 1051 return -EINVAL;
1048 } 1052 }
1049 1053
1050 /* Find value type from BTF */ 1054 key = (struct btf_member *)(container_type + 1);
1051 if (snprintf(name, max_name, "%s_value", map->name) == max_name) { 1055 value = key + 1;
1052 pr_warning("map:%s length of BTF value_type:%s_value is too long\n", 1056
1053 map->name, map->name); 1057 key_size = btf__resolve_size(btf, key->type);
1054 return -EINVAL; 1058 if (key_size < 0) {
1059 pr_warning("map:%s invalid BTF key_type_size\n",
1060 map->name);
1061 return key_size;
1055 } 1062 }
1056 1063
1057 value_id = btf__find_by_name(btf, name); 1064 if (def->key_size != key_size) {
1058 if (value_id < 0) { 1065 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1059 pr_debug("map:%s value_type:%s cannot be found in BTF\n", 1066 map->name, (__u32)key_size, def->key_size);
1060 map->name, name); 1067 return -EINVAL;
1061 return value_id;
1062 } 1068 }
1063 1069
1064 value_size = btf__resolve_size(btf, value_id); 1070 value_size = btf__resolve_size(btf, value->type);
1065 if (value_size < 0) { 1071 if (value_size < 0) {
1066 pr_warning("map:%s value_type:%s cannot get the BTF type_size\n", 1072 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1067 map->name, name);
1068 return value_size; 1073 return value_size;
1069 } 1074 }
1070 1075
1071 if (def->value_size != value_size) { 1076 if (def->value_size != value_size) {
1072 pr_warning("map:%s value_type:%s has BTF type_size:%u != value_size:%u\n", 1077 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1073 map->name, name, (unsigned int)value_size, def->value_size); 1078 map->name, (__u32)value_size, def->value_size);
1074 return -EINVAL; 1079 return -EINVAL;
1075 } 1080 }
1076 1081
1077 map->btf_key_type_id = key_id; 1082 map->btf_key_type_id = key->type;
1078 map->btf_value_type_id = value_id; 1083 map->btf_value_type_id = value->type;
1079 1084
1080 return 0; 1085 return 0;
1081} 1086}
@@ -2089,12 +2094,12 @@ const char *bpf_map__name(struct bpf_map *map)
2089 return map ? map->name : NULL; 2094 return map ? map->name : NULL;
2090} 2095}
2091 2096
2092uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map) 2097__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2093{ 2098{
2094 return map ? map->btf_key_type_id : 0; 2099 return map ? map->btf_key_type_id : 0;
2095} 2100}
2096 2101
2097uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map) 2102__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2098{ 2103{
2099 return map ? map->btf_value_type_id : 0; 2104 return map ? map->btf_value_type_id : 0;
2100} 2105}
@@ -2268,8 +2273,8 @@ bpf_perf_event_read_simple(void *mem, unsigned long size,
2268 volatile struct perf_event_mmap_page *header = mem; 2273 volatile struct perf_event_mmap_page *header = mem;
2269 __u64 data_tail = header->data_tail; 2274 __u64 data_tail = header->data_tail;
2270 __u64 data_head = header->data_head; 2275 __u64 data_head = header->data_head;
2276 int ret = LIBBPF_PERF_EVENT_ERROR;
2271 void *base, *begin, *end; 2277 void *base, *begin, *end;
2272 int ret;
2273 2278
2274 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */ 2279 asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
2275 if (data_head == data_tail) 2280 if (data_head == data_tail)
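
The reworked bpf_map_find_btf_info() no longer looks up "<map>_key" and "<map>_value" by name; it expects a single container struct named ____btf_map_<map> whose first member is the key and second member the value, and it records those members' type ids. A sketch of the BTF shape the loader now searches for (my_map and struct val are hypothetical; the macro that emits this is added to bpf_helpers.h further down):

    /* Sketch: for a map named "my_map", the loader finds this struct in
     * BTF, checks it is a BTF_KIND_STRUCT with >= 2 members, then takes
     * member 0's type as btf_key_type_id and member 1's type as
     * btf_value_type_id. */
    struct val {
        unsigned int a;
        unsigned int b;
    };

    struct ____btf_map_my_map {
        int        key;
        struct val value;
    };
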
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 09976531aa74..b33ae02f7d0e 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -244,8 +244,8 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
244int bpf_map__fd(struct bpf_map *map); 244int bpf_map__fd(struct bpf_map *map);
245const struct bpf_map_def *bpf_map__def(struct bpf_map *map); 245const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
246const char *bpf_map__name(struct bpf_map *map); 246const char *bpf_map__name(struct bpf_map *map);
247uint32_t bpf_map__btf_key_type_id(const struct bpf_map *map); 247__u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
248uint32_t bpf_map__btf_value_type_id(const struct bpf_map *map); 248__u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
249 249
250typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); 250typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
251int bpf_map__set_priv(struct bpf_map *map, void *priv, 251int bpf_map__set_priv(struct bpf_map *map, void *priv,
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index 63a74c32ddc5..e33ef5bc31c5 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -1,6 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <string.h> 2#include <string.h>
3 3
4#include <linux/stddef.h>
4#include <linux/perf_event.h> 5#include <linux/perf_event.h>
5 6
6#include "../../util/intel-pt.h" 7#include "../../util/intel-pt.h"
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 06bae7023a51..950539f9a4f7 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -2,6 +2,7 @@
2#include <stdbool.h> 2#include <stdbool.h>
3#include <errno.h> 3#include <errno.h>
4 4
5#include <linux/stddef.h>
5#include <linux/perf_event.h> 6#include <linux/perf_event.h>
6 7
7#include "../../perf.h" 8#include "../../perf.h"
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index 60bf11943047..eafce1a130a1 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -7,6 +7,7 @@ perf-y += futex-wake-parallel.o
7perf-y += futex-requeue.o 7perf-y += futex-requeue.o
8perf-y += futex-lock-pi.o 8perf-y += futex-lock-pi.o
9 9
10perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
10perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o 11perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
11perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o 12perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
12 13
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
index b43f8d2a34ec..9ad015a1e202 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -6,6 +6,7 @@
6#define altinstr_replacement text 6#define altinstr_replacement text
7#define globl p2align 4; .globl 7#define globl p2align 4; .globl
8#define _ASM_EXTABLE_FAULT(x, y) 8#define _ASM_EXTABLE_FAULT(x, y)
9#define _ASM_EXTABLE(x, y)
9 10
10#include "../../arch/x86/lib/memcpy_64.S" 11#include "../../arch/x86/lib/memcpy_64.S"
11/* 12/*
diff --git a/tools/perf/bench/mem-memcpy-x86-64-lib.c b/tools/perf/bench/mem-memcpy-x86-64-lib.c
new file mode 100644
index 000000000000..4130734dde84
--- /dev/null
+++ b/tools/perf/bench/mem-memcpy-x86-64-lib.c
@@ -0,0 +1,24 @@
1/*
2 * From code in arch/x86/lib/usercopy_64.c, copied to keep tools/ copy
3 * of the kernel's arch/x86/lib/memcpy_64.s used in 'perf bench mem memcpy'
4 * happy.
5 */
6#include <linux/types.h>
7
8unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
9unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
10
11unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
12{
13 for (; len; --len, to++, from++) {
14 /*
15 * Call the assembly routine back directly since
 16 * memcpy_mcsafe() may silently fall back to memcpy.
17 */
18 unsigned long rem = __memcpy_mcsafe(to, from, 1);
19
20 if (rem)
21 break;
22 }
23 return len;
24}
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index a1a97956136f..d215714f48df 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -5,6 +5,7 @@
5#include <time.h> 5#include <time.h>
6#include <stdbool.h> 6#include <stdbool.h>
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/stddef.h>
8#include <linux/perf_event.h> 9#include <linux/perf_event.h>
9 10
10extern bool test_attr__enabled; 11extern bool test_attr__enabled;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 90d4577a92dc..6d7fe44aadc0 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -2,6 +2,7 @@
2#ifndef __PERF_HEADER_H 2#ifndef __PERF_HEADER_H
3#define __PERF_HEADER_H 3#define __PERF_HEADER_H
4 4
5#include <linux/stddef.h>
5#include <linux/perf_event.h> 6#include <linux/perf_event.h>
6#include <sys/types.h> 7#include <sys/types.h>
7#include <stdbool.h> 8#include <stdbool.h>
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index 760558dcfd18..cae1a9a39722 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -10,6 +10,7 @@
10#define __PERF_NAMESPACES_H 10#define __PERF_NAMESPACES_H
11 11
12#include <sys/types.h> 12#include <sys/types.h>
13#include <linux/stddef.h>
13#include <linux/perf_event.h> 14#include <linux/perf_event.h>
14#include <linux/refcount.h> 15#include <linux/refcount.h>
15#include <linux/types.h> 16#include <linux/types.h>
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index d39e4ff7d0bf..a6db83a88e85 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -106,7 +106,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
106\fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software, reflecting what was requested. The hardware counters reflect what was actually achieved. 106\fBC1%, C2%, C3%\fP The residency percentage that Linux requested C1, C2, C3.... The system summary is the average of all CPUs in the system. Note that these are software, reflecting what was requested. The hardware counters reflect what was actually achieved.
107\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. 107\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters.
108\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. 108\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
109\fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. 109\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
110\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms. 110\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
111\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz. 111\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
112\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. 112\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters.
@@ -114,7 +114,7 @@ The system configuration dump (if --quiet is not used) is followed by statistics
114\fBCorWatt\fP Watts consumed by the core part of the package. 114\fBCorWatt\fP Watts consumed by the core part of the package.
115\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors. 115\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors.
116\fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors. 116\fBRAMWatt\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
117\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. 117\fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package. Note that the system summary is the sum of the package throttling time, and thus may be higher than 100% on a multi-package system. Note that the meaning of this field is model specific. For example, some hardware increments this counter when RAPL responds to thermal limits, but does not increment this counter when RAPL responds to power limits. Comparing PkgWatt and PkgTmp to system limits is necessary.
118\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM. 118\fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
119.fi 119.fi
120.SH TOO MUCH INFORMATION EXAMPLE 120.SH TOO MUCH INFORMATION EXAMPLE
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 4d14bbbf9b63..980bd9d20646 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1163,9 +1163,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
1163 if (!printed || !summary_only) 1163 if (!printed || !summary_only)
1164 print_header("\t"); 1164 print_header("\t");
1165 1165
1166 if (topo.num_cpus > 1) 1166 format_counters(&average.threads, &average.cores, &average.packages);
1167 format_counters(&average.threads, &average.cores,
1168 &average.packages);
1169 1167
1170 printed = 1; 1168 printed = 1;
1171 1169
@@ -1692,7 +1690,7 @@ void get_apic_id(struct thread_data *t)
1692 t->x2apic_id = edx; 1690 t->x2apic_id = edx;
1693 1691
1694 if (debug && (t->apic_id != t->x2apic_id)) 1692 if (debug && (t->apic_id != t->x2apic_id))
1695 fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id); 1693 fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
1696} 1694}
1697 1695
1698/* 1696/*
@@ -2473,55 +2471,43 @@ int get_core_id(int cpu)
2473 2471
2474void set_node_data(void) 2472void set_node_data(void)
2475{ 2473{
2476 char path[80]; 2474 int pkg, node, lnode, cpu, cpux;
2477 FILE *filep; 2475 int cpu_count;
2478 int pkg, node, cpu; 2476
2479 2477 /* initialize logical_node_id */
2480 struct pkg_node_info { 2478 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu)
2481 int count; 2479 cpus[cpu].logical_node_id = -1;
2482 int min; 2480
2483 } *pni; 2481 cpu_count = 0;
2484 2482 for (pkg = 0; pkg < topo.num_packages; pkg++) {
2485 pni = calloc(topo.num_packages, sizeof(struct pkg_node_info)); 2483 lnode = 0;
2486 if (!pni) 2484 for (cpu = 0; cpu <= topo.max_cpu_num; ++cpu) {
2487 err(1, "calloc pkg_node_count"); 2485 if (cpus[cpu].physical_package_id != pkg)
2488 2486 continue;
2489 for (pkg = 0; pkg < topo.num_packages; pkg++) 2487 /* find a cpu with an unset logical_node_id */
2490 pni[pkg].min = topo.num_cpus; 2488 if (cpus[cpu].logical_node_id != -1)
2491 2489 continue;
2492 for (node = 0; node <= topo.max_node_num; node++) { 2490 cpus[cpu].logical_node_id = lnode;
2493 /* find the "first" cpu in the node */ 2491 node = cpus[cpu].physical_node_id;
2494 sprintf(path, "/sys/bus/node/devices/node%d/cpulist", node); 2492 cpu_count++;
2495 filep = fopen(path, "r"); 2493 /*
2496 if (!filep) 2494 * find all matching cpus on this pkg and set
2497 continue; 2495 * the logical_node_id
2498 fscanf(filep, "%d", &cpu); 2496 */
2499 fclose(filep); 2497 for (cpux = cpu; cpux <= topo.max_cpu_num; cpux++) {
2500 2498 if ((cpus[cpux].physical_package_id == pkg) &&
2501 pkg = cpus[cpu].physical_package_id; 2499 (cpus[cpux].physical_node_id == node)) {
2502 pni[pkg].count++; 2500 cpus[cpux].logical_node_id = lnode;
2503 2501 cpu_count++;
2504 if (node < pni[pkg].min) 2502 }
2505 pni[pkg].min = node; 2503 }
2506 } 2504 lnode++;
2507 2505 if (lnode > topo.nodes_per_pkg)
2508 for (pkg = 0; pkg < topo.num_packages; pkg++) 2506 topo.nodes_per_pkg = lnode;
2509 if (pni[pkg].count > topo.nodes_per_pkg) 2507 }
2510 topo.nodes_per_pkg = pni[0].count; 2508 if (cpu_count >= topo.max_cpu_num)
2511 2509 break;
2512 /* Fake 1 node per pkg for machines that don't
2513 * expose nodes and thus avoid -nan results
2514 */
2515 if (topo.nodes_per_pkg == 0)
2516 topo.nodes_per_pkg = 1;
2517
2518 for (cpu = 0; cpu < topo.num_cpus; cpu++) {
2519 pkg = cpus[cpu].physical_package_id;
2520 node = cpus[cpu].physical_node_id;
2521 cpus[cpu].logical_node_id = node - pni[pkg].min;
2522 } 2510 }
2523 free(pni);
2524
2525} 2511}
2526 2512
2527int get_physical_node_id(struct cpu_topology *thiscpu) 2513int get_physical_node_id(struct cpu_topology *thiscpu)
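
The set_node_data() rewrite above derives logical node ids from the per-CPU topology already parsed from sysfs, rather than re-reading node cpulist files: within each package, the first CPU whose node is still unassigned opens the next logical node, and every CPU sharing that (package, physical node) pair inherits it. A toy sketch of the numbering (hypothetical struct; physical node ids need not be dense per package, e.g. physical nodes {0, 3} on one package become logical nodes {0, 1}):

    /* Sketch: first-seen-order logical node assignment per package. */
    struct cpu { int pkg; int phys_node; int lnode; };

    static void assign_lnodes(struct cpu *cpus, int ncpu, int npkg)
    {
        for (int i = 0; i < ncpu; i++)
            cpus[i].lnode = -1;

        for (int pkg = 0; pkg < npkg; pkg++) {
            int lnode = 0;

            for (int i = 0; i < ncpu; i++) {
                if (cpus[i].pkg != pkg || cpus[i].lnode != -1)
                    continue;
                /* new node: tag this cpu and all its siblings */
                for (int j = i; j < ncpu; j++)
                    if (cpus[j].pkg == pkg &&
                        cpus[j].phys_node == cpus[i].phys_node)
                        cpus[j].lnode = lnode;
                lnode++;
            }
        }
    }
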
@@ -4471,7 +4457,9 @@ void process_cpuid()
4471 family = (fms >> 8) & 0xf; 4457 family = (fms >> 8) & 0xf;
4472 model = (fms >> 4) & 0xf; 4458 model = (fms >> 4) & 0xf;
4473 stepping = fms & 0xf; 4459 stepping = fms & 0xf;
4474 if (family == 6 || family == 0xf) 4460 if (family == 0xf)
4461 family += (fms >> 20) & 0xff;
4462 if (family >= 6)
4475 model += ((fms >> 16) & 0xf) << 4; 4463 model += ((fms >> 16) & 0xf) << 4;
4476 4464
4477 if (!quiet) { 4465 if (!quiet) {
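
The CPUID fix in the hunk just above adds the extended-family adjustment that was missing entirely, and widens the extended-model condition from (family == 6 || family == 0xf) to any post-extension family >= 6; without it, e.g. AMD family 0x17 (base 0xf + extended 0x8) decoded wrongly. A standalone sketch of the decode, where fms is CPUID.1:EAX:

    /* Sketch: x86 family/model/stepping decode, as corrected above. */
    static void decode_fms(unsigned int fms, unsigned int *family,
                           unsigned int *model, unsigned int *stepping)
    {
        *family   = (fms >> 8) & 0xf;
        *model    = (fms >> 4) & 0xf;
        *stepping = fms & 0xf;

        if (*family == 0xf)                     /* extended family */
            *family += (fms >> 20) & 0xff;
        if (*family >= 6)                       /* extended model */
            *model += ((fms >> 16) & 0xf) << 4;
    }
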
@@ -4840,16 +4828,8 @@ void topology_probe()
4840 siblings = get_thread_siblings(&cpus[i]); 4828 siblings = get_thread_siblings(&cpus[i]);
4841 if (siblings > max_siblings) 4829 if (siblings > max_siblings)
4842 max_siblings = siblings; 4830 max_siblings = siblings;
4843 if (cpus[i].thread_id != -1) 4831 if (cpus[i].thread_id == 0)
4844 topo.num_cores++; 4832 topo.num_cores++;
4845
4846 if (debug > 1)
4847 fprintf(outf,
4848 "cpu %d pkg %d node %d core %d thread %d\n",
4849 i, cpus[i].physical_package_id,
4850 cpus[i].physical_node_id,
4851 cpus[i].physical_core_id,
4852 cpus[i].thread_id);
4853 } 4833 }
4854 4834
4855 topo.cores_per_node = max_core_id + 1; 4835 topo.cores_per_node = max_core_id + 1;
@@ -4875,6 +4855,20 @@ void topology_probe()
4875 topo.threads_per_core = max_siblings; 4855 topo.threads_per_core = max_siblings;
4876 if (debug > 1) 4856 if (debug > 1)
4877 fprintf(outf, "max_siblings %d\n", max_siblings); 4857 fprintf(outf, "max_siblings %d\n", max_siblings);
4858
4859 if (debug < 1)
4860 return;
4861
4862 for (i = 0; i <= topo.max_cpu_num; ++i) {
4863 fprintf(outf,
4864 "cpu %d pkg %d node %d lnode %d core %d thread %d\n",
4865 i, cpus[i].physical_package_id,
4866 cpus[i].physical_node_id,
4867 cpus[i].logical_node_id,
4868 cpus[i].physical_core_id,
4869 cpus[i].thread_id);
4870 }
4871
4878} 4872}
4879 4873
4880void 4874void
@@ -5102,7 +5096,7 @@ int get_and_dump_counters(void)
5102} 5096}
5103 5097
5104void print_version() { 5098void print_version() {
5105 fprintf(outf, "turbostat version 18.06.20" 5099 fprintf(outf, "turbostat version 18.07.27"
5106 " - Len Brown <lenb@kernel.org>\n"); 5100 " - Len Brown <lenb@kernel.org>\n");
5107} 5101}
5108 5102
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 7a6214e9ae58..a362e3d7abc6 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -105,7 +105,7 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
105 105
106BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris) 106BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
107BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) 107BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
108BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --version 2>&1 | grep LLVM) 108BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
109 109
110ifneq ($(BTF_LLC_PROBE),) 110ifneq ($(BTF_LLC_PROBE),)
111ifneq ($(BTF_PAHOLE_PROBE),) 111ifneq ($(BTF_PAHOLE_PROBE),)
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index f2f28b6c8915..810de20e8e26 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -158,6 +158,15 @@ struct bpf_map_def {
158 unsigned int numa_node; 158 unsigned int numa_node;
159}; 159};
160 160
161#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
162 struct ____btf_map_##name { \
163 type_key key; \
164 type_val value; \
165 }; \
166 struct ____btf_map_##name \
167 __attribute__ ((section(".maps." #name), used)) \
168 ____btf_map_##name = { }
169
161static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = 170static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
162 (void *) BPF_FUNC_skb_load_bytes; 171 (void *) BPF_FUNC_skb_load_bytes;
163static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = 172static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
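
BPF_ANNOTATE_KV_PAIR emits a dummy instance of a ____btf_map_<name> container struct into its own ".maps.<name>" section, so the compiler keeps the key/value types in BTF for libbpf's loader (the libbpf.c hunk earlier in this patch) to find. A usage sketch mirroring the test_btf_haskv.c change further down (struct ipv_counts fields as in that test):

    /* Sketch: the instance is never touched at run time; "used" just
     * keeps the symbol, and with it the BTF type, from being dropped. */
    struct ipv_counts {
        unsigned int v4;
        unsigned int v6;
    };

    struct bpf_map_def SEC("maps") btf_map = {
        .type        = BPF_MAP_TYPE_ARRAY,
        .key_size    = sizeof(int),
        .value_size  = sizeof(struct ipv_counts),
        .max_entries = 4,
    };
    BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
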
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 3619f3023088..ffdd27737c9e 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -247,6 +247,34 @@ static struct btf_raw_test raw_tests[] = {
247 .max_entries = 4, 247 .max_entries = 4,
248}, 248},
249 249
250{
251 .descr = "struct test #3 Invalid member offset",
252 .raw_types = {
253 /* int */ /* [1] */
254 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
255 /* int64 */ /* [2] */
256 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8),
257
258 /* struct A { */ /* [3] */
259 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 16),
260 BTF_MEMBER_ENC(NAME_TBD, 1, 64), /* int m; */
261 BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* int64 n; */
262 /* } */
263 BTF_END_RAW,
264 },
265 .str_sec = "\0A\0m\0n\0",
266 .str_sec_size = sizeof("\0A\0m\0n\0"),
267 .map_type = BPF_MAP_TYPE_ARRAY,
268 .map_name = "struct_test3_map",
269 .key_size = sizeof(int),
270 .value_size = 16,
271 .key_type_id = 1,
272 .value_type_id = 3,
273 .max_entries = 4,
274 .btf_load_err = true,
275 .err_str = "Invalid member bits_offset",
276},
277
250/* Test member exceeds the size of struct. 278/* Test member exceeds the size of struct.
251 * 279 *
252 * struct A { 280 * struct A {
@@ -479,7 +507,7 @@ static struct btf_raw_test raw_tests[] = {
479 .key_size = sizeof(int), 507 .key_size = sizeof(int),
480 .value_size = sizeof(void *) * 4, 508 .value_size = sizeof(void *) * 4,
481 .key_type_id = 1, 509 .key_type_id = 1,
482 .value_type_id = 4, 510 .value_type_id = 5,
483 .max_entries = 4, 511 .max_entries = 4,
484}, 512},
485 513
@@ -1264,6 +1292,88 @@ static struct btf_raw_test raw_tests[] = {
1264 .err_str = "type != 0", 1292 .err_str = "type != 0",
1265}, 1293},
1266 1294
1295{
1296 .descr = "arraymap invalid btf key (a bit field)",
1297 .raw_types = {
1298 /* int */ /* [1] */
1299 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1300 /* 32 bit int with 32 bit offset */ /* [2] */
1301 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 32, 32, 8),
1302 BTF_END_RAW,
1303 },
1304 .str_sec = "",
1305 .str_sec_size = sizeof(""),
1306 .map_type = BPF_MAP_TYPE_ARRAY,
1307 .map_name = "array_map_check_btf",
1308 .key_size = sizeof(int),
1309 .value_size = sizeof(int),
1310 .key_type_id = 2,
1311 .value_type_id = 1,
1312 .max_entries = 4,
1313 .map_create_err = true,
1314},
1315
1316{
1317 .descr = "arraymap invalid btf key (!= 32 bits)",
1318 .raw_types = {
1319 /* int */ /* [1] */
1320 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1321 /* 16 bit int with 0 bit offset */ /* [2] */
1322 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 16, 2),
1323 BTF_END_RAW,
1324 },
1325 .str_sec = "",
1326 .str_sec_size = sizeof(""),
1327 .map_type = BPF_MAP_TYPE_ARRAY,
1328 .map_name = "array_map_check_btf",
1329 .key_size = sizeof(int),
1330 .value_size = sizeof(int),
1331 .key_type_id = 2,
1332 .value_type_id = 1,
1333 .max_entries = 4,
1334 .map_create_err = true,
1335},
1336
1337{
1338 .descr = "arraymap invalid btf value (too small)",
1339 .raw_types = {
1340 /* int */ /* [1] */
1341 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1342 BTF_END_RAW,
1343 },
1344 .str_sec = "",
1345 .str_sec_size = sizeof(""),
1346 .map_type = BPF_MAP_TYPE_ARRAY,
1347 .map_name = "array_map_check_btf",
1348 .key_size = sizeof(int),
1349 /* btf_value_size < map->value_size */
1350 .value_size = sizeof(__u64),
1351 .key_type_id = 1,
1352 .value_type_id = 1,
1353 .max_entries = 4,
1354 .map_create_err = true,
1355},
1356
1357{
1358 .descr = "arraymap invalid btf value (too big)",
1359 .raw_types = {
1360 /* int */ /* [1] */
1361 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
1362 BTF_END_RAW,
1363 },
1364 .str_sec = "",
1365 .str_sec_size = sizeof(""),
1366 .map_type = BPF_MAP_TYPE_ARRAY,
1367 .map_name = "array_map_check_btf",
1368 .key_size = sizeof(int),
1369 /* btf_value_size > map->value_size */
1370 .value_size = sizeof(__u16),
1371 .key_type_id = 1,
1372 .value_type_id = 1,
1373 .max_entries = 4,
1374 .map_create_err = true,
1375},
1376
1267}; /* struct btf_raw_test raw_tests[] */ 1377}; /* struct btf_raw_test raw_tests[] */
1268 1378
1269static const char *get_next_str(const char *start, const char *end) 1379static const char *get_next_str(const char *start, const char *end)
@@ -2023,7 +2133,7 @@ static struct btf_raw_test pprint_test = {
2023 BTF_ENUM_ENC(NAME_TBD, 2), 2133 BTF_ENUM_ENC(NAME_TBD, 2),
2024 BTF_ENUM_ENC(NAME_TBD, 3), 2134 BTF_ENUM_ENC(NAME_TBD, 3),
2025 /* struct pprint_mapv */ /* [16] */ 2135 /* struct pprint_mapv */ /* [16] */
2026 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 28), 2136 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32),
2027 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ 2137 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
2028 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ 2138 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
2029 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ 2139 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/test_btf_haskv.c
index 8c7ca096ecf2..b21b876f475d 100644
--- a/tools/testing/selftests/bpf/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/test_btf_haskv.c
@@ -10,11 +10,6 @@ struct ipv_counts {
10 unsigned int v6; 10 unsigned int v6;
11}; 11};
12 12
13typedef int btf_map_key;
14typedef struct ipv_counts btf_map_value;
15btf_map_key dumm_key;
16btf_map_value dummy_value;
17
18struct bpf_map_def SEC("maps") btf_map = { 13struct bpf_map_def SEC("maps") btf_map = {
19 .type = BPF_MAP_TYPE_ARRAY, 14 .type = BPF_MAP_TYPE_ARRAY,
20 .key_size = sizeof(int), 15 .key_size = sizeof(int),
@@ -22,6 +17,8 @@ struct bpf_map_def SEC("maps") btf_map = {
22 .max_entries = 4, 17 .max_entries = 4,
23}; 18};
24 19
20BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
21
25struct dummy_tracepoint_args { 22struct dummy_tracepoint_args {
26 unsigned long long pad; 23 unsigned long long pad;
27 struct sock *sock; 24 struct sock *sock;
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
index 270fa8f49573..785eabf2a593 100755
--- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh
+++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
@@ -115,14 +115,14 @@ ip netns exec ns2 ip -6 route add fb00::6 encap bpf in obj test_lwt_seg6local.o
115ip netns exec ns2 ip -6 route add fd00::1 dev veth3 via fb00::43 scope link 115ip netns exec ns2 ip -6 route add fd00::1 dev veth3 via fb00::43 scope link
116 116
117ip netns exec ns3 ip -6 route add fc42::1 dev veth5 via fb00::65 117ip netns exec ns3 ip -6 route add fc42::1 dev veth5 via fb00::65
118ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF obj test_lwt_seg6local.o sec add_egr_x dev veth4 118ip netns exec ns3 ip -6 route add fd00::1 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec add_egr_x dev veth4
119 119
120ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF obj test_lwt_seg6local.o sec pop_egr dev veth6 120ip netns exec ns4 ip -6 route add fd00::2 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec pop_egr dev veth6
121ip netns exec ns4 ip -6 addr add fc42::1 dev lo 121ip netns exec ns4 ip -6 addr add fc42::1 dev lo
122ip netns exec ns4 ip -6 route add fd00::3 dev veth7 via fb00::87 122ip netns exec ns4 ip -6 route add fd00::3 dev veth7 via fb00::87
123 123
124ip netns exec ns5 ip -6 route add fd00::4 table 117 dev veth9 via fb00::109 124ip netns exec ns5 ip -6 route add fd00::4 table 117 dev veth9 via fb00::109
125ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF obj test_lwt_seg6local.o sec inspect_t dev veth8 125ip netns exec ns5 ip -6 route add fd00::3 encap seg6local action End.BPF endpoint obj test_lwt_seg6local.o sec inspect_t dev veth8
126 126
127ip netns exec ns6 ip -6 addr add fb00::6/16 dev lo 127ip netns exec ns6 ip -6 addr add fb00::6/16 dev lo
128ip netns exec ns6 ip -6 addr add fd00::4/16 dev lo 128ip netns exec ns6 ip -6 addr add fd00::4/16 dev lo
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 9e78df207919..0c7d9e556b47 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
354 while (s->bytes_recvd < total_bytes) { 354 while (s->bytes_recvd < total_bytes) {
355 if (txmsg_cork) { 355 if (txmsg_cork) {
356 timeout.tv_sec = 0; 356 timeout.tv_sec = 0;
357 timeout.tv_usec = 1000; 357 timeout.tv_usec = 300000;
358 } else { 358 } else {
359 timeout.tv_sec = 1; 359 timeout.tv_sec = 1;
360 timeout.tv_usec = 0; 360 timeout.tv_usec = 0;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index f5f7bcc96046..41106d9d5cc7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -12005,6 +12005,46 @@ static struct bpf_test tests[] = {
12005 .prog_type = BPF_PROG_TYPE_XDP, 12005 .prog_type = BPF_PROG_TYPE_XDP,
12006 }, 12006 },
12007 { 12007 {
12008 "xadd/w check whether src/dst got mangled, 1",
12009 .insns = {
12010 BPF_MOV64_IMM(BPF_REG_0, 1),
12011 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12012 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12013 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12014 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12015 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
12016 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12017 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12018 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
12019 BPF_EXIT_INSN(),
12020 BPF_MOV64_IMM(BPF_REG_0, 42),
12021 BPF_EXIT_INSN(),
12022 },
12023 .result = ACCEPT,
12024 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12025 .retval = 3,
12026 },
12027 {
12028 "xadd/w check whether src/dst got mangled, 2",
12029 .insns = {
12030 BPF_MOV64_IMM(BPF_REG_0, 1),
12031 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
12032 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
12033 BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12034 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12035 BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
12036 BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
12037 BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
12038 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
12039 BPF_EXIT_INSN(),
12040 BPF_MOV64_IMM(BPF_REG_0, 42),
12041 BPF_EXIT_INSN(),
12042 },
12043 .result = ACCEPT,
12044 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
12045 .retval = 3,
12046 },
12047 {
12008 "bpf_get_stack return R0 within range", 12048 "bpf_get_stack return R0 within range",
12009 .insns = { 12049 .insns = {
12010 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 12050 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
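
The two new verifier tests above pin down BPF_STX_XADD's semantics: an atomic add to memory that returns nothing, so the JIT must leave both the source register (R0, still 1 after two adds) and the destination register (R10) untouched, giving the final memory value 1 + 1 + 1 = 3. In C terms the word-sized variant behaves like this sketch (the fetched old value is discarded):

    /* Sketch: BPF_STX_XADD BPF_W, i.e. *(u32 *)(dst + off) += src. */
    #include <stdint.h>

    static void bpf_xadd_w(uint32_t *dst_mem, uint32_t src_reg)
    {
        (void)__atomic_fetch_add(dst_mem, src_reg, __ATOMIC_RELAXED);
        /* src_reg and the dst pointer are unchanged afterwards */
    }
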
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644
index 000000000000..3b1f45e13a2e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
@@ -0,0 +1,28 @@
1#!/bin/sh
2# description: Snapshot and tracing setting
3# flags: instance
4
5[ ! -f snapshot ] && exit_unsupported
6
7echo "Set tracing off"
8echo 0 > tracing_on
9
10echo "Allocate and take a snapshot"
11echo 1 > snapshot
12
13# Since trace buffer is empty, snapshot is also empty, but allocated
14grep -q "Snapshot is allocated" snapshot
15
16echo "Ensure keep tracing off"
17test `cat tracing_on` -eq 0
18
19echo "Set tracing on"
20echo 1 > tracing_on
21
22echo "Take a snapshot again"
23echo 1 > snapshot
24
25echo "Ensure keep tracing on"
26test `cat tracing_on` -eq 1
27
28exit 0
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index 77f762780199..e8c5dff448eb 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -402,7 +402,7 @@ int main(int argc, char *argv[])
402 exit(1); 402 exit(1);
403 } 403 }
404 404
405 fd = socket(AF_INET6, SOCK_STREAM, 0); 405 fd = socket(cfg_family, SOCK_STREAM, 0);
406 if (fd == -1) { 406 if (fd == -1) {
407 perror("socket"); 407 perror("socket");
408 exit(1); 408 exit(1);
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index c15f270e121d..65541c21a544 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -1,6 +1,6 @@
1#!/bin/bash 1#!/bin/bash
2# 2#
3# Usage: configinit.sh config-spec-file [ build output dir ] 3# Usage: configinit.sh config-spec-file build-output-dir results-dir
4# 4#
5# Create a .config file from the spec file. Run from the kernel source tree. 5# Create a .config file from the spec file. Run from the kernel source tree.
6# Exits with 0 if all went well, with 1 if all went well but the config 6# Exits with 0 if all went well, with 1 if all went well but the config
@@ -40,20 +40,18 @@ mkdir $T
40 40
41c=$1 41c=$1
42buildloc=$2 42buildloc=$2
43resdir=$3
43builddir= 44builddir=
44if test -n $buildloc 45if echo $buildloc | grep -q '^O='
45then 46then
46 if echo $buildloc | grep -q '^O=' 47 builddir=`echo $buildloc | sed -e 's/^O=//'`
48 if test ! -d $builddir
47 then 49 then
48 builddir=`echo $buildloc | sed -e 's/^O=//'` 50 mkdir $builddir
49 if test ! -d $builddir
50 then
51 mkdir $builddir
52 fi
53 else
54 echo Bad build directory: \"$buildloc\"
55 exit 2
56 fi 51 fi
52else
53 echo Bad build directory: \"$buildloc\"
54 exit 2
57fi 55fi
58 56
59sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh 57sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh
@@ -61,12 +59,12 @@ sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
61grep '^grep' < $T/u.sh > $T/upd.sh 59grep '^grep' < $T/u.sh > $T/upd.sh
62echo "cat - $c" >> $T/upd.sh 60echo "cat - $c" >> $T/upd.sh
63make mrproper 61make mrproper
64make $buildloc distclean > $builddir/Make.distclean 2>&1 62make $buildloc distclean > $resdir/Make.distclean 2>&1
65make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1 63make $buildloc $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
66mv $builddir/.config $builddir/.config.sav 64mv $builddir/.config $builddir/.config.sav
67sh $T/upd.sh < $builddir/.config.sav > $builddir/.config 65sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
68cp $builddir/.config $builddir/.config.new 66cp $builddir/.config $builddir/.config.new
69yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err 67yes '' | make $buildloc oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
70 68
71# verify new config matches specification. 69# verify new config matches specification.
72configcheck.sh $builddir/.config $c 70configcheck.sh $builddir/.config $c
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index 34d126734cde..9115fcdb5617 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -2,7 +2,7 @@
2# 2#
3# Build a kvm-ready Linux kernel from the tree in the current directory. 3# Build a kvm-ready Linux kernel from the tree in the current directory.
4# 4#
5# Usage: kvm-build.sh config-template build-dir 5# Usage: kvm-build.sh config-template build-dir resdir
6# 6#
7# This program is free software; you can redistribute it and/or modify 7# This program is free software; you can redistribute it and/or modify
8# it under the terms of the GNU General Public License as published by 8# it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ then
29 exit 1 29 exit 1
30fi 30fi
31builddir=${2} 31builddir=${2}
32resdir=${3}
32 33
33T=${TMPDIR-/tmp}/test-linux.sh.$$ 34T=${TMPDIR-/tmp}/test-linux.sh.$$
34trap 'rm -rf $T' 0 35trap 'rm -rf $T' 0
@@ -41,19 +42,19 @@ CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_CONSOLE=y
 ___EOF___
 
-configinit.sh $T/config O=$builddir
+configinit.sh $T/config O=$builddir $resdir
 retval=$?
 if test $retval -gt 1
 then
 	exit 2
 fi
 ncpus=`cpus2use.sh`
-make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
+make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
 retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
 then
 	echo Kernel build error
-	egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+	egrep "Stop|Error|error:|warning:" < $resdir/Make.out
 	echo Run aborted.
 	exit 3
 fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 477ecb1293ab..0fa8a61ccb7b 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -70,4 +70,5 @@ else
 	else
 		print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
 	fi
+	echo $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i > $i/console.log.rcu.diags
 fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index c27e97824163..c9bab57a77eb 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -39,6 +39,7 @@ do
 		head -1 $resdir/log
 	fi
 	TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+	rm -f $i/console.log.*.diags
 	kvm-recheck-${TORTURE_SUITE}.sh $i
 	if test -f "$i/console.log"
 	then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index c5b0f94341d9..f7247ee00514 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -98,14 +98,15 @@ then
 	ln -s $base_resdir/.config $resdir # for kvm-recheck.sh
 	# Arch-independent indicator
 	touch $resdir/builtkernel
-elif kvm-build.sh $T/Kc2 $builddir
+elif kvm-build.sh $T/Kc2 $builddir $resdir
 then
 	# Had to build a kernel for this test.
 	QEMU="`identify_qemu $builddir/vmlinux`"
 	BOOT_IMAGE="`identify_boot_image $QEMU`"
-	cp $builddir/Make*.out $resdir
 	cp $builddir/vmlinux $resdir
 	cp $builddir/.config $resdir
+	cp $builddir/Module.symvers $resdir > /dev/null || :
+	cp $builddir/System.map $resdir > /dev/null || :
 	if test -n "$BOOT_IMAGE"
 	then
 		cp $builddir/$BOOT_IMAGE $resdir
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 56610dbbdf73..5a7a62d76a50 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -347,7 +347,7 @@ function dump(first, pastlast, batchnum)
 	print "needqemurun="
 	jn=1
 	for (j = first; j < pastlast; j++) {
-		builddir=KVM "/b" jn
+		builddir=KVM "/b1"
 		cpusr[jn] = cpus[j];
 		if (cfrep[cf[j]] == "") {
 			cfr[jn] = cf[j];
diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh
index 17293436f551..84933f6aed77 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-console.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh
@@ -163,6 +163,13 @@ then
 	print_warning Summary: $summary
 	cat $T.diags >> $file.diags
 fi
+for i in $file.*.diags
+do
+	if test -f "$i"
+	then
+		cat $i >> $file.diags
+	fi
+done
 if ! test -s $file.diags
 then
 	rm -f $file.diags
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
index 5d2cc0bd50a0..5c3213cc3ad7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
@@ -1,5 +1,5 @@
-rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
-rcutree.gp_preinit_delay=3
+rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30
+rcutree.gp_preinit_delay=12
 rcutree.gp_init_delay=3
 rcutree.gp_cleanup_delay=3
 rcutree.kthread_prio=2
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
deleted file mode 100644
index 883149b5f2d1..000000000000
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
+++ /dev/null
@@ -1 +0,0 @@
-rcutree.rcu_fanout_exact=1
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index 24ec91041957..7bab8246392b 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -39,7 +39,7 @@ rcutorture_param_onoff () {
 	if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
 	then
 		echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
-		echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+		echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30
 	fi
 }
 
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 95dd14648ba5..0f395dfb7774 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -44,12 +44,25 @@
 
 /******************** Little Endian Handling ********************************/
 
-#define cpu_to_le16(x) htole16(x)
-#define cpu_to_le32(x) htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed. To solve this, we code cpu_to_le16/32 in a way
+ * that allows them to be used when initializing structures.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x) (x)
+#define cpu_to_le32(x) (x)
+#else
+#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x) \
+	((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
+	 (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
+#endif
+
 #define le32_to_cpu(x) le32toh(x)
 #define le16_to_cpu(x) le16toh(x)
 
-
 /******************** Messages and Errors ***********************************/
 
 static const char argv0[] = "ffs-test";
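
Aside (not part of the patch): the comment added above notes that cpu_to_le16/32 must be usable in structure initializers, where a call such as htole16()/htole32() is not a constant expression. Below is a minimal standalone sketch of the idea; the macro bodies are copied from the hunk above, while the test program around them is purely illustrative. Since USB data structures are little-endian on the wire, the point is that the bytes land in memory in little-endian order on either kind of host.

/*
 * Standalone sketch, not from the patch: the macro form is a
 * compile-time constant expression, so it is legal in a static
 * initializer, and it yields little-endian storage either way.
 */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define cpu_to_le32(x) (x)
#else
#define cpu_to_le32(x) \
	((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
	 (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
#endif

/* Legal: the initializer is a constant expression, not a call. */
static const uint32_t magic = cpu_to_le32(0x00010203u);

int main(void)
{
	const unsigned char *p = (const unsigned char *)&magic;

	/* Prints "03 02 01 00" on both little- and big-endian hosts:
	 * the value is stored little-endian regardless of host order. */
	printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}
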
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h
index 0ac3caf90877..d0351f83aebe 100644
--- a/tools/virtio/asm/barrier.h
+++ b/tools/virtio/asm/barrier.h
@@ -13,8 +13,8 @@
 } while (0);
 /* Weak barriers should be used. If not - it's a bug */
 # define mb() abort()
-# define rmb() abort()
-# define wmb() abort()
+# define dma_rmb() abort()
+# define dma_wmb() abort()
 #else
 #error Please fill in barrier macros
 #endif
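
Aside (not part of the patch): per the "Weak barriers should be used" comment, this header defines any barrier the virtio code under test must not reach as abort(), so a wrong barrier choice crashes the test run instead of passing silently; the hunk above merely retargets that trap from rmb()/wmb() to dma_rmb()/dma_wmb(). A minimal sketch of the idiom follows, with hypothetical names (fast_sync/forbidden_sync are inventions for illustration only).

/*
 * Sketch of the abort() trap idiom: the harness supplies the variant
 * the code under test is allowed to use, and turns the forbidden one
 * into an immediate, loud failure.
 */
#include <stdio.h>
#include <stdlib.h>

/* The permitted, cheap variant (here a no-op placeholder). */
#define fast_sync()      do { } while (0)
/* The forbidden variant: reaching it is a bug, so fail hard. */
#define forbidden_sync() abort()

int main(void)
{
	fast_sync();            /* fine */
	printf("ok\n");
	/* forbidden_sync(); */ /* would abort() the test run */
	return 0;
}
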
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index fca8381bbe04..fb22bccfbc8a 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -52,6 +52,11 @@ static inline void *kmalloc(size_t s, gfp_t gfp)
52 return __kmalloc_fake; 52 return __kmalloc_fake;
53 return malloc(s); 53 return malloc(s);
54} 54}
55static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp)
56{
57 return kmalloc(n * s, gfp);
58}
59
55static inline void *kzalloc(size_t s, gfp_t gfp) 60static inline void *kzalloc(size_t s, gfp_t gfp)
56{ 61{
57 void *p = kmalloc(s, gfp); 62 void *p = kmalloc(s, gfp);
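
Aside (not part of the patch): the kmalloc_array() stub above forwards straight to kmalloc(n * s, gfp) with no multiplication-overflow check, which is presumably acceptable for this userspace test harness; the in-kernel kmalloc_array(), by contrast, returns NULL when n * s would wrap. A sketch of a checked variant follows; kmalloc_array_checked is a hypothetical name, and plain malloc() stands in for the harness's kmalloc().

/*
 * Sketch only: an overflow-checked version of the stub, mirroring the
 * NULL-on-overflow behavior of the real kernel's kmalloc_array().
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static inline void *kmalloc_array_checked(unsigned n, size_t s)
{
	if (s != 0 && n > SIZE_MAX / s)	/* n * s would overflow */
		return NULL;
	return malloc(n * s);
}
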