author    David S. Miller <davem@davemloft.net>  2017-12-16 22:11:55 -0500
committer David S. Miller <davem@davemloft.net>  2017-12-16 22:11:55 -0500
commit    c30abd5e40dd863f88e26be09b6ce949145a630a
tree      5b25362084308502a336d8da26b8dc7430d7c812
parent    28dc4c8f4557d82e9be020e85e2362239270e704
parent    f3b5ad89de16f5d42e8ad36fbdf85f705c1ae051

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three sets of overlapping changes, two in the packet scheduler and one in
the meson-gxl PHY driver.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/arm64/silicon-errata.txt | 1
-rw-r--r--  Documentation/cgroup-v2.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/usb/am33xx-usb.txt | 2
-rw-r--r--  Documentation/filesystems/overlayfs.txt | 34
-rw-r--r--  Documentation/locking/crossrelease.txt | 874
-rw-r--r--  Documentation/virtual/kvm/api.txt | 15
-rw-r--r--  Documentation/vm/zswap.txt | 22
-rw-r--r--  MAINTAINERS | 5
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/am437x-cm-t43.dts | 4
-rw-r--r--  arch/arm/boot/dts/armada-385-db-ap.dts | 1
-rw-r--r--  arch/arm/boot/dts/armada-385-linksys.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/armada-385-synology-ds116.dts | 2
-rw-r--r--  arch/arm/boot/dts/armada-388-gp.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm-nsp.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/bcm283x.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/bcm958623hr.dts | 4
-rw-r--r--  arch/arm/boot/dts/bcm958625hr.dts | 4
-rw-r--r--  arch/arm/boot/dts/dm814x.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts | 3
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv.dtsi | 17
-rw-r--r--  arch/arm/boot/dts/meson.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/nspire.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-beagle-xm.dts | 1
-rw-r--r--  arch/arm/boot/dts/omap3-beagle.dts | 1
-rw-r--r--  arch/arm/boot/dts/omap3-cm-t3x.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-evm-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-gta04.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-igep0030-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-lilly-a83x.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-overo-base.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-pandora-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-tao3530.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap4-droid4-xt894.dts | 1
-rw-r--r--  arch/arm/boot/dts/omap4-duovero.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap4-panda-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap4-var-som-om44.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap4.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/omap5-board-common.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap5-cm-t54.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/r8a7790.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/r8a7792.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/r8a7793.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/r8a7794.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/vf610-zii-dev-rev-c.dts | 6
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 3
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 5
-rw-r--r--  arch/arm/mach-meson/platsmp.c | 2
-rw-r--r--  arch/arm/mach-omap2/cm_common.c | 6
-rw-r--r--  arch/arm/mach-omap2/omap-secure.c | 21
-rw-r--r--  arch/arm/mach-omap2/omap-secure.h | 4
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 10
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 1
-rw-r--r--  arch/arm/mach-omap2/pm.h | 4
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 13
-rw-r--r--  arch/arm/mach-omap2/prcm-common.h | 1
-rw-r--r--  arch/arm/mach-omap2/prm33xx.c | 12
-rw-r--r--  arch/arm/mach-omap2/sleep34xx.S | 26
-rw-r--r--  arch/arm64/Kconfig | 12
-rw-r--r--  arch/arm64/boot/dts/Makefile | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts | 1
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts | 1
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts | 3
-rw-r--r--  arch/arm64/include/asm/assembler.h | 10
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 3
-rw-r--r--  arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 41
-rw-r--r--  arch/arm64/kernel/cpu-reset.S | 1
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 3
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 2
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/kernel/head.S | 1
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm64/kernel/relocate_kernel.S | 1
-rw-r--r--  arch/arm64/kvm/debug.c | 21
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 57
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 1
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 37
-rw-r--r--  arch/arm64/mm/dump.c | 2
-rw-r--r--  arch/arm64/mm/fault.c | 5
-rw-r--r--  arch/arm64/mm/init.c | 3
-rw-r--r--  arch/riscv/include/asm/barrier.h | 19
-rw-r--r--  arch/riscv/kernel/setup.c | 11
-rw-r--r--  arch/riscv/kernel/sys_riscv.c | 2
-rw-r--r--  arch/s390/include/asm/pgtable.h | 6
-rw-r--r--  arch/s390/kernel/compat_linux.c | 1
-rw-r--r--  arch/s390/kvm/Makefile | 5
-rw-r--r--  arch/s390/kvm/diag.c | 5
-rw-r--r--  arch/s390/kvm/gaccess.h | 5
-rw-r--r--  arch/s390/kvm/guestdbg.c | 5
-rw-r--r--  arch/s390/kvm/intercept.c | 5
-rw-r--r--  arch/s390/kvm/interrupt.c | 5
-rw-r--r--  arch/s390/kvm/irq.h | 5
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 11
-rw-r--r--  arch/s390/kvm/kvm-s390.h | 5
-rw-r--r--  arch/s390/kvm/priv.c | 16
-rw-r--r--  arch/s390/kvm/sigp.c | 5
-rw-r--r--  arch/s390/kvm/vsie.c | 5
-rw-r--r--  arch/sparc/mm/gup.c | 4
-rw-r--r--  arch/um/include/asm/Kbuild | 1
-rw-r--r--  arch/x86/Kconfig.debug | 1
-rw-r--r--  arch/x86/boot/compressed/Makefile | 1
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 16
-rw-r--r--  arch/x86/boot/compressed/misc.c | 16
-rw-r--r--  arch/x86/boot/compressed/pgtable_64.c | 28
-rw-r--r--  arch/x86/boot/genimage.sh | 4
-rw-r--r--  arch/x86/crypto/salsa20_glue.c | 7
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 16
-rw-r--r--  arch/x86/include/asm/suspend_32.h | 8
-rw-r--r--  arch/x86/include/asm/suspend_64.h | 19
-rw-r--r--  arch/x86/kernel/smpboot.c | 4
-rw-r--r--  arch/x86/kvm/emulate.c | 24
-rw-r--r--  arch/x86/kvm/vmx.c | 6
-rw-r--r--  arch/x86/kvm/x86.c | 63
-rw-r--r--  arch/x86/lib/x86-opcode-map.txt | 13
-rw-r--r--  arch/x86/mm/ioremap.c | 4
-rw-r--r--  arch/x86/mm/kmmio.c | 12
-rw-r--r--  arch/x86/pci/fixup.c | 27
-rw-r--r--  arch/x86/power/cpu.c | 99
-rw-r--r--  arch/x86/xen/apic.c | 2
-rw-r--r--  crypto/af_alg.c | 13
-rw-r--r--  crypto/algif_aead.c | 2
-rw-r--r--  crypto/asymmetric_keys/pkcs7_parser.c | 4
-rw-r--r--  crypto/asymmetric_keys/pkcs7_trust.c | 2
-rw-r--r--  crypto/asymmetric_keys/pkcs7_verify.c | 9
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 7
-rw-r--r--  crypto/asymmetric_keys/x509_cert_parser.c | 2
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c | 8
-rw-r--r--  crypto/hmac.c | 6
-rw-r--r--  crypto/rsa_helper.c | 2
-rw-r--r--  crypto/salsa20_generic.c | 7
-rw-r--r--  crypto/shash.c | 5
-rw-r--r--  drivers/acpi/device_pm.c | 2
-rw-r--r--  drivers/ata/ahci_mtk.c | 6
-rw-r--r--  drivers/ata/ahci_qoriq.c | 12
-rw-r--r--  drivers/ata/libata-core.c | 12
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 16
-rw-r--r--  drivers/base/power/main.c | 15
-rw-r--r--  drivers/bus/arm-cci.c | 7
-rw-r--r--  drivers/bus/arm-ccn.c | 25
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 44
-rw-r--r--  drivers/char/ipmi/ipmi_si_parisc.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_pci.c | 7
-rw-r--r--  drivers/firmware/arm_scpi.c | 216
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 63
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 52
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 4
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 8
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c | 5
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_irq.c | 1
-rw-r--r--  drivers/hwtracing/stm/ftrace.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-cht-wc.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-stm32.h | 3
-rw-r--r--  drivers/i2c/busses/i2c-stm32f4.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 3
-rw-r--r--  drivers/infiniband/core/cma.c | 2
-rw-r--r--  drivers/infiniband/core/device.c | 2
-rw-r--r--  drivers/infiniband/core/iwcm.c | 2
-rw-r--r--  drivers/infiniband/core/nldev.c | 2
-rw-r--r--  drivers/infiniband/core/security.c | 7
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 22
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 26
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 1
-rw-r--r--  drivers/md/dm-bufio.c | 8
-rw-r--r--  drivers/md/dm-cache-target.c | 12
-rw-r--r--  drivers/md/dm-mpath.c | 67
-rw-r--r--  drivers/md/dm-snap.c | 48
-rw-r--r--  drivers/md/dm-table.c | 5
-rw-r--r--  drivers/md/dm-thin.c | 22
-rw-r--r--  drivers/misc/eeprom/at24.c | 26
-rw-r--r--  drivers/mmc/core/card.h | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/core/quirks.h | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 29
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 82
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 17
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 29
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 80
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h | 6
-rw-r--r--  drivers/net/ethernet/arc/emac_rockchip.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 18
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.c | 7
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 27
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 10
-rw-r--r--  drivers/net/hippi/rrunner.c | 2
-rw-r--r--  drivers/net/phy/at803x.c | 4
-rw-r--r--  drivers/net/phy/marvell.c | 4
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/meson-gxl.c | 73
-rw-r--r--  drivers/net/phy/phy.c | 9
-rw-r--r--  drivers/net/phy/phy_device.c | 10
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/of/of_mdio.c | 3
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 8
-rw-r--r--  drivers/pci/pci-driver.c | 2
-rw-r--r--  drivers/platform/x86/asus-wireless.c | 1
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 17
-rw-r--r--  drivers/platform/x86/dell-wmi.c | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 6
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 6
-rw-r--r--  drivers/s390/net/qeth_l3.h | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 36
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 75
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 8
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_im.h | 10
-rw-r--r--  drivers/scsi/libfc/fc_lport.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 2
-rw-r--r--  drivers/scsi/scsi_debugfs.c | 6
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 27
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/soc/amlogic/meson-gx-socinfo.c | 4
-rw-r--r--  drivers/staging/ccree/ssi_hash.c | 2
-rw-r--r--  drivers/staging/pi433/rf69.c | 2
-rw-r--r--  drivers/tee/optee/core.c | 1
-rw-r--r--  drivers/usb/core/config.c | 4
-rw-r--r--  drivers/usb/dwc2/core.h | 4
-rw-r--r--  drivers/usb/dwc2/gadget.c | 42
-rw-r--r--  drivers/usb/dwc2/params.c | 29
-rw-r--r--  drivers/usb/dwc3/dwc3-of-simple.c | 5
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/Kconfig | 4
-rw-r--r--  drivers/usb/gadget/legacy/Kconfig | 12
-rw-r--r--  drivers/usb/host/xhci-mem.c | 15
-rw-r--r--  drivers/usb/host/xhci-ring.c | 6
-rw-r--r--  drivers/usb/musb/da8xx.c | 10
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/usbip/stub_rx.c | 51
-rw-r--r--  drivers/usb/usbip/stub_tx.c | 7
-rw-r--r--  drivers/usb/usbip/usbip_common.h | 1
-rw-r--r--  drivers/usb/usbip/vhci_sysfs.c | 25
-rw-r--r--  drivers/virtio/virtio_mmio.c | 43
-rw-r--r--  drivers/xen/Kconfig | 2
-rw-r--r--  fs/autofs4/waitq.c | 1
-rw-r--r--  fs/btrfs/ctree.c | 18
-rw-r--r--  fs/btrfs/disk-io.c | 12
-rw-r--r--  fs/btrfs/extent-tree.c | 1
-rw-r--r--  fs/btrfs/inode.c | 2
-rw-r--r--  fs/btrfs/ioctl.c | 2
-rw-r--r--  fs/ceph/mds_client.c | 42
-rw-r--r--  fs/cifs/smb2ops.c | 3
-rw-r--r--  fs/cifs/smb2pdu.c | 30
-rw-r--r--  fs/dax.c | 3
-rw-r--r--  fs/exec.c | 7
-rw-r--r--  fs/hpfs/dir.c | 1
-rw-r--r--  fs/hpfs/dnode.c | 2
-rw-r--r--  fs/hpfs/super.c | 1
-rw-r--r--  fs/nfs/client.c | 11
-rw-r--r--  fs/nfs/nfs4client.c | 17
-rw-r--r--  fs/nfs/write.c | 2
-rw-r--r--  fs/nfsd/auth.c | 3
-rw-r--r--  fs/overlayfs/Kconfig | 10
-rw-r--r--  fs/overlayfs/dir.c | 3
-rw-r--r--  fs/overlayfs/namei.c | 18
-rw-r--r--  fs/overlayfs/overlayfs.h | 2
-rw-r--r--  fs/overlayfs/ovl_entry.h | 2
-rw-r--r--  fs/overlayfs/readdir.c | 7
-rw-r--r--  fs/overlayfs/super.c | 87
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c | 10
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.h | 1
-rw-r--r--  fs/xfs/scrub/scrub.c | 1
-rw-r--r--  fs/xfs/scrub/trace.c | 1
-rw-r--r--  fs/xfs/xfs_inode.c | 33
-rw-r--r--  fs/xfs/xfs_inode.h | 2
-rw-r--r--  fs/xfs/xfs_iomap.c | 2
-rw-r--r--  fs/xfs/xfs_qm.c | 4
-rw-r--r--  fs/xfs/xfs_reflink.c | 2
-rw-r--r--  fs/xfs/xfs_symlink.c | 15
-rw-r--r--  fs/xfs/xfs_trace.c | 1
-rw-r--r--  include/crypto/internal/hash.h | 8
-rw-r--r--  include/drm/drm_connector.h | 10
-rw-r--r--  include/drm/drm_edid.h | 2
-rw-r--r--  include/drm/drm_mode_config.h | 18
-rw-r--r--  include/kvm/arm_arch_timer.h | 3
-rw-r--r--  include/linux/compiler.h | 47
-rw-r--r--  include/linux/completion.h | 45
-rw-r--r--  include/linux/cred.h | 1
-rw-r--r--  include/linux/idr.h | 1
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/lockdep.h | 125
-rw-r--r--  include/linux/oom.h | 9
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/pm.h | 1
-rw-r--r--  include/linux/ptr_ring.h | 9
-rw-r--r--  include/linux/rbtree.h | 2
-rw-r--r--  include/linux/rwlock_types.h | 3
-rw-r--r--  include/linux/sched.h | 17
-rw-r--r--  include/linux/sched/coredump.h | 1
-rw-r--r--  include/linux/spinlock.h | 5
-rw-r--r--  include/linux/spinlock_types.h | 3
-rw-r--r--  include/linux/string.h | 5
-rw-r--r--  include/linux/trace.h | 2
-rw-r--r--  include/net/gue.h | 18
-rw-r--r--  include/net/ip.h | 1
-rw-r--r--  include/net/sch_generic.h | 1
-rw-r--r--  include/trace/events/preemptirq.h | 11
-rw-r--r--  include/uapi/linux/kvm.h | 4
-rw-r--r--  include/uapi/linux/pkt_sched.h | 1
-rw-r--r--  include/uapi/linux/rtnetlink.h | 1
-rw-r--r--  init/main.c | 7
-rw-r--r--  kernel/bpf/hashtab.c | 2
-rw-r--r--  kernel/cgroup/debug.c | 4
-rw-r--r--  kernel/cgroup/stat.c | 8
-rw-r--r--  kernel/exit.c | 8
-rw-r--r--  kernel/futex.c | 4
-rw-r--r--  kernel/groups.c | 5
-rw-r--r--  kernel/kcov.c | 4
-rw-r--r--  kernel/locking/lockdep.c | 652
-rw-r--r--  kernel/locking/spinlock.c | 13
-rw-r--r--  kernel/sched/core.c | 22
-rw-r--r--  kernel/sched/rt.c | 8
-rw-r--r--  kernel/trace/Kconfig | 1
-rw-r--r--  kernel/trace/bpf_trace.c | 19
-rw-r--r--  kernel/trace/ring_buffer.c | 6
-rw-r--r--  kernel/trace/trace.c | 41
-rw-r--r--  kernel/trace/trace_stack.c | 4
-rw-r--r--  kernel/uid16.c | 1
-rw-r--r--  kernel/workqueue.c | 33
-rw-r--r--  lib/Kconfig.debug | 33
-rw-r--r--  lib/asn1_decoder.c | 49
-rw-r--r--  lib/oid_registry.c | 16
-rw-r--r--  lib/rbtree.c | 10
-rw-r--r--  mm/early_ioremap.c | 2
-rw-r--r--  mm/frame_vector.c | 6
-rw-r--r--  mm/gup.c | 2
-rw-r--r--  mm/hmm.c | 8
-rw-r--r--  mm/huge_memory.c | 6
-rw-r--r--  mm/kmemleak.c | 2
-rw-r--r--  mm/memory.c | 11
-rw-r--r--  mm/mmap.c | 10
-rw-r--r--  mm/oom_kill.c | 4
-rw-r--r--  mm/page_alloc.c | 11
-rw-r--r--  mm/percpu.c | 4
-rw-r--r--  mm/slab.c | 23
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 4
-rw-r--r--  net/batman-adv/bat_v.c | 2
-rw-r--r--  net/batman-adv/fragmentation.c | 2
-rw-r--r--  net/batman-adv/tp_meter.c | 4
-rw-r--r--  net/core/netprio_cgroup.c | 1
-rw-r--r--  net/core/skbuff.c | 6
-rw-r--r--  net/dsa/slave.c | 1
-rw-r--r--  net/ipv4/devinet.c | 2
-rw-r--r--  net/ipv4/igmp.c | 44
-rw-r--r--  net/ipv4/ip_gre.c | 2
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 1
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 3
-rw-r--r--  net/ipv4/raw.c | 15
-rw-r--r--  net/ipv4/tcp_input.c | 10
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_timer.c | 2
-rw-r--r--  net/ipv6/mcast.c | 25
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 1
-rw-r--r--  net/ipv6/netfilter/ip6t_MASQUERADE.c | 8
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/mac80211/ht.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_h323_asn1.c | 128
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 13
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 3
-rw-r--r--  net/netfilter/nf_tables_api.c | 7
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 10
-rw-r--r--  net/netfilter/nfnetlink_log.c | 5
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 5
-rw-r--r--  net/netfilter/nft_exthdr.c | 2
-rw-r--r--  net/netfilter/x_tables.c | 9
-rw-r--r--  net/netfilter/xt_bpf.c | 6
-rw-r--r--  net/netfilter/xt_osf.c | 7
-rw-r--r--  net/netlink/af_netlink.c | 3
-rw-r--r--  net/sched/act_meta_mark.c | 1
-rw-r--r--  net/sched/act_meta_skbtcindex.c | 1
-rw-r--r--  net/sched/cls_api.c | 5
-rw-r--r--  net/sched/cls_u32.c | 1
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_ingress.c | 15
-rw-r--r--  net/sched/sch_red.c | 31
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sunrpc/auth_gss/gss_rpc_xdr.c | 1
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 1
-rw-r--r--  net/sunrpc/svcauth_unix.c | 2
-rw-r--r--  net/sunrpc/xprt.c | 28
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c | 6
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 1
-rw-r--r--  net/tipc/socket.c | 2
-rw-r--r--  net/wireless/Makefile | 48
-rwxr-xr-x  scripts/checkpatch.pl | 22
-rwxr-xr-x  scripts/faddr2line | 8
-rw-r--r--  security/keys/key.c | 1
-rw-r--r--  security/keys/keyctl.c | 24
-rw-r--r--  security/keys/request_key.c | 48
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 1
-rw-r--r--  tools/include/linux/compiler.h | 21
-rw-r--r--  tools/include/linux/lockdep.h | 1
-rw-r--r--  tools/include/uapi/asm/bpf_perf_event.h | 7
-rw-r--r--  tools/include/uapi/linux/kvm.h | 4
-rw-r--r--  tools/objtool/arch/x86/lib/x86-opcode-map.txt | 15
-rw-r--r--  tools/perf/util/intel-pt-decoder/x86-opcode-map.txt | 13
-rw-r--r--  tools/perf/util/mmap.h | 2
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 13
-rw-r--r--  tools/usb/usbip/libsrc/vhci_driver.c | 10
-rw-r--r--  tools/virtio/ringtest/ptr_ring.c | 29
-rw-r--r--  tools/vm/slabinfo-gnuplot.sh | 2
-rw-r--r--  virt/kvm/arm/arch_timer.c | 11
-rw-r--r--  virt/kvm/arm/arm.c | 7
-rw-r--r--  virt/kvm/arm/hyp/timer-sr.c | 48
-rw-r--r--  virt/kvm/arm/hyp/vgic-v2-sr.c | 4
-rw-r--r--  virt/kvm/arm/vgic/vgic-irqfd.c | 3
-rw-r--r--  virt/kvm/arm/vgic/vgic-its.c | 4
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c | 2
-rw-r--r--  virt/kvm/arm/vgic/vgic-v4.c | 6
-rw-r--r--  virt/kvm/arm/vgic/vgic.c | 8
-rw-r--r--  virt/kvm/kvm_main.c | 8
446 files changed, 2942 insertions, 3463 deletions
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 304bf22bb83c..fc1c884fea10 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -75,3 +75,4 @@ stable kernels.
 | Qualcomm Tech. | Falkor v1       | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 | Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
+| Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 779211fbb69f..2cddab7efb20 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -898,6 +898,13 @@ controller implements weight and absolute bandwidth limit models for
 normal scheduling policy and absolute bandwidth allocation model for
 realtime scheduling policy.
 
+WARNING: cgroup2 doesn't yet support control of realtime processes and
+the cpu controller can only be enabled when all RT processes are in
+the root cgroup. Be aware that system management software may already
+have placed RT processes into nonroot cgroups during the system boot
+process, and these processes may need to be moved to the root cgroup
+before the cpu controller can be enabled.
+
 
 CPU Interface Files
 ~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/devicetree/bindings/usb/am33xx-usb.txt b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
index 7a33f22c815a..7a198a30408a 100644
--- a/Documentation/devicetree/bindings/usb/am33xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
@@ -95,6 +95,7 @@ usb: usb@47400000 {
 		reg = <0x47401300 0x100>;
 		reg-names = "phy";
 		ti,ctrl_mod = <&ctrl_mod>;
+		#phy-cells = <0>;
 	};
 
 	usb0: usb@47401000 {
@@ -141,6 +142,7 @@ usb: usb@47400000 {
 		reg = <0x47401b00 0x100>;
 		reg-names = "phy";
 		ti,ctrl_mod = <&ctrl_mod>;
+		#phy-cells = <0>;
 	};
 
 	usb1: usb@47401800 {
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 8caa60734647..e6a5f4912b6d 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -156,6 +156,40 @@ handle it in two different ways:
 root of the overlay. Finally the directory is moved to the new
 location.
 
+There are several ways to tune the "redirect_dir" feature.
+
+Kernel config options:
+
+- OVERLAY_FS_REDIRECT_DIR:
+    If this is enabled, then redirect_dir is turned on by default.
+- OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW:
+    If this is enabled, then redirects are always followed by default. Enabling
+    this results in a less secure configuration. Enable this option only when
+    worried about backward compatibility with kernels that have the redirect_dir
+    feature and follow redirects even if turned off.
+
+Module options (can also be changed through /sys/module/overlay/parameters/*):
+
+- "redirect_dir=BOOL":
+    See OVERLAY_FS_REDIRECT_DIR kernel config option above.
+- "redirect_always_follow=BOOL":
+    See OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW kernel config option above.
+- "redirect_max=NUM":
+    The maximum number of bytes in an absolute redirect (default is 256).
+
+Mount options:
+
+- "redirect_dir=on":
+    Redirects are enabled.
+- "redirect_dir=follow":
+    Redirects are not created, but followed.
+- "redirect_dir=off":
+    Redirects are not created and only followed if "redirect_always_follow"
+    feature is enabled in the kernel/module config.
+- "redirect_dir=nofollow":
+    Redirects are not created and not followed (equivalent to "redirect_dir=off"
+    if "redirect_always_follow" feature is not enabled).
+
 Non-directories
 ---------------
 
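To illustrate the mount options documented above, here is a small C sketch that
sets up an overlay following existing redirects without creating new ones (an
editorial illustration, not part of the patch; the /lower, /upper, /work and
/merged paths are hypothetical and must already exist):

   #include <stdio.h>
   #include <sys/mount.h>

   int main(void)
   {
           /* "redirect_dir=follow": follow redirects, never create them. */
           const char *opts =
                   "lowerdir=/lower,upperdir=/upper,workdir=/work,"
                   "redirect_dir=follow";

           if (mount("overlay", "/merged", "overlay", 0, opts) != 0) {
                   perror("mount");
                   return 1;
           }
           return 0;
   }
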
diff --git a/Documentation/locking/crossrelease.txt b/Documentation/locking/crossrelease.txt
deleted file mode 100644
index bdf1423d5f99..000000000000
--- a/Documentation/locking/crossrelease.txt
+++ /dev/null
@@ -1,874 +0,0 @@
Crossrelease
============

Started by Byungchul Park <byungchul.park@lge.com>

Contents:

 (*) Background

     - What causes deadlock
     - How lockdep works

 (*) Limitation

     - Limit lockdep
     - Pros from the limitation
     - Cons from the limitation
     - Relax the limitation

 (*) Crossrelease

     - Introduce crossrelease
     - Introduce commit

 (*) Implementation

     - Data structures
     - How crossrelease works

 (*) Optimizations

     - Avoid duplication
     - Lockless for hot paths

 (*) APPENDIX A: What lockdep does to work aggressively

 (*) APPENDIX B: How to avoid adding false dependencies


==========
Background
==========

What causes deadlock
--------------------

A deadlock occurs when a context is waiting for an event to happen,
which is impossible because another (or the same) context that can
trigger the event is also waiting for another (or the same) event to
happen, which is likewise impossible for the same reason.

For example:

   A context going to trigger event C is waiting for event A to happen.
   A context going to trigger event A is waiting for event B to happen.
   A context going to trigger event B is waiting for event C to happen.

A deadlock occurs when these three wait operations run at the same time,
because event C cannot be triggered if event A does not happen, which in
turn cannot be triggered if event B does not happen, which in turn
cannot be triggered if event C does not happen. After all, no event can
be triggered since none of them ever meets its condition to wake up.

A dependency might exist between two waiters and a deadlock might happen
due to an incorrect relationship between dependencies. Thus, we must
define what a dependency is first. A dependency exists between them if:

   1. There are two waiters waiting for each event at a given time.
   2. The only way to wake up each waiter is to trigger its event.
   3. Whether one can be woken up depends on whether the other can.

Each wait in the example creates its dependency like:

   Event C depends on event A.
   Event A depends on event B.
   Event B depends on event C.

   NOTE: Precisely speaking, a dependency is one between whether a
   waiter for an event can be woken up and whether another waiter for
   another event can be woken up. However from now on, we will describe
   a dependency as if it's one between an event and another event for
   simplicity.

And they form circular dependencies like:

    -> C -> A -> B -
   /                \
   \                /
    ----------------

   where 'A -> B' means that event A depends on event B.

Such circular dependencies lead to a deadlock since no waiter can meet
its condition to wake up as described.

CONCLUSION

Circular dependencies cause a deadlock.

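As a concrete two-lock instance of such a circle, consider the following
kernel-style C sketch (an editorial illustration, not part of the removed
document; lock_a, lock_b and the two functions are hypothetical):

   #include <linux/mutex.h>

   static DEFINE_MUTEX(lock_a);
   static DEFINE_MUTEX(lock_b);

   /* Context 1: creates dependency 'A -> B'. */
   static void context1(void)
   {
           mutex_lock(&lock_a);
           mutex_lock(&lock_b);    /* waits forever if context2 holds B */
           mutex_unlock(&lock_b);
           mutex_unlock(&lock_a);
   }

   /* Context 2: creates dependency 'B -> A', closing the circle. */
   static void context2(void)
   {
           mutex_lock(&lock_b);
           mutex_lock(&lock_a);    /* waits forever if context1 holds A */
           mutex_unlock(&lock_a);
           mutex_unlock(&lock_b);
   }
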
How lockdep works
-----------------

Lockdep tries to detect a deadlock by checking dependencies created by
lock operations, acquire and release. Waiting for a lock corresponds to
waiting for an event, and releasing a lock corresponds to triggering an
event in the previous section.

In short, lockdep does:

   1. Detect a new dependency.
   2. Add the dependency into a global graph.
   3. Check if that makes dependencies circular.
   4. Report a deadlock or its possibility if so.

For example, consider a graph built by lockdep that looks like:

   A -> B -
           \
            -> E
           /
   C -> D -

   where A, B,..., E are different lock classes.

Lockdep will add a dependency into the graph on detection of a new
dependency. For example, it will add a dependency 'E -> C' when a new
dependency between lock E and lock C is detected. Then the graph will be:

   A -> B -
           \
            -> E -
           /      \
    -> C -> D -    \
   /                \
   \                /
    ----------------

   where A, B,..., E are different lock classes.

This graph contains a subgraph which demonstrates circular dependencies:

            -> E -
           /      \
    -> C -> D -    \
   /                \
   \                /
    ----------------

   where C, D and E are different lock classes.

This is the condition under which a deadlock might occur. Lockdep
reports it on detection after adding a new dependency. This is how
lockdep works.

CONCLUSION

Lockdep detects a deadlock or its possibility by checking if circular
dependencies were created after adding each new dependency.

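Step 3 above is a plain cycle check on a directed graph. The following is a
minimal userspace C sketch of that check (an editorial illustration; lockdep's
real implementation in kernel/locking/lockdep.c walks lock_list entries with a
breadth-first search and is far more involved):

   #include <stdbool.h>

   #define MAX_CLASSES 128

   /* Simplified dependency graph: edge[a][b] means 'class a -> class b'. */
   static bool edge[MAX_CLASSES][MAX_CLASSES];
   static bool visiting[MAX_CLASSES];

   /* Depth-first search: is a node on the current DFS stack reachable
    * again? If so, the dependencies are circular. */
   static bool creates_cycle(int from, int nr_classes)
   {
           if (visiting[from])
                   return true;

           visiting[from] = true;
           for (int to = 0; to < nr_classes; to++) {
                   if (edge[from][to] && creates_cycle(to, nr_classes))
                           return true;
           }
           visiting[from] = false;
           return false;
   }

   /* Called for each newly detected dependency 'a -> b' (steps 1 and 2):
    * insert the edge, then report whether it closed a cycle (steps 3, 4). */
   static bool add_dependency(int a, int b, int nr_classes)
   {
           edge[a][b] = true;
           return creates_cycle(a, nr_classes); /* true: possible deadlock */
   }
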

==========
Limitation
==========

Limit lockdep
-------------

If lockdep is limited to working only on typical locks, e.g. spin locks
and mutexes, which are released within the acquire context, the
implementation becomes simple but its capacity for detection becomes
limited. Let's check the pros and cons in the next sections.


Pros from the limitation
------------------------

Given the limitation, when acquiring a lock, the locks already in
held_locks cannot be released while the context has to wait for the new
lock, which means all waiters for the locks in held_locks are stuck.
This is exactly the situation that creates dependencies between each
lock in held_locks and the lock to acquire.

For example:

   CONTEXT X
   ---------
   acquire A
   acquire B /* Add a dependency 'A -> B' */
   release B
   release A

   where A and B are different lock classes.

When acquiring lock A, the held_locks of CONTEXT X is empty, thus no
dependency is added. But when acquiring lock B, lockdep detects and adds
a new dependency 'A -> B' between lock A in held_locks and lock B.
Dependencies can simply be added whenever each lock is acquired.

And the data required by lockdep exists in a local structure, held_locks
embedded in task_struct. Since access to the data is forced to happen
within the owner context, lockdep can avoid races on the local data
without explicit locks.

Lastly, lockdep only needs to keep the locks currently being held to
build a dependency graph. However, after relaxing the limitation, it
would need to keep even locks already released, because the decision
whether they created dependencies might be deferred for a long time.

To sum up, we can expect several advantages from the limitation:

   1. Lockdep can easily identify a dependency when acquiring a lock.
   2. Races are avoidable while accessing the local held_locks data.
   3. Lockdep only needs to keep locks currently being held.

CONCLUSION

Given the limitation, the implementation becomes simple and efficient.


Cons from the limitation
------------------------

Given the limitation, lockdep is applicable only to typical locks. For
example, page locks for page access or completions for synchronization
cannot work with lockdep.

Can we detect the deadlocks below, under the limitation?

Example 1:

   CONTEXT X          CONTEXT Y          CONTEXT Z
   ---------          ---------          ----------
                      mutex_lock A
   lock_page B
                      lock_page B
                                         mutex_lock A /* DEADLOCK */
                                         unlock_page B held by X
                      unlock_page B
                      mutex_unlock A
                                         mutex_unlock A

   where A and B are different lock classes.

No, we cannot.

Example 2:

   CONTEXT X               CONTEXT Y
   ---------               ---------
   mutex_lock A
                           mutex_lock A
   wait_for_complete B /* DEADLOCK */
                           complete B
                           mutex_unlock A
   mutex_unlock A

   where A is a lock class and B is a completion variable.

No, we cannot.

CONCLUSION

Given the limitation, lockdep cannot detect a deadlock or its
possibility caused by page locks or completions.

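Example 2 maps directly onto the kernel's completion API. A C sketch of that
scenario (an editorial illustration; the two functions and variable names are
hypothetical, while the locking and completion calls are the real kernel API):

   #include <linux/mutex.h>
   #include <linux/completion.h>

   static DEFINE_MUTEX(a);
   static DECLARE_COMPLETION(b);

   /* CONTEXT X: holds A while waiting for B to be completed. */
   static void context_x(void)
   {
           mutex_lock(&a);
           wait_for_completion(&b); /* DEADLOCK: Y can never complete() */
           mutex_unlock(&a);
   }

   /* CONTEXT Y: must take A before it can complete B. */
   static void context_y(void)
   {
           mutex_lock(&a);          /* blocks forever: X holds A */
           complete(&b);
           mutex_unlock(&a);
   }
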

Relax the limitation
--------------------

Under the limitation, the things that create dependencies are limited to
typical locks. However, synchronization primitives like page locks and
completions, which are allowed to be released in any context, also
create dependencies and can cause a deadlock. So lockdep should track
these locks to do a better job. We have to relax the limitation for
these locks to work with lockdep.

Detecting dependencies is very important for lockdep to work because
adding a dependency means adding an opportunity to check whether it
causes a deadlock. The more dependencies lockdep adds, the more
thoroughly it works. Thus lockdep has to do its best to detect and add
as many true dependencies into the graph as possible.

For example, considering only typical locks, lockdep builds a graph like:

   A -> B -
           \
            -> E
           /
   C -> D -

   where A, B,..., E are different lock classes.

On the other hand, under the relaxation, additional dependencies might
be created and added. Assuming additional 'FX -> C' and 'E -> GX' are
added thanks to the relaxation, the graph will be:

   A -> B -
           \
            -> E -> GX
           /
   FX -> C -> D -

   where A, B,..., E, FX and GX are different lock classes, and a suffix
   'X' is added on non-typical locks.

The latter graph gives us more chances to check circular dependencies
than the former. However, it might suffer performance degradation since
relaxing the limitation, with which the design and implementation of
lockdep can be efficient, might inevitably introduce inefficiency. So
lockdep should provide two options, strong detection and efficient
detection.

Choosing efficient detection:

   Lockdep works with only locks restricted to be released within the
   acquire context. However, lockdep works efficiently.

Choosing strong detection:

   Lockdep works with all synchronization primitives. However, lockdep
   suffers performance degradation.

CONCLUSION

Relaxing the limitation, lockdep can add additional dependencies, giving
additional opportunities to check circular dependencies.


============
Crossrelease
============

Introduce crossrelease
----------------------

In order to allow lockdep to handle additional dependencies created by
what might be released in any context, namely a 'crosslock', we have to
be able to identify the dependencies created by crosslocks. The proposed
'crossrelease' feature provides a way to do that.

The crossrelease feature has to:

   1. Identify dependencies created by crosslocks.
   2. Add the dependencies into a dependency graph.

That's all. Once a meaningful dependency is added into the graph, lockdep
works with the graph as it did before. The most important thing the
crossrelease feature has to do is to correctly identify and add true
dependencies into the global graph.

A dependency e.g. 'A -> B' can be identified only in A's release context
because the decision required to identify the dependency can be made
only in the release context. That is, the decision whether A can be
released so that a waiter for A can be woken up. It cannot be made
anywhere other than A's release context.

This is not a problem for typical locks, because each acquire context is
the same as its release context, thus lockdep can decide whether a lock
can be released in the acquire context. However for crosslocks, lockdep
cannot make the decision in the acquire context but has to wait until
the release context is identified.

Therefore, deadlocks caused by crosslocks cannot be detected just when
they happen, because they cannot be identified until the crosslocks are
released. However, deadlock possibilities can be detected, and that is
very worthwhile. See the 'APPENDIX A' section to check why.

CONCLUSION

Using the crossrelease feature, lockdep can work with what might be
released in any context, namely a crosslock.


Introduce commit
----------------

Since crossrelease defers the work of adding true dependencies of
crosslocks until they are actually released, crossrelease has to queue
all acquisitions which might create dependencies with the crosslocks.
Then it identifies dependencies using the queued data in batches at a
proper time. We call this 'commit'.

There are four types of dependencies:

1. TT type: 'typical lock A -> typical lock B'

   Just when acquiring B, lockdep can see it's in A's release
   context. So the dependency between A and B can be identified
   immediately. Commit is unnecessary.

2. TC type: 'typical lock A -> crosslock BX'

   Just when acquiring BX, lockdep can see it's in A's release
   context. So the dependency between A and BX can be identified
   immediately. Commit is unnecessary, too.

3. CT type: 'crosslock AX -> typical lock B'

   When acquiring B, lockdep cannot identify the dependency because
   there's no way to know if it's in AX's release context. It has
   to wait until the decision can be made. Commit is necessary.

4. CC type: 'crosslock AX -> crosslock BX'

   When acquiring BX, lockdep cannot identify the dependency because
   there's no way to know if it's in AX's release context. It has
   to wait until the decision can be made. Commit is necessary.
   But, handling the CC type is not implemented yet. It's future work.

Lockdep can work without commit for typical locks, but the commit step
is necessary once crosslocks are involved. Introducing commit, lockdep
performs three steps. What lockdep does in each step is:

1. Acquisition: For typical locks, lockdep does what it originally did
   and queues the lock so that CT type dependencies can be checked using
   it at the commit step. For crosslocks, it saves data which will be
   used at the commit step and increases a reference count for it.

2. Commit: No action is required for typical locks. For crosslocks,
   lockdep adds CT type dependencies using the data saved at the
   acquisition step.

3. Release: No changes are required for typical locks. When a crosslock
   is released, it decreases a reference count for it.

CONCLUSION

Crossrelease introduces a commit step to handle dependencies of
crosslocks in batches at a proper time.

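A rough, self-contained C sketch of these three steps (an editorial
illustration; every name below is a hypothetical stand-in for the real lockdep
hooks, and add_edge() represents inserting a dependency into the global graph
and running the cycle check shown earlier):

   extern void add_edge(int from_class, int to_class);

   struct hist_lock  { int class_id; };
   struct cross_lock { int class_id; int ref; unsigned int begin; };

   #define HIST_NR 64
   static struct hist_lock hist[HIST_NR]; /* per-task history ring */
   static unsigned int hist_idx;          /* next free slot */

   /* 1. Acquisition: every typical lock is queued into the history. */
   static void acquire_typical(int class_id)
   {
           hist[hist_idx++ % HIST_NR].class_id = class_id;
   }

   /* 1. Acquisition of a crosslock: remember where the history stood
    *    and pin the crosslock with a reference. */
   static void acquire_cross(struct cross_lock *xl, int class_id)
   {
           xl->class_id = class_id;
           xl->begin = hist_idx;
           xl->ref++;
   }

   /* 2. Commit, run in the release context: everything queued since the
    *    crosslock was acquired becomes a CT dependency 'AX -> B'. */
   static void commit_cross(struct cross_lock *xl)
   {
           for (unsigned int i = xl->begin; i != hist_idx; i++)
                   add_edge(xl->class_id, hist[i % HIST_NR].class_id);
   }

   /* 3. Release: drop the reference taken at acquisition. */
   static void release_cross(struct cross_lock *xl)
   {
           xl->ref--;
   }
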

==============
Implementation
==============

Data structures
---------------

Crossrelease introduces two main data structures.

1. hist_lock

   This is an array embedded in task_struct, for keeping lock history so
   that dependencies can be added using it at the commit step. Since
   it's local data, it can be accessed locklessly in the owner context.
   The array is filled at the acquisition step and consumed at the
   commit step. And it's managed in a circular manner.

2. cross_lock

   One exists per lockdep_map. It keeps the data of a crosslock and is
   used at the commit step.

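A simplified sketch of what these two structures amount to in C (an editorial
illustration; the field and type names are stand-ins, not the definitions that
lived in include/linux/lockdep.h behind the crossrelease config option):

   #define MAX_XHLOCKS_NR 64 /* illustrative ring size */

   /* One entry of per-task lock history, filled at acquisition. */
   struct hist_lock {
           unsigned long      acquire_ip; /* where it was taken */
           struct lockdep_map *instance;  /* which lock */
   };

   /* One per lockdep_map; crosslock state, consumed at commit. */
   struct cross_lock {
           int cross_gen_id; /* history position at acquisition */
           int nr_acquire;   /* reference count */
   };

   /* Embedded in task_struct: a circular history buffer, touched only
    * by the owning task, hence lockless. */
   struct task_lock_history {
           unsigned int     xhlock_idx; /* next slot (mod ring size) */
           struct hist_lock xhlocks[MAX_XHLOCKS_NR];
   };
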
How crossrelease works
----------------------

The key to how crossrelease works is to defer the necessary work to an
appropriate point in time and perform it all at once at the commit step.
Let's take a look with examples step by step, starting from how lockdep
works without crossrelease for typical locks.

   acquire A /* Push A onto held_locks */
   acquire B /* Push B onto held_locks and add 'A -> B' */
   acquire C /* Push C onto held_locks and add 'B -> C' */
   release C /* Pop C from held_locks */
   release B /* Pop B from held_locks */
   release A /* Pop A from held_locks */

   where A, B and C are different lock classes.

   NOTE: This document assumes that readers already understand how
   lockdep works without crossrelease, thus it omits details. But there's
   one thing to note. Lockdep pretends to pop a lock from held_locks
   when releasing it. But it's subtly different from the original pop
   operation because lockdep allows entries other than the top to be
   popped.

In this case, lockdep adds a 'the top of held_locks -> the lock to
acquire' dependency every time a lock is acquired.

After adding 'A -> B', the dependency graph will be:

   A -> B

   where A and B are different lock classes.

And after adding 'B -> C', the graph will be:

   A -> B -> C

   where A, B and C are different lock classes.

Let's perform the commit step even for typical locks to add
dependencies. Of course, the commit step is not necessary for them;
however, it would work well because this is a more general way.

   acquire A
   /*
    * Queue A into hist_locks
    *
    * In hist_locks: A
    * In graph: Empty
    */

   acquire B
   /*
    * Queue B into hist_locks
    *
    * In hist_locks: A, B
    * In graph: Empty
    */

   acquire C
   /*
    * Queue C into hist_locks
    *
    * In hist_locks: A, B, C
    * In graph: Empty
    */

   commit C
   /*
    * Add 'C -> ?'
    * Answer the following to decide '?'
    * What has been queued since acquire C: Nothing
    *
    * In hist_locks: A, B, C
    * In graph: Empty
    */

   release C

   commit B
   /*
    * Add 'B -> ?'
    * Answer the following to decide '?'
    * What has been queued since acquire B: C
    *
    * In hist_locks: A, B, C
    * In graph: 'B -> C'
    */

   release B

   commit A
   /*
    * Add 'A -> ?'
    * Answer the following to decide '?'
    * What has been queued since acquire A: B, C
    *
    * In hist_locks: A, B, C
    * In graph: 'B -> C', 'A -> B', 'A -> C'
    */

   release A

   where A, B and C are different lock classes.

In this case, dependencies are added at the commit step as described.

After commits for A, B and C, the graph will be:

   A -> B -> C

   where A, B and C are different lock classes.

   NOTE: A dependency 'A -> C' is optimized out.

We can see the former graph built without the commit step is the same as
the latter graph built using commit steps. Of course the former way
finishes building the graph earlier, which means we can detect a
deadlock or its possibility sooner. So the former way would be preferred
when possible. But we cannot avoid using the latter way for crosslocks.

Let's look at how commit steps work for crosslocks. In this case, the
commit step is performed only on the crosslock BX, and it is assumed
that the BX release context is different from the BX acquire context.

   BX RELEASE CONTEXT              BX ACQUIRE CONTEXT
   ------------------              ------------------
                                   acquire A
                                   /*
                                    * Push A onto held_locks
                                    * Queue A into hist_locks
                                    *
                                    * In held_locks: A
                                    * In hist_locks: A
                                    * In graph: Empty
                                    */

                                   acquire BX
                                   /*
                                    * Add 'the top of held_locks -> BX'
                                    *
                                    * In held_locks: A
                                    * In hist_locks: A
                                    * In graph: 'A -> BX'
                                    */

   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   It must be guaranteed that the following operations are seen after
   acquiring BX globally. It can be done by things like barrier.
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   acquire C
   /*
    * Push C onto held_locks
    * Queue C into hist_locks
    *
    * In held_locks: C
    * In hist_locks: C
    * In graph: 'A -> BX'
    */

   release C
   /*
    * Pop C from held_locks
    *
    * In held_locks: Empty
    * In hist_locks: C
    * In graph: 'A -> BX'
    */
                                   acquire D
                                   /*
                                    * Push D onto held_locks
                                    * Queue D into hist_locks
                                    * Add 'the top of held_locks -> D'
                                    *
                                    * In held_locks: A, D
                                    * In hist_locks: A, D
                                    * In graph: 'A -> BX', 'A -> D'
                                    */
   acquire E
   /*
    * Push E onto held_locks
    * Queue E into hist_locks
    *
    * In held_locks: E
    * In hist_locks: C, E
    * In graph: 'A -> BX', 'A -> D'
    */

   release E
   /*
    * Pop E from held_locks
    *
    * In held_locks: Empty
    * In hist_locks: D, E
    * In graph: 'A -> BX', 'A -> D'
    */
                                   release D
                                   /*
                                    * Pop D from held_locks
                                    *
                                    * In held_locks: A
                                    * In hist_locks: A, D
                                    * In graph: 'A -> BX', 'A -> D'
                                    */
   commit BX
   /*
    * Add 'BX -> ?'
    * What has been queued since acquire BX: C, E
    *
    * In held_locks: Empty
    * In hist_locks: D, E
    * In graph: 'A -> BX', 'A -> D',
    *           'BX -> C', 'BX -> E'
    */

   release BX
   /*
    * In held_locks: Empty
    * In hist_locks: D, E
    * In graph: 'A -> BX', 'A -> D',
    *           'BX -> C', 'BX -> E'
    */
                                   release A
                                   /*
                                    * Pop A from held_locks
                                    *
                                    * In held_locks: Empty
                                    * In hist_locks: A, D
                                    * In graph: 'A -> BX', 'A -> D',
                                    *           'BX -> C', 'BX -> E'
                                    */

   where A, BX, C,..., E are different lock classes, and a suffix 'X' is
   added on crosslocks.

Crossrelease considers all acquisitions after acquiring BX as candidates
which might create dependencies with BX. True dependencies will be
determined when identifying the release context of BX. Meanwhile, all
typical locks are queued so that they can be used at the commit step.
And then two dependencies 'BX -> C' and 'BX -> E' are added at the
commit step when identifying the release context.

The final graph will be, with crossrelease:

            -> C
           /
       -> BX -
      /       \
   A -         -> E
      \
       -> D

   where A, BX, C,..., E are different lock classes, and a suffix 'X' is
   added on crosslocks.

However, the final graph will be, without crossrelease:

   A -> D

   where A and D are different lock classes.

The former graph has three more dependencies, 'A -> BX', 'BX -> C' and
'BX -> E', giving additional opportunities to check if they cause
deadlocks. This way lockdep can detect a deadlock or its possibility
caused by crosslocks.

CONCLUSION

We checked how crossrelease works with several examples.


=============
Optimizations
=============

Avoid duplication
-----------------

The crossrelease feature uses a cache like the one lockdep already uses
for dependency chains, but this time for caching CT type dependencies.
Once a dependency is cached, the same one will never be added again.


Lockless for hot paths
----------------------

To keep all locks for later use at the commit step, crossrelease adopts
a local array embedded in task_struct, which makes access to the data
lockless by forcing it to happen only within the owner context. It's
like how lockdep handles held_locks. A lockless implementation is
important since typical locks are very frequently acquired and released.

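The owner-only history buffer needs no locks or atomics at all, since only the
owning task ever touches it. A minimal C sketch of the idea (an editorial
illustration; the names are hypothetical):

   #define RING_SIZE 64 /* illustrative; a power of two keeps the index
                         * arithmetic correct across unsigned wraparound */

   struct hist_entry {
           void          *lock;
           unsigned long ip;
   };

   /* Per-task history ring. No locking needed: the buffer is embedded
    * in task_struct and only the owner task reads or writes it, so
    * there is no concurrent access by construction. */
   struct hist_ring {
           unsigned int      idx; /* monotonically increasing */
           struct hist_entry ent[RING_SIZE];
   };

   static void hist_push(struct hist_ring *r, void *lock, unsigned long ip)
   {
           struct hist_entry *e = &r->ent[r->idx++ % RING_SIZE];

           e->lock = lock; /* the oldest entry is silently overwritten */
           e->ip = ip;
   }
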

==================================================
APPENDIX A: What lockdep does to work aggressively
==================================================

A deadlock actually occurs when all wait operations creating circular
dependencies run at the same time. Even when they don't, a potential
deadlock exists if the problematic dependencies exist. Thus it's
meaningful to detect not only an actual deadlock but also its potential
possibility. The latter is the more valuable. When a deadlock actually
occurs, we can identify what happens in the system by some means or
other even without lockdep. However, there's no way to detect the
possibility without lockdep unless the whole code is parsed in one's
head. That's terrible. Lockdep does both, and crossrelease only focuses
on the latter.

Whether or not a deadlock actually occurs depends on several factors.
For example, what order contexts are switched in is a factor. Assuming
circular dependencies exist, a deadlock would occur when contexts are
switched so that all wait operations creating the dependencies run
simultaneously. Thus to detect a deadlock possibility even in the case
that it has not occurred yet, lockdep should consider all possible
combinations of dependencies, trying to:

1. Use a global dependency graph.

   Lockdep combines all dependencies into one global graph and uses them,
   regardless of which context generates them or what order contexts are
   switched in. Only aggregated dependencies are considered, so they are
   prone to be circular if a problem exists.

2. Check dependencies between classes instead of instances.

   What actually causes a deadlock are instances of locks. However,
   lockdep checks dependencies between classes instead of instances.
   This way lockdep can detect a deadlock which has not happened yet but
   might happen in the future with other instances of the same class.

3. Assume all acquisitions lead to waiting.

   Although a lock might be acquired without waiting, and waiting is
   what actually creates a dependency, lockdep assumes all acquisitions
   lead to waiting, since that might happen at some time or another.

CONCLUSION

Lockdep detects not only an actual deadlock but also its possibility,
and the latter is more valuable.


==================================================
APPENDIX B: How to avoid adding false dependencies
==================================================

Remember what a dependency is. A dependency exists if:

   1. There are two waiters waiting for each event at a given time.
   2. The only way to wake up each waiter is to trigger its event.
   3. Whether one can be woken up depends on whether the other can.

For example:

   acquire A
   acquire B /* A dependency 'A -> B' exists */
   release B
   release A

   where A and B are different lock classes.

A dependency 'A -> B' exists since:

   1. A waiter for A and a waiter for B might exist when acquiring B.
   2. The only way to wake up each is to release what it waits for.
   3. Whether the waiter for A can be woken up depends on whether the
      other can. IOW, TASK X cannot release A if it fails to acquire B.

For another example:

   TASK X                  TASK Y
   ------                  ------
                           acquire AX
   acquire B /* A dependency 'AX -> B' exists */
   release B
   release AX held by Y

   where AX and B are different lock classes, and a suffix 'X' is added
   on crosslocks.

Even in this case involving crosslocks, the same rule can be applied. A
dependency 'AX -> B' exists since:

   1. A waiter for AX and a waiter for B might exist when acquiring B.
   2. The only way to wake up each is to release what it waits for.
   3. Whether the waiter for AX can be woken up depends on whether the
      other can. IOW, TASK X cannot release AX if it fails to acquire B.

Let's take a look at a more complicated example:

   TASK X                  TASK Y
   ------                  ------
   acquire B
   release B
   fork Y
                           acquire AX
   acquire C /* A dependency 'AX -> C' exists */
   release C
   release AX held by Y

   where AX, B and C are different lock classes, and a suffix 'X' is
   added on crosslocks.

Does a dependency 'AX -> B' exist? Nope.

Two waiters are essential to create a dependency. However, waiters for
AX and B to create 'AX -> B' cannot exist at the same time in this
example. Thus the dependency 'AX -> B' cannot be created.

It would be ideal if the full set of true dependencies could be
considered. But we can ensure nothing but what actually happened.
Relying on what actually happens at runtime, we can anyway add only true
ones, though they might be a subset of all the true ones. It's similar
to how lockdep works for typical locks. There might be more true
dependencies than what lockdep has detected at runtime. Lockdep has no
choice but to rely on what actually happens. Crossrelease also relies on
it.

CONCLUSION

Relying on what actually happens, lockdep can avoid adding false
dependencies.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index f670e4b9e7f3..57d3ee9e4bde 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2901,14 +2901,19 @@ userspace buffer and its length:
 
 struct kvm_s390_irq_state {
 	__u64 buf;
-	__u32 flags;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 reserved[4];
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 Userspace passes in the above struct and for each pending interrupt a
 struct kvm_s390_irq is copied to the provided buffer.
 
+The structure contains a flags and a reserved field for future extensions. As
+the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and
+reserved, these fields can not be used in the future without breaking
+compatibility.
+
 If -ENOBUFS is returned the buffer provided was too small and userspace
 may retry with a bigger buffer.
 
@@ -2932,10 +2937,14 @@ containing a struct kvm_s390_irq_state:
 
 struct kvm_s390_irq_state {
 	__u64 buf;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 pad;
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
+The restrictions for flags and reserved apply as well.
+(see KVM_S390_GET_IRQ_STATE)
+
 The userspace memory referenced by buf contains a struct kvm_s390_irq
 for each interrupt to be injected into the guest.
 If one of the interrupts could not be injected for some reason the
diff --git a/Documentation/vm/zswap.txt b/Documentation/vm/zswap.txt
index 89fff7d611cc..0b3a1148f9f0 100644
--- a/Documentation/vm/zswap.txt
+++ b/Documentation/vm/zswap.txt
@@ -98,5 +98,25 @@ request is made for a page in an old zpool, it is uncompressed using its
 original compressor. Once all pages are removed from an old zpool, the zpool
 and its compressor are freed.
 
+Some of the pages in zswap are same-value filled pages (i.e. contents of the
+page have same value or repetitive pattern). These pages include zero-filled
+pages and they are handled differently. During store operation, a page is
+checked if it is a same-value filled page before compressing it. If true, the
+compressed length of the page is set to zero and the pattern or same-filled
+value is stored.
+
+Same-value filled pages identification feature is enabled by default and can be
+disabled at boot time by setting the "same_filled_pages_enabled" attribute to 0,
+e.g. zswap.same_filled_pages_enabled=0. It can also be enabled and disabled at
+runtime using the sysfs "same_filled_pages_enabled" attribute, e.g.
+
+echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled
+
+When zswap same-filled page identification is disabled at runtime, it will stop
+checking for the same-value filled pages during store operation. However, the
+existing pages which are marked as same-value filled pages remain stored
+unchanged in zswap until they are either loaded or invalidated.
+
 A debugfs interface is provided for various statistic about pool size, number
-of pages stored, and various counters for the reasons pages are rejected.
+of pages stored, same-value filled pages and various counters for the reasons
+pages are rejected.
diff --git a/MAINTAINERS b/MAINTAINERS
index 856029896eec..810415d10b03 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2047,7 +2047,7 @@ F: arch/arm/boot/dts/uniphier*
 F:	arch/arm/include/asm/hardware/cache-uniphier.h
 F:	arch/arm/mach-uniphier/
 F:	arch/arm/mm/cache-uniphier.c
-F:	arch/arm64/boot/dts/socionext/
+F:	arch/arm64/boot/dts/socionext/uniphier*
 F:	drivers/bus/uniphier-system-bus.c
 F:	drivers/clk/uniphier/
 F:	drivers/gpio/gpio-uniphier.c
@@ -5435,7 +5435,7 @@ F: drivers/media/tuners/fc2580*
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 M:	Johannes Thumshirn <jth@kernel.org>
-L:	fcoe-devel@open-fcoe.org
+L:	linux-scsi@vger.kernel.org
 W:	www.Open-FCoE.org
 S:	Supported
 F:	drivers/scsi/libfc/
@@ -13133,6 +13133,7 @@ F: drivers/dma/dw/
 
 SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
 M:	Jie Deng <jiedeng@synopsys.com>
+M:	Jose Abreu <Jose.Abreu@synopsys.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/synopsys/
diff --git a/Makefile b/Makefile
index c988e46a53cd..3f4d157add54 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 1b81c4e75772..d37f95025807 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -630,6 +630,7 @@
 			reg-names = "phy";
 			status = "disabled";
 			ti,ctrl_mod = <&usb_ctrl_mod>;
+			#phy-cells = <0>;
 		};
 
 		usb0: usb@47401000 {
@@ -678,6 +679,7 @@
 			reg-names = "phy";
 			status = "disabled";
 			ti,ctrl_mod = <&usb_ctrl_mod>;
+			#phy-cells = <0>;
 		};
 
 		usb1: usb@47401800 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index e5b061469bf8..4714a59fd86d 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -927,7 +927,8 @@
 			reg = <0x48038000 0x2000>,
 			      <0x46000000 0x400000>;
 			reg-names = "mpu", "dat";
-			interrupts = <80>, <81>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
 			dmas = <&edma 8 2>,
@@ -941,7 +942,8 @@
 			reg = <0x4803C000 0x2000>,
 			      <0x46400000 0x400000>;
 			reg-names = "mpu", "dat";
-			interrupts = <82>, <83>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+				     <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
 			dmas = <&edma 10 2>,
diff --git a/arch/arm/boot/dts/am437x-cm-t43.dts b/arch/arm/boot/dts/am437x-cm-t43.dts
index 9e92d480576b..3b9a94c274a7 100644
--- a/arch/arm/boot/dts/am437x-cm-t43.dts
+++ b/arch/arm/boot/dts/am437x-cm-t43.dts
@@ -301,8 +301,8 @@
 	status = "okay";
 	pinctrl-names = "default";
 	pinctrl-0 = <&spi0_pins>;
-	dmas = <&edma 16
-	       &edma 17>;
+	dmas = <&edma 16 0
+	       &edma 17 0>;
 	dma-names = "tx0", "rx0";
 
 	flash: w25q64cvzpig@0 {
diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
index 25d2d720dc0e..678aa023335d 100644
--- a/arch/arm/boot/dts/armada-385-db-ap.dts
+++ b/arch/arm/boot/dts/armada-385-db-ap.dts
@@ -236,6 +236,7 @@
 	usb3_phy: usb3_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&reg_xhci0_vbus>;
+		#phy-cells = <0>;
 	};
 
 	reg_xhci0_vbus: xhci0-vbus {
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index e1f355ffc8f7..434dc9aaa5e4 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -66,6 +66,7 @@
 	usb3_1_phy: usb3_1-phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&usb3_1_vbus>;
+		#phy-cells = <0>;
 	};
 
 	usb3_1_vbus: usb3_1-vbus {
diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts
index 36ad571e76f3..0a3552ebda3b 100644
--- a/arch/arm/boot/dts/armada-385-synology-ds116.dts
+++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts
@@ -191,11 +191,13 @@
 	usb3_0_phy: usb3_0_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&reg_usb3_0_vbus>;
+		#phy-cells = <0>;
 	};
 
 	usb3_1_phy: usb3_1_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&reg_usb3_1_vbus>;
+		#phy-cells = <0>;
 	};
 
 	reg_usb3_0_vbus: usb3-vbus0 {
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index f503955dbd3b..51b4ee6df130 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -276,11 +276,13 @@
 	usb2_1_phy: usb2_1_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&reg_usb2_1_vbus>;
+		#phy-cells = <0>;
 	};
 
 	usb3_phy: usb3_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&reg_usb3_vbus>;
+		#phy-cells = <0>;
 	};
 
 	reg_usb3_vbus: usb3-vbus {
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 528b9e3bc1da..dcc55aa84583 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -85,7 +85,7 @@
 		timer@20200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x20200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
@@ -93,7 +93,7 @@
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x20600 0x20>;
 			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
-						  IRQ_TYPE_LEVEL_HIGH)>;
+						  IRQ_TYPE_EDGE_RISING)>;
 			clocks = <&periph_clk>;
 		};
 
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 013431e3d7c3..dcde93c85c2d 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -639,5 +639,6 @@
 
 	usbphy: phy {
 		compatible = "usb-nop-xceiv";
+		#phy-cells = <0>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index 3bc50849d013..b8bde13de90a 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -141,10 +141,6 @@
 	status = "okay";
 };
 
-&sata {
-	status = "okay";
-};
-
 &qspi {
 	bspi-sel = <0>;
 	flash: m25p80@0 {
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index d94d14b3c745..6a44b8021702 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -177,10 +177,6 @@
 	status = "okay";
 };
 
-&sata {
-	status = "okay";
-};
-
 &srab {
 	compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
 	status = "okay";
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 9708157f5daf..681f5487406e 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -75,6 +75,7 @@
 			reg = <0x47401300 0x100>;
 			reg-names = "phy";
 			ti,ctrl_mod = <&usb_ctrl_mod>;
+			#phy-cells = <0>;
 		};
 
 		usb0: usb@47401000 {
@@ -385,6 +386,7 @@
 				reg = <0x1b00 0x100>;
 				reg-names = "phy";
 				ti,ctrl_mod = <&usb_ctrl_mod>;
+				#phy-cells = <0>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index a5a050703320..85071ff8c639 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -433,15 +433,6 @@
 			clock-names = "ipg", "per";
 		};
 
-		srtc: srtc@53fa4000 {
-			compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
-			reg = <0x53fa4000 0x4000>;
-			interrupts = <24>;
-			interrupt-parent = <&tzic>;
-			clocks = <&clks IMX5_CLK_SRTC_GATE>;
-			clock-names = "ipg";
-		};
-
 		iomuxc: iomuxc@53fa8000 {
 			compatible = "fsl,imx53-iomuxc";
 			reg = <0x53fa8000 0x4000>;
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index 38faa90007d7..2fa5eb4bd402 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -72,7 +72,8 @@
 };
 
 &gpmc {
-	ranges = <1 0 0x08000000 0x1000000>;	/* CS1: 16MB for LAN9221 */
+	ranges = <0 0 0x30000000 0x1000000	/* CS0: 16MB for NAND */
+		  1 0 0x2c000000 0x1000000>;	/* CS1: 16MB for LAN9221 */
 
 	ethernet@gpmc {
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 26cce4d18405..29cb804d10cc 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -33,11 +33,12 @@
 	hsusb2_phy: hsusb2_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; /* gpio_4 */
+		#phy-cells = <0>;
 	};
 };
 
 &gpmc {
-	ranges = <0 0 0x00000000 0x1000000>;	/* CS0: 16MB for NAND */
+	ranges = <0 0 0x30000000 0x1000000>;	/* CS0: 16MB for NAND */
 
 	nand@0,0 {
 		compatible = "ti,omap2-nand";
@@ -121,7 +122,7 @@
 
 &mmc3 {
 	interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
-	pinctrl-0 = <&mmc3_pins>;
+	pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
 	pinctrl-names = "default";
 	vmmc-supply = <&wl12xx_vmmc>;
 	non-removable;
@@ -132,8 +133,8 @@
 	wlcore: wlcore@2 {
 		compatible = "ti,wl1273";
 		reg = <2>;
-		interrupt-parent = <&gpio5>;
-		interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
+		interrupt-parent = <&gpio1>;
+		interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; /* gpio 2 */
 		ref-clock-frequency = <26000000>;
 	};
 };
@@ -157,8 +158,6 @@
 			OMAP3_CORE1_IOPAD(0x2166, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat5.sdmmc3_dat1 */
 			OMAP3_CORE1_IOPAD(0x2168, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat2 */
 			OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT_PULLUP | MUX_MODE3) /* sdmmc2_dat6.sdmmc3_dat3 */
-			OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT_PULLUP | MUX_MODE4) /* mcbsp4_clkx.gpio_152 */
-			OMAP3_CORE1_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
 			OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.sdmmc3_cmd */
 			OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT_PULLUP | MUX_MODE3) /* mcspi1_cs2.sdmmc_clk */
 		>;
@@ -228,6 +227,12 @@
 			OMAP3_WKUP_IOPAD(0x2a0e, PIN_OUTPUT | MUX_MODE4) /* sys_boot2.gpio_4 */
 		>;
 	};
+	wl127x_gpio: pinmux_wl127x_gpio_pin {
+		pinctrl-single,pins = <
+			OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4) /* sys_boot0.gpio_2 */
+			OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
+		>;
+	};
 };
 
 &omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 4926133077b3..0d9faf1a51ea 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -85,15 +85,6 @@
 			reg = <0x7c00 0x200>;
 		};
 
-		gpio_intc: interrupt-controller@9880 {
-			compatible = "amlogic,meson-gpio-intc";
-			reg = <0xc1109880 0x10>;
-			interrupt-controller;
-			#interrupt-cells = <2>;
-			amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
-			status = "disabled";
-		};
-
 		hwrng: rng@8100 {
 			compatible = "amlogic,meson-rng";
 			reg = <0x8100 0x8>;
@@ -191,6 +182,15 @@
 			status = "disabled";
 		};
 
+		gpio_intc: interrupt-controller@9880 {
+			compatible = "amlogic,meson-gpio-intc";
+			reg = <0x9880 0x10>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
+			amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
+			status = "disabled";
+		};
+
 		wdt: watchdog@9900 {
 			compatible = "amlogic,meson6-wdt";
 			reg = <0x9900 0x8>;
diff --git a/arch/arm/boot/dts/nspire.dtsi b/arch/arm/boot/dts/nspire.dtsi
index ec2283b1a638..1a5ae4cd107f 100644
--- a/arch/arm/boot/dts/nspire.dtsi
+++ b/arch/arm/boot/dts/nspire.dtsi
@@ -56,6 +56,7 @@
 
 		usb_phy: usb_phy {
 			compatible = "usb-nop-xceiv";
+			#phy-cells = <0>;
 		};
 
 		vbus_reg: vbus_reg {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 683b96a8f73e..0349fcc9dc26 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -90,6 +90,7 @@
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
 		vcc-supply = <&hsusb2_power>;
+		#phy-cells = <0>;
 	};
 
 	tfp410: encoder0 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 4d2eaf843fa9..3ca8991a6c3e 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -64,6 +64,7 @@
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio5 19 GPIO_ACTIVE_LOW>; /* gpio_147 */
 		vcc-supply = <&hsusb2_power>;
+		#phy-cells = <0>;
 	};
 
 	sound {
diff --git a/arch/arm/boot/dts/omap3-cm-t3x.dtsi b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
index 31d5ebf38892..ab6003fe5a43 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x.dtsi
@@ -43,12 +43,14 @@
 	hsusb1_phy: hsusb1_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&hsusb1_power>;
+		#phy-cells = <0>;
 	};
 
 	/* HS USB Host PHY on PORT 2 */
 	hsusb2_phy: hsusb2_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&hsusb2_power>;
+		#phy-cells = <0>;
 	};
 
 	ads7846reg: ads7846-reg {
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index dbc3f030a16c..ee64191e41ca 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -29,6 +29,7 @@
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio1 21 GPIO_ACTIVE_LOW>; /* gpio_21 */
 		vcc-supply = <&hsusb2_power>;
+		#phy-cells = <0>;
 	};
 
 	leds {
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index 4504908c23fe..3dc56fb156b7 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -120,6 +120,7 @@
 	hsusb2_phy: hsusb2_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio6 14 GPIO_ACTIVE_LOW>;
+		#phy-cells = <0>;
 	};
 
 	tv0: connector {
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index 667f96245729..ecbec23af49f 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -58,6 +58,7 @@
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; /* gpio_24 */
 		vcc-supply = <&hsusb1_power>;
+		#phy-cells = <0>;
 	};
 
 	tfp410: encoder {
diff --git a/arch/arm/boot/dts/omap3-igep0030-common.dtsi b/arch/arm/boot/dts/omap3-igep0030-common.dtsi
index e94d9427450c..443f71707437 100644
--- a/arch/arm/boot/dts/omap3-igep0030-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0030-common.dtsi
@@ -37,6 +37,7 @@
 	hsusb2_phy: hsusb2_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>; /* gpio_54 */
+		#phy-cells = <0>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index 343a36d8031d..7ada1e93e166 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -51,6 +51,7 @@
 	hsusb1_phy: hsusb1_phy {
 		compatible = "usb-nop-xceiv";
 		vcc-supply = <&reg_vcc3>;
+		#phy-cells = <0>;
 	};
 };
 
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index f25e158e7163..ac141fcd1742 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -51,6 +51,7 @@
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>; /* gpio_183 */
 		vcc-supply = <&hsusb2_power>;
+		#phy-cells = <0>;
 	};
 
 	/* Regulator to trigger the nPoweron signal of the Wifi module */
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index 53e007abdc71..cd53dc6c0051 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -205,6 +205,7 @@
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; /* GPIO_16 */
 		vcc-supply = <&vaux2>;
+		#phy-cells = <0>;
 	};
 
 	/* HS USB Host VBUS supply
diff --git a/arch/arm/boot/dts/omap3-tao3530.dtsi b/arch/arm/boot/dts/omap3-tao3530.dtsi
index 9a601d15247b..6f5bd027b717 100644
--- a/arch/arm/boot/dts/omap3-tao3530.dtsi
+++ b/arch/arm/boot/dts/omap3-tao3530.dtsi
@@ -46,6 +46,7 @@
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio6 2 GPIO_ACTIVE_LOW>; /* gpio_162 */
 		vcc-supply = <&hsusb2_power>;
+		#phy-cells = <0>;
 	};
 
 	sound {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 90b5c7148feb..bb33935df7b0 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -715,6 +715,7 @@
 			compatible = "ti,ohci-omap3";
 			reg = <0x48064400 0x400>;
 			interrupts = <76>;
+			remote-wakeup-connected;
 		};
 
 		usbhsehci: ehci@48064800 {
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index 8b93d37310f2..24a463f8641f 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -73,6 +73,7 @@
 	/* HS USB Host PHY on PORT 1 */
 	hsusb1_phy: hsusb1_phy {
 		compatible = "usb-nop-xceiv";
+		#phy-cells = <0>;
 	};
 
 	/* LCD regulator from sw5 source */
diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
index 6e6810c258eb..eb123b24c8e3 100644
--- a/arch/arm/boot/dts/omap4-duovero.dtsi
+++ b/arch/arm/boot/dts/omap4-duovero.dtsi
@@ -43,6 +43,7 @@
 	hsusb1_phy: hsusb1_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; /* gpio_62 */
+		#phy-cells = <0>;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&hsusb1phy_pins>;
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 22c1eee9b07a..5501d1b4e6cd 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -89,6 +89,7 @@
 	hsusb1_phy: hsusb1_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio2 30 GPIO_ACTIVE_LOW>; /* gpio_62 */
+		#phy-cells = <0>;
 		vcc-supply = <&hsusb1_power>;
 		clocks = <&auxclk3_ck>;
 		clock-names = "main_clk";
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index 6500bfc8d130..10fce28ceb5b 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -44,6 +44,7 @@
 
 	reset-gpios = <&gpio6 17 GPIO_ACTIVE_LOW>; /* gpio 177 */
 	vcc-supply = <&vbat>;
+	#phy-cells = <0>;
 
 	clocks = <&auxclk3_ck>;
 	clock-names = "main_clk";
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 1dc5a76b3c71..cc1a07a3620f 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -398,7 +398,7 @@
 		elm: elm@48078000 {
 			compatible = "ti,am3352-elm";
 			reg = <0x48078000 0x2000>;
-			interrupts = <4>;
+			interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "elm";
 			status = "disabled";
 		};
@@ -1081,14 +1081,13 @@
 		usbhsohci: ohci@4a064800 {
 			compatible = "ti,ohci-omap3";
 			reg = <0x4a064800 0x400>;
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+			remote-wakeup-connected;
 		};
 
 		usbhsehci: ehci@4a064c00 {
 			compatible = "ti,ehci-omap";
 			reg = <0x4a064c00 0x400>;
-			interrupt-parent = <&gic>;
 			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 		};
 	};
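
Several hunks in this merge (the am4372 McASP nodes, the ELM node above, the bcm-nsp timers) replace bare interrupt numbers with full GIC specifiers. Under the arm,gic binding the first cell picks the interrupt class and the kernel applies the architectural offset to reach the hardware IRQ number; a small sketch of that mapping (the helper function is illustrative, not kernel code):

#include <stdio.h>

#define GIC_SPI 0	/* shared peripheral interrupt */
#define GIC_PPI 1	/* private peripheral interrupt */

/* SPIs start at hwirq 32, PPIs at hwirq 16, per the GIC architecture. */
static int gic_hwirq(int class, int nr)
{
	return class == GIC_SPI ? 32 + nr : 16 + nr;
}

int main(void)
{
	/* <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH> from the elm node above */
	printf("GIC_SPI 4  -> hwirq %d\n", gic_hwirq(GIC_SPI, 4));
	/* <GIC_PPI 13 ...> as in the bcm-nsp twd-timer earlier */
	printf("GIC_PPI 13 -> hwirq %d\n", gic_hwirq(GIC_PPI, 13));
	return 0;
}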
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 575ecffb0e9e..1b20838bb9a4 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -73,12 +73,14 @@
 		clocks = <&auxclk1_ck>;
 		clock-names = "main_clk";
 		clock-frequency = <19200000>;
+		#phy-cells = <0>;
 	};
 
 	/* HS USB Host PHY on PORT 3 */
 	hsusb3_phy: hsusb3_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>; /* gpio3_79 ETH_NRESET */
+		#phy-cells = <0>;
 	};
 
 	tpd12s015: encoder {
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5b172a04b6f1..5e21fb430a65 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -63,12 +63,14 @@
 	hsusb2_phy: hsusb2_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>; /* gpio3_76 HUB_RESET */
+		#phy-cells = <0>;
 	};
 
 	/* HS USB Host PHY on PORT 3 */
 	hsusb3_phy: hsusb3_phy {
 		compatible = "usb-nop-xceiv";
 		reset-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; /* gpio3_83 ETH_RESET */
+		#phy-cells = <0>;
 	};
 
 	leds {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 4cd0005e462f..51a7fb3d7b9a 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -940,6 +940,7 @@
 			compatible = "ti,ohci-omap3";
 			reg = <0x4a064800 0x400>;
 			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+			remote-wakeup-connected;
 		};
 
 		usbhsehci: ehci@4a064c00 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 2f017fee4009..62baabd757b6 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1201,6 +1201,7 @@
 			clock-names = "extal", "usb_extal";
 			#clock-cells = <2>;
 			#power-domain-cells = <0>;
+			#reset-cells = <1>;
 		};
 
 		prr: chipid@ff000044 {
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 131f65b0426e..3d080e07374c 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -829,6 +829,7 @@
 			clock-names = "extal";
 			#clock-cells = <2>;
 			#power-domain-cells = <0>;
+			#reset-cells = <1>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index 58eae569b4e0..0cd1035de1a4 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -1088,6 +1088,7 @@
 			clock-names = "extal", "usb_extal";
 			#clock-cells = <2>;
 			#power-domain-cells = <0>;
+			#reset-cells = <1>;
 		};
 
 		rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index 905e50c9b524..5643976c1356 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -1099,6 +1099,7 @@
 			clock-names = "extal", "usb_extal";
 			#clock-cells = <2>;
 			#power-domain-cells = <0>;
+			#reset-cells = <1>;
 		};
 
 		rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
index 02a6227c717c..4b8edc8982cf 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
@@ -121,7 +121,7 @@
 				switch0port10: port@10 {
 					reg = <10>;
 					label = "dsa";
-					phy-mode = "xgmii";
+					phy-mode = "xaui";
 					link = <&switch1port10>;
 				};
 			};
@@ -208,7 +208,7 @@
 				switch1port10: port@10 {
 					reg = <10>;
 					label = "dsa";
-					phy-mode = "xgmii";
+					phy-mode = "xaui";
 					link = <&switch0port10>;
 				};
 			};
@@ -359,7 +359,7 @@
 };
 
 &i2c1 {
-	at24mac602@0 {
+	at24mac602@50 {
 		compatible = "atmel,24c02";
 		reg = <0x50>;
 		read-only;
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index c8781450905b..3ab8b3781bfe 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -161,8 +161,7 @@
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
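
This hunk (and its arm64 twin later in the merge) removes VTTBR_BADDR_SHIFT: shifting the base-address mask by (VTTBR_X - 1) placed it one bit too low, so the mask no longer lined up with a VTTBR_X-aligned base address. A standalone before/after sketch, with VTTBR_X fixed at an assumed example value since the real one is derived from KVM_T0SZ:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VTTBR_X 24	/* assumed example; really derived from KVM_T0SZ */

/* Old form: shifted by (VTTBR_X - 1), i.e. covering bits 23..38 here. */
#define OLD_BADDR_MASK ((((uint64_t)1 << (40 - VTTBR_X)) - 1) << (VTTBR_X - 1))
/* Fixed form from this merge: aligned at bit VTTBR_X, bits 24..39 here. */
#define NEW_BADDR_MASK ((((uint64_t)1 << (40 - VTTBR_X)) - 1) << VTTBR_X)

int main(void)
{
	printf("old mask: 0x%016" PRIx64 "\n", OLD_BADDR_MASK);
	printf("new mask: 0x%016" PRIx64 "\n", NEW_BADDR_MASK);
	return 0;
}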
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 242151ea6908..a9f7d3f47134 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -285,6 +285,11 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
+					     struct kvm_run *run)
+{
+	return false;
+}
 
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
diff --git a/arch/arm/mach-meson/platsmp.c b/arch/arm/mach-meson/platsmp.c
index 2555f9056a33..cad7ee8f0d6b 100644
--- a/arch/arm/mach-meson/platsmp.c
+++ b/arch/arm/mach-meson/platsmp.c
@@ -102,7 +102,7 @@ static void __init meson_smp_prepare_cpus(const char *scu_compatible,
 
 	scu_base = of_iomap(node, 0);
 	if (!scu_base) {
-		pr_err("Couln't map SCU registers\n");
+		pr_err("Couldn't map SCU registers\n");
 		return;
 	}
 
diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
index d555791cf349..83c6fa74cc31 100644
--- a/arch/arm/mach-omap2/cm_common.c
+++ b/arch/arm/mach-omap2/cm_common.c
@@ -68,14 +68,17 @@ void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
 int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
 			u8 *idlest_reg_id)
 {
+	int ret;
 	if (!cm_ll_data->split_idlest_reg) {
 		WARN_ONCE(1, "cm: %s: no low-level function defined\n",
 			  __func__);
 		return -EINVAL;
 	}
 
-	return cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
+	ret = cm_ll_data->split_idlest_reg(idlest_reg, prcm_inst,
 					   idlest_reg_id);
+	*prcm_inst -= cm_base.offset;
+	return ret;
 }
 
 /**
@@ -337,6 +340,7 @@ int __init omap2_cm_base_init(void)
 	if (mem) {
 		mem->pa = res.start + data->offset;
 		mem->va = data->mem + data->offset;
+		mem->offset = data->offset;
 	}
 
 	data->np = np;
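
Read together, the two hunks above record at init time how far into the CM module the I/O mapping starts (mem->offset) and then rebase the instance offset returned by the SoC-specific helper so it is relative to that mapping rather than to the module's start. A toy model of the arithmetic, with the struct trimmed and all values invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct omap_domain_base {
	uint32_t pa;	/* physical address actually mapped */
	int16_t offset;	/* offset of that mapping inside the module */
};

int main(void)
{
	/* Pretend only the region 0x2000 bytes into the module is mapped. */
	struct omap_domain_base cm_base = { .pa = 0x44e02000, .offset = 0x2000 };
	int16_t prcm_inst = 0x2500;	/* offset relative to the module start */

	prcm_inst -= cm_base.offset;	/* rebase onto the mapped window */
	printf("rebased instance offset: 0x%x\n", prcm_inst);
	return 0;
}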
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index 5ac122e88f67..fa7f308c9027 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -73,6 +73,27 @@ phys_addr_t omap_secure_ram_mempool_base(void)
 	return omap_secure_memblock_base;
 }
 
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+u32 omap3_save_secure_ram(void __iomem *addr, int size)
+{
+	u32 ret;
+	u32 param[5];
+
+	if (size != OMAP3_SAVE_SECURE_RAM_SZ)
+		return OMAP3_SAVE_SECURE_RAM_SZ;
+
+	param[0] = 4;		/* Number of arguments */
+	param[1] = __pa(addr);	/* Physical address for saving */
+	param[2] = 0;
+	param[3] = 1;
+	param[4] = 1;
+
+	ret = save_secure_ram_context(__pa(param));
+
+	return ret;
+}
+#endif
+
 /**
  * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
  * @idx: The PPA API index
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index bae263fba640..c509cde71f93 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -31,6 +31,8 @@
 /* Maximum Secure memory storage size */
 #define OMAP_SECURE_RAM_STORAGE	(88 * SZ_1K)
 
+#define OMAP3_SAVE_SECURE_RAM_SZ	0x803F
+
 /* Secure low power HAL API index */
 #define OMAP4_HAL_SAVESECURERAM_INDEX	0x1a
 #define OMAP4_HAL_SAVEHW_INDEX		0x1b
@@ -65,6 +67,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
 extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
 extern phys_addr_t omap_secure_ram_mempool_base(void);
 extern int omap_secure_ram_reserve_memblock(void);
+extern u32 save_secure_ram_context(u32 args_pa);
+extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);
 
 extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
 				  u32 arg1, u32 arg2, u32 arg3, u32 arg4);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index d45cbfdb4be6..f0388058b7da 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -391,10 +391,8 @@ omap_device_copy_resources(struct omap_hwmod *oh,
 	const char *name;
 	int error, irq = 0;
 
-	if (!oh || !oh->od || !oh->od->pdev) {
-		error = -EINVAL;
-		goto error;
-	}
+	if (!oh || !oh->od || !oh->od->pdev)
+		return -EINVAL;
 
 	np = oh->od->pdev->dev.of_node;
 	if (!np) {
@@ -516,8 +514,10 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
 		goto odbs_exit1;
 
 	od = omap_device_alloc(pdev, &oh, 1);
-	if (IS_ERR(od))
+	if (IS_ERR(od)) {
+		ret = PTR_ERR(od);
 		goto odbs_exit1;
+	}
 
 	ret = platform_device_add_data(pdev, pdata, pdata_len);
 	if (ret)
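
The second hunk is the usual ERR_PTR discipline: omap_device_alloc() reports failure by encoding an errno in the returned pointer, and the caller must recover it with PTR_ERR() before jumping to the unwind label, otherwise the function exits with whatever was left in ret. The pattern, reproduced outside the kernel with freestanding copies of the helpers for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_thing(int fail)
{
	static int object;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&object;
}

int main(void)
{
	void *od = alloc_thing(1);
	int ret = 0;

	if (IS_ERR(od)) {
		ret = PTR_ERR(od);	/* recover the encoded errno */
		goto out;
	}
	/* ... use od ... */
out:
	printf("ret = %d\n", ret);	/* -12 (ENOMEM) on Linux */
	return 0;
}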
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d2106ae4410a..52c9d585b44d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1646,6 +1646,7 @@ static struct omap_hwmod omap3xxx_mmc3_hwmod = {
 	.main_clk	= "mmchs3_fck",
 	.prcm = {
 		.omap2 = {
+			.module_offs = CORE_MOD,
 			.prcm_reg_id = 1,
 			.module_bit = OMAP3430_EN_MMC3_SHIFT,
 			.idlest_reg_id = 1,
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index b668719b9b25..8e30772cfe32 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz;
 /* ... and its pointer from SRAM after copy */
 extern void (*omap3_do_wfi_sram)(void);
 
-/* save_secure_ram_context function pointer and size, for copy to SRAM */
-extern int save_secure_ram_context(u32 *addr);
-extern unsigned int save_secure_ram_context_sz;
-
 extern void omap3_save_scratchpad_contents(void);
 
 #define PM_RTA_ERRATUM_i608		(1 << 0)
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 841ba19d64a6..36c55547137c 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -48,6 +48,7 @@
 #include "prm3xxx.h"
 #include "pm.h"
 #include "sdrc.h"
+#include "omap-secure.h"
 #include "sram.h"
 #include "control.h"
 #include "vc.h"
@@ -66,7 +67,6 @@ struct power_state {
 
 static LIST_HEAD(pwrst_list);
 
-static int (*_omap_save_secure_sram)(u32 *addr);
 void (*omap3_do_wfi_sram)(void);
 
 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
 	 * will hang the system.
 	 */
 	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
-	ret = _omap_save_secure_sram((u32 *)(unsigned long)
-				     __pa(omap3_secure_ram_storage));
+	ret = omap3_save_secure_ram(omap3_secure_ram_storage,
+				    OMAP3_SAVE_SECURE_RAM_SZ);
 	pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
 	/* Following is for error tracking, it should not happen */
 	if (ret) {
@@ -434,15 +434,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
  *
  * The minimum set of functions is pushed to SRAM for execution:
  * - omap3_do_wfi for erratum i581 WA,
- * - save_secure_ram_context for security extensions.
  */
 void omap_push_sram_idle(void)
 {
 	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
-
-	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
-		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
-				save_secure_ram_context_sz);
 }
 
 static void __init pm_errata_configure(void)
@@ -553,7 +548,7 @@ int __init omap3_pm_init(void)
 	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
 		omap3_secure_ram_storage =
-			kmalloc(0x803F, GFP_KERNEL);
+			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
 		if (!omap3_secure_ram_storage)
 			pr_err("Memory allocation failed when allocating for secure sram context\n");
 
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 0592b23902c6..0977da0dab76 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -528,6 +528,7 @@ struct omap_prcm_irq_setup {
 struct omap_domain_base {
 	u32 pa;
 	void __iomem *va;
+	s16 offset;
 };
 
 /**
diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
index d2c5bcabdbeb..ebaf80d72a10 100644
--- a/arch/arm/mach-omap2/prm33xx.c
+++ b/arch/arm/mach-omap2/prm33xx.c
@@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
 	return v;
 }
 
-static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
-{
-	u32 v;
-
-	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
-	v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
-	v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;
-
-	return v;
-}
-
 static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
 {
 	am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
@@ -357,7 +346,7 @@ struct pwrdm_ops am33xx_pwrdm_operations = {
 	.pwrdm_set_next_pwrst		= am33xx_pwrdm_set_next_pwrst,
 	.pwrdm_read_next_pwrst		= am33xx_pwrdm_read_next_pwrst,
 	.pwrdm_read_pwrst		= am33xx_pwrdm_read_pwrst,
-	.pwrdm_read_prev_pwrst		= am33xx_pwrdm_read_prev_pwrst,
 	.pwrdm_set_logic_retst		= am33xx_pwrdm_set_logic_retst,
 	.pwrdm_read_logic_pwrst		= am33xx_pwrdm_read_logic_pwrst,
 	.pwrdm_read_logic_retst		= am33xx_pwrdm_read_logic_retst,
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index fa5fd24f524c..22daf4efed68 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
 ENDPROC(enable_omap3630_toggle_l2_on_restore)
 
 /*
- * Function to call rom code to save secure ram context. This gets
- * relocated to SRAM, so it can be all in .data section. Otherwise
- * we need to initialize api_params separately.
+ * Function to call rom code to save secure ram context.
+ *
+ * r0 = physical address of the parameters
  */
-	.data
-	.align	3
 ENTRY(save_secure_ram_context)
 	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
-	adr	r3, api_params		@ r3 points to parameters
-	str	r0, [r3,#0x4]		@ r0 has sdram address
-	ldr	r12, high_mask
-	and	r3, r3, r12
-	ldr	r12, sram_phy_addr_mask
-	orr	r3, r3, r12
+	mov	r3, r0			@ physical address of parameters
 	mov	r0, #25			@ set service ID for PPA
 	mov	r12, r0			@ copy secure service ID in r12
 	mov	r1, #0			@ set task id for ROM code in r1
@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
 	nop
 	nop
 	ldmfd	sp!, {r4 - r11, pc}
-	.align
-sram_phy_addr_mask:
-	.word	SRAM_BASE_P
-high_mask:
-	.word	0xffff
-api_params:
-	.word	0x4, 0x0, 0x0, 0x1, 0x1
 ENDPROC(save_secure_ram_context)
-ENTRY(save_secure_ram_context_sz)
-	.word	. - save_secure_ram_context
-
-	.text
 
 /*
  * ======================
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a93339f5178f..c9a7e9e1414f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -557,7 +557,6 @@ config QCOM_QDF2400_ERRATUM_0065
 
 	  If unsure, say Y.
 
-
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
@@ -576,6 +575,17 @@ config HISILICON_ERRATUM_161600802
 	  a 128kB offset to be applied to the target address in this commands.
 
 	  If unsure, say Y.
+
+config QCOM_FALKOR_ERRATUM_E1041
+	bool "Falkor E1041: Speculative instruction fetches might cause errant memory access"
+	default y
+	help
+	  Falkor CPU may speculatively fetch instructions from an improper
+	  memory location when MMU translation is changed from SCTLR_ELn[M]=1
+	  to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem.
+
+	  If unsure, say Y.
+
 endmenu
 
 
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index d7c22d51bc50..4aa50b9b26bc 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -12,6 +12,7 @@ subdir-y += cavium
 subdir-y += exynos
 subdir-y += freescale
 subdir-y += hisilicon
+subdir-y += lg
 subdir-y += marvell
 subdir-y += mediatek
 subdir-y += nvidia
@@ -22,5 +23,4 @@ subdir-y += rockchip
 subdir-y += socionext
 subdir-y += sprd
 subdir-y += xilinx
-subdir-y += lg
 subdir-y += zte
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index ead895a4e9a5..1fb8b9d6cb4e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -753,12 +753,12 @@
 
 &uart_B {
 	clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
-	clock-names = "xtal", "core", "baud";
+	clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
 	clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
-	clock-names = "xtal", "core", "baud";
+	clock-names = "xtal", "pclk", "baud";
 };
 
 &vpu {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 8ed981f59e5a..6524b89e7115 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -688,7 +688,7 @@
 
 &uart_A {
 	clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
-	clock-names = "xtal", "core", "baud";
+	clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_AO {
@@ -703,12 +703,12 @@
 
 &uart_B {
 	clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
-	clock-names = "xtal", "core", "baud";
+	clock-names = "xtal", "pclk", "baud";
 };
 
 &uart_C {
 	clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
-	clock-names = "xtal", "core", "baud";
+	clock-names = "xtal", "pclk", "baud";
 };
 
 &vpu {
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
index dd7193acc7df..6bdefb26b329 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-ref.dts
@@ -40,7 +40,6 @@
 };
 
 &ethsc {
-	interrupt-parent = <&gpio>;
 	interrupts = <0 8>;
 };
 
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
index d99e3731358c..254d6795c67e 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-ref.dts
@@ -40,7 +40,6 @@
 };
 
 &ethsc {
-	interrupt-parent = <&gpio>;
 	interrupts = <0 8>;
 };
 
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
index 864feeb35180..f9f06fcfb94a 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts
@@ -38,8 +38,7 @@
 };
 
 &ethsc {
-	interrupt-parent = <&gpio>;
-	interrupts = <0 8>;
+	interrupts = <4 8>;
 };
 
 &serial0 {
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index aef72d886677..8b168280976f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -512,4 +512,14 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+/**
+ * Errata workaround prior to disable MMU. Insert an ISB immediately prior
+ * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
+ */
+	.macro	pre_disable_mmu_workaround
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
+	isb
+#endif
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index ac67cfc2585a..060e3a4008ab 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -60,6 +60,9 @@ enum ftr_type {
 #define FTR_VISIBLE	true	/* Feature visible to the user space */
 #define FTR_HIDDEN	false	/* Feature is hidden from the user */
 
+#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
+	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
+
 struct arm64_ftr_bits {
 	bool		sign;	/* Value is signed ? */
 	bool		visible;
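
FTR_VISIBLE_IF_IS_ENABLED() folds a compile-time config test into the visibility field of a feature-register descriptor. The IS_ENABLED() machinery it leans on can be reproduced outside the kernel; a self-contained sketch, cut down to the built-in case, with the CONFIG_* symbols invented for the demonstration:

#include <stdbool.h>
#include <stdio.h>

/* Freestanding copy of the preprocessor trick from linux/kconfig.h:
 * a symbol defined to 1 selects the second argument, anything else
 * falls through to 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define FTR_VISIBLE true
#define FTR_HIDDEN false
#define FTR_VISIBLE_IF_IS_ENABLED(config) \
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

#define CONFIG_DEMO_FEATURE 1	/* invented config symbol */

int main(void)
{
	printf("enabled:  %d\n", FTR_VISIBLE_IF_IS_ENABLED(CONFIG_DEMO_FEATURE));
	printf("disabled: %d\n", FTR_VISIBLE_IF_IS_ENABLED(CONFIG_MISSING_FEATURE));
	return 0;
}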
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 235e77d98261..cbf08d7cbf30 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -91,6 +91,7 @@
 #define BRCM_CPU_PART_VULCAN		0x516
 
 #define QCOM_CPU_PART_FALKOR_V1		0x800
+#define QCOM_CPU_PART_FALKOR		0xC00
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
@@ -99,6 +100,7 @@
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 7f069ff37f06..715d395ef45b 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -170,8 +170,7 @@
 #define VTCR_EL2_FLAGS			(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X				(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 674912d7a571..ea6cb5b24258 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -370,6 +370,7 @@ void kvm_arm_init_debug(void);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
 			       struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 149d05fb9421..bdcc7f1c9d06 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -42,6 +42,8 @@
42#include <asm/cmpxchg.h> 42#include <asm/cmpxchg.h>
43#include <asm/fixmap.h> 43#include <asm/fixmap.h>
44#include <linux/mmdebug.h> 44#include <linux/mmdebug.h>
45#include <linux/mm_types.h>
46#include <linux/sched.h>
45 47
46extern void __pte_error(const char *file, int line, unsigned long val); 48extern void __pte_error(const char *file, int line, unsigned long val);
47extern void __pmd_error(const char *file, int line, unsigned long val); 49extern void __pmd_error(const char *file, int line, unsigned long val);
@@ -149,12 +151,20 @@ static inline pte_t pte_mkwrite(pte_t pte)
149 151
150static inline pte_t pte_mkclean(pte_t pte) 152static inline pte_t pte_mkclean(pte_t pte)
151{ 153{
152 return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); 154 pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
155 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
156
157 return pte;
153} 158}
154 159
155static inline pte_t pte_mkdirty(pte_t pte) 160static inline pte_t pte_mkdirty(pte_t pte)
156{ 161{
157 return set_pte_bit(pte, __pgprot(PTE_DIRTY)); 162 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
163
164 if (pte_write(pte))
165 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
166
167 return pte;
158} 168}
159 169
160static inline pte_t pte_mkold(pte_t pte) 170static inline pte_t pte_mkold(pte_t pte)
@@ -207,9 +217,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
207 } 217 }
208} 218}
209 219
210struct mm_struct;
211struct vm_area_struct;
212
213extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); 220extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
214 221
215/* 222/*
@@ -238,7 +245,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
238 * hardware updates of the pte (ptep_set_access_flags safely changes 245 * hardware updates of the pte (ptep_set_access_flags safely changes
239 * valid ptes without going through an invalid entry). 246 * valid ptes without going through an invalid entry).
240 */ 247 */
241 if (pte_valid(*ptep) && pte_valid(pte)) { 248 if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
249 (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
242 VM_WARN_ONCE(!pte_young(pte), 250 VM_WARN_ONCE(!pte_young(pte),
243 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", 251 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
244 __func__, pte_val(*ptep), pte_val(pte)); 252 __func__, pte_val(*ptep), pte_val(pte));
@@ -641,28 +649,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
641#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 649#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
642 650
643/* 651/*
644 * ptep_set_wrprotect - mark read-only while preserving the hardware update of 652 * ptep_set_wrprotect - mark read-only while transferring potential hardware
645 * the Access Flag. 653 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
646 */ 654 */
647#define __HAVE_ARCH_PTEP_SET_WRPROTECT 655#define __HAVE_ARCH_PTEP_SET_WRPROTECT
648static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) 656static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
649{ 657{
650 pte_t old_pte, pte; 658 pte_t old_pte, pte;
651 659
652 /*
653 * ptep_set_wrprotect() is only called on CoW mappings which are
654 * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
655 * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
656 * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
657 * protection_map[]. There is no race with the hardware update of the
658 * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
659 * is set.
660 */
661 VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
662 "%s: potential race with hardware DBM", __func__);
663 pte = READ_ONCE(*ptep); 660 pte = READ_ONCE(*ptep);
664 do { 661 do {
665 old_pte = pte; 662 old_pte = pte;
663 /*
664 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
665 * clear), set the PTE_DIRTY bit.
666 */
667 if (pte_hw_dirty(pte))
668 pte = pte_mkdirty(pte);
666 pte = pte_wrprotect(pte); 669 pte = pte_wrprotect(pte);
667 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 670 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
668 pte_val(old_pte), pte_val(pte)); 671 pte_val(old_pte), pte_val(pte));
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 65f42d257414..2a752cb2a0f3 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart)
37 mrs x12, sctlr_el1 37 mrs x12, sctlr_el1
38 ldr x13, =SCTLR_ELx_FLAGS 38 ldr x13, =SCTLR_ELx_FLAGS
39 bic x12, x12, x13 39 bic x12, x12, x13
40 pre_disable_mmu_workaround
40 msr sctlr_el1, x12 41 msr sctlr_el1, x12
41 isb 42 isb
42 43
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c5ba0097887f..a73a5928f09b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -145,7 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
145}; 145};
146 146
147static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { 147static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
148 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), 148 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
149 FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
149 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), 150 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
150 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), 151 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
151 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), 152 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 4e6ad355bd05..6b9736c3fb56 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -96,6 +96,7 @@ ENTRY(entry)
96 mrs x0, sctlr_el2 96 mrs x0, sctlr_el2
97 bic x0, x0, #1 << 0 // clear SCTLR.M 97 bic x0, x0, #1 << 0 // clear SCTLR.M
98 bic x0, x0, #1 << 2 // clear SCTLR.C 98 bic x0, x0, #1 << 2 // clear SCTLR.C
99 pre_disable_mmu_workaround
99 msr sctlr_el2, x0 100 msr sctlr_el2, x0
100 isb 101 isb
101 b 2f 102 b 2f
@@ -103,6 +104,7 @@ ENTRY(entry)
103 mrs x0, sctlr_el1 104 mrs x0, sctlr_el1
104 bic x0, x0, #1 << 0 // clear SCTLR.M 105 bic x0, x0, #1 << 0 // clear SCTLR.M
105 bic x0, x0, #1 << 2 // clear SCTLR.C 106 bic x0, x0, #1 << 2 // clear SCTLR.C
107 pre_disable_mmu_workaround
106 msr sctlr_el1, x0 108 msr sctlr_el1, x0
107 isb 109 isb
1082: 1102:
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 540a1e010eb5..fae81f7964b4 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1043,7 +1043,7 @@ void fpsimd_update_current_state(struct fpsimd_state *state)
1043 1043
1044 local_bh_disable(); 1044 local_bh_disable();
1045 1045
1046 current->thread.fpsimd_state = *state; 1046 current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
1047 if (system_supports_sve() && test_thread_flag(TIF_SVE)) 1047 if (system_supports_sve() && test_thread_flag(TIF_SVE))
1048 fpsimd_to_sve(current); 1048 fpsimd_to_sve(current);
1049 1049
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 67e86a0f57ac..e3cb9fbf96b6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -750,6 +750,7 @@ __primary_switch:
750 * to take into account by discarding the current kernel mapping and 750 * to take into account by discarding the current kernel mapping and
751 * creating a new one. 751 * creating a new one.
752 */ 752 */
753 pre_disable_mmu_workaround
753 msr sctlr_el1, x20 // disable the MMU 754 msr sctlr_el1, x20 // disable the MMU
754 isb 755 isb
755 bl __create_page_tables // recreate kernel mapping 756 bl __create_page_tables // recreate kernel mapping
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 749f81779420..74bb56f656ef 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
28#include <linux/perf_event.h> 28#include <linux/perf_event.h>
29#include <linux/ptrace.h> 29#include <linux/ptrace.h>
30#include <linux/smp.h> 30#include <linux/smp.h>
31#include <linux/uaccess.h>
31 32
32#include <asm/compat.h> 33#include <asm/compat.h>
33#include <asm/current.h> 34#include <asm/current.h>
@@ -36,7 +37,6 @@
36#include <asm/traps.h> 37#include <asm/traps.h>
37#include <asm/cputype.h> 38#include <asm/cputype.h>
38#include <asm/system_misc.h> 39#include <asm/system_misc.h>
39#include <asm/uaccess.h>
40 40
41/* Breakpoint currently in use for each BRP. */ 41/* Breakpoint currently in use for each BRP. */
42static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); 42static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index ce704a4aeadd..f407e422a720 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel)
45 mrs x0, sctlr_el2 45 mrs x0, sctlr_el2
46 ldr x1, =SCTLR_ELx_FLAGS 46 ldr x1, =SCTLR_ELx_FLAGS
47 bic x0, x0, x1 47 bic x0, x0, x1
48 pre_disable_mmu_workaround
48 msr sctlr_el2, x0 49 msr sctlr_el2, x0
49 isb 50 isb
501: 511:
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index dbadfaf850a7..fa63b28c65e0 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -221,3 +221,24 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
221 } 221 }
222 } 222 }
223} 223}
224
225
226/*
227 * After successfully emulating an instruction, we might want to
228 * return to user space with a KVM_EXIT_DEBUG. We can only do this
229 * once the emulation is complete, though, so for userspace emulations
230 * we have to wait until we have re-entered KVM before calling this
231 * helper.
232 *
233 * Return true (and set exit_reason) to return to userspace or false
234 * if no further action is required.
235 */
236bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
237{
238 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
239 run->exit_reason = KVM_EXIT_DEBUG;
240 run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
241 return true;
242 }
243 return false;
244}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index b71247995469..304203fa9e33 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -28,6 +28,7 @@
28#include <asm/kvm_emulate.h> 28#include <asm/kvm_emulate.h>
29#include <asm/kvm_mmu.h> 29#include <asm/kvm_mmu.h>
30#include <asm/kvm_psci.h> 30#include <asm/kvm_psci.h>
31#include <asm/debug-monitors.h>
31 32
32#define CREATE_TRACE_POINTS 33#define CREATE_TRACE_POINTS
33#include "trace.h" 34#include "trace.h"
@@ -187,14 +188,46 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
187} 188}
188 189
189/* 190/*
191 * We may be single-stepping an emulated instruction. If the emulation
192 * has been completed in the kernel, we can return to userspace with a
193 * KVM_EXIT_DEBUG; otherwise userspace needs to complete its
194 * emulation first.
195 */
196static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
197{
198 int handled;
199
200 /*
201 * See ARM ARM B1.14.1: "Hyp traps on instructions
202 * that fail their condition code check"
203 */
204 if (!kvm_condition_valid(vcpu)) {
205 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
206 handled = 1;
207 } else {
208 exit_handle_fn exit_handler;
209
210 exit_handler = kvm_get_exit_handler(vcpu);
211 handled = exit_handler(vcpu, run);
212 }
213
214 /*
215 * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
216 * structure if we need to return to userspace.
217 */
218 if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
219 handled = 0;
220
221 return handled;
222}
223
224/*
190 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on 225 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
191 * proper exit to userspace. 226 * proper exit to userspace.
192 */ 227 */
193int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, 228int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
194 int exception_index) 229 int exception_index)
195{ 230{
196 exit_handle_fn exit_handler;
197
198 if (ARM_SERROR_PENDING(exception_index)) { 231 if (ARM_SERROR_PENDING(exception_index)) {
199 u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu)); 232 u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
200 233
@@ -220,20 +253,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
220 return 1; 253 return 1;
221 case ARM_EXCEPTION_EL1_SERROR: 254 case ARM_EXCEPTION_EL1_SERROR:
222 kvm_inject_vabt(vcpu); 255 kvm_inject_vabt(vcpu);
223 return 1; 256 /* We may still need to return for single-step */
224 case ARM_EXCEPTION_TRAP: 257 if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
225 /* 258 && kvm_arm_handle_step_debug(vcpu, run))
226 * See ARM ARM B1.14.1: "Hyp traps on instructions 259 return 0;
227 * that fail their condition code check" 260 else
228 */
229 if (!kvm_condition_valid(vcpu)) {
230 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
231 return 1; 261 return 1;
232 } 262 case ARM_EXCEPTION_TRAP:
233 263 return handle_trap_exceptions(vcpu, run);
234 exit_handler = kvm_get_exit_handler(vcpu);
235
236 return exit_handler(vcpu, run);
237 case ARM_EXCEPTION_HYP_GONE: 264 case ARM_EXCEPTION_HYP_GONE:
238 /* 265 /*
239 * EL2 has been reset to the hyp-stub. This happens when a guest 266 * EL2 has been reset to the hyp-stub. This happens when a guest
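
The contract spelled out above (> 0 re-enter the guest, 0 exit to userspace with exit_reason set, < 0 error) is what makes the single-step plumbing work: kvm_arm_handle_step_debug() turns a "handled" 1 into a 0 whenever userspace asked for KVM_GUESTDBG_SINGLESTEP. A toy run loop honouring that convention, with all names hypothetical:

#include <stdio.h>

/* Hypothetical stand-ins for kvm_run; only the return-code convention matters. */
enum { TOY_EXIT_NONE, TOY_EXIT_DEBUG };

struct toy_run { int exit_reason; };

static int toy_handle_exit(struct toy_run *run, int singlestep_requested)
{
	int handled = 1;		/* pretend the trap was fully emulated */

	if (handled > 0 && singlestep_requested) {
		run->exit_reason = TOY_EXIT_DEBUG;
		handled = 0;		/* bounce out to userspace instead */
	}
	return handled;
}

int main(void)
{
	struct toy_run run = { TOY_EXIT_NONE };
	int ret;

	do {
		ret = toy_handle_exit(&run, 1);	/* single-step requested */
	} while (ret > 0);

	if (ret == 0)
		printf("to userspace, exit_reason=%d\n", run.exit_reason);
	return ret < 0 ? 1 : 0;
}
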
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3f9615582377..870828c364c5 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -151,6 +151,7 @@ reset:
151 mrs x5, sctlr_el2 151 mrs x5, sctlr_el2
152 ldr x6, =SCTLR_ELx_FLAGS 152 ldr x6, =SCTLR_ELx_FLAGS
153 bic x5, x5, x6 // Clear SCTL_M, etc. 153 bic x5, x5, x6 // Clear SCTL_M, etc.
154 pre_disable_mmu_workaround
154 msr sctlr_el2, x5 155 msr sctlr_el2, x5
155 isb 156 isb
156 157
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 525c01f48867..f7c651f3a8c0 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -22,6 +22,7 @@
22#include <asm/kvm_emulate.h> 22#include <asm/kvm_emulate.h>
23#include <asm/kvm_hyp.h> 23#include <asm/kvm_hyp.h>
24#include <asm/fpsimd.h> 24#include <asm/fpsimd.h>
25#include <asm/debug-monitors.h>
25 26
26static bool __hyp_text __fpsimd_enabled_nvhe(void) 27static bool __hyp_text __fpsimd_enabled_nvhe(void)
27{ 28{
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
269 return true; 270 return true;
270} 271}
271 272
272static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu) 273/* Skip an instruction which has been emulated. Returns true if
274 * execution can continue or false if we need to exit hyp mode because
275 * single-step was in effect.
276 */
277static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
273{ 278{
274 *vcpu_pc(vcpu) = read_sysreg_el2(elr); 279 *vcpu_pc(vcpu) = read_sysreg_el2(elr);
275 280
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
282 } 287 }
283 288
284 write_sysreg_el2(*vcpu_pc(vcpu), elr); 289 write_sysreg_el2(*vcpu_pc(vcpu), elr);
290
291 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
292 vcpu->arch.fault.esr_el2 =
293 (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
294 return false;
295 } else {
296 return true;
297 }
285} 298}
286 299
287int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) 300int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ again:
342 int ret = __vgic_v2_perform_cpuif_access(vcpu); 355 int ret = __vgic_v2_perform_cpuif_access(vcpu);
343 356
344 if (ret == 1) { 357 if (ret == 1) {
345 __skip_instr(vcpu); 358 if (__skip_instr(vcpu))
346 goto again; 359 goto again;
360 else
361 exit_code = ARM_EXCEPTION_TRAP;
347 } 362 }
348 363
349 if (ret == -1) { 364 if (ret == -1) {
350 /* Promote an illegal access to an SError */ 365 /* Promote an illegal access to an
351 __skip_instr(vcpu); 366 * SError. If we would be returning
367 * due to single-step, clear the SS
368 * bit so handle_exit knows what to
369 * do after dealing with the error.
370 */
371 if (!__skip_instr(vcpu))
372 *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
352 exit_code = ARM_EXCEPTION_EL1_SERROR; 373 exit_code = ARM_EXCEPTION_EL1_SERROR;
353 } 374 }
354 375
@@ -363,8 +384,10 @@ again:
363 int ret = __vgic_v3_perform_cpuif_access(vcpu); 384 int ret = __vgic_v3_perform_cpuif_access(vcpu);
364 385
365 if (ret == 1) { 386 if (ret == 1) {
366 __skip_instr(vcpu); 387 if (__skip_instr(vcpu))
367 goto again; 388 goto again;
389 else
390 exit_code = ARM_EXCEPTION_TRAP;
368 } 391 }
369 392
370 /* 0 falls through to be handled out of EL2 */ 393 /* 0 falls through to be handled out of EL2 */
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index ca74a2aace42..7b60d62ac593 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -389,7 +389,7 @@ void ptdump_check_wx(void)
389 .check_wx = true, 389 .check_wx = true,
390 }; 390 };
391 391
392 walk_pgd(&st, &init_mm, 0); 392 walk_pgd(&st, &init_mm, VA_START);
393 note_page(&st, 0, 0, 0); 393 note_page(&st, 0, 0, 0);
394 if (st.wx_pages || st.uxn_pages) 394 if (st.wx_pages || st.uxn_pages)
395 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", 395 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 22168cd0dde7..9b7f89df49db 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -574,7 +574,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
574{ 574{
575 struct siginfo info; 575 struct siginfo info;
576 const struct fault_info *inf; 576 const struct fault_info *inf;
577 int ret = 0;
578 577
579 inf = esr_to_fault_info(esr); 578 inf = esr_to_fault_info(esr);
580 pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n", 579 pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n",
@@ -589,7 +588,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
589 if (interrupts_enabled(regs)) 588 if (interrupts_enabled(regs))
590 nmi_enter(); 589 nmi_enter();
591 590
592 ret = ghes_notify_sea(); 591 ghes_notify_sea();
593 592
594 if (interrupts_enabled(regs)) 593 if (interrupts_enabled(regs))
595 nmi_exit(); 594 nmi_exit();
@@ -604,7 +603,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
604 info.si_addr = (void __user *)addr; 603 info.si_addr = (void __user *)addr;
605 arm64_notify_die("", regs, &info, esr); 604 arm64_notify_die("", regs, &info, esr);
606 605
607 return ret; 606 return 0;
608} 607}
609 608
610static const struct fault_info fault_info[] = { 609static const struct fault_info fault_info[] = {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5960bef0170d..00e7b900ca41 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -476,6 +476,8 @@ void __init arm64_memblock_init(void)
476 476
477 reserve_elfcorehdr(); 477 reserve_elfcorehdr();
478 478
479 high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
480
479 dma_contiguous_reserve(arm64_dma_phys_limit); 481 dma_contiguous_reserve(arm64_dma_phys_limit);
480 482
481 memblock_allow_resize(); 483 memblock_allow_resize();
@@ -502,7 +504,6 @@ void __init bootmem_init(void)
502 sparse_init(); 504 sparse_init();
503 zone_sizes_init(min, max); 505 zone_sizes_init(min, max);
504 506
505 high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
506 memblock_dump_all(); 507 memblock_dump_all();
507} 508}
508 509
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 773c4e039cd7..c0319cbf1eec 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -38,6 +38,25 @@
38#define smp_rmb() RISCV_FENCE(r,r) 38#define smp_rmb() RISCV_FENCE(r,r)
39#define smp_wmb() RISCV_FENCE(w,w) 39#define smp_wmb() RISCV_FENCE(w,w)
40 40
41/*
42 * This is a very specific barrier: it's currently only used in two places in
43 * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
44 * orderings it guarantees, but the "critical section is RCsc" guarantee
45 * mandates a barrier on RISC-V. The sequence looks like:
46 *
47 * lr.aq lock
48 * sc lock <= LOCKED
49 * smp_mb__after_spinlock()
50 * // critical section
51 * lr lock
52 * sc.rl lock <= UNLOCKED
53 *
54 * The AQ/RL pair provides an RCpc critical section, but there's not really any
55 * way we can take advantage of that here because the ordering is only enforced
56 * on that one lock. Thus, we're just doing a full fence.
57 */
58#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)
59
41#include <asm-generic/barrier.h> 60#include <asm-generic/barrier.h>
42 61
43#endif /* __ASSEMBLY__ */ 62#endif /* __ASSEMBLY__ */
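
The RCpc-vs-RCsc distinction in the comment can be restated with C11 atomics: an acquire on lock and a release on unlock order accesses with respect to the lock word itself, but do not make a pre-lock store globally ordered before a post-lock load, which is exactly what the scheduler needs and why a full fence is emitted. A hedged user-space analogue (a sketch of the ordering, not the kernel primitive):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool lock;

static void toy_spin_lock(void)
{
	bool expected;

	do {
		expected = false;
	} while (!atomic_compare_exchange_weak_explicit(&lock, &expected, true,
							memory_order_acquire,
							memory_order_relaxed));
	/*
	 * RCpc so far: the acquire orders later accesses after the lock, but
	 * a store issued before toy_spin_lock() may still be observed after
	 * a load inside the critical section.  The scheduler needs RCsc:
	 */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_spinlock() */
}

static void toy_spin_unlock(void)
{
	atomic_store_explicit(&lock, false, memory_order_release);
}

int main(void)
{
	toy_spin_lock();
	/* critical section */
	toy_spin_unlock();
	return 0;
}
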
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 8fbb6749910d..cb7b0c63014e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -38,10 +38,6 @@
38#include <asm/tlbflush.h> 38#include <asm/tlbflush.h>
39#include <asm/thread_info.h> 39#include <asm/thread_info.h>
40 40
41#ifdef CONFIG_HVC_RISCV_SBI
42#include <asm/hvc_riscv_sbi.h>
43#endif
44
45#ifdef CONFIG_DUMMY_CONSOLE 41#ifdef CONFIG_DUMMY_CONSOLE
46struct screen_info screen_info = { 42struct screen_info screen_info = {
47 .orig_video_lines = 30, 43 .orig_video_lines = 30,
@@ -212,13 +208,6 @@ static void __init setup_bootmem(void)
212 208
213void __init setup_arch(char **cmdline_p) 209void __init setup_arch(char **cmdline_p)
214{ 210{
215#if defined(CONFIG_HVC_RISCV_SBI)
216 if (likely(early_console == NULL)) {
217 early_console = &riscv_sbi_early_console_dev;
218 register_console(early_console);
219 }
220#endif
221
222#ifdef CONFIG_CMDLINE_BOOL 211#ifdef CONFIG_CMDLINE_BOOL
223#ifdef CONFIG_CMDLINE_OVERRIDE 212#ifdef CONFIG_CMDLINE_OVERRIDE
224 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); 213 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index a2ae936a093e..79c78668258e 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -70,7 +70,7 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
70 bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; 70 bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
71 71
72 /* Check the reserved flags. */ 72 /* Check the reserved flags. */
73 if (unlikely(flags & !SYS_RISCV_FLUSH_ICACHE_ALL)) 73 if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
74 return -EINVAL; 74 return -EINVAL;
75 75
76 flush_icache_mm(mm, local); 76 flush_icache_mm(mm, local);
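
The one-character fix above deserves spelling out: !SYS_RISCV_FLUSH_ICACHE_ALL evaluates to 0 for any non-zero mask, so `flags & !MASK` was always 0 and the reserved-flags check never fired, whereas `flags & ~MASK` keeps exactly the bits outside the accepted set. A quick demonstration:

#include <stdio.h>

#define FLUSH_ICACHE_ALL 1UL	/* the only accepted flag bit */

int main(void)
{
	unsigned long bad_flags = 0x4;	/* a reserved bit set */

	/* Buggy check: !FLUSH_ICACHE_ALL == 0, so this is always 0. */
	printf("buggy: %lu\n", bad_flags & !FLUSH_ICACHE_ALL);

	/* Fixed check: non-zero whenever a reserved bit is set. */
	printf("fixed: %lu\n", bad_flags & ~FLUSH_ICACHE_ALL);
	return 0;
}
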
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57d7bc92e0b8..0a6b0286c32e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1264,12 +1264,6 @@ static inline pud_t pud_mkwrite(pud_t pud)
1264 return pud; 1264 return pud;
1265} 1265}
1266 1266
1267#define pud_write pud_write
1268static inline int pud_write(pud_t pud)
1269{
1270 return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
1271}
1272
1273static inline pud_t pud_mkclean(pud_t pud) 1267static inline pud_t pud_mkclean(pud_t pud)
1274{ 1268{
1275 if (pud_large(pud)) { 1269 if (pud_large(pud)) {
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index f04db3779b34..59eea9c65d3e 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
263 return retval; 263 return retval;
264 } 264 }
265 265
266 groups_sort(group_info);
266 retval = set_current_groups(group_info); 267 retval = set_current_groups(group_info);
267 put_group_info(group_info); 268 put_group_info(group_info);
268 269
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 6048b1c6e580..05ee90a5ea08 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -1,10 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0
1# Makefile for kernel virtual machines on s390 2# Makefile for kernel virtual machines on s390
2# 3#
3# Copyright IBM Corp. 2008 4# Copyright IBM Corp. 2008
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License (version 2 only)
7# as published by the Free Software Foundation.
8 5
9KVM := ../../../virt/kvm 6KVM := ../../../virt/kvm
10common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o 7common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index d93a2c0474bf..89aa114a2cba 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling diagnose instructions 3 * handling diagnose instructions
3 * 4 *
4 * Copyright IBM Corp. 2008, 2011 5 * Copyright IBM Corp. 2008, 2011
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */ 9 */
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index bec42b852246..f4c51756c462 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * access guest memory 3 * access guest memory
3 * 4 *
4 * Copyright IBM Corp. 2008, 2014 5 * Copyright IBM Corp. 2008, 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */ 8 */
12 9
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index bcbd86621d01..b5f3e82006d0 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * kvm guest debug support 3 * kvm guest debug support
3 * 4 *
4 * Copyright IBM Corp. 2014 5 * Copyright IBM Corp. 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> 7 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
11 */ 8 */
12#include <linux/kvm_host.h> 9#include <linux/kvm_host.h>
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 8fe034beb623..9c7d70715862 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * in-kernel handling for sie intercepts 3 * in-kernel handling for sie intercepts
3 * 4 *
4 * Copyright IBM Corp. 2008, 2014 5 * Copyright IBM Corp. 2008, 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */ 9 */
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index fa557372d600..024ad8bcc516 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling kvm guest interrupts 3 * handling kvm guest interrupts
3 * 4 *
4 * Copyright IBM Corp. 2008, 2015 5 * Copyright IBM Corp. 2008, 2015
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 */ 8 */
12 9
diff --git a/arch/s390/kvm/irq.h b/arch/s390/kvm/irq.h
index d98e4159643d..484608c71dd0 100644
--- a/arch/s390/kvm/irq.h
+++ b/arch/s390/kvm/irq.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * s390 irqchip routines 3 * s390 irqchip routines
3 * 4 *
4 * Copyright IBM Corp. 2014 5 * Copyright IBM Corp. 2014
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 7 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
11 */ 8 */
12#ifndef __KVM_IRQ_H 9#ifndef __KVM_IRQ_H
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 9614aea5839b..ec8b68e97d3c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,11 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * hosting zSeries kernel virtual machines 3 * hosting IBM Z kernel virtual machines (s390x)
3 * 4 *
4 * Copyright IBM Corp. 2008, 2009 5 * Copyright IBM Corp. 2008, 2017
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 * 6 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
@@ -3808,6 +3805,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3808 r = -EINVAL; 3805 r = -EINVAL;
3809 break; 3806 break;
3810 } 3807 }
3808 /* do not use irq_state.flags, it will break old QEMUs */
3811 r = kvm_s390_set_irq_state(vcpu, 3809 r = kvm_s390_set_irq_state(vcpu,
3812 (void __user *) irq_state.buf, 3810 (void __user *) irq_state.buf,
3813 irq_state.len); 3811 irq_state.len);
@@ -3823,6 +3821,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3823 r = -EINVAL; 3821 r = -EINVAL;
3824 break; 3822 break;
3825 } 3823 }
3824 /* do not use irq_state.flags, it will break old QEMUs */
3826 r = kvm_s390_get_irq_state(vcpu, 3825 r = kvm_s390_get_irq_state(vcpu,
3827 (__u8 __user *) irq_state.buf, 3826 (__u8 __user *) irq_state.buf,
3828 irq_state.len); 3827 irq_state.len);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 10d65dfbc306..5e46ba429bcb 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,12 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * definition for kvm on s390 3 * definition for kvm on s390
3 * 4 *
4 * Copyright IBM Corp. 2008, 2009 5 * Copyright IBM Corp. 2008, 2009
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Christian Ehrhardt <ehrhardt@de.ibm.com> 9 * Christian Ehrhardt <ehrhardt@de.ibm.com>
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c954ac49eee4..572496c688cc 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling privileged instructions 3 * handling privileged instructions
3 * 4 *
4 * Copyright IBM Corp. 2008, 2013 5 * Copyright IBM Corp. 2008, 2013
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 */ 9 */
@@ -235,8 +232,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
235 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); 232 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
236 return -EAGAIN; 233 return -EAGAIN;
237 } 234 }
238 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
239 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
240 return 0; 235 return 0;
241} 236}
242 237
@@ -247,6 +242,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
247 int reg1, reg2; 242 int reg1, reg2;
248 int rc; 243 int rc;
249 244
245 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
246 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
247
250 rc = try_handle_skey(vcpu); 248 rc = try_handle_skey(vcpu);
251 if (rc) 249 if (rc)
252 return rc != -EAGAIN ? rc : 0; 250 return rc != -EAGAIN ? rc : 0;
@@ -276,6 +274,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
276 int reg1, reg2; 274 int reg1, reg2;
277 int rc; 275 int rc;
278 276
277 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
278 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
279
279 rc = try_handle_skey(vcpu); 280 rc = try_handle_skey(vcpu);
280 if (rc) 281 if (rc)
281 return rc != -EAGAIN ? rc : 0; 282 return rc != -EAGAIN ? rc : 0;
@@ -311,6 +312,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
311 int reg1, reg2; 312 int reg1, reg2;
312 int rc; 313 int rc;
313 314
315 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
316 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
317
314 rc = try_handle_skey(vcpu); 318 rc = try_handle_skey(vcpu);
315 if (rc) 319 if (rc)
316 return rc != -EAGAIN ? rc : 0; 320 return rc != -EAGAIN ? rc : 0;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 9d592ef4104b..c1f5cde2c878 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * handling interprocessor communication 3 * handling interprocessor communication
3 * 4 *
4 * Copyright IBM Corp. 2008, 2013 5 * Copyright IBM Corp. 2008, 2013
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com> 8 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Christian Ehrhardt <ehrhardt@de.ibm.com> 9 * Christian Ehrhardt <ehrhardt@de.ibm.com>
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a311938b63b3..5d6ae0326d9e 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * kvm nested virtualization support for s390x 3 * kvm nested virtualization support for s390x
3 * 4 *
4 * Copyright IBM Corp. 2016 5 * Copyright IBM Corp. 2016
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com> 7 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
11 */ 8 */
12#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 33c0f8bb0f33..5335ba3c850e 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
75 if (!(pmd_val(pmd) & _PAGE_VALID)) 75 if (!(pmd_val(pmd) & _PAGE_VALID))
76 return 0; 76 return 0;
77 77
78 if (!pmd_access_permitted(pmd, write)) 78 if (write && !pmd_write(pmd))
79 return 0; 79 return 0;
80 80
81 refs = 0; 81 refs = 0;
@@ -114,7 +114,7 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
114 if (!(pud_val(pud) & _PAGE_VALID)) 114 if (!(pud_val(pud) & _PAGE_VALID))
115 return 0; 115 return 0;
116 116
117 if (!pud_access_permitted(pud, write)) 117 if (write && !pud_write(pud))
118 return 0; 118 return 0;
119 119
120 refs = 0; 120 refs = 0;
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 50a32c33d729..73c57f614c9e 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,4 +1,5 @@
1generic-y += barrier.h 1generic-y += barrier.h
2generic-y += bpf_perf_event.h
2generic-y += bug.h 3generic-y += bug.h
3generic-y += clkdev.h 4generic-y += clkdev.h
4generic-y += current.h 5generic-y += current.h
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 6293a8768a91..672441c008c7 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -400,6 +400,7 @@ config UNWINDER_FRAME_POINTER
400config UNWINDER_GUESS 400config UNWINDER_GUESS
401 bool "Guess unwinder" 401 bool "Guess unwinder"
402 depends on EXPERT 402 depends on EXPERT
403 depends on !STACKDEPOT
403 ---help--- 404 ---help---
404 This option enables the "guess" unwinder for unwinding kernel stack 405 This option enables the "guess" unwinder for unwinding kernel stack
405 traces. It scans the stack and reports every kernel text address it 406 traces. It scans the stack and reports every kernel text address it
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 1e9c322e973a..f25e1530e064 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -80,6 +80,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
80ifdef CONFIG_X86_64 80ifdef CONFIG_X86_64
81 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o 81 vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
82 vmlinux-objs-y += $(obj)/mem_encrypt.o 82 vmlinux-objs-y += $(obj)/mem_encrypt.o
83 vmlinux-objs-y += $(obj)/pgtable_64.o
83endif 84endif
84 85
85$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone 86$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 20919b4f3133..fc313e29fe2c 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -305,10 +305,18 @@ ENTRY(startup_64)
305 leaq boot_stack_end(%rbx), %rsp 305 leaq boot_stack_end(%rbx), %rsp
306 306
307#ifdef CONFIG_X86_5LEVEL 307#ifdef CONFIG_X86_5LEVEL
308 /* Check if 5-level paging has already enabled */ 308 /*
309 movq %cr4, %rax 309 * Check if we need to enable 5-level paging.
310 testl $X86_CR4_LA57, %eax 310 * RSI holds real mode data and needs to be preserved across
311 jnz lvl5 311 * a function call.
312 */
313 pushq %rsi
314 call l5_paging_required
315 popq %rsi
316
317 /* If l5_paging_required() returned zero, we're done here. */
318 cmpq $0, %rax
319 je lvl5
312 320
313 /* 321 /*
314 * At this point we are in long mode with 4-level paging enabled, 322 * At this point we are in long mode with 4-level paging enabled,
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b50c42455e25..98761a1576ce 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -169,6 +169,16 @@ void __puthex(unsigned long value)
169 } 169 }
170} 170}
171 171
172static bool l5_supported(void)
173{
174 /* Check if leaf 7 is supported. */
175 if (native_cpuid_eax(0) < 7)
176 return 0;
177
178 /* Check if la57 is supported. */
179 return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31));
180}
181
172#if CONFIG_X86_NEED_RELOCS 182#if CONFIG_X86_NEED_RELOCS
173static void handle_relocations(void *output, unsigned long output_len, 183static void handle_relocations(void *output, unsigned long output_len,
174 unsigned long virt_addr) 184 unsigned long virt_addr)
@@ -362,6 +372,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
362 console_init(); 372 console_init();
363 debug_putstr("early console in extract_kernel\n"); 373 debug_putstr("early console in extract_kernel\n");
364 374
375 if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) {
376 error("This Linux kernel as configured requires 5-level paging\n"
377 "This CPU does not support the required 'cr4.la57' feature\n"
378 "Unable to boot - please use a kernel appropriate for your CPU\n");
379 }
380
365 free_mem_ptr = heap; /* Heap */ 381 free_mem_ptr = heap; /* Heap */
366 free_mem_end_ptr = heap + BOOT_HEAP_SIZE; 382 free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
367 383
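
l5_supported() mirrors a check that can be made from user space: CPUID leaf 7 (subleaf 0) reports 57-bit linear addressing in ECX bit 16, which is the value X86_FEATURE_LA57 & 31 resolves to in current kernels. A sketch using GCC's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 7 must exist before it may be queried. */
	if (__get_cpuid(0, &eax, &ebx, &ecx, &edx) == 0 || eax < 7)
		return 1;

	__cpuid_count(7, 0, eax, ebx, ecx, edx);
	printf("la57 (5-level paging) %ssupported\n",
	       (ecx & (1u << 16)) ? "" : "not ");
	return 0;
}
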
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
new file mode 100644
index 000000000000..b4469a37e9a1
--- /dev/null
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -0,0 +1,28 @@
1#include <asm/processor.h>
2
3/*
4 * __force_order is used by special_insns.h asm code to force instruction
5 * serialization.
6 *
7 * It is not referenced from the code, but GCC < 5 with -fPIE would fail
8 * due to an undefined symbol. Define it to make these ancient GCCs work.
9 */
10unsigned long __force_order;
11
12int l5_paging_required(void)
13{
14 /* Check if leaf 7 is supported. */
15
16 if (native_cpuid_eax(0) < 7)
17 return 0;
18
19 /* Check if la57 is supported. */
20 if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
21 return 0;
22
23 /* Check if 5-level paging has already been enabled. */
24 if (native_read_cr4() & X86_CR4_LA57)
25 return 0;
26
27 return 1;
28}
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
index 49f4970f693b..c9e8499fbfe7 100644
--- a/arch/x86/boot/genimage.sh
+++ b/arch/x86/boot/genimage.sh
@@ -44,9 +44,9 @@ FDINITRD=$6
44 44
45# Make sure the files actually exist 45# Make sure the files actually exist
46verify "$FBZIMAGE" 46verify "$FBZIMAGE"
47verify "$MTOOLSRC"
48 47
49genbzdisk() { 48genbzdisk() {
49 verify "$MTOOLSRC"
50 mformat a: 50 mformat a:
51 syslinux $FIMAGE 51 syslinux $FIMAGE
52 echo "$KCMDLINE" | mcopy - a:syslinux.cfg 52 echo "$KCMDLINE" | mcopy - a:syslinux.cfg
@@ -57,6 +57,7 @@ genbzdisk() {
57} 57}
58 58
59genfdimage144() { 59genfdimage144() {
60 verify "$MTOOLSRC"
60 dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null 61 dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
61 mformat v: 62 mformat v:
62 syslinux $FIMAGE 63 syslinux $FIMAGE
@@ -68,6 +69,7 @@ genfdimage144() {
68} 69}
69 70
70genfdimage288() { 71genfdimage288() {
72 verify "$MTOOLSRC"
71 dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null 73 dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
72 mformat w: 74 mformat w:
73 syslinux $FIMAGE 75 syslinux $FIMAGE
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index 399a29d067d6..cb91a64a99e7 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
59 59
60 salsa20_ivsetup(ctx, walk.iv); 60 salsa20_ivsetup(ctx, walk.iv);
61 61
62 if (likely(walk.nbytes == nbytes))
63 {
64 salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
65 walk.dst.virt.addr, nbytes);
66 return blkcipher_walk_done(desc, &walk, 0);
67 }
68
69 while (walk.nbytes >= 64) { 62 while (walk.nbytes >= 64) {
70 salsa20_encrypt_bytes(ctx, walk.src.virt.addr, 63 salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
71 walk.dst.virt.addr, 64 walk.dst.virt.addr,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 034caa1a084e..b24b1c8b3979 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -214,8 +214,6 @@ struct x86_emulate_ops {
214 void (*halt)(struct x86_emulate_ctxt *ctxt); 214 void (*halt)(struct x86_emulate_ctxt *ctxt);
215 void (*wbinvd)(struct x86_emulate_ctxt *ctxt); 215 void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
216 int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt); 216 int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
217 void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
218 void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
219 int (*intercept)(struct x86_emulate_ctxt *ctxt, 217 int (*intercept)(struct x86_emulate_ctxt *ctxt,
220 struct x86_instruction_info *info, 218 struct x86_instruction_info *info,
221 enum x86_intercept_stage stage); 219 enum x86_intercept_stage stage);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 977de5fb968b..516798431328 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -536,7 +536,20 @@ struct kvm_vcpu_arch {
536 struct kvm_mmu_memory_cache mmu_page_cache; 536 struct kvm_mmu_memory_cache mmu_page_cache;
537 struct kvm_mmu_memory_cache mmu_page_header_cache; 537 struct kvm_mmu_memory_cache mmu_page_header_cache;
538 538
539 /*
540 * QEMU userspace and the guest each have their own FPU state.
541 * In vcpu_run, we switch between the user and guest FPU contexts.
542 * While running a VCPU, the VCPU thread will have the guest FPU
543 * context.
544 *
545 * Note that while the PKRU state lives inside the fpu registers,
546 * it is switched out separately at VMENTER and VMEXIT time. The
547 * "guest_fpu" state here contains the guest FPU context, with the
548 * host PRKU bits.
549 */
550 struct fpu user_fpu;
539 struct fpu guest_fpu; 551 struct fpu guest_fpu;
552
540 u64 xcr0; 553 u64 xcr0;
541 u64 guest_supported_xcr0; 554 u64 guest_supported_xcr0;
542 u32 guest_xstate_size; 555 u32 guest_xstate_size;
@@ -1435,4 +1448,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
1435#define put_smstate(type, buf, offset, val) \ 1448#define put_smstate(type, buf, offset, val) \
1436 *(type *)((buf) + (offset) - 0x7e00) = val 1449 *(type *)((buf) + (offset) - 0x7e00) = val
1437 1450
1451void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
1452 unsigned long start, unsigned long end);
1453
1438#endif /* _ASM_X86_KVM_HOST_H */ 1454#endif /* _ASM_X86_KVM_HOST_H */
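
The new user_fpu/guest_fpu pair implies a simple discipline: save whichever context is live before restoring the other, once per entry to and exit from guest mode rather than around every emulated instruction (which is also why the emulator's get_fpu/put_fpu hooks are removed further down). A schematic with all types and helpers hypothetical, and with PKRU deliberately left out since the comment says it is switched separately:

#include <string.h>

/* Hypothetical FPU container; the real layout is arch-specific. */
struct toy_fpu { unsigned char regs[512]; };

struct toy_vcpu {
	struct toy_fpu user_fpu;	/* userspace (QEMU) context */
	struct toy_fpu guest_fpu;	/* guest context */
};

static struct toy_fpu hw;		/* stand-in for the FPU registers */

static void fpu_save(struct toy_fpu *dst)
{
	memcpy(dst, &hw, sizeof(hw));
}

static void fpu_restore(const struct toy_fpu *src)
{
	memcpy(&hw, src, sizeof(hw));
}

/* Entering vcpu_run: hardware currently holds the user context. */
static void toy_load_guest_fpu(struct toy_vcpu *vcpu)
{
	fpu_save(&vcpu->user_fpu);
	fpu_restore(&vcpu->guest_fpu);
}

/* Returning to userspace: hand the user context back. */
static void toy_put_guest_fpu(struct toy_vcpu *vcpu)
{
	fpu_save(&vcpu->guest_fpu);
	fpu_restore(&vcpu->user_fpu);
}

int main(void)
{
	struct toy_vcpu vcpu;

	memset(&vcpu, 0, sizeof(vcpu));
	toy_load_guest_fpu(&vcpu);	/* run the guest... */
	toy_put_guest_fpu(&vcpu);	/* ...then return to userspace */
	return 0;
}
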
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 982c325dad33..8be6afb58471 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -12,7 +12,13 @@
12 12
13/* image of the saved processor state */ 13/* image of the saved processor state */
14struct saved_context { 14struct saved_context {
15 u16 es, fs, gs, ss; 15 /*
16 * On x86_32, all segment registers, with the possible exception of
17 * gs, are saved at kernel entry in pt_regs.
18 */
19#ifdef CONFIG_X86_32_LAZY_GS
20 u16 gs;
21#endif
16 unsigned long cr0, cr2, cr3, cr4; 22 unsigned long cr0, cr2, cr3, cr4;
17 u64 misc_enable; 23 u64 misc_enable;
18 bool misc_enable_saved; 24 bool misc_enable_saved;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 7306e911faee..a7af9f53c0cb 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -20,8 +20,20 @@
20 */ 20 */
21struct saved_context { 21struct saved_context {
22 struct pt_regs regs; 22 struct pt_regs regs;
23 u16 ds, es, fs, gs, ss; 23
24 unsigned long gs_base, gs_kernel_base, fs_base; 24 /*
25 * User CS and SS are saved in current_pt_regs(). The rest of the
26 * segment selectors need to be saved and restored here.
27 */
28 u16 ds, es, fs, gs;
29
30 /*
31 * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
32 * so we save them separately. We save the kernelmode GSBASE to
33 * restore percpu access after resume.
34 */
35 unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
36
25 unsigned long cr0, cr2, cr3, cr4, cr8; 37 unsigned long cr0, cr2, cr3, cr4, cr8;
26 u64 misc_enable; 38 u64 misc_enable;
27 bool misc_enable_saved; 39 bool misc_enable_saved;
@@ -30,8 +42,7 @@ struct saved_context {
30 u16 gdt_pad; /* Unused */ 42 u16 gdt_pad; /* Unused */
31 struct desc_ptr gdt_desc; 43 struct desc_ptr gdt_desc;
32 u16 idt_pad; 44 u16 idt_pad;
33 u16 idt_limit; 45 struct desc_ptr idt;
34 unsigned long idt_base;
35 u16 ldt; 46 u16 ldt;
36 u16 tss; 47 u16 tss;
37 unsigned long tr; 48 unsigned long tr;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 05a97d5fe298..35cb20994e32 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -106,7 +106,7 @@ EXPORT_SYMBOL(__max_logical_packages);
106static unsigned int logical_packages __read_mostly; 106static unsigned int logical_packages __read_mostly;
107 107
108/* Maximum number of SMT threads on any online core */ 108/* Maximum number of SMT threads on any online core */
109int __max_smt_threads __read_mostly; 109int __read_mostly __max_smt_threads = 1;
110 110
111/* Flag to indicate if a complete sched domain rebuild is required */ 111/* Flag to indicate if a complete sched domain rebuild is required */
112bool x86_topology_update; 112bool x86_topology_update;
@@ -1304,7 +1304,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1304 * Today neither Intel nor AMD support heterogeneous systems so 1304 * Today neither Intel nor AMD support heterogeneous systems so
1305 * extrapolate the boot cpu's data to all packages. 1305 * extrapolate the boot cpu's data to all packages.
1306 */ 1306 */
1307 ncpus = cpu_data(0).booted_cores * smp_num_siblings; 1307 ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
1308 __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); 1308 __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
1309 pr_info("Max logical packages: %u\n", __max_logical_packages); 1309 pr_info("Max logical packages: %u\n", __max_logical_packages);
1310 1310
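
Initialising __max_smt_threads to 1 keeps the extrapolation above safe on machines where no SMT sibling is ever enumerated, since ncpus would otherwise be 0. Working the arithmetic by hand with assumed example values (nr_cpu_ids = 64, 8 booted cores, 2 SMT threads per core): ncpus = 8 * 2 = 16 and DIV_ROUND_UP(64, 16) = 4 logical packages.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nr_cpu_ids = 64;		/* example values, not probed */
	unsigned int booted_cores = 8;
	unsigned int max_smt_threads = 2;	/* __max_smt_threads, now >= 1 */
	unsigned int ncpus = booted_cores * max_smt_threads;

	printf("Max logical packages: %u\n", DIV_ROUND_UP(nr_cpu_ids, ncpus));
	return 0;
}
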
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e7d04d0c8008..abe74f779f9d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1046,7 +1046,6 @@ static void fetch_register_operand(struct operand *op)
1046 1046
1047static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) 1047static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1048{ 1048{
1049 ctxt->ops->get_fpu(ctxt);
1050 switch (reg) { 1049 switch (reg) {
1051 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break; 1050 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
1052 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break; 1051 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1068,13 +1067,11 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
1068#endif 1067#endif
1069 default: BUG(); 1068 default: BUG();
1070 } 1069 }
1071 ctxt->ops->put_fpu(ctxt);
1072} 1070}
1073 1071
1074static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, 1072static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1075 int reg) 1073 int reg)
1076{ 1074{
1077 ctxt->ops->get_fpu(ctxt);
1078 switch (reg) { 1075 switch (reg) {
1079 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break; 1076 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
1080 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break; 1077 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1096,12 +1093,10 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
1096#endif 1093#endif
1097 default: BUG(); 1094 default: BUG();
1098 } 1095 }
1099 ctxt->ops->put_fpu(ctxt);
1100} 1096}
1101 1097
1102static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 1098static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1103{ 1099{
1104 ctxt->ops->get_fpu(ctxt);
1105 switch (reg) { 1100 switch (reg) {
1106 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break; 1101 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1107 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break; 1102 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1113,12 +1108,10 @@ static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1113 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break; 1108 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1114 default: BUG(); 1109 default: BUG();
1115 } 1110 }
1116 ctxt->ops->put_fpu(ctxt);
1117} 1111}
1118 1112
1119static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg) 1113static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1120{ 1114{
1121 ctxt->ops->get_fpu(ctxt);
1122 switch (reg) { 1115 switch (reg) {
1123 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break; 1116 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1124 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break; 1117 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1130,7 +1123,6 @@ static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1130 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break; 1123 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1131 default: BUG(); 1124 default: BUG();
1132 } 1125 }
1133 ctxt->ops->put_fpu(ctxt);
1134} 1126}
1135 1127
1136static int em_fninit(struct x86_emulate_ctxt *ctxt) 1128static int em_fninit(struct x86_emulate_ctxt *ctxt)
@@ -1138,9 +1130,7 @@ static int em_fninit(struct x86_emulate_ctxt *ctxt)
1138 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1130 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1139 return emulate_nm(ctxt); 1131 return emulate_nm(ctxt);
1140 1132
1141 ctxt->ops->get_fpu(ctxt);
1142 asm volatile("fninit"); 1133 asm volatile("fninit");
1143 ctxt->ops->put_fpu(ctxt);
1144 return X86EMUL_CONTINUE; 1134 return X86EMUL_CONTINUE;
1145} 1135}
1146 1136
@@ -1151,9 +1141,7 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1151 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1141 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1152 return emulate_nm(ctxt); 1142 return emulate_nm(ctxt);
1153 1143
1154 ctxt->ops->get_fpu(ctxt);
1155 asm volatile("fnstcw %0": "+m"(fcw)); 1144 asm volatile("fnstcw %0": "+m"(fcw));
1156 ctxt->ops->put_fpu(ctxt);
1157 1145
1158 ctxt->dst.val = fcw; 1146 ctxt->dst.val = fcw;
1159 1147
@@ -1167,9 +1155,7 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1167 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM)) 1155 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1168 return emulate_nm(ctxt); 1156 return emulate_nm(ctxt);
1169 1157
1170 ctxt->ops->get_fpu(ctxt);
1171 asm volatile("fnstsw %0": "+m"(fsw)); 1158 asm volatile("fnstsw %0": "+m"(fsw));
1172 ctxt->ops->put_fpu(ctxt);
1173 1159
1174 ctxt->dst.val = fsw; 1160 ctxt->dst.val = fsw;
1175 1161
@@ -4001,12 +3987,8 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4001 if (rc != X86EMUL_CONTINUE) 3987 if (rc != X86EMUL_CONTINUE)
4002 return rc; 3988 return rc;
4003 3989
4004 ctxt->ops->get_fpu(ctxt);
4005
4006 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); 3990 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4007 3991
4008 ctxt->ops->put_fpu(ctxt);
4009
4010 if (rc != X86EMUL_CONTINUE) 3992 if (rc != X86EMUL_CONTINUE)
4011 return rc; 3993 return rc;
4012 3994
@@ -4049,8 +4031,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4049 if (rc != X86EMUL_CONTINUE) 4031 if (rc != X86EMUL_CONTINUE)
4050 return rc; 4032 return rc;
4051 4033
4052 ctxt->ops->get_fpu(ctxt);
4053
4054 if (size < __fxstate_size(16)) { 4034 if (size < __fxstate_size(16)) {
4055 rc = fxregs_fixup(&fx_state, size); 4035 rc = fxregs_fixup(&fx_state, size);
4056 if (rc != X86EMUL_CONTINUE) 4036 if (rc != X86EMUL_CONTINUE)
@@ -4066,8 +4046,6 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4066 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); 4046 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4067 4047
4068out: 4048out:
4069 ctxt->ops->put_fpu(ctxt);
4070
4071 return rc; 4049 return rc;
4072} 4050}
4073 4051
@@ -5317,9 +5295,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
5317{ 5295{
5318 int rc; 5296 int rc;
5319 5297
5320 ctxt->ops->get_fpu(ctxt);
5321 rc = asm_safe("fwait"); 5298 rc = asm_safe("fwait");
5322 ctxt->ops->put_fpu(ctxt);
5323 5299
5324 if (unlikely(rc != X86EMUL_CONTINUE)) 5300 if (unlikely(rc != X86EMUL_CONTINUE))
5325 return emulate_exception(ctxt, MF_VECTOR, 0, false); 5301 return emulate_exception(ctxt, MF_VECTOR, 0, false);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4704aaf6d19e..8eba631c4dbd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6751,16 +6751,10 @@ static __init int hardware_setup(void)
6751 goto out; 6751 goto out;
6752 } 6752 }
6753 6753
6754 vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
6755 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 6754 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
6756 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 6755 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
6757 6756
6758 /*
6759 * Allow direct access to the PC debug port (it is often used for I/O
6760 * delays, but the vmexits simply slow things down).
6761 */
6762 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); 6757 memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
6763 clear_bit(0x80, vmx_io_bitmap_a);
6764 6758
6765 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); 6759 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
6766 6760
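Two independent fixes meet in this hardware_setup() hunk: the duplicated __get_free_page() assignment to vmx_io_bitmap_b is dropped (the bitmap was already allocated a few lines earlier, so the second call leaked the first page), and the special-case passthrough of I/O port 0x80 is removed, so writes to the PC debug port now vmexit like any other port. The leak pattern, as a generic sketch with a hypothetical name:

/* Sketch of the bug class, not the kernel code: a second assignment
 * to the same pointer drops the first allocation on the floor.
 */
static unsigned long *alloc_bitmap_buggy(void)
{
	unsigned long *bitmap;

	bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!bitmap)
		return NULL;
	/* ...later, a stray duplicate allocation leaks the page above: */
	bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
	return bitmap;
}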
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eee8e7faf1af..faf843c9b916 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2937,7 +2937,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2937 srcu_read_unlock(&vcpu->kvm->srcu, idx); 2937 srcu_read_unlock(&vcpu->kvm->srcu, idx);
2938 pagefault_enable(); 2938 pagefault_enable();
2939 kvm_x86_ops->vcpu_put(vcpu); 2939 kvm_x86_ops->vcpu_put(vcpu);
2940 kvm_put_guest_fpu(vcpu);
2941 vcpu->arch.last_host_tsc = rdtsc(); 2940 vcpu->arch.last_host_tsc = rdtsc();
2942} 2941}
2943 2942
@@ -5252,17 +5251,6 @@ static void emulator_halt(struct x86_emulate_ctxt *ctxt)
5252 emul_to_vcpu(ctxt)->arch.halt_request = 1; 5251 emul_to_vcpu(ctxt)->arch.halt_request = 1;
5253} 5252}
5254 5253
5255static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
5256{
5257 preempt_disable();
5258 kvm_load_guest_fpu(emul_to_vcpu(ctxt));
5259}
5260
5261static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
5262{
5263 preempt_enable();
5264}
5265
5266static int emulator_intercept(struct x86_emulate_ctxt *ctxt, 5254static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
5267 struct x86_instruction_info *info, 5255 struct x86_instruction_info *info,
5268 enum x86_intercept_stage stage) 5256 enum x86_intercept_stage stage)
@@ -5340,8 +5328,6 @@ static const struct x86_emulate_ops emulate_ops = {
5340 .halt = emulator_halt, 5328 .halt = emulator_halt,
5341 .wbinvd = emulator_wbinvd, 5329 .wbinvd = emulator_wbinvd,
5342 .fix_hypercall = emulator_fix_hypercall, 5330 .fix_hypercall = emulator_fix_hypercall,
5343 .get_fpu = emulator_get_fpu,
5344 .put_fpu = emulator_put_fpu,
5345 .intercept = emulator_intercept, 5331 .intercept = emulator_intercept,
5346 .get_cpuid = emulator_get_cpuid, 5332 .get_cpuid = emulator_get_cpuid,
5347 .set_nmi_mask = emulator_set_nmi_mask, 5333 .set_nmi_mask = emulator_set_nmi_mask,
@@ -6778,6 +6764,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
6778 kvm_x86_ops->tlb_flush(vcpu); 6764 kvm_x86_ops->tlb_flush(vcpu);
6779} 6765}
6780 6766
6767void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
6768 unsigned long start, unsigned long end)
6769{
6770 unsigned long apic_address;
6771
6772 /*
6773 * The physical address of the APIC access page is stored in the VMCS.
6774 * Update it when it becomes invalid.
6775 */
6776 apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
6777 if (start <= apic_address && apic_address < end)
6778 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
6779}
6780
6781void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) 6781void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
6782{ 6782{
6783 struct page *page = NULL; 6783 struct page *page = NULL;
@@ -6952,7 +6952,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
6952 preempt_disable(); 6952 preempt_disable();
6953 6953
6954 kvm_x86_ops->prepare_guest_switch(vcpu); 6954 kvm_x86_ops->prepare_guest_switch(vcpu);
6955 kvm_load_guest_fpu(vcpu);
6956 6955
6957 /* 6956 /*
6958 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt 6957 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
@@ -7297,12 +7296,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7297 } 7296 }
7298 } 7297 }
7299 7298
7299 kvm_load_guest_fpu(vcpu);
7300
7300 if (unlikely(vcpu->arch.complete_userspace_io)) { 7301 if (unlikely(vcpu->arch.complete_userspace_io)) {
7301 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; 7302 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
7302 vcpu->arch.complete_userspace_io = NULL; 7303 vcpu->arch.complete_userspace_io = NULL;
7303 r = cui(vcpu); 7304 r = cui(vcpu);
7304 if (r <= 0) 7305 if (r <= 0)
7305 goto out; 7306 goto out_fpu;
7306 } else 7307 } else
7307 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); 7308 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
7308 7309
@@ -7311,6 +7312,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
7311 else 7312 else
7312 r = vcpu_run(vcpu); 7313 r = vcpu_run(vcpu);
7313 7314
7315out_fpu:
7316 kvm_put_guest_fpu(vcpu);
7314out: 7317out:
7315 post_kvm_run_save(vcpu); 7318 post_kvm_run_save(vcpu);
7316 kvm_sigset_deactivate(vcpu); 7319 kvm_sigset_deactivate(vcpu);
@@ -7704,32 +7707,25 @@ static void fx_init(struct kvm_vcpu *vcpu)
7704 vcpu->arch.cr0 |= X86_CR0_ET; 7707 vcpu->arch.cr0 |= X86_CR0_ET;
7705} 7708}
7706 7709
7710/* Swap (qemu) user FPU context for the guest FPU context. */
7707void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) 7711void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
7708{ 7712{
7709 if (vcpu->guest_fpu_loaded) 7713 preempt_disable();
7710 return; 7714 copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
7711
7712 /*
7713 * Restore all possible states in the guest,
7714 * and assume host would use all available bits.
7715 * Guest xcr0 would be loaded later.
7716 */
7717 vcpu->guest_fpu_loaded = 1;
7718 __kernel_fpu_begin();
7719 /* PKRU is separately restored in kvm_x86_ops->run. */ 7715 /* PKRU is separately restored in kvm_x86_ops->run. */
7720 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, 7716 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
7721 ~XFEATURE_MASK_PKRU); 7717 ~XFEATURE_MASK_PKRU);
7718 preempt_enable();
7722 trace_kvm_fpu(1); 7719 trace_kvm_fpu(1);
7723} 7720}
7724 7721
7722/* When vcpu_run ends, restore user space FPU context. */
7725void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) 7723void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
7726{ 7724{
7727 if (!vcpu->guest_fpu_loaded) 7725 preempt_disable();
7728 return;
7729
7730 vcpu->guest_fpu_loaded = 0;
7731 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); 7726 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
7732 __kernel_fpu_end(); 7727 copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
7728 preempt_enable();
7733 ++vcpu->stat.fpu_reload; 7729 ++vcpu->stat.fpu_reload;
7734 trace_kvm_fpu(0); 7730 trace_kvm_fpu(0);
7735} 7731}
@@ -7846,7 +7842,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
7846 * To avoid having the INIT path from kvm_apic_has_events() be 7842 * To avoid having the INIT path from kvm_apic_has_events() be
7847 * called with a loaded FPU, which would not let userspace fix the state. 7843 * called with a loaded FPU, which would not let userspace fix the state.
7848 */ 7844 */
7849 kvm_put_guest_fpu(vcpu); 7845 if (init_event)
7846 kvm_put_guest_fpu(vcpu);
7850 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave, 7847 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
7851 XFEATURE_MASK_BNDREGS); 7848 XFEATURE_MASK_BNDREGS);
7852 if (mpx_state_buffer) 7849 if (mpx_state_buffer)
@@ -7855,6 +7852,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
7855 XFEATURE_MASK_BNDCSR); 7852 XFEATURE_MASK_BNDCSR);
7856 if (mpx_state_buffer) 7853 if (mpx_state_buffer)
7857 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr)); 7854 memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
7855 if (init_event)
7856 kvm_load_guest_fpu(vcpu);
7858 } 7857 }
7859 7858
7860 if (!init_event) { 7859 if (!init_event) {
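Taken together, the x86.c hunks hoist guest-FPU switching out of the inner loop: kvm_load_guest_fpu() leaves vcpu_enter_guest(), kvm_put_guest_fpu() leaves kvm_arch_vcpu_put(), and the pair now brackets the whole KVM_RUN ioctl, which is also why the guest_fpu_loaded flag and the emulator's get_fpu/put_fpu hooks disappear. A condensed sketch of the resulting flow (error paths and unrelated setup elided; comments are mine, not the patch's):

/* Hedged sketch of kvm_arch_vcpu_ioctl_run() after this series. */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	kvm_load_guest_fpu(vcpu);  /* save user FPU, load guest FPU once */

	r = vcpu_run(vcpu);        /* many guest entries and exits, with no
	                            * FPU switching left in the hot path */

	kvm_put_guest_fpu(vcpu);   /* save guest FPU, restore user FPU */
	post_kvm_run_save(vcpu);
	return r;
}

The preempt_disable()/preempt_enable() pairs inside the two helpers replace the old __kernel_fpu_begin()/__kernel_fpu_end() bracketing: only the register copies themselves must not be preempted, and between the two calls the normal context switch saves and restores the live registers like any other task's.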
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index c4d55919fac1..e0b85930dd77 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 6e4573b1da34..c45b6ec5357b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -404,11 +404,11 @@ void iounmap(volatile void __iomem *addr)
404 return; 404 return;
405 } 405 }
406 406
407 mmiotrace_iounmap(addr);
408
407 addr = (volatile void __iomem *) 409 addr = (volatile void __iomem *)
408 (PAGE_MASK & (unsigned long __force)addr); 410 (PAGE_MASK & (unsigned long __force)addr);
409 411
410 mmiotrace_iounmap(addr);
411
412 /* Use the vm area unlocked, assuming the caller 412 /* Use the vm area unlocked, assuming the caller
413 ensures there isn't another iounmap for the same address 413 ensures there isn't another iounmap for the same address
414 in parallel. Reuse of the virtual address is prevented by 414 in parallel. Reuse of the virtual address is prevented by
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index c21c2ed04612..58477ec3d66d 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
435 unsigned long flags; 435 unsigned long flags;
436 int ret = 0; 436 int ret = 0;
437 unsigned long size = 0; 437 unsigned long size = 0;
438 unsigned long addr = p->addr & PAGE_MASK;
438 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); 439 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
439 unsigned int l; 440 unsigned int l;
440 pte_t *pte; 441 pte_t *pte;
441 442
442 spin_lock_irqsave(&kmmio_lock, flags); 443 spin_lock_irqsave(&kmmio_lock, flags);
443 if (get_kmmio_probe(p->addr)) { 444 if (get_kmmio_probe(addr)) {
444 ret = -EEXIST; 445 ret = -EEXIST;
445 goto out; 446 goto out;
446 } 447 }
447 448
448 pte = lookup_address(p->addr, &l); 449 pte = lookup_address(addr, &l);
449 if (!pte) { 450 if (!pte) {
450 ret = -EINVAL; 451 ret = -EINVAL;
451 goto out; 452 goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
454 kmmio_count++; 455 kmmio_count++;
455 list_add_rcu(&p->list, &kmmio_probes); 456 list_add_rcu(&p->list, &kmmio_probes);
456 while (size < size_lim) { 457 while (size < size_lim) {
457 if (add_kmmio_fault_page(p->addr + size)) 458 if (add_kmmio_fault_page(addr + size))
458 pr_err("Unable to set page fault.\n"); 459 pr_err("Unable to set page fault.\n");
459 size += page_level_size(l); 460 size += page_level_size(l);
460 } 461 }
@@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
528{ 529{
529 unsigned long flags; 530 unsigned long flags;
530 unsigned long size = 0; 531 unsigned long size = 0;
532 unsigned long addr = p->addr & PAGE_MASK;
531 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); 533 const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
532 struct kmmio_fault_page *release_list = NULL; 534 struct kmmio_fault_page *release_list = NULL;
533 struct kmmio_delayed_release *drelease; 535 struct kmmio_delayed_release *drelease;
534 unsigned int l; 536 unsigned int l;
535 pte_t *pte; 537 pte_t *pte;
536 538
537 pte = lookup_address(p->addr, &l); 539 pte = lookup_address(addr, &l);
538 if (!pte) 540 if (!pte)
539 return; 541 return;
540 542
541 spin_lock_irqsave(&kmmio_lock, flags); 543 spin_lock_irqsave(&kmmio_lock, flags);
542 while (size < size_lim) { 544 while (size < size_lim) {
543 release_kmmio_fault_page(p->addr + size, &release_list); 545 release_kmmio_fault_page(addr + size, &release_list);
544 size += page_level_size(l); 546 size += page_level_size(l);
545 } 547 }
546 list_del_rcu(&p->list); 548 list_del_rcu(&p->list);
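Both kmmio paths now compute a page-aligned base once (addr = p->addr & PAGE_MASK) and use it for the lookup, the arming loop, and the release loop; with an unaligned p->addr, register and unregister could otherwise disagree with the page the fault handler actually armed. The arithmetic, as a small standalone sketch (constants are illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long probe_addr = 0xffffc90000001234UL;  /* mid-page */
	unsigned long len = 0x20;
	unsigned long base = probe_addr & PAGE_MASK;
	/* size_lim keeps the page offset so the loop still reaches the
	 * probe's last byte. */
	unsigned long size_lim = len + (probe_addr & ~PAGE_MASK);
	unsigned long size;

	for (size = 0; size < size_lim; size += PAGE_SIZE)
		printf("arm page at %#lx\n", base + size);
	return 0;
}

The matching ioremap.c hunk above moves mmiotrace_iounmap() before the PAGE_MASK alignment, presumably so kmmio sees the caller's original address and applies one consistent alignment of its own.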
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 1e996df687a3..e663d6bf1328 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -665,6 +665,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
665 unsigned i; 665 unsigned i;
666 u32 base, limit, high; 666 u32 base, limit, high;
667 struct resource *res, *conflict; 667 struct resource *res, *conflict;
668 struct pci_dev *other;
669
670 /* Check that we are the only device of that type */
671 other = pci_get_device(dev->vendor, dev->device, NULL);
672 if (other != dev ||
673 (other = pci_get_device(dev->vendor, dev->device, other))) {
674 /* This is a multi-socket system, don't touch it for now */
675 pci_dev_put(other);
676 return;
677 }
668 678
669 for (i = 0; i < 8; i++) { 679 for (i = 0; i < 8; i++) {
670 pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base); 680 pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
@@ -696,8 +706,13 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
696 res->end = 0xfd00000000ull - 1; 706 res->end = 0xfd00000000ull - 1;
697 707
698 /* Just grab the free area behind system memory for this */ 708 /* Just grab the free area behind system memory for this */
699 while ((conflict = request_resource_conflict(&iomem_resource, res))) 709 while ((conflict = request_resource_conflict(&iomem_resource, res))) {
710 if (conflict->end >= res->end) {
711 kfree(res);
712 return;
713 }
700 res->start = conflict->end + 1; 714 res->start = conflict->end + 1;
715 }
701 716
702 dev_info(&dev->dev, "adding root bus resource %pR\n", res); 717 dev_info(&dev->dev, "adding root bus resource %pR\n", res);
703 718
@@ -714,10 +729,10 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
714 729
715 pci_bus_add_resource(dev->bus, res, 0); 730 pci_bus_add_resource(dev->bus, res, 0);
716} 731}
717DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar); 732DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
718DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar); 733DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
719DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar); 734DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
720DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar); 735DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
721DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar); 736DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
722 737
723#endif 738#endif
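The retry loop around request_resource_conflict() gains a bail-out: previously, a conflict reaching the end of the candidate window pushed res->start past res->end, and the loop could spin or insert a degenerate resource; now the function gives up (and frees res) as soon as no room can remain. Sketch of the control flow:

/* Sketch of the retry-with-bailout pattern used above. */
while ((conflict = request_resource_conflict(&iomem_resource, res))) {
	if (conflict->end >= res->end) {
		/* The conflict covers everything up to the end of the
		 * window: there is no space left to claim. */
		kfree(res);
		return;
	}
	/* Otherwise retry just past the conflicting region. */
	res->start = conflict->end + 1;
}

The multi-socket guard and the EARLY-to-FINAL fixup change support the same theme: pci_get_device() has to scan the whole device list, which is only meaningful once enumeration is complete, presumably the reason the fixups now run at FINAL time.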
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 5191de14f4df..36a28eddb435 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt)
82 /* 82 /*
83 * descriptor tables 83 * descriptor tables
84 */ 84 */
85#ifdef CONFIG_X86_32
86 store_idt(&ctxt->idt); 85 store_idt(&ctxt->idt);
87#else 86
88/* CONFIG_X86_64 */
89 store_idt((struct desc_ptr *)&ctxt->idt_limit);
90#endif
91 /* 87 /*
92 * We save it here, but restore it only in the hibernate case. 88 * We save it here, but restore it only in the hibernate case.
93 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit 89 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
@@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
103 /* 99 /*
104 * segment registers 100 * segment registers
105 */ 101 */
106#ifdef CONFIG_X86_32 102#ifdef CONFIG_X86_32_LAZY_GS
107 savesegment(es, ctxt->es);
108 savesegment(fs, ctxt->fs);
109 savesegment(gs, ctxt->gs); 103 savesegment(gs, ctxt->gs);
110 savesegment(ss, ctxt->ss); 104#endif
111#else 105#ifdef CONFIG_X86_64
112/* CONFIG_X86_64 */ 106 savesegment(gs, ctxt->gs);
113 asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds)); 107 savesegment(fs, ctxt->fs);
114 asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); 108 savesegment(ds, ctxt->ds);
115 asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); 109 savesegment(es, ctxt->es);
116 asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
117 asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
118 110
119 rdmsrl(MSR_FS_BASE, ctxt->fs_base); 111 rdmsrl(MSR_FS_BASE, ctxt->fs_base);
120 rdmsrl(MSR_GS_BASE, ctxt->gs_base); 112 rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
121 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 113 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
122 mtrr_save_fixed_ranges(NULL); 114 mtrr_save_fixed_ranges(NULL);
123 115
124 rdmsrl(MSR_EFER, ctxt->efer); 116 rdmsrl(MSR_EFER, ctxt->efer);
@@ -178,6 +170,9 @@ static void fix_processor_context(void)
178 write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); 170 write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
179 171
180 syscall_init(); /* This sets MSR_*STAR and related */ 172 syscall_init(); /* This sets MSR_*STAR and related */
173#else
174 if (boot_cpu_has(X86_FEATURE_SEP))
175 enable_sep_cpu();
181#endif 176#endif
182 load_TR_desc(); /* This does ltr */ 177 load_TR_desc(); /* This does ltr */
183 load_mm_ldt(current->active_mm); /* This does lldt */ 178 load_mm_ldt(current->active_mm); /* This does lldt */
@@ -190,9 +185,12 @@ static void fix_processor_context(void)
190} 185}
191 186
192/** 187/**
193 * __restore_processor_state - restore the contents of CPU registers saved 188 * __restore_processor_state - restore the contents of CPU registers saved
194 * by __save_processor_state() 189 * by __save_processor_state()
195 * @ctxt - structure to load the registers contents from 190 * @ctxt - structure to load the registers contents from
191 *
192 * The asm code that gets us here will have restored a usable GDT, although
193 * it will be pointing to the wrong alias.
196 */ 194 */
197static void notrace __restore_processor_state(struct saved_context *ctxt) 195static void notrace __restore_processor_state(struct saved_context *ctxt)
198{ 196{
@@ -215,57 +213,50 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
215 write_cr2(ctxt->cr2); 213 write_cr2(ctxt->cr2);
216 write_cr0(ctxt->cr0); 214 write_cr0(ctxt->cr0);
217 215
216 /* Restore the IDT. */
217 load_idt(&ctxt->idt);
218
218 /* 219 /*
219 * now restore the descriptor tables to their proper values 220 * Just in case the asm code got us here with the SS, DS, or ES
220 * ltr is done i fix_processor_context(). 221 * out of sync with the GDT, update them.
221 */ 222 */
222#ifdef CONFIG_X86_32 223 loadsegment(ss, __KERNEL_DS);
223 load_idt(&ctxt->idt); 224 loadsegment(ds, __USER_DS);
224#else 225 loadsegment(es, __USER_DS);
225/* CONFIG_X86_64 */
226 load_idt((const struct desc_ptr *)&ctxt->idt_limit);
227#endif
228 226
229#ifdef CONFIG_X86_64
230 /* 227 /*
231 * We need GSBASE restored before percpu access can work. 228 * Restore percpu access. Percpu access can happen in exception
232 * percpu access can happen in exception handlers or in complicated 229 * handlers or in complicated helpers like load_gs_index().
233 * helpers like load_gs_index().
234 */ 230 */
235 wrmsrl(MSR_GS_BASE, ctxt->gs_base); 231#ifdef CONFIG_X86_64
232 wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
233#else
234 loadsegment(fs, __KERNEL_PERCPU);
235 loadsegment(gs, __KERNEL_STACK_CANARY);
236#endif 236#endif
237 237
238 /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
238 fix_processor_context(); 239 fix_processor_context();
239 240
240 /* 241 /*
241 * Restore segment registers. This happens after restoring the GDT 242 * Now that we have descriptor tables fully restored and working
242 * and LDT, which happen in fix_processor_context(). 243 * exception handling, restore the usermode segments.
243 */ 244 */
244#ifdef CONFIG_X86_32 245#ifdef CONFIG_X86_64
246 loadsegment(ds, ctxt->ds);
245 loadsegment(es, ctxt->es); 247 loadsegment(es, ctxt->es);
246 loadsegment(fs, ctxt->fs); 248 loadsegment(fs, ctxt->fs);
247 loadsegment(gs, ctxt->gs);
248 loadsegment(ss, ctxt->ss);
249
250 /*
251 * sysenter MSRs
252 */
253 if (boot_cpu_has(X86_FEATURE_SEP))
254 enable_sep_cpu();
255#else
256/* CONFIG_X86_64 */
257 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
258 asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
259 asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
260 load_gs_index(ctxt->gs); 249 load_gs_index(ctxt->gs);
261 asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
262 250
263 /* 251 /*
264 * Restore FSBASE and user GSBASE after reloading the respective 252 * Restore FSBASE and GSBASE after restoring the selectors, since
265 * segment selectors. 253 * restoring the selectors clobbers the bases. Keep in mind
254 * that MSR_KERNEL_GS_BASE is horribly misnamed.
266 */ 255 */
267 wrmsrl(MSR_FS_BASE, ctxt->fs_base); 256 wrmsrl(MSR_FS_BASE, ctxt->fs_base);
268 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 257 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
258#elif defined(CONFIG_X86_32_LAZY_GS)
259 loadsegment(gs, ctxt->gs);
269#endif 260#endif
270 261
271 do_fpu_end(); 262 do_fpu_end();
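The rewritten __restore_processor_state() is entirely about ordering; the old #ifdef soup interleaved 32-bit and 64-bit logic in a way that restored segments before the tables they index. A condensed outline of the sequence the new code establishes (annotations are mine; the 64-bit path is shown, the 32-bit path restores the fs/gs percpu selectors instead):

/* 1. Control registers, then the IDT: faults can now be handled.   */
load_idt(&ctxt->idt);

/* 2. Force SS/DS/ES to known selectors in case the asm resume path
 *    left them stale relative to the restored GDT.                 */
loadsegment(ss, __KERNEL_DS);
loadsegment(ds, __USER_DS);
loadsegment(es, __USER_DS);

/* 3. Per-CPU base next: nothing may touch percpu data before this. */
wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);

/* 4. TSS, real GDT, LDT, and the syscall MSRs.                     */
fix_processor_context();

/* 5. Only now the usermode selectors, and the base MSRs last,
 *    since writing a selector clobbers the corresponding base.     */
loadsegment(fs, ctxt->fs);
load_gs_index(ctxt->gs);
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);

The renames to kernelmode_gs_base/usermode_gs_base come from the same series; as the new comment notes, MSR_KERNEL_GS_BASE holds the inactive (usermode) base while the kernel runs.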
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index 6b830d4cb4c8..de58533d3664 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -57,7 +57,7 @@ static u32 xen_apic_read(u32 reg)
57 return 0; 57 return 0;
58 58
59 if (reg == APIC_LVR) 59 if (reg == APIC_LVR)
60 return 0x10; 60 return 0x14;
61#ifdef CONFIG_X86_32 61#ifdef CONFIG_X86_32
62 if (reg == APIC_LDR) 62 if (reg == APIC_LDR)
63 return SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); 63 return SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 358749c38894..415a54ced4d6 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -672,14 +672,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
672 } 672 }
673 673
674 tsgl = areq->tsgl; 674 tsgl = areq->tsgl;
675 for_each_sg(tsgl, sg, areq->tsgl_entries, i) { 675 if (tsgl) {
676 if (!sg_page(sg)) 676 for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
677 continue; 677 if (!sg_page(sg))
678 put_page(sg_page(sg)); 678 continue;
679 } 679 put_page(sg_page(sg));
680 }
680 681
681 if (areq->tsgl && areq->tsgl_entries)
682 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); 682 sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
683 }
683} 684}
684EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); 685EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);
685 686
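The af_alg_free_areq_sgls() fix guards the whole walk, not just the kfree: tsgl can be NULL while tsgl_entries is already nonzero (the allocation failed after the count was computed), and for_each_sg() dereferences the table it is given, so the loop oopsed before the old NULL check was ever reached. Shape of the fix:

/* Sketch: never hand a possibly-NULL table to for_each_sg(). */
tsgl = areq->tsgl;
if (tsgl) {
	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
		if (!sg_page(sg))
			continue;
		put_page(sg_page(sg));
	}
	sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}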
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 805f485ddf1b..48b34e9c6834 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -503,6 +503,7 @@ static void aead_release(void *private)
503 struct aead_tfm *tfm = private; 503 struct aead_tfm *tfm = private;
504 504
505 crypto_free_aead(tfm->aead); 505 crypto_free_aead(tfm->aead);
506 crypto_put_default_null_skcipher2();
506 kfree(tfm); 507 kfree(tfm);
507} 508}
508 509
@@ -535,7 +536,6 @@ static void aead_sock_destruct(struct sock *sk)
535 unsigned int ivlen = crypto_aead_ivsize(tfm); 536 unsigned int ivlen = crypto_aead_ivsize(tfm);
536 537
537 af_alg_pull_tsgl(sk, ctx->used, NULL, 0); 538 af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
538 crypto_put_default_null_skcipher2();
539 sock_kzfree_s(sk, ctx->iv, ivlen); 539 sock_kzfree_s(sk, ctx->iv, ivlen);
540 sock_kfree_s(sk, ctx, ctx->len); 540 sock_kfree_s(sk, ctx, ctx->len);
541 af_alg_release_parent(sk); 541 af_alg_release_parent(sk);
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index c1ca1e86f5c4..a6dcaa659aa8 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -148,8 +148,10 @@ struct pkcs7_message *pkcs7_parse_message(const void *data, size_t datalen)
148 } 148 }
149 149
150 ret = pkcs7_check_authattrs(ctx->msg); 150 ret = pkcs7_check_authattrs(ctx->msg);
151 if (ret < 0) 151 if (ret < 0) {
152 msg = ERR_PTR(ret);
152 goto out; 153 goto out;
154 }
153 155
154 msg = ctx->msg; 156 msg = ctx->msg;
155 ctx->msg = NULL; 157 ctx->msg = NULL;
diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
index f6a009d88a33..1f4e25f10049 100644
--- a/crypto/asymmetric_keys/pkcs7_trust.c
+++ b/crypto/asymmetric_keys/pkcs7_trust.c
@@ -69,7 +69,7 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
69 /* Self-signed certificates form roots of their own, and if we 69 /* Self-signed certificates form roots of their own, and if we
70 * don't know them, then we can't accept them. 70 * don't know them, then we can't accept them.
71 */ 71 */
72 if (x509->next == x509) { 72 if (x509->signer == x509) {
73 kleave(" = -ENOKEY [unknown self-signed]"); 73 kleave(" = -ENOKEY [unknown self-signed]");
74 return -ENOKEY; 74 return -ENOKEY;
75 } 75 }
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 2d93d9eccb4d..39e6de0c2761 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -59,11 +59,8 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
59 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 59 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
60 60
61 /* Digest the message [RFC2315 9.3] */ 61 /* Digest the message [RFC2315 9.3] */
62 ret = crypto_shash_init(desc); 62 ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
63 if (ret < 0) 63 sig->digest);
64 goto error;
65 ret = crypto_shash_finup(desc, pkcs7->data, pkcs7->data_len,
66 sig->digest);
67 if (ret < 0) 64 if (ret < 0)
68 goto error; 65 goto error;
69 pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest); 66 pr_devel("MsgDigest = [%*ph]\n", 8, sig->digest);
@@ -150,7 +147,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
150 pr_devel("Sig %u: Found cert serial match X.509[%u]\n", 147 pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
151 sinfo->index, certix); 148 sinfo->index, certix);
152 149
153 if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) { 150 if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
154 pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n", 151 pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
155 sinfo->index); 152 sinfo->index);
156 continue; 153 continue;
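pkey_algo is now a plain C string (the parser assigns literals like "rsa"), so comparing the fields with == only tests pointer identity; identical names originating from different object files or allocations can live at different addresses. strcmp() compares contents. Standalone demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "rsa";		/* a writable copy, same contents... */
	const char *a = "rsa";		/* ...as this string literal */
	const char *b = buf;

	printf("pointer eq: %d\n", a == b);		/* 0: different objects */
	printf("strcmp  eq: %d\n", strcmp(a, b) == 0);	/* 1: same algorithm */
	return 0;
}

The x509_public_key.c hunk further down makes the same == to strcmp() conversion for the self-signed check.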
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index bc3035ef27a2..de996586762a 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -73,7 +73,7 @@ int public_key_verify_signature(const struct public_key *pkey,
73 char alg_name_buf[CRYPTO_MAX_ALG_NAME]; 73 char alg_name_buf[CRYPTO_MAX_ALG_NAME];
74 void *output; 74 void *output;
75 unsigned int outlen; 75 unsigned int outlen;
76 int ret = -ENOMEM; 76 int ret;
77 77
78 pr_devel("==>%s()\n", __func__); 78 pr_devel("==>%s()\n", __func__);
79 79
@@ -99,6 +99,7 @@ int public_key_verify_signature(const struct public_key *pkey,
99 if (IS_ERR(tfm)) 99 if (IS_ERR(tfm))
100 return PTR_ERR(tfm); 100 return PTR_ERR(tfm);
101 101
102 ret = -ENOMEM;
102 req = akcipher_request_alloc(tfm, GFP_KERNEL); 103 req = akcipher_request_alloc(tfm, GFP_KERNEL);
103 if (!req) 104 if (!req)
104 goto error_free_tfm; 105 goto error_free_tfm;
@@ -127,7 +128,7 @@ int public_key_verify_signature(const struct public_key *pkey,
127 * signature and returns that to us. 128 * signature and returns that to us.
128 */ 129 */
129 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); 130 ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
130 if (ret < 0) 131 if (ret)
131 goto out_free_output; 132 goto out_free_output;
132 133
133 /* Do the actual verification step. */ 134 /* Do the actual verification step. */
@@ -142,6 +143,8 @@ error_free_req:
142error_free_tfm: 143error_free_tfm:
143 crypto_free_akcipher(tfm); 144 crypto_free_akcipher(tfm);
144 pr_devel("<==%s() = %d\n", __func__, ret); 145 pr_devel("<==%s() = %d\n", __func__, ret);
146 if (WARN_ON_ONCE(ret > 0))
147 ret = -EINVAL;
145 return ret; 148 return ret;
146} 149}
147EXPORT_SYMBOL_GPL(public_key_verify_signature); 150EXPORT_SYMBOL_GPL(public_key_verify_signature);
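Two small hardenings in public_key_verify_signature(): ret is preset to -ENOMEM only right before the allocation that can fail that way, instead of at declaration where it could paper over an earlier error, and the crypto_wait_req() result is now checked with if (ret) rather than if (ret < 0). Since the function's contract is zero-or-negative, a stray positive value from a lower layer is trapped at the exit. Sketch of the tail guard, with an illustrative wrapper name:

/* Sketch: clamp a 0-or-negative contract at the single exit point. */
static int verify_clamped(struct akcipher_request *req, struct crypto_wait *cw)
{
	int ret = crypto_wait_req(crypto_akcipher_verify(req), cw);

	/* A positive value here would be a lower-layer bug; never let
	 * callers mistake it for a verified signature. */
	if (WARN_ON_ONCE(ret > 0))
		ret = -EINVAL;
	return ret;
}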
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index dd03fead1ca3..ce2df8c9c583 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
409 ctx->cert->pub->pkey_algo = "rsa"; 409 ctx->cert->pub->pkey_algo = "rsa";
410 410
411 /* Discard the BIT STRING metadata */ 411 /* Discard the BIT STRING metadata */
412 if (vlen < 1 || *(const u8 *)value != 0)
413 return -EBADMSG;
412 ctx->key = value + 1; 414 ctx->key = value + 1;
413 ctx->key_size = vlen - 1; 415 ctx->key_size = vlen - 1;
414 return 0; 416 return 0;
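In DER, the first content octet of a BIT STRING is the count of unused bits in the last byte, and for SubjectPublicKeyInfo key material it must be zero. The added check rejects an empty value or a nonzero unused-bit count before the parser skips that metadata byte, rather than trusting vlen - 1 blindly:

/* Annotated form of the check added above. */
if (vlen < 1 || *(const u8 *)value != 0)
	return -EBADMSG;	/* empty, or unused-bit count is nonzero */
ctx->key = value + 1;		/* key bytes start after the header octet */
ctx->key_size = vlen - 1;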
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index c9013582c026..9338b4558cdc 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -79,11 +79,7 @@ int x509_get_sig_params(struct x509_certificate *cert)
79 desc->tfm = tfm; 79 desc->tfm = tfm;
80 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; 80 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
81 81
82 ret = crypto_shash_init(desc); 82 ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
83 if (ret < 0)
84 goto error_2;
85 might_sleep();
86 ret = crypto_shash_finup(desc, cert->tbs, cert->tbs_size, sig->digest);
87 if (ret < 0) 83 if (ret < 0)
88 goto error_2; 84 goto error_2;
89 85
@@ -135,7 +131,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
135 } 131 }
136 132
137 ret = -EKEYREJECTED; 133 ret = -EKEYREJECTED;
138 if (cert->pub->pkey_algo != cert->sig->pkey_algo) 134 if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
139 goto out; 135 goto out;
140 136
141 ret = public_key_verify_signature(cert->pub, cert->sig); 137 ret = public_key_verify_signature(cert->pub, cert->sig);
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 92871dc2a63e..e74730224f0a 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
195 salg = shash_attr_alg(tb[1], 0, 0); 195 salg = shash_attr_alg(tb[1], 0, 0);
196 if (IS_ERR(salg)) 196 if (IS_ERR(salg))
197 return PTR_ERR(salg); 197 return PTR_ERR(salg);
198 alg = &salg->base;
198 199
200 /* The underlying hash algorithm must be unkeyed */
199 err = -EINVAL; 201 err = -EINVAL;
202 if (crypto_shash_alg_has_setkey(salg))
203 goto out_put_alg;
204
200 ds = salg->digestsize; 205 ds = salg->digestsize;
201 ss = salg->statesize; 206 ss = salg->statesize;
202 alg = &salg->base;
203 if (ds > alg->cra_blocksize || 207 if (ds > alg->cra_blocksize ||
204 ss < alg->cra_blocksize) 208 ss < alg->cra_blocksize)
205 goto out_put_alg; 209 goto out_put_alg;
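hmac_create() now refuses to instantiate on top of a hash that has its own setkey: HMAC's ipad/opad construction assumes a plain unkeyed digest, and before this check something like hmac(ghash) could be instantiated with the inner ghash never receiving the key it requires. The shash.c hunk later in this diff exports shash_no_setkey precisely so the predicate can be phrased as "does this algorithm override setkey". From memory (an assumption; the helper lives in the internal hash header, not in this hunk) it reads roughly:

/* Hedged sketch of the predicate used above. */
static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
{
	return alg->setkey != shash_no_setkey;
}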
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 0b66dc824606..cad395d70d78 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
30 return -EINVAL; 30 return -EINVAL;
31 31
32 if (fips_enabled) { 32 if (fips_enabled) {
33 while (!*ptr && n_sz) { 33 while (n_sz && !*ptr) {
34 ptr++; 34 ptr++;
35 n_sz--; 35 n_sz--;
36 } 36 }
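The operand swap in rsa_get_n()'s leading-zero strip is a textbook short-circuit fix: with !*ptr evaluated first, an all-zero modulus dereferences ptr one byte past the buffer before the length ever reaches the test. Checking n_sz first makes the dereference unreachable once the buffer is exhausted:

#include <stddef.h>

/* Sketch: strip leading zero bytes without reading past the end. */
static const unsigned char *skip_leading_zeros(const unsigned char *ptr,
					       size_t *len)
{
	while (*len && !*ptr) {		/* length check MUST come first */
		ptr++;
		(*len)--;
	}
	return ptr;
}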
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index f550b5d94630..d7da0eea5622 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
188 188
189 salsa20_ivsetup(ctx, walk.iv); 189 salsa20_ivsetup(ctx, walk.iv);
190 190
191 if (likely(walk.nbytes == nbytes))
192 {
193 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
194 walk.src.virt.addr, nbytes);
195 return blkcipher_walk_done(desc, &walk, 0);
196 }
197
198 while (walk.nbytes >= 64) { 191 while (walk.nbytes >= 64) {
199 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, 192 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
200 walk.src.virt.addr, 193 walk.src.virt.addr,
diff --git a/crypto/shash.c b/crypto/shash.c
index 325a14da5827..e849d3ee2e27 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -25,11 +25,12 @@
25 25
26static const struct crypto_type crypto_shash_type; 26static const struct crypto_type crypto_shash_type;
27 27
28static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, 28int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
29 unsigned int keylen) 29 unsigned int keylen)
30{ 30{
31 return -ENOSYS; 31 return -ENOSYS;
32} 32}
33EXPORT_SYMBOL_GPL(shash_no_setkey);
33 34
34static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, 35static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
35 unsigned int keylen) 36 unsigned int keylen)
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index e4ffaeec9ec2..a4c8ad98560d 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1138,7 +1138,7 @@ int acpi_subsys_thaw_noirq(struct device *dev)
1138 * skip all of the subsequent "thaw" callbacks for the device. 1138 * skip all of the subsequent "thaw" callbacks for the device.
1139 */ 1139 */
1140 if (dev_pm_smart_suspend_and_suspended(dev)) { 1140 if (dev_pm_smart_suspend_and_suspended(dev)) {
1141 dev->power.direct_complete = true; 1141 dev_pm_skip_next_resume_phases(dev);
1142 return 0; 1142 return 0;
1143 } 1143 }
1144 1144
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 80854f71559a..0ae6971c2a4c 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * MeidaTek AHCI SATA driver 2 * MediaTek AHCI SATA driver
3 * 3 *
4 * Copyright (c) 2017 MediaTek Inc. 4 * Copyright (c) 2017 MediaTek Inc.
5 * Author: Ryder Lee <ryder.lee@mediatek.com> 5 * Author: Ryder Lee <ryder.lee@mediatek.com>
@@ -25,7 +25,7 @@
25#include <linux/reset.h> 25#include <linux/reset.h>
26#include "ahci.h" 26#include "ahci.h"
27 27
28#define DRV_NAME "ahci" 28#define DRV_NAME "ahci-mtk"
29 29
30#define SYS_CFG 0x14 30#define SYS_CFG 0x14
31#define SYS_CFG_SATA_MSK GENMASK(31, 30) 31#define SYS_CFG_SATA_MSK GENMASK(31, 30)
@@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = {
192}; 192};
193module_platform_driver(mtk_ahci_driver); 193module_platform_driver(mtk_ahci_driver);
194 194
195MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver"); 195MODULE_DESCRIPTION("MediaTek SATA AHCI Driver");
196MODULE_LICENSE("GPL v2"); 196MODULE_LICENSE("GPL v2");
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index b6b0bf76dfc7..2685f28160f7 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -35,6 +35,8 @@
35 35
36/* port register default value */ 36/* port register default value */
37#define AHCI_PORT_PHY_1_CFG 0xa003fffe 37#define AHCI_PORT_PHY_1_CFG 0xa003fffe
38#define AHCI_PORT_PHY2_CFG 0x28184d1f
39#define AHCI_PORT_PHY3_CFG 0x0e081509
38#define AHCI_PORT_TRANS_CFG 0x08000029 40#define AHCI_PORT_TRANS_CFG 0x08000029
39#define AHCI_PORT_AXICC_CFG 0x3fffffff 41#define AHCI_PORT_AXICC_CFG 0x3fffffff
40 42
@@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
183 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, 185 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
184 qpriv->ecc_addr); 186 qpriv->ecc_addr);
185 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 187 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
188 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
189 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
186 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 190 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
187 if (qpriv->is_dmacoherent) 191 if (qpriv->is_dmacoherent)
188 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 192 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
190 194
191 case AHCI_LS2080A: 195 case AHCI_LS2080A:
192 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 196 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
197 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
198 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
193 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 199 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
194 if (qpriv->is_dmacoherent) 200 if (qpriv->is_dmacoherent)
195 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 201 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
201 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, 207 writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
202 qpriv->ecc_addr); 208 qpriv->ecc_addr);
203 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 209 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
210 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
211 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
204 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 212 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
205 if (qpriv->is_dmacoherent) 213 if (qpriv->is_dmacoherent)
206 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 214 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
212 writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A, 220 writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
213 qpriv->ecc_addr); 221 qpriv->ecc_addr);
214 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 222 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
223 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
224 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
215 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 225 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
216 if (qpriv->is_dmacoherent) 226 if (qpriv->is_dmacoherent)
217 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 227 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
@@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
219 229
220 case AHCI_LS2088A: 230 case AHCI_LS2088A:
221 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); 231 writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
232 writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
233 writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
222 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); 234 writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
223 if (qpriv->is_dmacoherent) 235 if (qpriv->is_dmacoherent)
224 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); 236 writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2a882929de4a..8193b38a1cae 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3082 bit = fls(mask) - 1; 3082 bit = fls(mask) - 1;
3083 mask &= ~(1 << bit); 3083 mask &= ~(1 << bit);
3084 3084
3085 /* Mask off all speeds higher than or equal to the current 3085 /*
3086 * one. Force 1.5Gbps if current SPD is not available. 3086 * Mask off all speeds higher than or equal to the current one. At
3087 * this point, if current SPD is not available and we previously
3088 * recorded the link speed from SStatus, the driver has already
3089 * masked off the highest bit so mask should already be 1 or 0.
3090 * Otherwise, we should not force 1.5Gbps on a link where we have
3091 * not previously recorded speed from SStatus. Just return in this
3092 * case.
3087 */ 3093 */
3088 if (spd > 1) 3094 if (spd > 1)
3089 mask &= (1 << (spd - 1)) - 1; 3095 mask &= (1 << (spd - 1)) - 1;
3090 else 3096 else
3091 mask &= 1; 3097 return -EINVAL;
3092 3098
3093 /* were we already at the bottom? */ 3099 /* were we already at the bottom? */
3094 if (!mask) 3100 if (!mask)
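sata_down_spd_limit() tracks allowed link speeds as a bitmask (bit 0 = 1.5Gbps, bit 1 = 3Gbps, bit 2 = 6Gbps) and spd as a 1-based generation number. The rewritten branch stops inventing a 1.5Gbps cap when the current speed is unknown; as the new comment explains, either a previously recorded SStatus value already trimmed the mask, or no recorded speed exists and forcing one would be wrong, so the function bails out with -EINVAL. The mask arithmetic for the known-speed case:

/* Sketch: keep only the speeds strictly below the current one. */
if (spd > 1)
	mask &= (1 << (spd - 1)) - 1;	/* e.g. spd = 3 keeps bits 0-1 */
else
	return -EINVAL;			/* unknown or already minimal */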
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index ffd8d33c6e0f..6db2e34bd52f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
82 * is issued to the device. However, if the controller clock is 133MHz, 82 * is issued to the device. However, if the controller clock is 133MHz,
83 * the following tables must be used. 83 * the following tables must be used.
84 */ 84 */
85static struct pdc2027x_pio_timing { 85static const struct pdc2027x_pio_timing {
86 u8 value0, value1, value2; 86 u8 value0, value1, value2;
87} pdc2027x_pio_timing_tbl[] = { 87} pdc2027x_pio_timing_tbl[] = {
88 { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */ 88 { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
@@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing {
92 { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */ 92 { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
93}; 93};
94 94
95static struct pdc2027x_mdma_timing { 95static const struct pdc2027x_mdma_timing {
96 u8 value0, value1; 96 u8 value0, value1;
97} pdc2027x_mdma_timing_tbl[] = { 97} pdc2027x_mdma_timing_tbl[] = {
98 { 0xdf, 0x5f }, /* MDMA mode 0 */ 98 { 0xdf, 0x5f }, /* MDMA mode 0 */
@@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing {
100 { 0x69, 0x25 }, /* MDMA mode 2 */ 100 { 0x69, 0x25 }, /* MDMA mode 2 */
101}; 101};
102 102
103static struct pdc2027x_udma_timing { 103static const struct pdc2027x_udma_timing {
104 u8 value0, value1, value2; 104 u8 value0, value1, value2;
105} pdc2027x_udma_timing_tbl[] = { 105} pdc2027x_udma_timing_tbl[] = {
106 { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */ 106 { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
@@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
649 * @host: target ATA host 649 * @host: target ATA host
650 * @board_idx: board identifier 650 * @board_idx: board identifier
651 */ 651 */
652static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx) 652static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
653{ 653{
654 long pll_clock; 654 long pll_clock;
655 655
@@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
665 665
666 /* Adjust PLL control register */ 666 /* Adjust PLL control register */
667 pdc_adjust_pll(host, pll_clock, board_idx); 667 pdc_adjust_pll(host, pll_clock, board_idx);
668
669 return 0;
670} 668}
671 669
672/** 670/**
@@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev,
753 //pci_enable_intx(pdev); 751 //pci_enable_intx(pdev);
754 752
755 /* initialize adapter */ 753 /* initialize adapter */
756 if (pdc_hardware_init(host, board_idx) != 0) 754 pdc_hardware_init(host, board_idx);
757 return -EIO;
758 755
759 pci_set_master(pdev); 756 pci_set_master(pdev);
760 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, 757 return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
@@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev)
778 else 775 else
779 board_idx = PDC_UDMA_133; 776 board_idx = PDC_UDMA_133;
780 777
781 if (pdc_hardware_init(host, board_idx)) 778 pdc_hardware_init(host, board_idx);
782 return -EIO;
783 779
784 ata_host_resume(host); 780 ata_host_resume(host);
785 return 0; 781 return 0;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index db2f04415927..08744b572af6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -526,6 +526,21 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
526/*------------------------- Resume routines -------------------------*/ 526/*------------------------- Resume routines -------------------------*/
527 527
528/** 528/**
529 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
530 * @dev: Target device.
531 *
532 * Make the core skip the "early resume" and "resume" phases for @dev.
533 *
534 * This function can be called by middle-layer code during the "noirq" phase of
535 * system resume if necessary, but not by device drivers.
536 */
537void dev_pm_skip_next_resume_phases(struct device *dev)
538{
539 dev->power.is_late_suspended = false;
540 dev->power.is_suspended = false;
541}
542
543/**
529 * device_resume_noirq - Execute a "noirq resume" callback for given device. 544 * device_resume_noirq - Execute a "noirq resume" callback for given device.
530 * @dev: Device to handle. 545 * @dev: Device to handle.
531 * @state: PM transition of the system being carried out. 546 * @state: PM transition of the system being carried out.
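dev_pm_skip_next_resume_phases() gives middle layers a sanctioned way to short-circuit the remainder of a device's resume: clearing is_late_suspended and is_suspended makes device_resume_early() and device_resume() treat the device as already handled. The ACPI hunk earlier in this diff is its first user, replacing a direct poke at power.direct_complete. Usage shape per that hunk (the fallback call is from memory, so treat it as an assumption):

/* Sketch: a middle-layer "thaw_noirq" callback skipping later phases. */
int acpi_subsys_thaw_noirq(struct device *dev)
{
	if (dev_pm_smart_suspend_and_suspended(dev)) {
		/* Device never resumed; skip the early and normal phases. */
		dev_pm_skip_next_resume_phases(dev);
		return 0;
	}
	return pm_generic_thaw_noirq(dev);
}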
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 3c29d36702a8..5426c04fe24b 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
1755 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); 1755 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
1756 mutex_init(&cci_pmu->reserve_mutex); 1756 mutex_init(&cci_pmu->reserve_mutex);
1757 atomic_set(&cci_pmu->active_events, 0); 1757 atomic_set(&cci_pmu->active_events, 0);
1758 cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); 1758 cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
1759 1759
1760 ret = cci_pmu_init(cci_pmu, pdev); 1760 ret = cci_pmu_init(cci_pmu, pdev);
1761 if (ret) 1761 if (ret) {
1762 put_cpu();
1762 return ret; 1763 return ret;
1764 }
1763 1765
1764 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, 1766 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
1765 &cci_pmu->node); 1767 &cci_pmu->node);
1768 put_cpu();
1766 pr_info("ARM %s PMU driver probed", cci_pmu->model->name); 1769 pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
1767 return 0; 1770 return 0;
1768} 1771}
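The cci_pmu_probe() change swaps smp_processor_id() for get_cpu(): probe runs in preemptible context, where smp_processor_id() triggers a DEBUG_PREEMPT splat and is racy anyway, since the task can migrate between reading the id and registering the hotplug instance. get_cpu() disables preemption so "this CPU" stays true until put_cpu(). Generic shape (register_hotplug_instance() is a placeholder, not a real API):

/* Sketch: pin the current CPU across its use in preemptible code. */
int cpu = get_cpu();			/* disables preemption */

cpumask_set_cpu(cpu, &pmu->cpus);
register_hotplug_instance(pmu);		/* must see a stable CPU */
put_cpu();				/* re-enables preemption */

The arm-ccn.c hunks below apply the identical fix to arm_ccn_pmu_init().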
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 3063f5312397..b52332e52ca5 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -262,7 +262,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
262 NULL 262 NULL
263}; 263};
264 264
265static struct attribute_group arm_ccn_pmu_format_attr_group = { 265static const struct attribute_group arm_ccn_pmu_format_attr_group = {
266 .name = "format", 266 .name = "format",
267 .attrs = arm_ccn_pmu_format_attrs, 267 .attrs = arm_ccn_pmu_format_attrs,
268}; 268};
@@ -451,7 +451,7 @@ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
451static struct attribute 451static struct attribute
452 *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1]; 452 *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
453 453
454static struct attribute_group arm_ccn_pmu_events_attr_group = { 454static const struct attribute_group arm_ccn_pmu_events_attr_group = {
455 .name = "events", 455 .name = "events",
456 .is_visible = arm_ccn_pmu_events_is_visible, 456 .is_visible = arm_ccn_pmu_events_is_visible,
457 .attrs = arm_ccn_pmu_events_attrs, 457 .attrs = arm_ccn_pmu_events_attrs,
@@ -548,7 +548,7 @@ static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
548 NULL 548 NULL
549}; 549};
550 550
551static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { 551static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
552 .name = "cmp_mask", 552 .name = "cmp_mask",
553 .attrs = arm_ccn_pmu_cmp_mask_attrs, 553 .attrs = arm_ccn_pmu_cmp_mask_attrs,
554}; 554};
@@ -569,7 +569,7 @@ static struct attribute *arm_ccn_pmu_cpumask_attrs[] = {
569 NULL, 569 NULL,
570}; 570};
571 571
572static struct attribute_group arm_ccn_pmu_cpumask_attr_group = { 572static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = {
573 .attrs = arm_ccn_pmu_cpumask_attrs, 573 .attrs = arm_ccn_pmu_cpumask_attrs,
574}; 574};
575 575
@@ -1268,10 +1268,12 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1268 if (ccn->dt.id == 0) { 1268 if (ccn->dt.id == 0) {
1269 name = "ccn"; 1269 name = "ccn";
1270 } else { 1270 } else {
1271 int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id); 1271 name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d",
1272 1272 ccn->dt.id);
1273 name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL); 1273 if (!name) {
1274 snprintf(name, len + 1, "ccn_%d", ccn->dt.id); 1274 err = -ENOMEM;
1275 goto error_choose_name;
1276 }
1275 } 1277 }
1276 1278
1277 /* Perf driver registration */ 1279 /* Perf driver registration */
@@ -1298,7 +1300,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1298 } 1300 }
1299 1301
1300 /* Pick one CPU which we will use to collect data from CCN... */ 1302 /* Pick one CPU which we will use to collect data from CCN... */
1301 cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); 1303 cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
1302 1304
1303 /* Also make sure that the overflow interrupt is handled by this CPU */ 1305 /* Also make sure that the overflow interrupt is handled by this CPU */
1304 if (ccn->irq) { 1306 if (ccn->irq) {
@@ -1315,10 +1317,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1315 1317
1316 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, 1318 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1317 &ccn->dt.node); 1319 &ccn->dt.node);
1320 put_cpu();
1318 return 0; 1321 return 0;
1319 1322
1320error_pmu_register: 1323error_pmu_register:
1321error_set_affinity: 1324error_set_affinity:
1325 put_cpu();
1326error_choose_name:
1322 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); 1327 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
1323 for (i = 0; i < ccn->num_xps; i++) 1328 for (i = 0; i < ccn->num_xps; i++)
1324 writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); 1329 writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1581,8 +1586,8 @@ static int __init arm_ccn_init(void)
1581 1586
1582static void __exit arm_ccn_exit(void) 1587static void __exit arm_ccn_exit(void)
1583{ 1588{
1584 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1585 platform_driver_unregister(&arm_ccn_driver); 1589 platform_driver_unregister(&arm_ccn_driver);
1590 cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1586} 1591}
1587 1592
1588module_init(arm_ccn_init); 1593module_init(arm_ccn_init);
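arm_ccn_pmu_init() also trades the snprintf(NULL, 0, ...) measure-then-format dance for devm_kasprintf(), which sizes, allocates (device-managed) and formats in one call, and it finally checks the result, which the old devm_kzalloc() return value never was:

/* Sketch: one call replaces measure + devm_kzalloc + snprintf. */
name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d", ccn->dt.id);
if (!name)
	return -ENOMEM;		/* freed automatically on device detach */

The module-exit reorder (unregister the platform driver before removing the CPU hotplug state) makes sure per-device hotplug instances are gone before the multi-state they hang off is torn down.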
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 779869ed32b1..71fad747c0c7 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -199,6 +199,9 @@ struct smi_info {
199 /* The timer for this si. */ 199 /* The timer for this si. */
200 struct timer_list si_timer; 200 struct timer_list si_timer;
201 201
202 /* This flag is set if the timer can be set */
203 bool timer_can_start;
204
202 /* This flag is set if the timer is running (timer_pending() isn't enough) */ 205 /* This flag is set if the timer is running (timer_pending() isn't enough) */
203 bool timer_running; 206 bool timer_running;
204 207
@@ -355,6 +358,8 @@ out:
355 358
356static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) 359static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
357{ 360{
361 if (!smi_info->timer_can_start)
362 return;
358 smi_info->last_timeout_jiffies = jiffies; 363 smi_info->last_timeout_jiffies = jiffies;
359 mod_timer(&smi_info->si_timer, new_val); 364 mod_timer(&smi_info->si_timer, new_val);
360 smi_info->timer_running = true; 365 smi_info->timer_running = true;
@@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
374 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); 379 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
375} 380}
376 381
377static void start_check_enables(struct smi_info *smi_info, bool start_timer) 382static void start_check_enables(struct smi_info *smi_info)
378{ 383{
379 unsigned char msg[2]; 384 unsigned char msg[2];
380 385
381 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 386 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
382 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; 387 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
383 388
384 if (start_timer) 389 start_new_msg(smi_info, msg, 2);
385 start_new_msg(smi_info, msg, 2);
386 else
387 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
388 smi_info->si_state = SI_CHECKING_ENABLES; 390 smi_info->si_state = SI_CHECKING_ENABLES;
389} 391}
390 392
391static void start_clear_flags(struct smi_info *smi_info, bool start_timer) 393static void start_clear_flags(struct smi_info *smi_info)
392{ 394{
393 unsigned char msg[3]; 395 unsigned char msg[3];
394 396
@@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
397 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; 399 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
398 msg[2] = WDT_PRE_TIMEOUT_INT; 400 msg[2] = WDT_PRE_TIMEOUT_INT;
399 401
400 if (start_timer) 402 start_new_msg(smi_info, msg, 3);
401 start_new_msg(smi_info, msg, 3);
402 else
403 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
404 smi_info->si_state = SI_CLEARING_FLAGS; 403 smi_info->si_state = SI_CLEARING_FLAGS;
405} 404}
406 405
@@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info)
435 * Note that we cannot just use disable_irq(), since the interrupt may 434 * Note that we cannot just use disable_irq(), since the interrupt may
436 * be shared. 435 * be shared.
437 */ 436 */
438static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) 437static inline bool disable_si_irq(struct smi_info *smi_info)
439{ 438{
440 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { 439 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
441 smi_info->interrupt_disabled = true; 440 smi_info->interrupt_disabled = true;
442 start_check_enables(smi_info, start_timer); 441 start_check_enables(smi_info);
443 return true; 442 return true;
444 } 443 }
445 return false; 444 return false;
@@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
449{ 448{
450 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { 449 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
451 smi_info->interrupt_disabled = false; 450 smi_info->interrupt_disabled = false;
452 start_check_enables(smi_info, true); 451 start_check_enables(smi_info);
453 return true; 452 return true;
454 } 453 }
455 return false; 454 return false;
@@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
467 466
468 msg = ipmi_alloc_smi_msg(); 467 msg = ipmi_alloc_smi_msg();
469 if (!msg) { 468 if (!msg) {
470 if (!disable_si_irq(smi_info, true)) 469 if (!disable_si_irq(smi_info))
471 smi_info->si_state = SI_NORMAL; 470 smi_info->si_state = SI_NORMAL;
472 } else if (enable_si_irq(smi_info)) { 471 } else if (enable_si_irq(smi_info)) {
473 ipmi_free_smi_msg(msg); 472 ipmi_free_smi_msg(msg);
@@ -483,7 +482,7 @@ retry:
483 /* Watchdog pre-timeout */ 482 /* Watchdog pre-timeout */
484 smi_inc_stat(smi_info, watchdog_pretimeouts); 483 smi_inc_stat(smi_info, watchdog_pretimeouts);
485 484
486 start_clear_flags(smi_info, true); 485 start_clear_flags(smi_info);
487 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 486 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
488 if (smi_info->intf) 487 if (smi_info->intf)
489 ipmi_smi_watchdog_pretimeout(smi_info->intf); 488 ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -866,7 +865,7 @@ restart:
866 * disable and messages disabled. 865 * disable and messages disabled.
867 */ 866 */
868 if (smi_info->supports_event_msg_buff || smi_info->io.irq) { 867 if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
869 start_check_enables(smi_info, true); 868 start_check_enables(smi_info);
870 } else { 869 } else {
871 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); 870 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
872 if (!smi_info->curr_msg) 871 if (!smi_info->curr_msg)
@@ -1167,6 +1166,7 @@ static int smi_start_processing(void *send_info,
1167 1166
1168 /* Set up the timer that drives the interface. */ 1167 /* Set up the timer that drives the interface. */
1169 timer_setup(&new_smi->si_timer, smi_timeout, 0); 1168 timer_setup(&new_smi->si_timer, smi_timeout, 0);
1169 new_smi->timer_can_start = true;
1170 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); 1170 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1171 1171
1172 /* Try to claim any interrupts. */ 1172 /* Try to claim any interrupts. */
@@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
1936 check_set_rcv_irq(smi_info); 1936 check_set_rcv_irq(smi_info);
1937} 1937}
1938 1938
1939static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 1939static inline void stop_timer_and_thread(struct smi_info *smi_info)
1940{ 1940{
1941 if (smi_info->thread != NULL) 1941 if (smi_info->thread != NULL)
1942 kthread_stop(smi_info->thread); 1942 kthread_stop(smi_info->thread);
1943
1944 smi_info->timer_can_start = false;
1943 if (smi_info->timer_running) 1945 if (smi_info->timer_running)
1944 del_timer_sync(&smi_info->si_timer); 1946 del_timer_sync(&smi_info->si_timer);
1945} 1947}
@@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi)
2152 * Start clearing the flags before we enable interrupts or the 2154 * Start clearing the flags before we enable interrupts or the
2153 * timer to avoid racing with the timer. 2155 * timer to avoid racing with the timer.
2154 */ 2156 */
2155 start_clear_flags(new_smi, false); 2157 start_clear_flags(new_smi);
2156 2158
2157 /* 2159 /*
2158 * IRQ is defined to be set when non-zero. req_events will 2160 * IRQ is defined to be set when non-zero. req_events will
@@ -2238,7 +2240,7 @@ out_err_remove_attrs:
2238 dev_set_drvdata(new_smi->io.dev, NULL); 2240 dev_set_drvdata(new_smi->io.dev, NULL);
2239 2241
2240out_err_stop_timer: 2242out_err_stop_timer:
2241 wait_for_timer_and_thread(new_smi); 2243 stop_timer_and_thread(new_smi);
2242 2244
2243out_err: 2245out_err:
2244 new_smi->interrupt_disabled = true; 2246 new_smi->interrupt_disabled = true;
@@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
2388 */ 2390 */
2389 if (to_clean->io.irq_cleanup) 2391 if (to_clean->io.irq_cleanup)
2390 to_clean->io.irq_cleanup(&to_clean->io); 2392 to_clean->io.irq_cleanup(&to_clean->io);
2391 wait_for_timer_and_thread(to_clean); 2393 stop_timer_and_thread(to_clean);
2392 2394
2393 /* 2395 /*
2394 * Timeouts are stopped, now make sure the interrupts are off 2396 * Timeouts are stopped, now make sure the interrupts are off
@@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
2400 schedule_timeout_uninterruptible(1); 2402 schedule_timeout_uninterruptible(1);
2401 } 2403 }
2402 if (to_clean->handlers) 2404 if (to_clean->handlers)
2403 disable_si_irq(to_clean, false); 2405 disable_si_irq(to_clean);
2404 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 2406 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2405 poll(to_clean); 2407 poll(to_clean);
2406 schedule_timeout_uninterruptible(1); 2408 schedule_timeout_uninterruptible(1);
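The ipmi_si_intf.c hunks above replace the start_timer boolean that was threaded through start_clear_flags(), start_check_enables() and disable_si_irq() with a single timer_can_start flag owned by the smi_info: smi_start_processing() sets it, stop_timer_and_thread() clears it before del_timer_sync(), and the one place that arms the timer checks it. A minimal sketch of the pattern, with hypothetical foo_* names (the real driver also holds its own locks around these steps):

        /* The only place the timer is ever armed. */
        static void foo_mod_timer(struct foo *f, unsigned long expires)
        {
                if (!f->timer_can_start)
                        return;                 /* tearing down: refuse to rearm */
                f->timer_running = true;
                mod_timer(&f->timer, expires);
        }

        static void foo_stop_timer(struct foo *f)
        {
                f->timer_can_start = false;     /* block rearming first... */
                if (f->timer_running)
                        del_timer_sync(&f->timer);  /* ...then wait for it */
        }

Because every arm site funnels through foo_mod_timer(), no completion or flag-check path can restart the timer after foo_stop_timer() returns.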
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c
index 090b073ab441..6b10f0e18a95 100644
--- a/drivers/char/ipmi/ipmi_si_parisc.c
+++ b/drivers/char/ipmi/ipmi_si_parisc.c
@@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev)
10{ 10{
11 struct si_sm_io io; 11 struct si_sm_io io;
12 12
13 memset(&io, 0, sizeof(io));
14
13 io.si_type = SI_KCS; 15 io.si_type = SI_KCS;
14 io.addr_source = SI_DEVICETREE; 16 io.addr_source = SI_DEVICETREE;
15 io.addr_type = IPMI_MEM_ADDR_SPACE; 17 io.addr_type = IPMI_MEM_ADDR_SPACE;
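The one-line parisc fix zeroes the on-stack struct si_sm_io before the probe fills in the fields it cares about; without the memset, members the function never assigns (function pointers included) are stack garbage. The general shape:

        struct si_sm_io io;

        memset(&io, 0, sizeof(io));     /* unassigned members are now 0/NULL */
        io.si_type = SI_KCS;
        io.addr_type = IPMI_MEM_ADDR_SPACE;

A designated initializer (struct si_sm_io io = { .si_type = SI_KCS };) would zero the remaining members implicitly and is an equivalent fix.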
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c
index 99771f5cad07..27dd11c49d21 100644
--- a/drivers/char/ipmi/ipmi_si_pci.c
+++ b/drivers/char/ipmi/ipmi_si_pci.c
@@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev,
103 io.addr_source_cleanup = ipmi_pci_cleanup; 103 io.addr_source_cleanup = ipmi_pci_cleanup;
104 io.addr_source_data = pdev; 104 io.addr_source_data = pdev;
105 105
106 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) 106 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
107 io.addr_type = IPMI_IO_ADDR_SPACE; 107 io.addr_type = IPMI_IO_ADDR_SPACE;
108 else 108 io.io_setup = ipmi_si_port_setup;
109 } else {
109 io.addr_type = IPMI_MEM_ADDR_SPACE; 110 io.addr_type = IPMI_MEM_ADDR_SPACE;
111 io.io_setup = ipmi_si_mem_setup;
112 }
110 io.addr_data = pci_resource_start(pdev, 0); 113 io.addr_data = pci_resource_start(pdev, 0);
111 114
112 io.regspacing = ipmi_pci_probe_regspacing(&io); 115 io.regspacing = ipmi_pci_probe_regspacing(&io);
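Restating the PCI hunk as plain code for readability: each address-space choice is now paired with the matching accessor-setup callback, so a BAR that decodes as port I/O gets port accessors and an MMIO BAR gets memory-mapped ones. Leaving io_setup unset would hand the state machine no register accessors at all.

        if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
                io.addr_type = IPMI_IO_ADDR_SPACE;
                io.io_setup  = ipmi_si_port_setup;  /* inb()/outb() style */
        } else {
                io.addr_type = IPMI_MEM_ADDR_SPACE;
                io.io_setup  = ipmi_si_mem_setup;   /* readb()/writeb() style */
        }
        io.addr_data = pci_resource_start(pdev, 0);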
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index dfb373c8ba2a..7da9f1b83ebe 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -28,7 +28,6 @@
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 29
30#include <linux/bitmap.h> 30#include <linux/bitmap.h>
31#include <linux/bitfield.h>
32#include <linux/device.h> 31#include <linux/device.h>
33#include <linux/err.h> 32#include <linux/err.h>
34#include <linux/export.h> 33#include <linux/export.h>
@@ -73,13 +72,21 @@
73 72
74#define MAX_DVFS_DOMAINS 8 73#define MAX_DVFS_DOMAINS 8
75#define MAX_DVFS_OPPS 16 74#define MAX_DVFS_OPPS 16
76 75#define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16)
77#define PROTO_REV_MAJOR_MASK GENMASK(31, 16) 76#define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff)
78#define PROTO_REV_MINOR_MASK GENMASK(15, 0) 77
79 78#define PROTOCOL_REV_MINOR_BITS 16
80#define FW_REV_MAJOR_MASK GENMASK(31, 24) 79#define PROTOCOL_REV_MINOR_MASK ((1U << PROTOCOL_REV_MINOR_BITS) - 1)
81#define FW_REV_MINOR_MASK GENMASK(23, 16) 80#define PROTOCOL_REV_MAJOR(x) ((x) >> PROTOCOL_REV_MINOR_BITS)
82#define FW_REV_PATCH_MASK GENMASK(15, 0) 81#define PROTOCOL_REV_MINOR(x) ((x) & PROTOCOL_REV_MINOR_MASK)
82
83#define FW_REV_MAJOR_BITS 24
84#define FW_REV_MINOR_BITS 16
85#define FW_REV_PATCH_MASK ((1U << FW_REV_MINOR_BITS) - 1)
86#define FW_REV_MINOR_MASK ((1U << FW_REV_MAJOR_BITS) - 1)
87#define FW_REV_MAJOR(x) ((x) >> FW_REV_MAJOR_BITS)
88#define FW_REV_MINOR(x) (((x) & FW_REV_MINOR_MASK) >> FW_REV_MINOR_BITS)
89#define FW_REV_PATCH(x) ((x) & FW_REV_PATCH_MASK)
83 90
84#define MAX_RX_TIMEOUT (msecs_to_jiffies(30)) 91#define MAX_RX_TIMEOUT (msecs_to_jiffies(30))
85 92
@@ -304,6 +311,10 @@ struct clk_get_info {
304 u8 name[20]; 311 u8 name[20];
305} __packed; 312} __packed;
306 313
314struct clk_get_value {
315 __le32 rate;
316} __packed;
317
307struct clk_set_value { 318struct clk_set_value {
308 __le16 id; 319 __le16 id;
309 __le16 reserved; 320 __le16 reserved;
@@ -317,9 +328,7 @@ struct legacy_clk_set_value {
317} __packed; 328} __packed;
318 329
319struct dvfs_info { 330struct dvfs_info {
320 u8 domain; 331 __le32 header;
321 u8 opp_count;
322 __le16 latency;
323 struct { 332 struct {
324 __le32 freq; 333 __le32 freq;
325 __le32 m_volt; 334 __le32 m_volt;
@@ -342,6 +351,11 @@ struct _scpi_sensor_info {
342 char name[20]; 351 char name[20];
343}; 352};
344 353
354struct sensor_value {
355 __le32 lo_val;
356 __le32 hi_val;
357} __packed;
358
345struct dev_pstate_set { 359struct dev_pstate_set {
346 __le16 dev_id; 360 __le16 dev_id;
347 u8 pstate; 361 u8 pstate;
@@ -405,20 +419,19 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
405 unsigned int len; 419 unsigned int len;
406 420
407 if (scpi_info->is_legacy) { 421 if (scpi_info->is_legacy) {
408 struct legacy_scpi_shared_mem __iomem *mem = 422 struct legacy_scpi_shared_mem *mem = ch->rx_payload;
409 ch->rx_payload;
410 423
411 /* RX Length is not replied by the legacy Firmware */ 424 /* RX Length is not replied by the legacy Firmware */
412 len = match->rx_len; 425 len = match->rx_len;
413 426
414 match->status = ioread32(&mem->status); 427 match->status = le32_to_cpu(mem->status);
415 memcpy_fromio(match->rx_buf, mem->payload, len); 428 memcpy_fromio(match->rx_buf, mem->payload, len);
416 } else { 429 } else {
417 struct scpi_shared_mem __iomem *mem = ch->rx_payload; 430 struct scpi_shared_mem *mem = ch->rx_payload;
418 431
419 len = min(match->rx_len, CMD_SIZE(cmd)); 432 len = min(match->rx_len, CMD_SIZE(cmd));
420 433
421 match->status = ioread32(&mem->status); 434 match->status = le32_to_cpu(mem->status);
422 memcpy_fromio(match->rx_buf, mem->payload, len); 435 memcpy_fromio(match->rx_buf, mem->payload, len);
423 } 436 }
424 437
@@ -432,11 +445,11 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd)
432static void scpi_handle_remote_msg(struct mbox_client *c, void *msg) 445static void scpi_handle_remote_msg(struct mbox_client *c, void *msg)
433{ 446{
434 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); 447 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
435 struct scpi_shared_mem __iomem *mem = ch->rx_payload; 448 struct scpi_shared_mem *mem = ch->rx_payload;
436 u32 cmd = 0; 449 u32 cmd = 0;
437 450
438 if (!scpi_info->is_legacy) 451 if (!scpi_info->is_legacy)
439 cmd = ioread32(&mem->command); 452 cmd = le32_to_cpu(mem->command);
440 453
441 scpi_process_cmd(ch, cmd); 454 scpi_process_cmd(ch, cmd);
442} 455}
@@ -446,7 +459,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
446 unsigned long flags; 459 unsigned long flags;
447 struct scpi_xfer *t = msg; 460 struct scpi_xfer *t = msg;
448 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); 461 struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
449 struct scpi_shared_mem __iomem *mem = ch->tx_payload; 462 struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
450 463
451 if (t->tx_buf) { 464 if (t->tx_buf) {
452 if (scpi_info->is_legacy) 465 if (scpi_info->is_legacy)
@@ -465,7 +478,7 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
465 } 478 }
466 479
467 if (!scpi_info->is_legacy) 480 if (!scpi_info->is_legacy)
468 iowrite32(t->cmd, &mem->command); 481 mem->command = cpu_to_le32(t->cmd);
469} 482}
470 483
471static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch) 484static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -570,13 +583,13 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max)
570static unsigned long scpi_clk_get_val(u16 clk_id) 583static unsigned long scpi_clk_get_val(u16 clk_id)
571{ 584{
572 int ret; 585 int ret;
573 __le32 rate; 586 struct clk_get_value clk;
574 __le16 le_clk_id = cpu_to_le16(clk_id); 587 __le16 le_clk_id = cpu_to_le16(clk_id);
575 588
576 ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id, 589 ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
577 sizeof(le_clk_id), &rate, sizeof(rate)); 590 sizeof(le_clk_id), &clk, sizeof(clk));
578 591
579 return ret ? ret : le32_to_cpu(rate); 592 return ret ? ret : le32_to_cpu(clk.rate);
580} 593}
581 594
582static int scpi_clk_set_val(u16 clk_id, unsigned long rate) 595static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
@@ -632,34 +645,34 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
632 645
633static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) 646static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
634{ 647{
635 if (domain >= MAX_DVFS_DOMAINS)
636 return ERR_PTR(-EINVAL);
637
638 return scpi_info->dvfs[domain] ?: ERR_PTR(-EINVAL);
639}
640
641static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
642{
643 struct scpi_dvfs_info *info; 648 struct scpi_dvfs_info *info;
644 struct scpi_opp *opp; 649 struct scpi_opp *opp;
645 struct dvfs_info buf; 650 struct dvfs_info buf;
646 int ret, i; 651 int ret, i;
647 652
653 if (domain >= MAX_DVFS_DOMAINS)
654 return ERR_PTR(-EINVAL);
655
656 if (scpi_info->dvfs[domain]) /* data already populated */
657 return scpi_info->dvfs[domain];
658
648 ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain), 659 ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain),
649 &buf, sizeof(buf)); 660 &buf, sizeof(buf));
650 if (ret) 661 if (ret)
651 return ret; 662 return ERR_PTR(ret);
652 663
653 info = devm_kmalloc(dev, sizeof(*info), GFP_KERNEL); 664 info = kmalloc(sizeof(*info), GFP_KERNEL);
654 if (!info) 665 if (!info)
655 return -ENOMEM; 666 return ERR_PTR(-ENOMEM);
656 667
657 info->count = buf.opp_count; 668 info->count = DVFS_OPP_COUNT(buf.header);
658 info->latency = le16_to_cpu(buf.latency) * 1000; /* uS to nS */ 669 info->latency = DVFS_LATENCY(buf.header) * 1000; /* uS to nS */
659 670
660 info->opps = devm_kcalloc(dev, info->count, sizeof(*opp), GFP_KERNEL); 671 info->opps = kcalloc(info->count, sizeof(*opp), GFP_KERNEL);
661 if (!info->opps) 672 if (!info->opps) {
662 return -ENOMEM; 673 kfree(info);
674 return ERR_PTR(-ENOMEM);
675 }
663 676
664 for (i = 0, opp = info->opps; i < info->count; i++, opp++) { 677 for (i = 0, opp = info->opps; i < info->count; i++, opp++) {
665 opp->freq = le32_to_cpu(buf.opps[i].freq); 678 opp->freq = le32_to_cpu(buf.opps[i].freq);
@@ -669,15 +682,7 @@ static int scpi_dvfs_populate_info(struct device *dev, u8 domain)
669 sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL); 682 sort(info->opps, info->count, sizeof(*opp), opp_cmp_func, NULL);
670 683
671 scpi_info->dvfs[domain] = info; 684 scpi_info->dvfs[domain] = info;
672 return 0; 685 return info;
673}
674
675static void scpi_dvfs_populate(struct device *dev)
676{
677 int domain;
678
679 for (domain = 0; domain < MAX_DVFS_DOMAINS; domain++)
680 scpi_dvfs_populate_info(dev, domain);
681} 686}
682 687
683static int scpi_dev_domain_id(struct device *dev) 688static int scpi_dev_domain_id(struct device *dev)
@@ -708,6 +713,9 @@ static int scpi_dvfs_get_transition_latency(struct device *dev)
708 if (IS_ERR(info)) 713 if (IS_ERR(info))
709 return PTR_ERR(info); 714 return PTR_ERR(info);
710 715
716 if (!info->latency)
717 return 0;
718
711 return info->latency; 719 return info->latency;
712} 720}
713 721
@@ -768,19 +776,20 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info)
768static int scpi_sensor_get_value(u16 sensor, u64 *val) 776static int scpi_sensor_get_value(u16 sensor, u64 *val)
769{ 777{
770 __le16 id = cpu_to_le16(sensor); 778 __le16 id = cpu_to_le16(sensor);
771 __le64 value; 779 struct sensor_value buf;
772 int ret; 780 int ret;
773 781
774 ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), 782 ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id),
775 &value, sizeof(value)); 783 &buf, sizeof(buf));
776 if (ret) 784 if (ret)
777 return ret; 785 return ret;
778 786
779 if (scpi_info->is_legacy) 787 if (scpi_info->is_legacy)
780 /* only 32-bits supported, upper 32 bits can be junk */ 788 /* only 32-bits supported, hi_val can be junk */
781 *val = le32_to_cpup((__le32 *)&value); 789 *val = le32_to_cpu(buf.lo_val);
782 else 790 else
783 *val = le64_to_cpu(value); 791 *val = (u64)le32_to_cpu(buf.hi_val) << 32 |
792 le32_to_cpu(buf.lo_val);
784 793
785 return 0; 794 return 0;
786} 795}
@@ -853,19 +862,23 @@ static int scpi_init_versions(struct scpi_drvinfo *info)
853static ssize_t protocol_version_show(struct device *dev, 862static ssize_t protocol_version_show(struct device *dev,
854 struct device_attribute *attr, char *buf) 863 struct device_attribute *attr, char *buf)
855{ 864{
856 return sprintf(buf, "%lu.%lu\n", 865 struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
857 FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), 866
858 FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version)); 867 return sprintf(buf, "%d.%d\n",
868 PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
869 PROTOCOL_REV_MINOR(scpi_info->protocol_version));
859} 870}
860static DEVICE_ATTR_RO(protocol_version); 871static DEVICE_ATTR_RO(protocol_version);
861 872
862static ssize_t firmware_version_show(struct device *dev, 873static ssize_t firmware_version_show(struct device *dev,
863 struct device_attribute *attr, char *buf) 874 struct device_attribute *attr, char *buf)
864{ 875{
865 return sprintf(buf, "%lu.%lu.%lu\n", 876 struct scpi_drvinfo *scpi_info = dev_get_drvdata(dev);
866 FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), 877
867 FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), 878 return sprintf(buf, "%d.%d.%d\n",
868 FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version)); 879 FW_REV_MAJOR(scpi_info->firmware_version),
880 FW_REV_MINOR(scpi_info->firmware_version),
881 FW_REV_PATCH(scpi_info->firmware_version));
869} 882}
870static DEVICE_ATTR_RO(firmware_version); 883static DEVICE_ATTR_RO(firmware_version);
871 884
@@ -876,13 +889,39 @@ static struct attribute *versions_attrs[] = {
876}; 889};
877ATTRIBUTE_GROUPS(versions); 890ATTRIBUTE_GROUPS(versions);
878 891
879static void scpi_free_channels(void *data) 892static void
893scpi_free_channels(struct device *dev, struct scpi_chan *pchan, int count)
880{ 894{
881 struct scpi_drvinfo *info = data;
882 int i; 895 int i;
883 896
884 for (i = 0; i < info->num_chans; i++) 897 for (i = 0; i < count && pchan->chan; i++, pchan++) {
885 mbox_free_channel(info->channels[i].chan); 898 mbox_free_channel(pchan->chan);
899 devm_kfree(dev, pchan->xfers);
900 devm_iounmap(dev, pchan->rx_payload);
901 }
902}
903
904static int scpi_remove(struct platform_device *pdev)
905{
906 int i;
907 struct device *dev = &pdev->dev;
908 struct scpi_drvinfo *info = platform_get_drvdata(pdev);
909
910 scpi_info = NULL; /* stop exporting SCPI ops through get_scpi_ops */
911
912 of_platform_depopulate(dev);
913 sysfs_remove_groups(&dev->kobj, versions_groups);
914 scpi_free_channels(dev, info->channels, info->num_chans);
915 platform_set_drvdata(pdev, NULL);
916
917 for (i = 0; i < MAX_DVFS_DOMAINS && info->dvfs[i]; i++) {
918 kfree(info->dvfs[i]->opps);
919 kfree(info->dvfs[i]);
920 }
921 devm_kfree(dev, info->channels);
922 devm_kfree(dev, info);
923
924 return 0;
886} 925}
887 926
888#define MAX_SCPI_XFERS 10 927#define MAX_SCPI_XFERS 10
@@ -913,6 +952,7 @@ static int scpi_probe(struct platform_device *pdev)
913{ 952{
914 int count, idx, ret; 953 int count, idx, ret;
915 struct resource res; 954 struct resource res;
955 struct scpi_chan *scpi_chan;
916 struct device *dev = &pdev->dev; 956 struct device *dev = &pdev->dev;
917 struct device_node *np = dev->of_node; 957 struct device_node *np = dev->of_node;
918 958
@@ -929,19 +969,13 @@ static int scpi_probe(struct platform_device *pdev)
929 return -ENODEV; 969 return -ENODEV;
930 } 970 }
931 971
932 scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), 972 scpi_chan = devm_kcalloc(dev, count, sizeof(*scpi_chan), GFP_KERNEL);
933 GFP_KERNEL); 973 if (!scpi_chan)
934 if (!scpi_info->channels)
935 return -ENOMEM; 974 return -ENOMEM;
936 975
937 ret = devm_add_action(dev, scpi_free_channels, scpi_info); 976 for (idx = 0; idx < count; idx++) {
938 if (ret)
939 return ret;
940
941 for (; scpi_info->num_chans < count; scpi_info->num_chans++) {
942 resource_size_t size; 977 resource_size_t size;
943 int idx = scpi_info->num_chans; 978 struct scpi_chan *pchan = scpi_chan + idx;
944 struct scpi_chan *pchan = scpi_info->channels + idx;
945 struct mbox_client *cl = &pchan->cl; 979 struct mbox_client *cl = &pchan->cl;
946 struct device_node *shmem = of_parse_phandle(np, "shmem", idx); 980 struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
947 981
@@ -949,14 +983,15 @@ static int scpi_probe(struct platform_device *pdev)
949 of_node_put(shmem); 983 of_node_put(shmem);
950 if (ret) { 984 if (ret) {
951 dev_err(dev, "failed to get SCPI payload mem resource\n"); 985 dev_err(dev, "failed to get SCPI payload mem resource\n");
952 return ret; 986 goto err;
953 } 987 }
954 988
955 size = resource_size(&res); 989 size = resource_size(&res);
956 pchan->rx_payload = devm_ioremap(dev, res.start, size); 990 pchan->rx_payload = devm_ioremap(dev, res.start, size);
957 if (!pchan->rx_payload) { 991 if (!pchan->rx_payload) {
958 dev_err(dev, "failed to ioremap SCPI payload\n"); 992 dev_err(dev, "failed to ioremap SCPI payload\n");
959 return -EADDRNOTAVAIL; 993 ret = -EADDRNOTAVAIL;
994 goto err;
960 } 995 }
961 pchan->tx_payload = pchan->rx_payload + (size >> 1); 996 pchan->tx_payload = pchan->rx_payload + (size >> 1);
962 997
@@ -982,11 +1017,17 @@ static int scpi_probe(struct platform_device *pdev)
982 dev_err(dev, "failed to get channel%d err %d\n", 1017 dev_err(dev, "failed to get channel%d err %d\n",
983 idx, ret); 1018 idx, ret);
984 } 1019 }
1020err:
1021 scpi_free_channels(dev, scpi_chan, idx);
1022 scpi_info = NULL;
985 return ret; 1023 return ret;
986 } 1024 }
987 1025
1026 scpi_info->channels = scpi_chan;
1027 scpi_info->num_chans = count;
988 scpi_info->commands = scpi_std_commands; 1028 scpi_info->commands = scpi_std_commands;
989 scpi_info->scpi_ops = &scpi_ops; 1029
1030 platform_set_drvdata(pdev, scpi_info);
990 1031
991 if (scpi_info->is_legacy) { 1032 if (scpi_info->is_legacy) {
992 /* Replace with legacy variants */ 1033 /* Replace with legacy variants */
@@ -1002,23 +1043,23 @@ static int scpi_probe(struct platform_device *pdev)
1002 ret = scpi_init_versions(scpi_info); 1043 ret = scpi_init_versions(scpi_info);
1003 if (ret) { 1044 if (ret) {
1004 dev_err(dev, "incorrect or no SCP firmware found\n"); 1045 dev_err(dev, "incorrect or no SCP firmware found\n");
1046 scpi_remove(pdev);
1005 return ret; 1047 return ret;
1006 } 1048 }
1007 1049
1008 scpi_dvfs_populate(dev); 1050 _dev_info(dev, "SCP Protocol %d.%d Firmware %d.%d.%d version\n",
1009 1051 PROTOCOL_REV_MAJOR(scpi_info->protocol_version),
1010 _dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", 1052 PROTOCOL_REV_MINOR(scpi_info->protocol_version),
1011 FIELD_GET(PROTO_REV_MAJOR_MASK, scpi_info->protocol_version), 1053 FW_REV_MAJOR(scpi_info->firmware_version),
1012 FIELD_GET(PROTO_REV_MINOR_MASK, scpi_info->protocol_version), 1054 FW_REV_MINOR(scpi_info->firmware_version),
1013 FIELD_GET(FW_REV_MAJOR_MASK, scpi_info->firmware_version), 1055 FW_REV_PATCH(scpi_info->firmware_version));
1014 FIELD_GET(FW_REV_MINOR_MASK, scpi_info->firmware_version), 1056 scpi_info->scpi_ops = &scpi_ops;
1015 FIELD_GET(FW_REV_PATCH_MASK, scpi_info->firmware_version));
1016 1057
1017 ret = devm_device_add_groups(dev, versions_groups); 1058 ret = sysfs_create_groups(&dev->kobj, versions_groups);
1018 if (ret) 1059 if (ret)
1019 dev_err(dev, "unable to create sysfs version group\n"); 1060 dev_err(dev, "unable to create sysfs version group\n");
1020 1061
1021 return devm_of_platform_populate(dev); 1062 return of_platform_populate(dev->of_node, NULL, NULL, dev);
1022} 1063}
1023 1064
1024static const struct of_device_id scpi_of_match[] = { 1065static const struct of_device_id scpi_of_match[] = {
@@ -1035,6 +1076,7 @@ static struct platform_driver scpi_driver = {
1035 .of_match_table = scpi_of_match, 1076 .of_match_table = scpi_of_match,
1036 }, 1077 },
1037 .probe = scpi_probe, 1078 .probe = scpi_probe,
1079 .remove = scpi_remove,
1038}; 1080};
1039module_platform_driver(scpi_driver); 1081module_platform_driver(scpi_driver);
1040 1082
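The arm_scpi changes revert the driver to lazily populating per-domain DVFS data: scpi_dvfs_get_info() returns the cached scpi_info->dvfs[domain] when present, otherwise queries the firmware, allocates with plain kmalloc/kcalloc (freed in the new scpi_remove()), and reports failure with ERR_PTR(). The shape of such a get-or-create lookup, sketched with hypothetical types and a placeholder query:

        static struct info *get_or_populate(struct ctx *c, unsigned int idx)
        {
                struct info *inf;
                int ret;

                if (idx >= MAX_IDX)
                        return ERR_PTR(-EINVAL);
                if (c->cache[idx])              /* fast path: already populated */
                        return c->cache[idx];

                inf = kmalloc(sizeof(*inf), GFP_KERNEL);
                if (!inf)
                        return ERR_PTR(-ENOMEM);

                ret = fetch_from_firmware(c, idx, inf);  /* placeholder */
                if (ret) {
                        kfree(inf);
                        return ERR_PTR(ret);    /* callers test with IS_ERR() */
                }

                c->cache[idx] = inf;            /* owned until ->remove() */
                return inf;
        }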
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 482014137953..9ae236036e32 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -152,14 +152,23 @@ static void drm_connector_free(struct kref *kref)
152 connector->funcs->destroy(connector); 152 connector->funcs->destroy(connector);
153} 153}
154 154
155static void drm_connector_free_work_fn(struct work_struct *work) 155void drm_connector_free_work_fn(struct work_struct *work)
156{ 156{
157 struct drm_connector *connector = 157 struct drm_connector *connector, *n;
158 container_of(work, struct drm_connector, free_work); 158 struct drm_device *dev =
159 struct drm_device *dev = connector->dev; 159 container_of(work, struct drm_device, mode_config.connector_free_work);
160 struct drm_mode_config *config = &dev->mode_config;
161 unsigned long flags;
162 struct llist_node *freed;
160 163
161 drm_mode_object_unregister(dev, &connector->base); 164 spin_lock_irqsave(&config->connector_list_lock, flags);
162 connector->funcs->destroy(connector); 165 freed = llist_del_all(&config->connector_free_list);
166 spin_unlock_irqrestore(&config->connector_list_lock, flags);
167
168 llist_for_each_entry_safe(connector, n, freed, free_node) {
169 drm_mode_object_unregister(dev, &connector->base);
170 connector->funcs->destroy(connector);
171 }
163} 172}
164 173
165/** 174/**
@@ -191,8 +200,6 @@ int drm_connector_init(struct drm_device *dev,
191 if (ret) 200 if (ret)
192 return ret; 201 return ret;
193 202
194 INIT_WORK(&connector->free_work, drm_connector_free_work_fn);
195
196 connector->base.properties = &connector->properties; 203 connector->base.properties = &connector->properties;
197 connector->dev = dev; 204 connector->dev = dev;
198 connector->funcs = funcs; 205 connector->funcs = funcs;
@@ -547,10 +554,17 @@ EXPORT_SYMBOL(drm_connector_list_iter_begin);
547 * actually release the connector when dropping our final reference. 554 * actually release the connector when dropping our final reference.
548 */ 555 */
549static void 556static void
550drm_connector_put_safe(struct drm_connector *conn) 557__drm_connector_put_safe(struct drm_connector *conn)
551{ 558{
552 if (refcount_dec_and_test(&conn->base.refcount.refcount)) 559 struct drm_mode_config *config = &conn->dev->mode_config;
553 schedule_work(&conn->free_work); 560
561 lockdep_assert_held(&config->connector_list_lock);
562
563 if (!refcount_dec_and_test(&conn->base.refcount.refcount))
564 return;
565
566 llist_add(&conn->free_node, &config->connector_free_list);
567 schedule_work(&config->connector_free_work);
554} 568}
555 569
556/** 570/**
@@ -582,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
582 596
583 /* loop until it's not a zombie connector */ 597 /* loop until it's not a zombie connector */
584 } while (!kref_get_unless_zero(&iter->conn->base.refcount)); 598 } while (!kref_get_unless_zero(&iter->conn->base.refcount));
585 spin_unlock_irqrestore(&config->connector_list_lock, flags);
586 599
587 if (old_conn) 600 if (old_conn)
588 drm_connector_put_safe(old_conn); 601 __drm_connector_put_safe(old_conn);
602 spin_unlock_irqrestore(&config->connector_list_lock, flags);
589 603
590 return iter->conn; 604 return iter->conn;
591} 605}
@@ -602,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
602 */ 616 */
603void drm_connector_list_iter_end(struct drm_connector_list_iter *iter) 617void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
604{ 618{
619 struct drm_mode_config *config = &iter->dev->mode_config;
620 unsigned long flags;
621
605 iter->dev = NULL; 622 iter->dev = NULL;
606 if (iter->conn) 623 if (iter->conn) {
607 drm_connector_put_safe(iter->conn); 624 spin_lock_irqsave(&config->connector_list_lock, flags);
625 __drm_connector_put_safe(iter->conn);
626 spin_unlock_irqrestore(&config->connector_list_lock, flags);
627 }
608 lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); 628 lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
609} 629}
610EXPORT_SYMBOL(drm_connector_list_iter_end); 630EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -1231,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
1231 if (edid) 1251 if (edid)
1232 size = EDID_LENGTH * (1 + edid->extensions); 1252 size = EDID_LENGTH * (1 + edid->extensions);
1233 1253
1254 /* Set the display info, using edid if available, otherwise
 1255 * resetting the values to defaults. This duplicates the work
1256 * done in drm_add_edid_modes, but that function is not
1257 * consistently called before this one in all drivers and the
1258 * computation is cheap enough that it seems better to
1259 * duplicate it rather than attempt to ensure some arbitrary
1260 * ordering of calls.
1261 */
1262 if (edid)
1263 drm_add_display_info(connector, edid);
1264 else
1265 drm_reset_display_info(connector);
1266
1234 drm_object_property_set_value(&connector->base, 1267 drm_object_property_set_value(&connector->base,
1235 dev->mode_config.non_desktop_property, 1268 dev->mode_config.non_desktop_property,
1236 connector->display_info.non_desktop); 1269 connector->display_info.non_desktop);
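The drm_connector.c rework replaces one free_work per connector with a single lock-free list on the mode_config: the final unref, done under connector_list_lock, pushes the connector onto connector_free_list with llist_add() and kicks one shared work item, whose handler detaches the whole batch with llist_del_all() and destroys each entry. A condensed sketch of the producer/consumer pair (the real handler also unregisters the mode object and takes the list lock around the detach):

        /* producer: runs with config->connector_list_lock held */
        llist_add(&conn->free_node, &config->connector_free_list);
        schedule_work(&config->connector_free_work);

        /* consumer: the shared work handler drains the batch */
        static void free_work_fn(struct work_struct *work)
        {
                struct drm_mode_config *config =
                        container_of(work, struct drm_mode_config,
                                     connector_free_work);
                struct llist_node *freed =
                        llist_del_all(&config->connector_free_list);
                struct drm_connector *conn, *n;

                llist_for_each_entry_safe(conn, n, freed, free_node)
                        conn->funcs->destroy(conn);     /* simplified */
        }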
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 9ebb8841778c..af00f42ba269 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
142 uint64_t value); 142 uint64_t value);
143int drm_connector_create_standard_properties(struct drm_device *dev); 143int drm_connector_create_standard_properties(struct drm_device *dev);
144const char *drm_get_connector_force_name(enum drm_connector_force force); 144const char *drm_get_connector_force_name(enum drm_connector_force force);
145void drm_connector_free_work_fn(struct work_struct *work);
145 146
146/* IOCTL */ 147/* IOCTL */
147int drm_mode_connector_property_set_ioctl(struct drm_device *dev, 148int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5dfe14763871..cb487148359a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
1731 * 1731 *
1732 * Returns true if @vendor is in @edid, false otherwise 1732 * Returns true if @vendor is in @edid, false otherwise
1733 */ 1733 */
1734static bool edid_vendor(struct edid *edid, const char *vendor) 1734static bool edid_vendor(const struct edid *edid, const char *vendor)
1735{ 1735{
1736 char edid_vendor[3]; 1736 char edid_vendor[3];
1737 1737
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
1749 * 1749 *
1750 * This tells subsequent routines what fixes they need to apply. 1750 * This tells subsequent routines what fixes they need to apply.
1751 */ 1751 */
1752static u32 edid_get_quirks(struct edid *edid) 1752static u32 edid_get_quirks(const struct edid *edid)
1753{ 1753{
1754 const struct edid_quirk *quirk; 1754 const struct edid_quirk *quirk;
1755 int i; 1755 int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2813/* 2813/*
2814 * Search EDID for CEA extension block. 2814 * Search EDID for CEA extension block.
2815 */ 2815 */
2816static u8 *drm_find_edid_extension(struct edid *edid, int ext_id) 2816static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
2817{ 2817{
2818 u8 *edid_ext = NULL; 2818 u8 *edid_ext = NULL;
2819 int i; 2819 int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
2835 return edid_ext; 2835 return edid_ext;
2836} 2836}
2837 2837
2838static u8 *drm_find_cea_extension(struct edid *edid) 2838static u8 *drm_find_cea_extension(const struct edid *edid)
2839{ 2839{
2840 return drm_find_edid_extension(edid, CEA_EXT); 2840 return drm_find_edid_extension(edid, CEA_EXT);
2841} 2841}
2842 2842
2843static u8 *drm_find_displayid_extension(struct edid *edid) 2843static u8 *drm_find_displayid_extension(const struct edid *edid)
2844{ 2844{
2845 return drm_find_edid_extension(edid, DISPLAYID_EXT); 2845 return drm_find_edid_extension(edid, DISPLAYID_EXT);
2846} 2846}
@@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
4363} 4363}
4364 4364
4365static void drm_parse_cea_ext(struct drm_connector *connector, 4365static void drm_parse_cea_ext(struct drm_connector *connector,
4366 struct edid *edid) 4366 const struct edid *edid)
4367{ 4367{
4368 struct drm_display_info *info = &connector->display_info; 4368 struct drm_display_info *info = &connector->display_info;
4369 const u8 *edid_ext; 4369 const u8 *edid_ext;
@@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
4397 } 4397 }
4398} 4398}
4399 4399
4400static void drm_add_display_info(struct drm_connector *connector, 4400/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
4401 struct edid *edid, u32 quirks) 4401 * all of the values which would have been set from EDID
4402 */
4403void
4404drm_reset_display_info(struct drm_connector *connector)
4402{ 4405{
4403 struct drm_display_info *info = &connector->display_info; 4406 struct drm_display_info *info = &connector->display_info;
4404 4407
4408 info->width_mm = 0;
4409 info->height_mm = 0;
4410
4411 info->bpc = 0;
4412 info->color_formats = 0;
4413 info->cea_rev = 0;
4414 info->max_tmds_clock = 0;
4415 info->dvi_dual = false;
4416
4417 info->non_desktop = 0;
4418}
4419EXPORT_SYMBOL_GPL(drm_reset_display_info);
4420
4421u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
4422{
4423 struct drm_display_info *info = &connector->display_info;
4424
4425 u32 quirks = edid_get_quirks(edid);
4426
4405 info->width_mm = edid->width_cm * 10; 4427 info->width_mm = edid->width_cm * 10;
4406 info->height_mm = edid->height_cm * 10; 4428 info->height_mm = edid->height_cm * 10;
4407 4429
@@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector,
4414 4436
4415 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); 4437 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
4416 4438
4439 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
4440
4417 if (edid->revision < 3) 4441 if (edid->revision < 3)
4418 return; 4442 return quirks;
4419 4443
4420 if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) 4444 if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
4421 return; 4445 return quirks;
4422 4446
4423 drm_parse_cea_ext(connector, edid); 4447 drm_parse_cea_ext(connector, edid);
4424 4448
@@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector,
4438 4462
4439 /* Only defined for 1.4 with digital displays */ 4463 /* Only defined for 1.4 with digital displays */
4440 if (edid->revision < 4) 4464 if (edid->revision < 4)
4441 return; 4465 return quirks;
4442 4466
4443 switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { 4467 switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
4444 case DRM_EDID_DIGITAL_DEPTH_6: 4468 case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector,
4473 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; 4497 info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
4474 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) 4498 if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
4475 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; 4499 info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
4500 return quirks;
4476} 4501}
4502EXPORT_SYMBOL_GPL(drm_add_display_info);
4477 4503
4478static int validate_displayid(u8 *displayid, int length, int idx) 4504static int validate_displayid(u8 *displayid, int length, int idx)
4479{ 4505{
@@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
4627 return 0; 4653 return 0;
4628 } 4654 }
4629 4655
4630 quirks = edid_get_quirks(edid);
4631
4632 /* 4656 /*
4633 * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks. 4657 * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
4634 * To avoid multiple parsing of same block, lets parse that map 4658 * To avoid multiple parsing of same block, lets parse that map
4635 * from sink info, before parsing CEA modes. 4659 * from sink info, before parsing CEA modes.
4636 */ 4660 */
4637 drm_add_display_info(connector, edid, quirks); 4661 quirks = drm_add_display_info(connector, edid);
4638 4662
4639 /* 4663 /*
4640 * EDID spec says modes should be preferred in this order: 4664 * EDID spec says modes should be preferred in this order:
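Taken together, the drm_edid.c hunks make drm_add_display_info() compute the EDID quirks itself and return them, so drm_add_edid_modes() no longer calls edid_get_quirks() separately, and the new drm_reset_display_info() gives callers without an EDID a way back to known defaults. A condensed view of the resulting caller pattern (the update_edid_property hunk above ignores the returned quirks):

        u32 quirks = 0;

        if (edid)
                quirks = drm_add_display_info(connector, edid);
        else
                drm_reset_display_info(connector);  /* clear EDID-derived state */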
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d1eb56a1eff4..59849f02e2ad 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
254 return lessee; 254 return lessee;
255 255
256out_lessee: 256out_lessee:
257 drm_master_put(&lessee);
258
259 mutex_unlock(&dev->mode_config.idr_mutex); 257 mutex_unlock(&dev->mode_config.idr_mutex);
260 258
259 drm_master_put(&lessee);
260
261 return ERR_PTR(error); 261 return ERR_PTR(error);
262} 262}
263 263
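The drm_lease.c fix is purely an ordering change on the error path: drop idr_mutex before the final drm_master_put(). If that put releases the last reference, the master's teardown runs, and teardown code that needs the same mutex would deadlock against its own caller. As a rule of thumb (whether the release path actually takes the lock is implementation-specific; the reorder makes the error path safe either way):

        mutex_lock(&dev->mode_config.idr_mutex);
        /* ... error detected ... */
        mutex_unlock(&dev->mode_config.idr_mutex);  /* drop the lock first */
        drm_master_put(&lessee);   /* final-put teardown may take the lock */
        return ERR_PTR(error);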
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 61a1c8ea74bc..c3c79ee6119e 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
575 */ 575 */
576void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) 576void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
577{ 577{
578 struct drm_mm *mm = old->mm;
579
578 DRM_MM_BUG_ON(!old->allocated); 580 DRM_MM_BUG_ON(!old->allocated);
579 581
580 *new = *old; 582 *new = *old;
581 583
582 list_replace(&old->node_list, &new->node_list); 584 list_replace(&old->node_list, &new->node_list);
583 rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root); 585 rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
584 586
585 if (drm_mm_hole_follows(old)) { 587 if (drm_mm_hole_follows(old)) {
586 list_replace(&old->hole_stack, &new->hole_stack); 588 list_replace(&old->hole_stack, &new->hole_stack);
587 rb_replace_node(&old->rb_hole_size, 589 rb_replace_node(&old->rb_hole_size,
588 &new->rb_hole_size, 590 &new->rb_hole_size,
589 &old->mm->holes_size); 591 &mm->holes_size);
590 rb_replace_node(&old->rb_hole_addr, 592 rb_replace_node(&old->rb_hole_addr,
591 &new->rb_hole_addr, 593 &new->rb_hole_addr,
592 &old->mm->holes_addr); 594 &mm->holes_addr);
593 } 595 }
594 596
595 old->allocated = false; 597 old->allocated = false;
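The drm_mm.c hunk has two parts. The interval tree is an rb_root_cached, so replacing a node must go through rb_replace_node_cached(), which also repoints the cached leftmost entry; plain rb_replace_node() on the inner rb_root would leave the cache referencing the stale node whenever old happened to be the first entry. Saving old->mm up front additionally keeps the code robust, since *new = *old copies the backpointer before the updates. The cached-replace idiom:

        struct rb_root_cached tree = RB_ROOT_CACHED;

        /* swap 'old' for 'new' without rebalancing: */
        rb_replace_node_cached(&old->rb, &new->rb, &tree);
        /* rb_replace_node(&old->rb, &new->rb, &tree.rb_root) would keep
           the cached leftmost pointer on 'old' if it was the first node */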
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index cc78b3d9e5e4..256de7313612 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
382 ida_init(&dev->mode_config.connector_ida); 382 ida_init(&dev->mode_config.connector_ida);
383 spin_lock_init(&dev->mode_config.connector_list_lock); 383 spin_lock_init(&dev->mode_config.connector_list_lock);
384 384
385 init_llist_head(&dev->mode_config.connector_free_list);
386 INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
387
385 drm_mode_create_standard_properties(dev); 388 drm_mode_create_standard_properties(dev);
386 389
387 /* Just to be sure */ 390 /* Just to be sure */
@@ -432,7 +435,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
432 } 435 }
433 drm_connector_list_iter_end(&conn_iter); 436 drm_connector_list_iter_end(&conn_iter);
434 /* connector_iter drops references in a work item. */ 437 /* connector_iter drops references in a work item. */
435 flush_scheduled_work(); 438 flush_work(&dev->mode_config.connector_free_work);
436 if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) { 439 if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
437 drm_connector_list_iter_begin(dev, &conn_iter); 440 drm_connector_list_iter_begin(dev, &conn_iter);
438 drm_for_each_connector_iter(connector, &conn_iter) 441 drm_for_each_connector_iter(connector, &conn_iter)
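With a dedicated connector_free_work item on the mode_config, cleanup can flush_work() exactly that item. The flush_scheduled_work() it replaces waits for everything on the system workqueue, which both over-waits and can deadlock if the caller holds a lock some unrelated queued work also wants. The pairing:

        init_llist_head(&config->connector_free_list);
        INIT_WORK(&config->connector_free_work, drm_connector_free_work_fn);
        /* ... */
        flush_work(&config->connector_free_work);  /* waits only for ours */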
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6c32c89a83a9..638540943c61 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -888,8 +888,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
888 /* If we got force-completed because of GPU reset rather than 888 /* If we got force-completed because of GPU reset rather than
889 * through our IRQ handler, signal the fence now. 889 * through our IRQ handler, signal the fence now.
890 */ 890 */
891 if (exec->fence) 891 if (exec->fence) {
892 dma_fence_signal(exec->fence); 892 dma_fence_signal(exec->fence);
893 dma_fence_put(exec->fence);
894 }
893 895
894 if (exec->bo) { 896 if (exec->bo) {
895 for (i = 0; i < exec->bo_count; i++) { 897 for (i = 0; i < exec->bo_count; i++) {
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 61b2e5377993..26eddbb62893 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
139 list_move_tail(&exec->head, &vc4->job_done_list); 139 list_move_tail(&exec->head, &vc4->job_done_list);
140 if (exec->fence) { 140 if (exec->fence) {
141 dma_fence_signal_locked(exec->fence); 141 dma_fence_signal_locked(exec->fence);
142 dma_fence_put(exec->fence);
142 exec->fence = NULL; 143 exec->fence = NULL;
143 } 144 }
144 vc4_submit_next_render_job(dev); 145 vc4_submit_next_render_job(dev);
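Both vc4 hunks fix the same leak: the job holds its own reference on the fence it installed, and dma_fence_signal() wakes waiters without dropping that reference, so every path that signals must also put. Where the structure lives on (the IRQ path), the pointer is cleared so no later path signals or puts twice:

        if (exec->fence) {
                dma_fence_signal(exec->fence);  /* wake waiters */
                dma_fence_put(exec->fence);     /* drop our reference */
                exec->fence = NULL;             /* guard against double put */
        }

(The vc4_gem.c hunk omits the NULL store since exec is being torn down there.)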
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c
index bd126a7c6da2..7da75644c750 100644
--- a/drivers/hwtracing/stm/ftrace.c
+++ b/drivers/hwtracing/stm/ftrace.c
@@ -42,9 +42,11 @@ static struct stm_ftrace {
42 * @len: length of the data packet 42 * @len: length of the data packet
43 */ 43 */
44static void notrace 44static void notrace
45stm_ftrace_write(const void *buf, unsigned int len) 45stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
46{ 46{
47 stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len); 47 struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);
48
49 stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
48} 50}
49 51
50static int stm_ftrace_link(struct stm_source_data *data) 52static int stm_ftrace_link(struct stm_source_data *data)
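The stm/ftrace change tracks a trace_export API change: the write callback now receives the struct trace_export it was registered with, and the handler recovers its enclosing state via container_of() instead of touching the file-scope stm_ftrace singleton directly. The idiom, with the struct layout implied by the hunk:

        static void notrace
        stm_ftrace_write(struct trace_export *export, const void *buf,
                         unsigned int len)
        {
                struct stm_ftrace *stm =
                        container_of(export, struct stm_ftrace, ftrace);

                stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len);
        }

Carrying the context in the callback argument keeps the handler correct even if more than one export ever exists.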
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 0d05dadb2dc5..44cffad43701 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev)
379 return 0; 379 return 0;
380} 380}
381 381
382static struct platform_device_id cht_wc_i2c_adap_id_table[] = { 382static const struct platform_device_id cht_wc_i2c_adap_id_table[] = {
383 { .name = "cht_wcove_ext_chgr" }, 383 { .name = "cht_wcove_ext_chgr" },
384 {}, 384 {},
385}; 385};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 174579d32e5f..462948e2c535 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
983 983
984 if (adapdata->smba) { 984 if (adapdata->smba) {
985 i2c_del_adapter(adap); 985 i2c_del_adapter(adap);
986 if (adapdata->port == (0 << 1)) { 986 if (adapdata->port == (0 << piix4_port_shift_sb800)) {
987 release_region(adapdata->smba, SMBIOSIZE); 987 release_region(adapdata->smba, SMBIOSIZE);
988 if (adapdata->sb800_main) 988 if (adapdata->sb800_main)
989 release_region(SB800_PIIX4_SMB_IDX, 2); 989 release_region(SB800_PIIX4_SMB_IDX, 2);
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index dab51761f8c5..d4f9cef251ac 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,10 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * i2c-stm32.h 3 * i2c-stm32.h
3 * 4 *
4 * Copyright (C) M'boumba Cedric Madianga 2017 5 * Copyright (C) M'boumba Cedric Madianga 2017
6 * Copyright (C) STMicroelectronics 2017
5 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 7 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
6 * 8 *
7 * License terms: GNU General Public License (GPL), version 2
8 */ 9 */
9 10
10#ifndef _I2C_STM32_H 11#ifndef _I2C_STM32_H
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 4ec108496f15..47c8d00de53f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for STMicroelectronics STM32 I2C controller 3 * Driver for STMicroelectronics STM32 I2C controller
3 * 4 *
@@ -6,11 +7,11 @@
6 * http://www.st.com/resource/en/reference_manual/DM00031020.pdf 7 * http://www.st.com/resource/en/reference_manual/DM00031020.pdf
7 * 8 *
8 * Copyright (C) M'boumba Cedric Madianga 2016 9 * Copyright (C) M'boumba Cedric Madianga 2016
10 * Copyright (C) STMicroelectronics 2017
9 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 11 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
10 * 12 *
11 * This driver is based on i2c-st.c 13 * This driver is based on i2c-st.c
12 * 14 *
13 * License terms: GNU General Public License (GPL), version 2
14 */ 15 */
15 16
16#include <linux/clk.h> 17#include <linux/clk.h>
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index d4a6e9c2e9aa..b445b3bb0bb1 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for STMicroelectronics STM32F7 I2C controller 3 * Driver for STMicroelectronics STM32F7 I2C controller
3 * 4 *
@@ -7,11 +8,11 @@
7 * http://www.st.com/resource/en/reference_manual/dm00124865.pdf 8 * http://www.st.com/resource/en/reference_manual/dm00124865.pdf
8 * 9 *
9 * Copyright (C) M'boumba Cedric Madianga 2017 10 * Copyright (C) M'boumba Cedric Madianga 2017
11 * Copyright (C) STMicroelectronics 2017
10 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> 12 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
11 * 13 *
12 * This driver is based on i2c-stm32f4.c 14 * This driver is based on i2c-stm32f4.c
13 * 15 *
14 * License terms: GNU General Public License (GPL), version 2
15 */ 16 */
16#include <linux/clk.h> 17#include <linux/clk.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f6983357145d..6294a7001d33 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4458,7 +4458,7 @@ out:
4458 return skb->len; 4458 return skb->len;
4459} 4459}
4460 4460
4461static const struct rdma_nl_cbs cma_cb_table[] = { 4461static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
4462 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, 4462 [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
4463}; 4463};
4464 4464
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5e1be4949d5f..30914f3baa5f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1146,7 +1146,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
1146} 1146}
1147EXPORT_SYMBOL(ib_get_net_dev_by_params); 1147EXPORT_SYMBOL(ib_get_net_dev_by_params);
1148 1148
1149static const struct rdma_nl_cbs ibnl_ls_cb_table[] = { 1149static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
1150 [RDMA_NL_LS_OP_RESOLVE] = { 1150 [RDMA_NL_LS_OP_RESOLVE] = {
1151 .doit = ib_nl_handle_resolve_resp, 1151 .doit = ib_nl_handle_resolve_resp,
1152 .flags = RDMA_NL_ADMIN_PERM, 1152 .flags = RDMA_NL_ADMIN_PERM,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index e9e189ec7502..5d676cff41f4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
80} 80}
81EXPORT_SYMBOL(iwcm_reject_msg); 81EXPORT_SYMBOL(iwcm_reject_msg);
82 82
83static struct rdma_nl_cbs iwcm_nl_cb_table[] = { 83static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
84 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, 84 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
85 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, 85 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
86 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, 86 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 2fae850a3eff..9a05245a1acf 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -303,7 +303,7 @@ out: cb->args[0] = idx;
303 return skb->len; 303 return skb->len;
304} 304}
305 305
306static const struct rdma_nl_cbs nldev_cb_table[] = { 306static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
307 [RDMA_NLDEV_CMD_GET] = { 307 [RDMA_NLDEV_CMD_GET] = {
308 .doit = nldev_get_doit, 308 .doit = nldev_get_doit,
309 .dump = nldev_get_dumpit, 309 .dump = nldev_get_dumpit,
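The four rdma_nl_cbs tables in cma.c, device.c, iwcm.c and nldev.c all gain an explicit length (RDMA_NL_..._NUM_OPS). With designated initializers and no declared length, an array ends at the highest initialized index, so dispatch code that bounds-checks an op against NUM_OPS but then indexes the table can read past the real end. Declaring the full length guarantees every valid op lands on a real, zero-filled slot. Illustrative stand-ins for the rdma_nl types:

        static const struct handler cb_table[MAX_OPS] = {
                [OP_GET] = { .doit = op_get },  /* all other slots zeroed */
        };

        if (op >= MAX_OPS || !cb_table[op].doit)
                return -EINVAL;     /* slot exists; NULL means unhandled */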
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index a337386652b0..feafdb961c48 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -739,8 +739,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
739 if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) 739 if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
740 return 0; 740 return 0;
741 741
742 if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) 742 if (map->agent.qp->qp_type == IB_QPT_SMI) {
743 return -EACCES; 743 if (!map->agent.smp_allowed)
744 return -EACCES;
745 return 0;
746 }
744 747
745 return ib_security_pkey_access(map->agent.device, 748 return ib_security_pkey_access(map->agent.device,
746 map->agent.port_num, 749 map->agent.port_num,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 16d55710b116..d0202bb176a4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
1971 goto release_qp; 1971 goto release_qp;
1972 } 1972 }
1973 1973
1974 if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
1975 !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
1976 ret = -EINVAL;
1977 goto release_qp;
1978 }
1979
1974 attr->qp_state = cmd->base.qp_state; 1980 attr->qp_state = cmd->base.qp_state;
1975 attr->cur_qp_state = cmd->base.cur_qp_state; 1981 attr->cur_qp_state = cmd->base.cur_qp_state;
1976 attr->path_mtu = cmd->base.path_mtu; 1982 attr->path_mtu = cmd->base.path_mtu;
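The modify_qp hunk validates the user-supplied alternate-path port number before the attribute is built, mirroring the existing check on the primary port; an out-of-range alt_port_num could otherwise index per-port state inside the driver. The check only applies when IB_QP_ALT_PATH is actually being modified:

        if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
            !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
                ret = -EINVAL;      /* reject at the uAPI boundary */
                goto release_qp;
        }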
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ea55e95cd2c5..b7bfc536e00f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,6 +395,11 @@ next_cqe:
395 395
396static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) 396static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
397{ 397{
398 if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
399 WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
400 return 0;
401 }
402
398 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) 403 if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
399 return 0; 404 return 0;
400 405
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5ee7fe433136..38bddd02a943 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
868 868
869 qhp = to_c4iw_qp(ibqp); 869 qhp = to_c4iw_qp(ibqp);
870 spin_lock_irqsave(&qhp->lock, flag); 870 spin_lock_irqsave(&qhp->lock, flag);
871 if (t4_wq_in_error(&qhp->wq)) { 871
872 /*
873 * If the qp has been flushed, then just insert a special
874 * drain cqe.
875 */
876 if (qhp->wq.flushed) {
872 spin_unlock_irqrestore(&qhp->lock, flag); 877 spin_unlock_irqrestore(&qhp->lock, flag);
873 complete_sq_drain_wr(qhp, wr); 878 complete_sq_drain_wr(qhp, wr);
874 return err; 879 return err;
@@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1011 1016
1012 qhp = to_c4iw_qp(ibqp); 1017 qhp = to_c4iw_qp(ibqp);
1013 spin_lock_irqsave(&qhp->lock, flag); 1018 spin_lock_irqsave(&qhp->lock, flag);
1014 if (t4_wq_in_error(&qhp->wq)) { 1019
1020 /*
1021 * If the qp has been flushed, then just insert a special
1022 * drain cqe.
1023 */
1024 if (qhp->wq.flushed) {
1015 spin_unlock_irqrestore(&qhp->lock, flag); 1025 spin_unlock_irqrestore(&qhp->lock, flag);
1016 complete_rq_drain_wr(qhp, wr); 1026 complete_rq_drain_wr(qhp, wr);
1017 return err; 1027 return err;
@@ -1285,21 +1295,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
1285 spin_unlock_irqrestore(&rchp->lock, flag); 1295 spin_unlock_irqrestore(&rchp->lock, flag);
1286 1296
1287 if (schp == rchp) { 1297 if (schp == rchp) {
1288 if (t4_clear_cq_armed(&rchp->cq) && 1298 if ((rq_flushed || sq_flushed) &&
1289 (rq_flushed || sq_flushed)) { 1299 t4_clear_cq_armed(&rchp->cq)) {
1290 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1300 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1291 (*rchp->ibcq.comp_handler)(&rchp->ibcq, 1301 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1292 rchp->ibcq.cq_context); 1302 rchp->ibcq.cq_context);
1293 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 1303 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1294 } 1304 }
1295 } else { 1305 } else {
1296 if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { 1306 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
1297 spin_lock_irqsave(&rchp->comp_handler_lock, flag); 1307 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1298 (*rchp->ibcq.comp_handler)(&rchp->ibcq, 1308 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1299 rchp->ibcq.cq_context); 1309 rchp->ibcq.cq_context);
1300 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 1310 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1301 } 1311 }
1302 if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { 1312 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
1303 spin_lock_irqsave(&schp->comp_handler_lock, flag); 1313 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1304 (*schp->ibcq.comp_handler)(&schp->ibcq, 1314 (*schp->ibcq.comp_handler)(&schp->ibcq,
1305 schp->ibcq.cq_context); 1315 schp->ibcq.cq_context);
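The __flush_qp() hunks swap the operands of each &&. t4_clear_cq_armed() is not a pure test: it consumes the CQ's one-shot "armed" state. Evaluated first, it would disarm the CQ even when nothing was flushed, so no completion handler runs and the notification is simply lost. With the cheap, side-effect-free flag first, short-circuit evaluation only touches the latch when a handler is really about to run. Sketched with stand-ins for the t4_* and ibcq helpers:

        /* bad: consumes the armed latch even when there is no work */
        if (test_and_clear_armed(cq) && flushed)
                run_comp_handler(cq);

        /* good: only consume the latch when we will actually notify */
        if (flushed && test_and_clear_armed(cq))
                run_comp_handler(cq);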
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 013049bcdb53..caf490ab24c8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
666 return (-EOPNOTSUPP); 666 return (-EOPNOTSUPP);
667 } 667 }
668 668
669 if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
670 MLX4_IB_RX_HASH_DST_IPV4 |
671 MLX4_IB_RX_HASH_SRC_IPV6 |
672 MLX4_IB_RX_HASH_DST_IPV6 |
673 MLX4_IB_RX_HASH_SRC_PORT_TCP |
674 MLX4_IB_RX_HASH_DST_PORT_TCP |
675 MLX4_IB_RX_HASH_SRC_PORT_UDP |
676 MLX4_IB_RX_HASH_DST_PORT_UDP)) {
677 pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
678 ucmd->rx_hash_fields_mask);
679 return (-EOPNOTSUPP);
680 }
681
669 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && 682 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
670 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { 683 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
671 rss_ctx->flags = MLX4_RSS_IPV4; 684 rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
691 return (-EOPNOTSUPP); 704 return (-EOPNOTSUPP);
692 } 705 }
693 706
694 if (rss_ctx->flags & MLX4_RSS_IPV4) { 707 if (rss_ctx->flags & MLX4_RSS_IPV4)
695 rss_ctx->flags |= MLX4_RSS_UDP_IPV4; 708 rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
696 } else if (rss_ctx->flags & MLX4_RSS_IPV6) { 709 if (rss_ctx->flags & MLX4_RSS_IPV6)
697 rss_ctx->flags |= MLX4_RSS_UDP_IPV6; 710 rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
698 } else { 711 if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
699 pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); 712 pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
700 return (-EOPNOTSUPP); 713 return (-EOPNOTSUPP);
701 } 714 }
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
707 720
708 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && 721 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
709 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { 722 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
710 if (rss_ctx->flags & MLX4_RSS_IPV4) { 723 if (rss_ctx->flags & MLX4_RSS_IPV4)
711 rss_ctx->flags |= MLX4_RSS_TCP_IPV4; 724 rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
712 } else if (rss_ctx->flags & MLX4_RSS_IPV6) { 725 if (rss_ctx->flags & MLX4_RSS_IPV6)
713 rss_ctx->flags |= MLX4_RSS_TCP_IPV6; 726 rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
714 } else { 727 if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
715 pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); 728 pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
716 return (-EOPNOTSUPP); 729 return (-EOPNOTSUPP);
717 } 730 }
718
719 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || 731 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
720 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { 732 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
721 pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); 733 pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
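The mlx4 hunks make two related changes: set_qp_rss() first rejects any bit of the user's rx_hash_fields_mask outside the supported set, and the UDP/TCP port handling becomes independent if-checks so a mask covering both IPv4 and IPv6 sets both RSS flags instead of only the first branch taken. The whitelist-then-accumulate shape, with FIELD_*/RSS_* standing in for the MLX4_IB_RX_HASH_* and MLX4_RSS_* constants:

        #define SUPPORTED (FIELD_V4 | FIELD_V6 | FIELD_UDP | FIELD_TCP)

        if (mask & ~SUPPORTED)          /* any unknown bit set? */
                return -EOPNOTSUPP;

        if (mask & FIELD_V4)
                flags |= RSS_V4;        /* independent, not else-if */
        if (mask & FIELD_V6)
                flags |= RSS_V6;
        if (!(flags & (RSS_V4 | RSS_V6)))
                return -EOPNOTSUPP;     /* ports need an IP base */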
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 87f4bd99cdf7..2c13123bfd69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1145 noio_flag = memalloc_noio_save(); 1145 noio_flag = memalloc_noio_save();
1146 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring)); 1146 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
1147 if (!p->tx_ring) { 1147 if (!p->tx_ring) {
1148 memalloc_noio_restore(noio_flag);
1148 ret = -ENOMEM; 1149 ret = -ENOMEM;
1149 goto err_tx; 1150 goto err_tx;
1150 } 1151 }
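Note: the ipoib fix restores a dropped pairing. memalloc_noio_save() puts the task into a NOIO allocation scope, and every exit path, including the vzalloc() failure path, must hand the saved flags back to memalloc_noio_restore(). The rule in isolation (kernel API names as in the hunk; this is a sketch, not the full function):

	unsigned int noio_flag;

	noio_flag = memalloc_noio_save();	/* enter NOIO scope */
	p->tx_ring = vzalloc(size);
	if (!p->tx_ring) {
		memalloc_noio_restore(noio_flag); /* undo before bailing out */
		return -ENOMEM;
	}
	/* ... set up the ring ... */
	memalloc_noio_restore(noio_flag);	/* normal exit */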
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b8ac591aaaa7..c546b567f3b5 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1611 int l; 1611 int l;
1612 struct dm_buffer *b, *tmp; 1612 struct dm_buffer *b, *tmp;
1613 unsigned long freed = 0; 1613 unsigned long freed = 0;
1614 unsigned long count = nr_to_scan; 1614 unsigned long count = c->n_buffers[LIST_CLEAN] +
1615 c->n_buffers[LIST_DIRTY];
1615 unsigned long retain_target = get_retain_buffers(c); 1616 unsigned long retain_target = get_retain_buffers(c);
1616 1617
1617 for (l = 0; l < LIST_SIZE; l++) { 1618 for (l = 0; l < LIST_SIZE; l++) {
@@ -1647,8 +1648,11 @@ static unsigned long
1647dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) 1648dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1648{ 1649{
1649 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); 1650 struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
1651 unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) +
1652 READ_ONCE(c->n_buffers[LIST_DIRTY]);
1653 unsigned long retain_target = get_retain_buffers(c);
1650 1654
1651 return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); 1655 return (count < retain_target) ? 0 : (count - retain_target);
1652} 1656}
1653 1657
1654/* 1658/*
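Note: both dm-bufio hunks make __scan() and dm_bufio_shrink_count() report the same quantity, the number of clean plus dirty buffers above the retain target, clamped at zero so the unsigned subtraction can never wrap. The arithmetic as a self-contained check:

#include <stdio.h>

static unsigned long reclaimable(unsigned long clean, unsigned long dirty,
				 unsigned long retain_target)
{
	unsigned long count = clean + dirty;

	return (count < retain_target) ? 0 : (count - retain_target);
}

int main(void)
{
	printf("%lu\n", reclaimable(100, 20, 50));	/* 70 */
	printf("%lu\n", reclaimable(10, 20, 50));	/* 0, no underflow */
	return 0;
}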
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index cf23a14f9c6a..47407e43b96a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void)
3472{ 3472{
3473 int r; 3473 int r;
3474 3474
3475 r = dm_register_target(&cache_target);
3476 if (r) {
3477 DMERR("cache target registration failed: %d", r);
3478 return r;
3479 }
3480
3481 migration_cache = KMEM_CACHE(dm_cache_migration, 0); 3475 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3482 if (!migration_cache) { 3476 if (!migration_cache) {
3483 dm_unregister_target(&cache_target); 3477 dm_unregister_target(&cache_target);
3484 return -ENOMEM; 3478 return -ENOMEM;
3485 } 3479 }
3486 3480
3481 r = dm_register_target(&cache_target);
3482 if (r) {
3483 DMERR("cache target registration failed: %d", r);
3484 return r;
3485 }
3486
3487 return 0; 3487 return 0;
3488} 3488}
3489 3489
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c8faa2b85842..f7810cc869ac 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -458,6 +458,38 @@ do { \
458} while (0) 458} while (0)
459 459
460/* 460/*
461 * Check whether bios must be queued in the device-mapper core rather
462 * than here in the target.
463 *
464 * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold
465 * the same value then we are not between multipath_presuspend()
466 * and multipath_resume() calls and we have no need to check
467 * for the DMF_NOFLUSH_SUSPENDING flag.
468 */
469static bool __must_push_back(struct multipath *m, unsigned long flags)
470{
471 return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
472 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
473 dm_noflush_suspending(m->ti));
474}
475
476/*
477 * The following functions use READ_ONCE to get atomic access to
478 * all of m->flags, avoiding the need to take the spinlock
479 */
480static bool must_push_back_rq(struct multipath *m)
481{
482 unsigned long flags = READ_ONCE(m->flags);
483 return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags);
484}
485
486static bool must_push_back_bio(struct multipath *m)
487{
488 unsigned long flags = READ_ONCE(m->flags);
489 return __must_push_back(m, flags);
490}
491
492/*
461 * Map cloned requests (request-based multipath) 493 * Map cloned requests (request-based multipath)
462 */ 494 */
463static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, 495static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
478 pgpath = choose_pgpath(m, nr_bytes); 510 pgpath = choose_pgpath(m, nr_bytes);
479 511
480 if (!pgpath) { 512 if (!pgpath) {
481 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 513 if (must_push_back_rq(m))
482 return DM_MAPIO_DELAY_REQUEUE; 514 return DM_MAPIO_DELAY_REQUEUE;
483 dm_report_EIO(m); /* Failed */ 515 dm_report_EIO(m); /* Failed */
484 return DM_MAPIO_KILL; 516 return DM_MAPIO_KILL;
@@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
553 } 585 }
554 586
555 if (!pgpath) { 587 if (!pgpath) {
556 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) 588 if (must_push_back_bio(m))
557 return DM_MAPIO_REQUEUE; 589 return DM_MAPIO_REQUEUE;
558 dm_report_EIO(m); 590 dm_report_EIO(m);
559 return DM_MAPIO_KILL; 591 return DM_MAPIO_KILL;
@@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
651 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, 683 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags,
652 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) || 684 (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) ||
653 (!save_old_value && queue_if_no_path)); 685 (!save_old_value && queue_if_no_path));
654 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, 686 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
655 queue_if_no_path || dm_noflush_suspending(m->ti));
656 spin_unlock_irqrestore(&m->lock, flags); 687 spin_unlock_irqrestore(&m->lock, flags);
657 688
658 if (!queue_if_no_path) { 689 if (!queue_if_no_path) {
@@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
1486 fail_path(pgpath); 1517 fail_path(pgpath);
1487 1518
1488 if (atomic_read(&m->nr_valid_paths) == 0 && 1519 if (atomic_read(&m->nr_valid_paths) == 0 &&
1489 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1520 !must_push_back_rq(m)) {
1490 if (error == BLK_STS_IOERR) 1521 if (error == BLK_STS_IOERR)
1491 dm_report_EIO(m); 1522 dm_report_EIO(m);
1492 /* complete with the original error */ 1523 /* complete with the original error */
@@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
1521 1552
1522 if (atomic_read(&m->nr_valid_paths) == 0 && 1553 if (atomic_read(&m->nr_valid_paths) == 0 &&
1523 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { 1554 !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
1524 dm_report_EIO(m); 1555 if (must_push_back_bio(m)) {
1525 *error = BLK_STS_IOERR; 1556 r = DM_ENDIO_REQUEUE;
1557 } else {
1558 dm_report_EIO(m);
1559 *error = BLK_STS_IOERR;
1560 }
1526 goto done; 1561 goto done;
1527 } 1562 }
1528 1563
@@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void)
1957{ 1992{
1958 int r; 1993 int r;
1959 1994
1960 r = dm_register_target(&multipath_target);
1961 if (r < 0) {
1962 DMERR("request-based register failed %d", r);
1963 r = -EINVAL;
1964 goto bad_register_target;
1965 }
1966
1967 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); 1995 kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
1968 if (!kmultipathd) { 1996 if (!kmultipathd) {
1969 DMERR("failed to create workqueue kmpathd"); 1997 DMERR("failed to create workqueue kmpathd");
@@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void)
1985 goto bad_alloc_kmpath_handlerd; 2013 goto bad_alloc_kmpath_handlerd;
1986 } 2014 }
1987 2015
2016 r = dm_register_target(&multipath_target);
2017 if (r < 0) {
2018 DMERR("request-based register failed %d", r);
2019 r = -EINVAL;
2020 goto bad_register_target;
2021 }
2022
1988 return 0; 2023 return 0;
1989 2024
2025bad_register_target:
2026 destroy_workqueue(kmpath_handlerd);
1990bad_alloc_kmpath_handlerd: 2027bad_alloc_kmpath_handlerd:
1991 destroy_workqueue(kmultipathd); 2028 destroy_workqueue(kmultipathd);
1992bad_alloc_kmultipathd: 2029bad_alloc_kmultipathd:
1993 dm_unregister_target(&multipath_target);
1994bad_register_target:
1995 return r; 2030 return r;
1996} 2031}
1997 2032
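Note: the new dm-mpath helpers snapshot m->flags once with READ_ONCE() and test both bits against that single snapshot, so the queue/saved comparison is consistent without taking m->lock. A minimal user-space sketch of the snapshot-then-test pattern (C11 atomics standing in for READ_ONCE; flag values illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define F_QUEUE_IF_NO_PATH       (1UL << 0)
#define F_SAVED_QUEUE_IF_NO_PATH (1UL << 1)

static bool must_push_back(_Atomic unsigned long *flags_p,
			   bool noflush_suspending)
{
	/* one load: both bits are taken from the same instant */
	unsigned long flags = atomic_load_explicit(flags_p,
						   memory_order_relaxed);

	return (!!(flags & F_QUEUE_IF_NO_PATH) !=
		!!(flags & F_SAVED_QUEUE_IF_NO_PATH)) &&
	       noflush_suspending;
}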
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 1113b42e1eda..a0613bd8ed00 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void)
2411 return r; 2411 return r;
2412 } 2412 }
2413 2413
2414 r = dm_register_target(&snapshot_target);
2415 if (r < 0) {
2416 DMERR("snapshot target register failed %d", r);
2417 goto bad_register_snapshot_target;
2418 }
2419
2420 r = dm_register_target(&origin_target);
2421 if (r < 0) {
2422 DMERR("Origin target register failed %d", r);
2423 goto bad_register_origin_target;
2424 }
2425
2426 r = dm_register_target(&merge_target);
2427 if (r < 0) {
2428 DMERR("Merge target register failed %d", r);
2429 goto bad_register_merge_target;
2430 }
2431
2432 r = init_origin_hash(); 2414 r = init_origin_hash();
2433 if (r) { 2415 if (r) {
2434 DMERR("init_origin_hash failed."); 2416 DMERR("init_origin_hash failed.");
@@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void)
2449 goto bad_pending_cache; 2431 goto bad_pending_cache;
2450 } 2432 }
2451 2433
2434 r = dm_register_target(&snapshot_target);
2435 if (r < 0) {
2436 DMERR("snapshot target register failed %d", r);
2437 goto bad_register_snapshot_target;
2438 }
2439
2440 r = dm_register_target(&origin_target);
2441 if (r < 0) {
2442 DMERR("Origin target register failed %d", r);
2443 goto bad_register_origin_target;
2444 }
2445
2446 r = dm_register_target(&merge_target);
2447 if (r < 0) {
2448 DMERR("Merge target register failed %d", r);
2449 goto bad_register_merge_target;
2450 }
2451
2452 return 0; 2452 return 0;
2453 2453
2454bad_pending_cache:
2455 kmem_cache_destroy(exception_cache);
2456bad_exception_cache:
2457 exit_origin_hash();
2458bad_origin_hash:
2459 dm_unregister_target(&merge_target);
2460bad_register_merge_target: 2454bad_register_merge_target:
2461 dm_unregister_target(&origin_target); 2455 dm_unregister_target(&origin_target);
2462bad_register_origin_target: 2456bad_register_origin_target:
2463 dm_unregister_target(&snapshot_target); 2457 dm_unregister_target(&snapshot_target);
2464bad_register_snapshot_target: 2458bad_register_snapshot_target:
2459 kmem_cache_destroy(pending_cache);
2460bad_pending_cache:
2461 kmem_cache_destroy(exception_cache);
2462bad_exception_cache:
2463 exit_origin_hash();
2464bad_origin_hash:
2465 dm_exception_store_exit(); 2465 dm_exception_store_exit();
2466 2466
2467 return r; 2467 return r;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 88130b5d95f9..aaffd0c0ee9a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
453 453
454 refcount_set(&dd->count, 1); 454 refcount_set(&dd->count, 1);
455 list_add(&dd->list, &t->devices); 455 list_add(&dd->list, &t->devices);
456 goto out;
456 457
457 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { 458 } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
458 r = upgrade_mode(dd, mode, t->md); 459 r = upgrade_mode(dd, mode, t->md);
459 if (r) 460 if (r)
460 return r; 461 return r;
461 refcount_inc(&dd->count);
462 } 462 }
463 463 refcount_inc(&dd->count);
464out:
464 *result = dd->dm_dev; 465 *result = dd->dm_dev;
465 return 0; 466 return 0;
466} 467}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 89e5dff9b4cf..f91d771fff4b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4355,30 +4355,28 @@ static struct target_type thin_target = {
4355 4355
4356static int __init dm_thin_init(void) 4356static int __init dm_thin_init(void)
4357{ 4357{
4358 int r; 4358 int r = -ENOMEM;
4359 4359
4360 pool_table_init(); 4360 pool_table_init();
4361 4361
4362 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4363 if (!_new_mapping_cache)
4364 return r;
4365
4362 r = dm_register_target(&thin_target); 4366 r = dm_register_target(&thin_target);
4363 if (r) 4367 if (r)
4364 return r; 4368 goto bad_new_mapping_cache;
4365 4369
4366 r = dm_register_target(&pool_target); 4370 r = dm_register_target(&pool_target);
4367 if (r) 4371 if (r)
4368 goto bad_pool_target; 4372 goto bad_thin_target;
4369
4370 r = -ENOMEM;
4371
4372 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
4373 if (!_new_mapping_cache)
4374 goto bad_new_mapping_cache;
4375 4373
4376 return 0; 4374 return 0;
4377 4375
4378bad_new_mapping_cache: 4376bad_thin_target:
4379 dm_unregister_target(&pool_target);
4380bad_pool_target:
4381 dm_unregister_target(&thin_target); 4377 dm_unregister_target(&thin_target);
4378bad_new_mapping_cache:
4379 kmem_cache_destroy(_new_mapping_cache);
4382 4380
4383 return r; 4381 return r;
4384} 4382}
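Note: dm-cache, dm-mpath, dm-snapshot and dm-thin all receive the same init reordering: allocate every cache and workqueue first, make dm_register_target() the final step, and unwind in exact reverse order on failure, so a target is never visible to userspace while its backing resources are missing. The shape of the pattern, with hypothetical my_target/my_cache/my_wq names:

static int __init my_init(void)
{
	int r = -ENOMEM;

	my_cache = KMEM_CACHE(my_object, 0);
	if (!my_cache)
		return r;

	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		goto bad_cache;

	r = dm_register_target(&my_target);	/* last: we become visible */
	if (r)
		goto bad_wq;

	return 0;

bad_wq:					/* unwind in reverse order */
	destroy_workqueue(my_wq);
bad_cache:
	kmem_cache_destroy(my_cache);
	return r;
}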
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 305a7a464d09..4d63ac8a82e0 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -562,7 +562,7 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
562static int at24_read(void *priv, unsigned int off, void *val, size_t count) 562static int at24_read(void *priv, unsigned int off, void *val, size_t count)
563{ 563{
564 struct at24_data *at24 = priv; 564 struct at24_data *at24 = priv;
565 struct i2c_client *client; 565 struct device *dev = &at24->client[0]->dev;
566 char *buf = val; 566 char *buf = val;
567 int ret; 567 int ret;
568 568
@@ -572,11 +572,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
572 if (off + count > at24->chip.byte_len) 572 if (off + count > at24->chip.byte_len)
573 return -EINVAL; 573 return -EINVAL;
574 574
575 client = at24_translate_offset(at24, &off); 575 ret = pm_runtime_get_sync(dev);
576
577 ret = pm_runtime_get_sync(&client->dev);
578 if (ret < 0) { 576 if (ret < 0) {
579 pm_runtime_put_noidle(&client->dev); 577 pm_runtime_put_noidle(dev);
580 return ret; 578 return ret;
581 } 579 }
582 580
@@ -592,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
592 status = at24->read_func(at24, buf, off, count); 590 status = at24->read_func(at24, buf, off, count);
593 if (status < 0) { 591 if (status < 0) {
594 mutex_unlock(&at24->lock); 592 mutex_unlock(&at24->lock);
595 pm_runtime_put(&client->dev); 593 pm_runtime_put(dev);
596 return status; 594 return status;
597 } 595 }
598 buf += status; 596 buf += status;
@@ -602,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
602 600
603 mutex_unlock(&at24->lock); 601 mutex_unlock(&at24->lock);
604 602
605 pm_runtime_put(&client->dev); 603 pm_runtime_put(dev);
606 604
607 return 0; 605 return 0;
608} 606}
@@ -610,7 +608,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count)
610static int at24_write(void *priv, unsigned int off, void *val, size_t count) 608static int at24_write(void *priv, unsigned int off, void *val, size_t count)
611{ 609{
612 struct at24_data *at24 = priv; 610 struct at24_data *at24 = priv;
613 struct i2c_client *client; 611 struct device *dev = &at24->client[0]->dev;
614 char *buf = val; 612 char *buf = val;
615 int ret; 613 int ret;
616 614
@@ -620,11 +618,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
620 if (off + count > at24->chip.byte_len) 618 if (off + count > at24->chip.byte_len)
621 return -EINVAL; 619 return -EINVAL;
622 620
623 client = at24_translate_offset(at24, &off); 621 ret = pm_runtime_get_sync(dev);
624
625 ret = pm_runtime_get_sync(&client->dev);
626 if (ret < 0) { 622 if (ret < 0) {
627 pm_runtime_put_noidle(&client->dev); 623 pm_runtime_put_noidle(dev);
628 return ret; 624 return ret;
629 } 625 }
630 626
@@ -640,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
640 status = at24->write_func(at24, buf, off, count); 636 status = at24->write_func(at24, buf, off, count);
641 if (status < 0) { 637 if (status < 0) {
642 mutex_unlock(&at24->lock); 638 mutex_unlock(&at24->lock);
643 pm_runtime_put(&client->dev); 639 pm_runtime_put(dev);
644 return status; 640 return status;
645 } 641 }
646 buf += status; 642 buf += status;
@@ -650,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
650 646
651 mutex_unlock(&at24->lock); 647 mutex_unlock(&at24->lock);
652 648
653 pm_runtime_put(&client->dev); 649 pm_runtime_put(dev);
654 650
655 return 0; 651 return 0;
656} 652}
@@ -880,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
880 at24->nvmem_config.reg_read = at24_read; 876 at24->nvmem_config.reg_read = at24_read;
881 at24->nvmem_config.reg_write = at24_write; 877 at24->nvmem_config.reg_write = at24_write;
882 at24->nvmem_config.priv = at24; 878 at24->nvmem_config.priv = at24;
883 at24->nvmem_config.stride = 4; 879 at24->nvmem_config.stride = 1;
884 at24->nvmem_config.word_size = 1; 880 at24->nvmem_config.word_size = 1;
885 at24->nvmem_config.size = chip.byte_len; 881 at24->nvmem_config.size = chip.byte_len;
886 882
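Note: the at24 hunks route every runtime-PM call through the base client's device, since the dummy clients returned by at24_translate_offset() carry no runtime-PM state of their own, and drop nvmem_config.stride to 1 because byte access needs no alignment. The get/put pairing those hunks preserve, in isolation:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... talk to the hardware ... */
	pm_runtime_put(dev);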
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index f06cd91964ce..79a5b985ccf5 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -75,9 +75,11 @@ struct mmc_fixup {
75#define EXT_CSD_REV_ANY (-1u) 75#define EXT_CSD_REV_ANY (-1u)
76 76
77#define CID_MANFID_SANDISK 0x2 77#define CID_MANFID_SANDISK 0x2
78#define CID_MANFID_ATP 0x9
78#define CID_MANFID_TOSHIBA 0x11 79#define CID_MANFID_TOSHIBA 0x11
79#define CID_MANFID_MICRON 0x13 80#define CID_MANFID_MICRON 0x13
80#define CID_MANFID_SAMSUNG 0x15 81#define CID_MANFID_SAMSUNG 0x15
82#define CID_MANFID_APACER 0x27
81#define CID_MANFID_KINGSTON 0x70 83#define CID_MANFID_KINGSTON 0x70
82#define CID_MANFID_HYNIX 0x90 84#define CID_MANFID_HYNIX 0x90
83 85
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d209fb466979..208a762b87ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1290,7 +1290,7 @@ out_err:
1290 1290
1291static void mmc_select_driver_type(struct mmc_card *card) 1291static void mmc_select_driver_type(struct mmc_card *card)
1292{ 1292{
1293 int card_drv_type, drive_strength, drv_type; 1293 int card_drv_type, drive_strength, drv_type = 0;
1294 int fixed_drv_type = card->host->fixed_drv_type; 1294 int fixed_drv_type = card->host->fixed_drv_type;
1295 1295
1296 card_drv_type = card->ext_csd.raw_driver_strength | 1296 card_drv_type = card->ext_csd.raw_driver_strength |
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index f664e9cbc9f8..75d317623852 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -53,6 +53,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = {
53 MMC_QUIRK_BLK_NO_CMD23), 53 MMC_QUIRK_BLK_NO_CMD23),
54 54
55 /* 55 /*
56 * Some SD cards lock up while using CMD23 multiblock transfers.

57 */
58 MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd,
59 MMC_QUIRK_BLK_NO_CMD23),
60 MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd,
61 MMC_QUIRK_BLK_NO_CMD23),
62
63 /*
56 * Some MMC cards need longer data read timeout than indicated in CSD. 64 * Some MMC cards need longer data read timeout than indicated in CSD.
57 */ 65 */
58 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, 66 MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index a7801f6668a5..6315774d72b3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; 338 cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX;
339 break; 339 break;
340 case PHY_INTERFACE_MODE_XGMII: 340 case PHY_INTERFACE_MODE_XGMII:
341 case PHY_INTERFACE_MODE_XAUI:
341 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; 342 cmode = MV88E6XXX_PORT_STS_CMODE_XAUI;
342 break; 343 break;
343 case PHY_INTERFACE_MODE_RXAUI: 344 case PHY_INTERFACE_MODE_RXAUI:
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 57e796870595..105fdb958cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -50,7 +50,7 @@
50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U 50#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
51#define AQ_CFG_PCI_FUNC_PORTS 2U 51#define AQ_CFG_PCI_FUNC_PORTS 2U
52 52
53#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) 53#define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ)
54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) 54#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
55 55
56#define AQ_CFG_SKB_FRAGS_MAX 32U 56#define AQ_CFG_SKB_FRAGS_MAX 32U
@@ -80,6 +80,7 @@
80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ 80#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\ 81 __stringify(NIC_MINOR_DRIVER_VERSION)"."\
82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\ 82 __stringify(NIC_BUILD_DRIVER_VERSION)"."\
83 __stringify(NIC_REVISION_DRIVER_VERSION) 83 __stringify(NIC_REVISION_DRIVER_VERSION) \
84 AQ_CFG_DRV_VERSION_SUFFIX
84 85
85#endif /* AQ_CFG_H */ 86#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 70efb7467bf3..f2d8063a2cef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
66 "OutUCast", 66 "OutUCast",
67 "OutMCast", 67 "OutMCast",
68 "OutBCast", 68 "OutBCast",
69 "InUCastOctects", 69 "InUCastOctets",
70 "OutUCastOctects", 70 "OutUCastOctets",
71 "InMCastOctects", 71 "InMCastOctets",
72 "OutMCastOctects", 72 "OutMCastOctets",
73 "InBCastOctects", 73 "InBCastOctets",
74 "OutBCastOctects", 74 "OutBCastOctets",
75 "InOctects", 75 "InOctets",
76 "OutOctects", 76 "OutOctets",
77 "InPacketsDma", 77 "InPacketsDma",
78 "OutPacketsDma", 78 "OutPacketsDma",
79 "InOctetsDma", 79 "InOctetsDma",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 0207927dc8a6..b3825de6cdfb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -46,6 +46,28 @@ struct aq_hw_link_status_s {
46 unsigned int mbps; 46 unsigned int mbps;
47}; 47};
48 48
49struct aq_stats_s {
50 u64 uprc;
51 u64 mprc;
52 u64 bprc;
53 u64 erpt;
54 u64 uptc;
55 u64 mptc;
56 u64 bptc;
57 u64 erpr;
58 u64 mbtc;
59 u64 bbtc;
60 u64 mbrc;
61 u64 bbrc;
62 u64 ubrc;
63 u64 ubtc;
64 u64 dpc;
65 u64 dma_pkt_rc;
66 u64 dma_pkt_tc;
67 u64 dma_oct_rc;
68 u64 dma_oct_tc;
69};
70
49#define AQ_HW_IRQ_INVALID 0U 71#define AQ_HW_IRQ_INVALID 0U
50#define AQ_HW_IRQ_LEGACY 1U 72#define AQ_HW_IRQ_LEGACY 1U
51#define AQ_HW_IRQ_MSI 2U 73#define AQ_HW_IRQ_MSI 2U
@@ -85,7 +107,9 @@ struct aq_hw_ops {
85 void (*destroy)(struct aq_hw_s *self); 107 void (*destroy)(struct aq_hw_s *self);
86 108
87 int (*get_hw_caps)(struct aq_hw_s *self, 109 int (*get_hw_caps)(struct aq_hw_s *self,
88 struct aq_hw_caps_s *aq_hw_caps); 110 struct aq_hw_caps_s *aq_hw_caps,
111 unsigned short device,
112 unsigned short subsystem_device);
89 113
90 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, 114 int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
91 unsigned int frags); 115 unsigned int frags);
@@ -164,8 +188,7 @@ struct aq_hw_ops {
164 188
165 int (*hw_update_stats)(struct aq_hw_s *self); 189 int (*hw_update_stats)(struct aq_hw_s *self);
166 190
167 int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, 191 struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
168 unsigned int *p_count);
169 192
170 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); 193 int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
171 194
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 78dfb2ab78ce..75a894a9251c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -37,6 +37,8 @@ static unsigned int aq_itr_rx;
37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); 37module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); 38MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
39 39
40static void aq_nic_update_ndev_stats(struct aq_nic_s *self);
41
40static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) 42static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
41{ 43{
42 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; 44 struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
166static void aq_nic_service_timer_cb(struct timer_list *t) 168static void aq_nic_service_timer_cb(struct timer_list *t)
167{ 169{
168 struct aq_nic_s *self = from_timer(self, t, service_timer); 170 struct aq_nic_s *self = from_timer(self, t, service_timer);
169 struct net_device *ndev = aq_nic_get_ndev(self); 171 int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
170 int err = 0; 172 int err = 0;
171 unsigned int i = 0U;
172 struct aq_ring_stats_rx_s stats_rx;
173 struct aq_ring_stats_tx_s stats_tx;
174 173
175 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 174 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
176 goto err_exit; 175 goto err_exit;
@@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
182 if (self->aq_hw_ops.hw_update_stats) 181 if (self->aq_hw_ops.hw_update_stats)
183 self->aq_hw_ops.hw_update_stats(self->aq_hw); 182 self->aq_hw_ops.hw_update_stats(self->aq_hw);
184 183
185 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 184 aq_nic_update_ndev_stats(self);
186 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
187 for (i = AQ_DIMOF(self->aq_vec); i--;) {
188 if (self->aq_vec[i])
189 aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
190 }
191 185
192 ndev->stats.rx_packets = stats_rx.packets; 186 /* If no link - use faster timer rate to detect link up asap */
193 ndev->stats.rx_bytes = stats_rx.bytes; 187 if (!netif_carrier_ok(self->ndev))
194 ndev->stats.rx_errors = stats_rx.errors; 188 ctimer = max(ctimer / 2, 1);
195 ndev->stats.tx_packets = stats_tx.packets;
196 ndev->stats.tx_bytes = stats_tx.bytes;
197 ndev->stats.tx_errors = stats_tx.errors;
198 189
199err_exit: 190err_exit:
200 mod_timer(&self->service_timer, 191 mod_timer(&self->service_timer, jiffies + ctimer);
201 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
202} 192}
203 193
204static void aq_nic_polling_timer_cb(struct timer_list *t) 194static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void)
222 212
223struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 213struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
224 const struct ethtool_ops *et_ops, 214 const struct ethtool_ops *et_ops,
225 struct device *dev, 215 struct pci_dev *pdev,
226 struct aq_pci_func_s *aq_pci_func, 216 struct aq_pci_func_s *aq_pci_func,
227 unsigned int port, 217 unsigned int port,
228 const struct aq_hw_ops *aq_hw_ops) 218 const struct aq_hw_ops *aq_hw_ops)
@@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
242 ndev->netdev_ops = ndev_ops; 232 ndev->netdev_ops = ndev_ops;
243 ndev->ethtool_ops = et_ops; 233 ndev->ethtool_ops = et_ops;
244 234
245 SET_NETDEV_DEV(ndev, dev); 235 SET_NETDEV_DEV(ndev, &pdev->dev);
246 236
247 ndev->if_port = port; 237 ndev->if_port = port;
248 self->ndev = ndev; 238 self->ndev = ndev;
@@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
254 244
255 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, 245 self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
256 &self->aq_hw_ops); 246 &self->aq_hw_ops);
257 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); 247 err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps,
248 pdev->device, pdev->subsystem_device);
258 if (err < 0) 249 if (err < 0)
259 goto err_exit; 250 goto err_exit;
260 251
@@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
749 740
750void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) 741void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
751{ 742{
752 struct aq_vec_s *aq_vec = NULL;
753 unsigned int i = 0U; 743 unsigned int i = 0U;
754 unsigned int count = 0U; 744 unsigned int count = 0U;
755 int err = 0; 745 struct aq_vec_s *aq_vec = NULL;
746 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
756 747
757 err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); 748 if (!stats)
758 if (err < 0)
759 goto err_exit; 749 goto err_exit;
760 750
761 data += count; 751 data[i] = stats->uprc + stats->mprc + stats->bprc;
752 data[++i] = stats->uprc;
753 data[++i] = stats->mprc;
754 data[++i] = stats->bprc;
755 data[++i] = stats->erpt;
756 data[++i] = stats->uptc + stats->mptc + stats->bptc;
757 data[++i] = stats->uptc;
758 data[++i] = stats->mptc;
759 data[++i] = stats->bptc;
760 data[++i] = stats->ubrc;
761 data[++i] = stats->ubtc;
762 data[++i] = stats->mbrc;
763 data[++i] = stats->mbtc;
764 data[++i] = stats->bbrc;
765 data[++i] = stats->bbtc;
766 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
767 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
768 data[++i] = stats->dma_pkt_rc;
769 data[++i] = stats->dma_pkt_tc;
770 data[++i] = stats->dma_oct_rc;
771 data[++i] = stats->dma_oct_tc;
772 data[++i] = stats->dpc;
773
774 i++;
775
776 data += i;
762 count = 0U; 777 count = 0U;
763 778
764 for (i = 0U, aq_vec = self->aq_vec[0]; 779 for (i = 0U, aq_vec = self->aq_vec[0];
@@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
768 } 783 }
769 784
770err_exit:; 785err_exit:;
771 (void)err; 786}
787
788static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
789{
790 struct net_device *ndev = self->ndev;
791 struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw);
792
793 ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
794 ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
795 ndev->stats.rx_errors = stats->erpr;
796 ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
797 ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
798 ndev->stats.tx_errors = stats->erpt;
799 ndev->stats.multicast = stats->mprc;
772} 800}
773 801
774void aq_nic_get_link_ksettings(struct aq_nic_s *self, 802void aq_nic_get_link_ksettings(struct aq_nic_s *self,
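Note: aq_nic_get_stats() now owns the ethtool layout. The hw layer hands back a pointer to struct aq_stats_s and the nic layer writes the u64 slots in exactly the order of aq_ethtool_stat_names[]. A sketch of that slot-filling contract (only the first slots shown; the return value tells the caller how far data[] advanced):

static unsigned int fill_hw_stats(u64 *data, const struct aq_stats_s *s)
{
	unsigned int i = 0U;

	data[i] = s->uprc + s->mprc + s->bprc;	/* "InPackets" */
	data[++i] = s->uprc;			/* "InUCast"   */
	data[++i] = s->mprc;			/* "InMCast"   */
	data[++i] = s->bprc;			/* "InBCast"   */
	/* ... remaining slots elided ... */

	return ++i;	/* count of slots written */
}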
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 4309983acdd6..3c9f8db03d5f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -71,7 +71,7 @@ struct aq_nic_cfg_s {
71 71
72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, 72struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
73 const struct ethtool_ops *et_ops, 73 const struct ethtool_ops *et_ops,
74 struct device *dev, 74 struct pci_dev *pdev,
75 struct aq_pci_func_s *aq_pci_func, 75 struct aq_pci_func_s *aq_pci_func,
76 unsigned int port, 76 unsigned int port,
77 const struct aq_hw_ops *aq_hw_ops); 77 const struct aq_hw_ops *aq_hw_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index cadaa646c89f..58c29d04b186 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
51 pci_set_drvdata(pdev, self); 51 pci_set_drvdata(pdev, self);
52 self->pdev = pdev; 52 self->pdev = pdev;
53 53
54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); 54 err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device,
55 pdev->subsystem_device);
55 if (err < 0) 56 if (err < 0)
56 goto err_exit; 57 goto err_exit;
57 58
@@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
59 60
60 for (port = 0; port < self->ports; ++port) { 61 for (port = 0; port < self->ports; ++port) {
61 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, 62 struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
62 &pdev->dev, self, 63 pdev, self,
63 port, aq_hw_ops); 64 port, aq_hw_ops);
64 65
65 if (!aq_nic) { 66 if (!aq_nic) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 07b3c49a16a4..f18dce14c93c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -18,9 +18,20 @@
18#include "hw_atl_a0_internal.h" 18#include "hw_atl_a0_internal.h"
19 19
20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, 20static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 21 struct aq_hw_caps_s *aq_hw_caps,
22 unsigned short device,
23 unsigned short subsystem_device)
22{ 24{
23 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); 25 memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
26
27 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
28 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
29
30 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
31 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G;
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G;
33 }
34
24 return 0; 35 return 0;
25} 36}
26 37
@@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self,
333 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 344 hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
334 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 345 hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
335 346
347 /* Reset link status and read out initial hardware counters */
348 self->aq_link_status.mbps = 0;
349 hw_atl_utils_update_stats(self);
350
336 err = aq_hw_err_from_flags(self); 351 err = aq_hw_err_from_flags(self);
337 if (err < 0) 352 if (err < 0)
338 goto err_exit; 353 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec68c20efcbd..e4a22ce7bf09 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -16,11 +16,23 @@
16#include "hw_atl_utils.h" 16#include "hw_atl_utils.h"
17#include "hw_atl_llh.h" 17#include "hw_atl_llh.h"
18#include "hw_atl_b0_internal.h" 18#include "hw_atl_b0_internal.h"
19#include "hw_atl_llh_internal.h"
19 20
20static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, 21static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
21 struct aq_hw_caps_s *aq_hw_caps) 22 struct aq_hw_caps_s *aq_hw_caps,
23 unsigned short device,
24 unsigned short subsystem_device)
22{ 25{
23 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); 26 memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
27
28 if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001)
29 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
30
31 if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) {
32 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G;
33 aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G;
34 }
35
24 return 0; 36 return 0;
25} 37}
26 38
@@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
357 }; 369 };
358 370
359 int err = 0; 371 int err = 0;
372 u32 val;
360 373
361 self->aq_nic_cfg = aq_nic_cfg; 374 self->aq_nic_cfg = aq_nic_cfg;
362 375
@@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self,
374 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); 387 hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
375 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); 388 hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
376 389
390 /* Force limit MRRS on RDM/TDM to 2K */
391 val = aq_hw_read_reg(self, pci_reg_control6_adr);
392 aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404);
393
394 /* TX DMA total request limit. B0 hardware is not capable of
395 * handling more than (8K - MRRS) bytes of incoming DMA data.
396 * The value 24 is in 256-byte units.
397 */
398 aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24);
399
400 /* Reset link status and read out initial hardware counters */
401 self->aq_link_status.mbps = 0;
402 hw_atl_utils_update_stats(self);
403
377 err = aq_hw_err_from_flags(self); 404 err = aq_hw_err_from_flags(self);
378 if (err < 0) 405 if (err < 0)
379 goto err_exit; 406 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 5527fc0e5942..93450ec930e8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -2343,6 +2343,9 @@
2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \ 2343#define tx_dma_desc_base_addrmsw_adr(descriptor) \
2344 (0x00007c04u + (descriptor) * 0x40) 2344 (0x00007c04u + (descriptor) * 0x40)
2345 2345
2346/* tx dma total request limit */
2347#define tx_dma_total_req_limit_adr 0x00007b20u
2348
2346/* tx interrupt moderation control register definitions 2349/* tx interrupt moderation control register definitions
2347 * Preprocessor definitions for TX Interrupt Moderation Control Register 2350 * Preprocessor definitions for TX Interrupt Moderation Control Register
2348 * Base Address: 0x00008980 2351 * Base Address: 0x00008980
@@ -2369,6 +2372,9 @@
2369/* default value of bitfield reg_res_dsbl */ 2372/* default value of bitfield reg_res_dsbl */
2370#define pci_reg_res_dsbl_default 0x1 2373#define pci_reg_res_dsbl_default 0x1
2371 2374
2375/* PCI core control register */
2376#define pci_reg_control6_adr 0x1014u
2377
2372/* global microprocessor scratch pad definitions */ 2378/* global microprocessor scratch pad definitions */
2373#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) 2379#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
2374 2380
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 1fe016fc4bc7..f2ce12ed4218 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
503 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 503 struct hw_atl_s *hw_self = PHAL_ATLANTIC;
504 struct hw_aq_atl_utils_mbox mbox; 504 struct hw_aq_atl_utils_mbox mbox;
505 505
506 if (!self->aq_link_status.mbps)
507 return 0;
508
509 hw_atl_utils_mpi_read_stats(self, &mbox); 506 hw_atl_utils_mpi_read_stats(self, &mbox);
510 507
511#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ 508#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
512 mbox.stats._N_ - hw_self->last_stats._N_) 509 mbox.stats._N_ - hw_self->last_stats._N_)
513 510 if (self->aq_link_status.mbps) {
514 AQ_SDELTA(uprc); 511 AQ_SDELTA(uprc);
515 AQ_SDELTA(mprc); 512 AQ_SDELTA(mprc);
516 AQ_SDELTA(bprc); 513 AQ_SDELTA(bprc);
517 AQ_SDELTA(erpt); 514 AQ_SDELTA(erpt);
518 515
519 AQ_SDELTA(uptc); 516 AQ_SDELTA(uptc);
520 AQ_SDELTA(mptc); 517 AQ_SDELTA(mptc);
521 AQ_SDELTA(bptc); 518 AQ_SDELTA(bptc);
522 AQ_SDELTA(erpr); 519 AQ_SDELTA(erpr);
523 520
524 AQ_SDELTA(ubrc); 521 AQ_SDELTA(ubrc);
525 AQ_SDELTA(ubtc); 522 AQ_SDELTA(ubtc);
526 AQ_SDELTA(mbrc); 523 AQ_SDELTA(mbrc);
527 AQ_SDELTA(mbtc); 524 AQ_SDELTA(mbtc);
528 AQ_SDELTA(bbrc); 525 AQ_SDELTA(bbrc);
529 AQ_SDELTA(bbtc); 526 AQ_SDELTA(bbtc);
530 AQ_SDELTA(dpc); 527 AQ_SDELTA(dpc);
531 528 }
532#undef AQ_SDELTA 529#undef AQ_SDELTA
530 hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self);
531 hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self);
532 hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self);
533 hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self);
533 534
534 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); 535 memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
535 536
536 return 0; 537 return 0;
537} 538}
538 539
539int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 540struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
540 u64 *data, unsigned int *p_count)
541{ 541{
542 struct hw_atl_s *hw_self = PHAL_ATLANTIC; 542 return &PHAL_ATLANTIC->curr_stats;
543 struct hw_atl_stats_s *stats = &hw_self->curr_stats;
544 int i = 0;
545
546 data[i] = stats->uprc + stats->mprc + stats->bprc;
547 data[++i] = stats->uprc;
548 data[++i] = stats->mprc;
549 data[++i] = stats->bprc;
550 data[++i] = stats->erpt;
551 data[++i] = stats->uptc + stats->mptc + stats->bptc;
552 data[++i] = stats->uptc;
553 data[++i] = stats->mptc;
554 data[++i] = stats->bptc;
555 data[++i] = stats->ubrc;
556 data[++i] = stats->ubtc;
557 data[++i] = stats->mbrc;
558 data[++i] = stats->mbtc;
559 data[++i] = stats->bbrc;
560 data[++i] = stats->bbtc;
561 data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
562 data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
563 data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
564 data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
565 data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
566 data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
567 data[++i] = stats->dpc;
568
569 if (p_count)
570 *p_count = ++i;
571
572 return 0;
573} 543}
574 544
575static const u32 hw_atl_utils_hw_mac_regs[] = { 545static const u32 hw_atl_utils_hw_mac_regs[] = {
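Note: hw_atl_utils_update_stats() builds monotonically growing totals by accumulating the delta between successive firmware mailbox snapshots (the AQ_SDELTA macro). After this change the mailbox is read and last_stats is latched even while the link is down; the deltas are simply not added in, so counters cannot jump when the link comes back. The idiom reduced to one counter:

#include <stdint.h>

struct snap { uint64_t uprc; };

static struct snap last;
static uint64_t total_uprc;

static void update_stats(const struct snap *mbox, int link_up)
{
	if (link_up)
		total_uprc += mbox->uprc - last.uprc; /* unsigned delta */
	last = *mbox;	/* always latch, even with the link down */
}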
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index c99cc690e425..21aeca6908d3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox {
129struct __packed hw_atl_s { 129struct __packed hw_atl_s {
130 struct aq_hw_s base; 130 struct aq_hw_s base;
131 struct hw_atl_stats_s last_stats; 131 struct hw_atl_stats_s last_stats;
132 struct hw_atl_stats_s curr_stats; 132 struct aq_stats_s curr_stats;
133 u64 speed; 133 u64 speed;
134 unsigned int chip_features; 134 unsigned int chip_features;
135 u32 fw_ver_actual; 135 u32 fw_ver_actual;
@@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
207 207
208int hw_atl_utils_update_stats(struct aq_hw_s *self); 208int hw_atl_utils_update_stats(struct aq_hw_s *self);
209 209
210int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, 210struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self);
211 u64 *data,
212 unsigned int *p_count);
213 211
214#endif /* HW_ATL_UTILS_H */ 212#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 0de858d215c2..9009f2651e70 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -11,8 +11,10 @@
11#define VER_H 11#define VER_H
12 12
13#define NIC_MAJOR_DRIVER_VERSION 1 13#define NIC_MAJOR_DRIVER_VERSION 1
14#define NIC_MINOR_DRIVER_VERSION 5 14#define NIC_MINOR_DRIVER_VERSION 6
15#define NIC_BUILD_DRIVER_VERSION 345 15#define NIC_BUILD_DRIVER_VERSION 13
16#define NIC_REVISION_DRIVER_VERSION 0 16#define NIC_REVISION_DRIVER_VERSION 0
17 17
18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
19
18#endif /* VER_H */ 20#endif /* VER_H */
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index c6163874e4e7..16f9bee992fe 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev)
199 199
200 /* RMII interface needs always a rate of 50MHz */ 200 /* RMII interface needs always a rate of 50MHz */
201 err = clk_set_rate(priv->refclk, 50000000); 201 err = clk_set_rate(priv->refclk, 50000000);
202 if (err) 202 if (err) {
203 dev_err(dev, 203 dev_err(dev,
204 "failed to change reference clock rate (%d)\n", err); 204 "failed to change reference clock rate (%d)\n", err);
205 goto out_regulator_disable;
206 }
205 207
206 if (priv->soc_data->need_div_macclk) { 208 if (priv->soc_data->need_div_macclk) {
207 priv->macclk = devm_clk_get(dev, "macclk"); 209 priv->macclk = devm_clk_get(dev, "macclk");
@@ -230,12 +232,14 @@ static int emac_rockchip_probe(struct platform_device *pdev)
230 err = arc_emac_probe(ndev, interface); 232 err = arc_emac_probe(ndev, interface);
231 if (err) { 233 if (err) {
232 dev_err(dev, "failed to probe arc emac (%d)\n", err); 234 dev_err(dev, "failed to probe arc emac (%d)\n", err);
233 goto out_regulator_disable; 235 goto out_clk_disable_macclk;
234 } 236 }
235 237
236 return 0; 238 return 0;
239
237out_clk_disable_macclk: 240out_clk_disable_macclk:
238 clk_disable_unprepare(priv->macclk); 241 if (priv->soc_data->need_div_macclk)
242 clk_disable_unprepare(priv->macclk);
239out_regulator_disable: 243out_regulator_disable:
240 if (priv->regulator) 244 if (priv->regulator)
241 regulator_disable(priv->regulator); 245 regulator_disable(priv->regulator);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6e423f098a60..31efc47c847e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev)
4081 if (hw->ports > 1) { 4081 if (hw->ports > 1) {
4082 skge_write32(hw, B0_IMSK, 0); 4082 skge_write32(hw, B0_IMSK, 0);
4083 skge_read32(hw, B0_IMSK); 4083 skge_read32(hw, B0_IMSK);
4084 free_irq(pdev->irq, hw);
4085 } 4084 }
4086 spin_unlock_irq(&hw->hw_lock); 4085 spin_unlock_irq(&hw->hw_lock);
4087 4086
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index e0eb695318e6..1fa4849a6f56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
188 struct net_device *dev = mdev->pndev[port]; 188 struct net_device *dev = mdev->pndev[port];
189 struct mlx4_en_priv *priv = netdev_priv(dev); 189 struct mlx4_en_priv *priv = netdev_priv(dev);
190 struct net_device_stats *stats = &dev->stats; 190 struct net_device_stats *stats = &dev->stats;
191 struct mlx4_cmd_mailbox *mailbox; 191 struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
192 u64 in_mod = reset << 8 | port; 192 u64 in_mod = reset << 8 | port;
193 int err; 193 int err;
194 int i, counter_index; 194 int i, counter_index;
@@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 198 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
199 if (IS_ERR(mailbox)) 199 if (IS_ERR(mailbox))
200 return PTR_ERR(mailbox); 200 return PTR_ERR(mailbox);
201
202 mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
203 if (IS_ERR(mailbox_priority)) {
204 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
205 return PTR_ERR(mailbox_priority);
206 }
207
201 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, 208 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
202 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, 209 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
203 MLX4_CMD_NATIVE); 210 MLX4_CMD_NATIVE);
@@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
206 213
207 mlx4_en_stats = mailbox->buf; 214 mlx4_en_stats = mailbox->buf;
208 215
216 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
217 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
218 err = mlx4_get_counter_stats(mdev->dev, counter_index,
219 &tmp_counter_stats, reset);
220
221 /* 0xffs indicates invalid value */
222 memset(mailbox_priority->buf, 0xff,
223 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
224
225 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
226 memset(mailbox_priority->buf, 0,
227 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
228 err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
229 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
230 0, MLX4_CMD_DUMP_ETH_STATS,
231 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
232 if (err)
233 goto out;
234 }
235
236 flowstats = mailbox_priority->buf;
237
209 spin_lock_bh(&priv->stats_lock); 238 spin_lock_bh(&priv->stats_lock);
210 239
211 mlx4_en_fold_software_stats(dev); 240 mlx4_en_fold_software_stats(dev);
@@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
345 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); 374 priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
346 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); 375 priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
347 376
348 spin_unlock_bh(&priv->stats_lock);
349
350 memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
351 counter_index = mlx4_get_default_counter_index(mdev->dev, port);
352 err = mlx4_get_counter_stats(mdev->dev, counter_index,
353 &tmp_counter_stats, reset);
354
355 /* 0xffs indicates invalid value */
356 memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
357
358 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
359 memset(mailbox->buf, 0,
360 sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
361 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
362 in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
363 0, MLX4_CMD_DUMP_ETH_STATS,
364 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
365 if (err)
366 goto out;
367 }
368
369 flowstats = mailbox->buf;
370
371 spin_lock_bh(&priv->stats_lock);
372
373 if (tmp_counter_stats.counter_mode == 0) { 377 if (tmp_counter_stats.counter_mode == 0) {
374 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); 378 priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
375 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); 379 priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
@@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
410 414
411out: 415out:
412 mlx4_free_cmd_mailbox(mdev->dev, mailbox); 416 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
417 mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
413 return err; 418 return err;
414} 419}
415 420
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 88699b181946..946d9db7c8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
185 if (priv->mdev->dev->caps.flags & 185 if (priv->mdev->dev->caps.flags &
186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { 186 MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
187 buf[3] = mlx4_en_test_registers(priv); 187 buf[3] = mlx4_en_test_registers(priv);
188 if (priv->port_up) 188 if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
189 buf[4] = mlx4_en_test_loopback(priv); 189 buf[4] = mlx4_en_test_loopback(priv);
190 } 190 }
191 191
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 1856e279a7e0..2b72677eccd4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -153,6 +153,9 @@
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) 153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) 154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) 155#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
156#define PREAMBLE_LEN 8
157#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
158 ETH_HLEN + PREAMBLE_LEN)
156 159
157#define MLX4_EN_MIN_MTU 46 160#define MLX4_EN_MIN_MTU 46
158/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple 161
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 04304dd894c6..606a0e0beeae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
611 MLX4_MAX_PORTS; 611 MLX4_MAX_PORTS;
612 else 612 else
613 res_alloc->guaranteed[t] = 0; 613 res_alloc->guaranteed[t] = 0;
614 res_alloc->res_free -= res_alloc->guaranteed[t];
615 break; 614 break;
616 default: 615 default:
617 break; 616 break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3b9c8a0437bf..d373df7b11bd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4346,6 +4346,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4346 4346
4347static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) 4347static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4348{ 4348{
4349 u16 vid = 1;
4349 int err; 4350 int err;
4350 4351
4351 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); 4352 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
@@ -4358,8 +4359,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4358 true, false); 4359 true, false);
4359 if (err) 4360 if (err)
4360 goto err_port_vlan_set; 4361 goto err_port_vlan_set;
4362
4363 for (; vid <= VLAN_N_VID - 1; vid++) {
4364 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4365 vid, false);
4366 if (err)
4367 goto err_vid_learning_set;
4368 }
4369
4361 return 0; 4370 return 0;
4362 4371
4372err_vid_learning_set:
4373 for (vid--; vid >= 1; vid--)
4374 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4363err_port_vlan_set: 4375err_port_vlan_set:
4364 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4376 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4365err_port_stp_set: 4377err_port_stp_set:
@@ -4369,6 +4381,12 @@ err_port_stp_set:
4369 4381
4370static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) 4382static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4371{ 4383{
4384 u16 vid;
4385
4386 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4387 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4388 vid, true);
4389
4372 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, 4390 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4373 false, false); 4391 false, false);
4374 mlxsw_sp_port_stp_set(mlxsw_sp_port, false); 4392 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
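Note: the mlxsw OVS join path disables learning on VIDs 1..4094 one at a time; if a step fails it re-enables only the VIDs it already touched, starting at vid - 1, and the leave path replays the whole loop in reverse. The partial-rollback shape, with a stub standing in for mlxsw_sp_port_vid_learning_set():

#include <stdbool.h>

static int vid_learning_set(int vid, bool enable)
{
	(void)vid; (void)enable;	/* stub for the real helper */
	return 0;
}

static int ovs_join(void)
{
	int vid, err;

	for (vid = 1; vid <= 4094; vid++) {
		err = vid_learning_set(vid, false);
		if (err)
			goto err_learning;
	}
	return 0;

err_learning:
	/* vid itself failed, so undo from the one below it */
	for (vid--; vid >= 1; vid--)
		vid_learning_set(vid, true);
	return err;
}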
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 18461fcb9815..53dbf1e163a8 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -47,6 +47,7 @@
 #define MDIO_CLK_25_28			7
 
 #define MDIO_WAIT_TIMES			1000
+#define MDIO_STATUS_DELAY_TIME		1
 
 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 {
@@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 
 	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
 			       !(reg & (MDIO_START | MDIO_BUSY)),
-			       100, MDIO_WAIT_TIMES * 100))
+			       MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
 		return -EIO;
 
 	return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
@@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
 	writel(reg, adpt->base + EMAC_MDIO_CTRL);
 
 	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
-			       !(reg & (MDIO_START | MDIO_BUSY)), 100,
-			       MDIO_WAIT_TIMES * 100))
+			       !(reg & (MDIO_START | MDIO_BUSY)),
+			       MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100))
 		return -EIO;
 
 	return 0;
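
In readl_poll_timeout() the fourth argument is the sleep between polls in microseconds and the fifth is the total timeout, so this change keeps the overall 100 ms budget while sampling the status register every 1 us instead of every 100 us. A rough userspace equivalent of that polling contract, with a hypothetical read_reg() in place of the MMIO read:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

#define MDIO_BUSY	1u	/* pretend bit 0 is the BUSY flag */

/* fake register: stays busy for the first 39 reads */
static uint32_t read_reg(void)
{
	static int calls;

	return ++calls < 40 ? MDIO_BUSY : 0;
}

static bool mdio_idle(uint32_t reg)
{
	return !(reg & MDIO_BUSY);
}

/* mirrors readl_poll_timeout() semantics: sleep delay_us between
 * reads, give up after timeout_us in total */
static int poll_timeout(bool (*cond)(uint32_t), uint32_t *reg,
			unsigned long delay_us, unsigned long timeout_us)
{
	struct timespec t0, t1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (;;) {
		*reg = read_reg();
		if (cond(*reg))
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &t1);
		if ((t1.tv_sec - t0.tv_sec) * 1000000L +
		    (t1.tv_nsec - t0.tv_nsec) / 1000L >= (long)timeout_us)
			return -1;	/* the kernel helper returns -ETIMEDOUT */
		usleep(delay_us);
	}
}

int main(void)
{
	uint32_t reg;

	/* 1 us poll interval, 100 ms budget, as in the patched driver */
	return poll_timeout(mdio_idle, &reg, 1, 1000 * 100);
}
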
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 2b962d349f5f..009780df664b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
 	struct ravb_private *priv = netdev_priv(ndev);
 	int ret = 0;
 
-	if (priv->wol_enabled) {
-		/* Reduce the usecount of the clock to zero and then
-		 * restore it to its original value. This is done to force
-		 * the clock to be re-enabled which is a workaround
-		 * for renesas-cpg-mssr driver which do not enable clocks
-		 * when resuming from PSCI suspend/resume.
-		 *
-		 * Without this workaround the driver fails to communicate
-		 * with the hardware if WoL was enabled when the system
-		 * entered PSCI suspend. This is due to that if WoL is enabled
-		 * we explicitly keep the clock from being turned off when
-		 * suspending, but in PSCI sleep power is cut so the clock
-		 * is disabled anyhow, the clock driver is not aware of this
-		 * so the clock is not turned back on when resuming.
-		 *
-		 * TODO: once the renesas-cpg-mssr suspend/resume is working
-		 * this clock dance should be removed.
-		 */
-		clk_disable(priv->clk);
-		clk_disable(priv->clk);
-		clk_enable(priv->clk);
-		clk_enable(priv->clk);
-
-		/* Set reset mode to rearm the WoL logic */
+	/* If WoL is enabled set reset mode to rearm the WoL logic */
+	if (priv->wol_enabled)
 		ravb_write(ndev, CCC_OPC_RESET, CCC);
-	}
 
 	/* All register have been reset to default values.
 	 * Restore all registers which where setup at probe time and
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index db72d13cebb9..75323000c364 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1892,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev)
 		return PTR_ERR(phydev);
 	}
 
+	/* mask with MAC supported features */
+	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
+		int err = phy_set_max_speed(phydev, SPEED_100);
+		if (err) {
+			netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
+			phy_disconnect(phydev);
+			return err;
+		}
+	}
+
 	phy_attached_info(phydev);
 
 	return 0;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 8483f03d5a41..1ab97d99b9ba 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev)
 			    rrpriv->info_dma);
 	rrpriv->info = NULL;
 
-	free_irq(pdev->irq, dev);
 	spin_unlock_irqrestore(&rrpriv->lock, flags);
+	free_irq(pdev->irq, dev);
 
 	return 0;
 }
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 29da7a3c7a37..1e190f3bca63 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -238,14 +238,10 @@ static int at803x_resume(struct phy_device *phydev)
 {
 	int value;
 
-	mutex_lock(&phydev->lock);
-
 	value = phy_read(phydev, MII_BMCR);
 	value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
 	phy_write(phydev, MII_BMCR, value);
 
-	mutex_unlock(&phydev->lock);
-
 	return 0;
 }
 
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6dbb0f4c34eb..2fc026dc170a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
 	if (err < 0)
 		goto error;
 
+	/* Do not touch the fiber page if we're in copper->sgmii mode */
+	if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+		return 0;
+
 	/* Then the fiber link */
 	err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
 	if (err < 0)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8f8b7747c54b..a0f34c385cad 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -288,6 +288,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
 
 		if (addr == mdiodev->addr) {
 			dev->of_node = child;
+			dev->fwnode = of_fwnode_handle(child);
 			return;
 		}
 	}
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 401e3234be58..4ee630afe43a 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -22,6 +22,7 @@
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
+#include <linux/bitfield.h>
 
 static int meson_gxl_config_init(struct phy_device *phydev)
 {
@@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev)
 	return 0;
 }
 
+/* This function is provided to cope with the possible failures of this phy
+ * during aneg process. When aneg fails, the PHY reports that aneg is done
+ * but the value found in MII_LPA is wrong:
+ * - Early failures: MII_LPA is just 0x0001. if MII_EXPANSION reports that
+ *   the link partner (LP) supports aneg but the LP never acked our base
+ *   code word, it is likely that we never sent it to begin with.
+ * - Late failures: MII_LPA is filled with a value which seems to make sense
+ *   but it actually is not what the LP is advertising. It seems that we
+ *   can detect this using a magic bit in the WOL bank (reg 12 - bit 12).
+ *   If this particular bit is not set when aneg is reported being done,
+ *   it means MII_LPA is likely to be wrong.
+ *
+ * In both case, forcing a restart of the aneg process solve the problem.
+ * When this failure happens, the first retry is usually successful but,
+ * in some cases, it may take up to 6 retries to get a decent result
+ */
+static int meson_gxl_read_status(struct phy_device *phydev)
+{
+	int ret, wol, lpa, exp;
+
+	if (phydev->autoneg == AUTONEG_ENABLE) {
+		ret = genphy_aneg_done(phydev);
+		if (ret < 0)
+			return ret;
+		else if (!ret)
+			goto read_status_continue;
+
+		/* Need to access WOL bank, make sure the access is open */
+		ret = phy_write(phydev, 0x14, 0x0000);
+		if (ret)
+			return ret;
+		ret = phy_write(phydev, 0x14, 0x0400);
+		if (ret)
+			return ret;
+		ret = phy_write(phydev, 0x14, 0x0000);
+		if (ret)
+			return ret;
+		ret = phy_write(phydev, 0x14, 0x0400);
+		if (ret)
+			return ret;
+
+		/* Request LPI_STATUS WOL register */
+		ret = phy_write(phydev, 0x14, 0x8D80);
+		if (ret)
+			return ret;
+
+		/* Read LPI_STATUS value */
+		wol = phy_read(phydev, 0x15);
+		if (wol < 0)
+			return wol;
+
+		lpa = phy_read(phydev, MII_LPA);
+		if (lpa < 0)
+			return lpa;
+
+		exp = phy_read(phydev, MII_EXPANSION);
+		if (exp < 0)
+			return exp;
+
+		if (!(wol & BIT(12)) ||
+		    ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
+			/* Looks like aneg failed after all */
+			phydev_dbg(phydev, "LPA corruption - aneg restart\n");
+			return genphy_restart_aneg(phydev);
+		}
+	}
+
+read_status_continue:
+	return genphy_read_status(phydev);
+}
+
 static struct phy_driver meson_gxl_phy[] = {
 	{
 		.phy_id		= 0x01814400,
@@ -59,6 +131,7 @@ static struct phy_driver meson_gxl_phy[] = {
 		.flags		= PHY_IS_INTERNAL,
 		.config_init	= meson_gxl_config_init,
 		.aneg_done	= genphy_aneg_done,
+		.read_status	= meson_gxl_read_status,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
 	},
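
The detection logic above reduces to a single predicate over three register reads. A standalone restatement with a few test vectors (the register values are invented for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define EXPANSION_NWAY	0x0001	/* MII_EXPANSION: LP can autonegotiate */
#define LPA_LPACK	0x4000	/* MII_LPA: LP acked our base code word */

/* true when the LPA value cannot be trusted and aneg must restart */
static bool lpa_corrupted(uint16_t wol, uint16_t lpa, uint16_t exp)
{
	return !(wol & BIT(12)) ||
	       ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK));
}

int main(void)
{
	/* late failure: magic WOL bit clear, LPA looks plausible */
	assert(lpa_corrupted(0x0000, 0x05e1, 0x0001));
	/* early failure: LP does aneg but never acked us (LPA 0x0001) */
	assert(lpa_corrupted(BIT(12), 0x0001, EXPANSION_NWAY));
	/* healthy: magic bit set and base code word acked */
	assert(!lpa_corrupted(BIT(12), 0x05e1 | LPA_LPACK, EXPANSION_NWAY));
	return 0;
}
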
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 944143b521d7..0c165ad1d788 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -806,7 +806,6 @@ EXPORT_SYMBOL(phy_stop);
  */
 void phy_start(struct phy_device *phydev)
 {
-	bool do_resume = false;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
@@ -819,6 +818,9 @@ void phy_start(struct phy_device *phydev)
 		phydev->state = PHY_UP;
 		break;
 	case PHY_HALTED:
+		/* if phy was suspended, bring the physical link up again */
+		phy_resume(phydev);
+
 		/* make sure interrupts are re-enabled for the PHY */
 		if (phydev->irq != PHY_POLL) {
 			err = phy_enable_interrupts(phydev);
@@ -827,17 +829,12 @@ void phy_start(struct phy_device *phydev)
 		}
 
 		phydev->state = PHY_RESUMING;
-		do_resume = true;
 		break;
 	default:
 		break;
 	}
 	mutex_unlock(&phydev->lock);
 
-	/* if phy was suspended, bring the physical link up again */
-	if (do_resume)
-		phy_resume(phydev);
-
 	phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 462c17ed87b8..be13b5d6a8bf 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev)
 	if (!mdio_bus_phy_may_suspend(phydev))
 		goto no_resume;
 
+	mutex_lock(&phydev->lock);
 	ret = phy_resume(phydev);
+	mutex_unlock(&phydev->lock);
 	if (ret < 0)
 		return ret;
 
@@ -1039,7 +1041,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	if (err)
 		goto error;
 
+	mutex_lock(&phydev->lock);
 	phy_resume(phydev);
+	mutex_unlock(&phydev->lock);
 	phy_led_triggers_register(phydev);
 
 	return err;
@@ -1173,6 +1177,8 @@ int phy_resume(struct phy_device *phydev)
 	struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
 	int ret = 0;
 
+	WARN_ON(!mutex_is_locked(&phydev->lock));
+
 	if (phydev->drv && phydrv->resume)
 		ret = phydrv->resume(phydev);
 
@@ -1679,13 +1685,9 @@ int genphy_resume(struct phy_device *phydev)
 {
 	int value;
 
-	mutex_lock(&phydev->lock);
-
 	value = phy_read(phydev, MII_BMCR);
 	phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
 
-	mutex_unlock(&phydev->lock);
-
 	return 0;
 }
 EXPORT_SYMBOL(genphy_resume);
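
The common thread in these phy hunks is a lock-ownership transfer: genphy_resume() and the at803x resume callback no longer take phydev->lock themselves, the callers do, and phy_resume() merely asserts the contract with WARN_ON(). That removes the recursive-lock deadlock once phy_start() began calling phy_resume() with the lock already held. A minimal pthread sketch of the same convention; note mutex_is_locked() only checks that somebody holds the lock, which the trylock trick below approximates:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* callee: requires the lock, never takes it (cf. the WARN_ON above) */
static void resume(void)
{
	/* trylock succeeding would mean the caller forgot the lock */
	assert(pthread_mutex_trylock(&lock) != 0);
	puts("resumed under caller's lock");
}

/* caller owns the locking, so it can compose resume() with other
 * protected steps without deadlocking on a non-recursive mutex */
static void start(void)
{
	pthread_mutex_lock(&lock);
	resume();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start();
	return 0;
}
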
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 1ed00519f29e..cfaa07f230e5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1204,12 +1204,14 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)},	/* Telit LE922A */
 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
+	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)},	/* Telit LE920, LE920A4 */
 	{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)},	/* Telewell TW-3G HSPA+ */
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 7c8767176315..f4c73292b304 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -85,6 +85,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
 	 * can be looked up later */
 	of_node_get(child);
 	phy->mdio.dev.of_node = child;
+	phy->mdio.dev.fwnode = of_fwnode_handle(child);
 
 	/* All data is now stored in the phy struct;
 	 * register it */
@@ -115,6 +116,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
 	 */
 	of_node_get(child);
 	mdiodev->dev.of_node = child;
+	mdiodev->dev.fwnode = of_fwnode_handle(child);
 
 	/* All data is now stored in the mdiodev struct; register it. */
 	rc = mdio_device_register(mdiodev);
@@ -210,6 +212,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 	mdio->phy_mask = ~0;
 
 	mdio->dev.of_node = np;
+	mdio->dev.fwnode = of_fwnode_handle(np);
 
 	/* Get bus level PHY reset GPIO details */
 	mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY;
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index 12796eccb2be..52ab3cb0a0bf 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	err = rcar_pcie_get_resources(pcie);
 	if (err < 0) {
 		dev_err(dev, "failed to request resources: %d\n", err);
-		goto err_free_bridge;
+		goto err_free_resource_list;
 	}
 
 	err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
 	if (err)
-		goto err_free_bridge;
+		goto err_free_resource_list;
 
 	pm_runtime_enable(dev);
 	err = pm_runtime_get_sync(dev);
@@ -1176,9 +1176,9 @@ err_pm_put:
 err_pm_disable:
 	pm_runtime_disable(dev);
 
-err_free_bridge:
-	pci_free_host_bridge(bridge);
+err_free_resource_list:
 	pci_free_resource_list(&pcie->resources);
+	pci_free_host_bridge(bridge);
 
 	return err;
 }
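
The swap of the two frees matters because the driver's private data, including the resource list, is embedded in the allocation that pci_free_host_bridge() releases; freeing the bridge first would leave pcie->resources dangling when the list is walked. The general rule, release members before their container, in a tiny sketch with hypothetical types:

#include <stdlib.h>

struct res_list { int *entries; };

/* container embeds the private data, as pci_alloc_host_bridge() does */
struct bridge {
	struct res_list resources;
};

static void free_resource_list(struct res_list *l)
{
	free(l->entries);	/* touches memory inside the container */
	l->entries = NULL;
}

int main(void)
{
	struct bridge *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->resources.entries = calloc(4, sizeof(int));

	/* correct order: member first, container second; swapping these
	 * two calls turns the first into a use-after-free */
	free_resource_list(&b->resources);
	free(b);
	return 0;
}
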
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 7f47bb72bf30..945099d49f8f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -999,7 +999,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
 	 * the subsequent "thaw" callbacks for the device.
 	 */
 	if (dev_pm_smart_suspend_and_suspended(dev)) {
-		dev->power.direct_complete = true;
+		dev_pm_skip_next_resume_phases(dev);
 		return 0;
 	}
 
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
index f3796164329e..d4aeac3477f5 100644
--- a/drivers/platform/x86/asus-wireless.c
+++ b/drivers/platform/x86/asus-wireless.c
@@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event)
 		return;
 	}
 	input_report_key(data->idev, KEY_RFKILL, 1);
+	input_sync(data->idev);
 	input_report_key(data->idev, KEY_RFKILL, 0);
 	input_sync(data->idev);
 }
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index bf897b1832b1..cd4725e7e0b5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -37,6 +37,7 @@
 
 struct quirk_entry {
 	u8 touchpad_led;
+	u8 kbd_led_levels_off_1;
 
 	int needs_kbd_timeouts;
 	/*
@@ -67,6 +68,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = {
 	.kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
 };
 
+static struct quirk_entry quirk_dell_latitude_e6410 = {
+	.kbd_led_levels_off_1 = 1,
+};
+
 static struct platform_driver platform_driver = {
 	.driver = {
 		.name = "dell-laptop",
@@ -269,6 +274,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
 		},
 		.driver_data = &quirk_dell_xps13_9333,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "Dell Latitude E6410",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"),
+		},
+		.driver_data = &quirk_dell_latitude_e6410,
+	},
 	{ }
 };
 
@@ -1149,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info)
 	units = (buffer->output[2] >> 8) & 0xFF;
 	info->levels = (buffer->output[2] >> 16) & 0xFF;
 
+	if (quirks && quirks->kbd_led_levels_off_1 && info->levels)
+		info->levels--;
+
 	if (units & BIT(0))
 		info->seconds = (buffer->output[3] >> 0) & 0xFF;
 	if (units & BIT(1))
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 39d2f4518483..fb25b20df316 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -639,6 +639,8 @@ static int dell_wmi_events_set_enabled(bool enable)
 	int ret;
 
 	buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
 	buffer->cmd_class = CLASS_INFO;
 	buffer->cmd_select = SELECT_APP_REGISTRATION;
 	buffer->input[0] = 0x10000;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 15015a24f8ad..badf42acbf95 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -565,9 +565,9 @@ enum qeth_cq {
 };
 
 struct qeth_ipato {
-	int enabled;
-	int invert4;
-	int invert6;
+	bool enabled;
+	bool invert4;
+	bool invert6;
 	struct list_head entries;
 };
 
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 430e3214f7e2..6c815207f4f5 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1480,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card)
 	qeth_set_intial_options(card);
 	/* IP address takeover */
 	INIT_LIST_HEAD(&card->ipato.entries);
-	card->ipato.enabled = 0;
-	card->ipato.invert4 = 0;
-	card->ipato.invert6 = 0;
+	card->ipato.enabled = false;
+	card->ipato.invert4 = false;
+	card->ipato.invert6 = false;
 	/* init QDIO stuff */
 	qeth_init_qdio_info(card);
 	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 194ae9b577cc..e5833837b799 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
+void qeth_l3_update_ipato(struct qeth_card *card);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
 int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6a73894b0cb5..ef0961e18686 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -164,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 	}
 }
 
-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
-				     struct qeth_ipaddr *addr)
+static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
+					     struct qeth_ipaddr *addr)
 {
 	struct qeth_ipato_entry *ipatoe;
 	u8 addr_bits[128] = {0, };
@@ -174,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
 
 	if (!card->ipato.enabled)
 		return 0;
+	if (addr->type != QETH_IP_TYPE_NORMAL)
+		return 0;
 
 	qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
 				     (addr->proto == QETH_PROT_IPV4)? 4:16);
@@ -290,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 	memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
 	addr->ref_counter = 1;
 
-	if (addr->type == QETH_IP_TYPE_NORMAL &&
-	    qeth_l3_is_addr_covered_by_ipato(card, addr)) {
+	if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
 		QETH_CARD_TEXT(card, 2, "tkovaddr");
 		addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
 	}
@@ -605,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 /*
  * IP address takeover related functions
  */
+
+/**
+ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
+ *
+ * Caller must hold ip_lock.
+ */
+void qeth_l3_update_ipato(struct qeth_card *card)
+{
+	struct qeth_ipaddr *addr;
+	unsigned int i;
+
+	hash_for_each(card->ip_htable, i, addr, hnode) {
+		if (addr->type != QETH_IP_TYPE_NORMAL)
+			continue;
+		if (qeth_l3_is_addr_covered_by_ipato(card, addr))
+			addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
+		else
+			addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
+	}
+}
+
 static void qeth_l3_clear_ipato_list(struct qeth_card *card)
 {
 	struct qeth_ipato_entry *ipatoe, *tmp;
@@ -616,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
 		kfree(ipatoe);
 	}
 
+	qeth_l3_update_ipato(card);
 	spin_unlock_bh(&card->ip_lock);
 }
 
@@ -640,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
 		}
 	}
 
-	if (!rc)
+	if (!rc) {
 		list_add_tail(&new->entry, &card->ipato.entries);
+		qeth_l3_update_ipato(card);
+	}
 
 	spin_unlock_bh(&card->ip_lock);
 
@@ -664,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
 			  (proto == QETH_PROT_IPV4)? 4:16) &&
 		    (ipatoe->mask_bits == mask_bits)) {
 			list_del(&ipatoe->entry);
+			qeth_l3_update_ipato(card);
 			kfree(ipatoe);
 		}
 	}
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index bd12fdf678be..6ea2b528a64e 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
-	struct qeth_ipaddr *addr;
-	int i, rc = 0;
+	bool enable;
+	int rc = 0;
 
 	if (!card)
 		return -EINVAL;
@@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
 	}
 
 	if (sysfs_streq(buf, "toggle")) {
-		card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
-	} else if (sysfs_streq(buf, "1")) {
-		card->ipato.enabled = 1;
-		hash_for_each(card->ip_htable, i, addr, hnode) {
-			if ((addr->type == QETH_IP_TYPE_NORMAL) &&
-			    qeth_l3_is_addr_covered_by_ipato(card, addr))
-				addr->set_flags |=
-					QETH_IPA_SETIP_TAKEOVER_FLAG;
-		}
-	} else if (sysfs_streq(buf, "0")) {
-		card->ipato.enabled = 0;
-		hash_for_each(card->ip_htable, i, addr, hnode) {
-			if (addr->set_flags &
-			    QETH_IPA_SETIP_TAKEOVER_FLAG)
-				addr->set_flags &=
-					~QETH_IPA_SETIP_TAKEOVER_FLAG;
-		}
-	} else
+		enable = !card->ipato.enabled;
+	} else if (kstrtobool(buf, &enable)) {
 		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.enabled != enable) {
+		card->ipato.enabled = enable;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
 out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
@@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
 				const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	bool invert;
 	int rc = 0;
 
 	if (!card)
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
-	if (sysfs_streq(buf, "toggle"))
-		card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
-	else if (sysfs_streq(buf, "1"))
-		card->ipato.invert4 = 1;
-	else if (sysfs_streq(buf, "0"))
-		card->ipato.invert4 = 0;
-	else
+	if (sysfs_streq(buf, "toggle")) {
+		invert = !card->ipato.invert4;
+	} else if (kstrtobool(buf, &invert)) {
 		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.invert4 != invert) {
+		card->ipato.invert4 = invert;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
+out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct qeth_card *card = dev_get_drvdata(dev);
+	bool invert;
 	int rc = 0;
 
 	if (!card)
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
-	if (sysfs_streq(buf, "toggle"))
-		card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
-	else if (sysfs_streq(buf, "1"))
-		card->ipato.invert6 = 1;
-	else if (sysfs_streq(buf, "0"))
-		card->ipato.invert6 = 0;
-	else
+	if (sysfs_streq(buf, "toggle")) {
+		invert = !card->ipato.invert6;
+	} else if (kstrtobool(buf, &invert)) {
 		rc = -EINVAL;
+		goto out;
+	}
+
+	if (card->ipato.invert6 != invert) {
+		card->ipato.invert6 = invert;
+		spin_lock_bh(&card->ip_lock);
+		qeth_l3_update_ipato(card);
+		spin_unlock_bh(&card->ip_lock);
+	}
+out:
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
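
All three store handlers now share one shape: accept the literal "toggle", otherwise let kstrtobool() parse the value, and only touch driver state under the lock when the value actually changes. A userspace sketch of that flow, using a cut-down parse_bool() in place of kstrtobool():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* cut-down stand-in for the kernel's kstrtobool(); the real one also
 * accepts "on"/"off" */
static int parse_bool(const char *s, bool *res)
{
	switch (s[0]) {
	case '1': case 'y': case 'Y': *res = true;  return 0;
	case '0': case 'n': case 'N': *res = false; return 0;
	default:                      return -1;	/* -EINVAL */
	}
}

/* same shape as the store handlers: toggle or parse, then update
 * state only when the value actually changed */
static int store(bool *state, const char *buf)
{
	bool val;

	if (!strcmp(buf, "toggle"))
		val = !*state;
	else if (parse_bool(buf, &val))
		return -1;

	if (*state != val) {
		*state = val;	/* the part done under ip_lock above */
		printf("state -> %d\n", *state);
	}
	return 0;
}

int main(void)
{
	bool enabled = false;

	store(&enabled, "1");		/* prints: state -> 1 */
	store(&enabled, "toggle");	/* prints: state -> 0 */
	store(&enabled, "0");		/* no change, prints nothing */
	return 0;
}
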
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index bec9f3193f60..80a8cb26cdea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2482,8 +2482,8 @@ int aac_command_thread(void *data)
 			/* Synchronize our watches */
 			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
 			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
-				difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ)
-				  + NSEC_PER_SEC / 2) / NSEC_PER_SEC;
+				difference = HZ + HZ / 2 -
+					     now.tv_nsec / (NSEC_PER_SEC / HZ);
 			else {
 				if (now.tv_nsec > NSEC_PER_SEC / 2)
 					++now.tv_sec;
@@ -2507,6 +2507,10 @@ int aac_command_thread(void *data)
 		if (kthread_should_stop())
 			break;
 
+		/*
+		 * we probably want usleep_range() here instead of the
+		 * jiffies computation
+		 */
 		schedule_timeout(difference);
 
 		if (kthread_should_stop())
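
Both forms of the expression convert the nanosecond remainder of the current second into a jiffy count; the new one does the conversion in jiffy units first, which keeps every intermediate small (with a 32-bit long the old multiply by HZ can overflow), and it also waits an extra HZ/2 past the second boundary. A quick numeric check with illustrative values:

#include <stdio.h>

#define HZ		250
#define NSEC_PER_SEC	1000000000L

int main(void)
{
	long tv_nsec = 600000000L;	/* 0.6 s into the current second */

	/* old: jiffies to the next full second, rounded to nearest */
	long old_diff = ((NSEC_PER_SEC - tv_nsec) * HZ + NSEC_PER_SEC / 2)
			/ NSEC_PER_SEC;

	/* new: same conversion done in jiffy units, plus half a second */
	long new_diff = HZ + HZ / 2 - tv_nsec / (NSEC_PER_SEC / HZ);

	printf("old=%ld jiffies (0.4 s)\n", old_diff);	/* 100 */
	printf("new=%ld jiffies (0.9 s)\n", new_diff);	/* 225 */
	return 0;
}
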
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 72ca2a2e08e2..b2fa195adc7a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job)
 	struct fc_bsg_request *bsg_request = job->request;
 	struct fc_bsg_reply *bsg_reply = job->reply;
 	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
-	struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
 	struct bfad_s *bfad = im_port->bfad;
 	void *payload_kbuf;
 	int rc = -EINVAL;
@@ -3350,7 +3351,8 @@ int
 bfad_im_bsg_els_ct_request(struct bsg_job *job)
 {
 	struct bfa_bsg_data *bsg_data;
-	struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job));
+	struct Scsi_Host *shost = fc_bsg_to_shost(job);
+	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
 	struct bfad_s *bfad = im_port->bfad;
 	bfa_bsg_fcpt_t *bsg_fcpt;
 	struct bfad_fcxp *drv_fcxp;
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 24e657a4ec80..c05d6e91e4bd 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -546,6 +546,7 @@ int
 bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 			struct device *dev)
 {
+	struct bfad_im_port_pointer *im_portp;
 	int error = 1;
 
 	mutex_lock(&bfad_mutex);
@@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 		goto out_free_idr;
 	}
 
-	im_port->shost->hostdata[0] = (unsigned long)im_port;
+	im_portp = shost_priv(im_port->shost);
+	im_portp->p = im_port;
 	im_port->shost->unique_id = im_port->idr_id;
 	im_port->shost->this_id = -1;
 	im_port->shost->max_id = MAX_FCP_TARGET;
@@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 
 	sht->sg_tablesize = bfad->cfg_data.io_max_sge;
 
-	return scsi_host_alloc(sht, sizeof(unsigned long));
+	return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
}
 
 void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index c81ec2a77ef5..06ce4ba2b7bc 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -69,6 +69,16 @@ struct bfad_im_port_s {
 	struct fc_vport *fc_vport;
 };
 
+struct bfad_im_port_pointer {
+	struct bfad_im_port_s *p;
+};
+
+static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host)
+{
+	struct bfad_im_port_pointer *im_portp = shost_priv(host);
+	return im_portp->p;
+}
+
 enum bfad_itnim_state {
 	ITNIM_STATE_NONE,
 	ITNIM_STATE_ONLINE,
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 5da46052e179..21be672679fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport,
 		case ELS_FLOGI:
 			if (!lport->point_to_multipoint)
 				fc_lport_recv_flogi_req(lport, fp);
+			else
+				fc_rport_recv_req(lport, fp);
 			break;
 		case ELS_LOGO:
 			if (fc_frame_sid(fp) == FC_FID_FLOGI)
 				fc_lport_recv_logo_req(lport, fp);
+			else
+				fc_rport_recv_req(lport, fp);
 			break;
 		case ELS_RSCN:
 			lport->tt.disc_recv_req(lport, fp);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index ca1566237ae7..3183d63de4da 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 		     struct sas_rphy *rphy)
 {
 	struct domain_device *dev;
-	unsigned int reslen = 0;
+	unsigned int rcvlen = 0;
 	int ret = -EINVAL;
 
 	/* no rphy means no smp target support (ie aic94xx host) */
@@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 
 	ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
 			job->reply_payload.sg_list);
-	if (ret > 0) {
-		/* positive number is the untransferred residual */
-		reslen = ret;
+	if (ret >= 0) {
+		/* bsg_job_done() requires the length received */
+		rcvlen = job->reply_payload.payload_len - ret;
 		ret = 0;
 	}
 
out:
-	bsg_job_done(job, ret, reslen);
+	bsg_job_done(job, ret, rcvlen);
 }
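
The fix is a unit conversion: smp_execute_task_sg() returns the untransferred residual, while bsg_job_done() expects the number of bytes actually received, which is payload_len minus that residual. In miniature:

#include <assert.h>

/* bsg_job_done() wants bytes received; the task returns the residual */
static unsigned int rcv_len(unsigned int payload_len, int residual)
{
	return payload_len - residual;
}

int main(void)
{
	assert(rcv_len(1024, 0) == 1024);	/* full reply: residual 0 */
	assert(rcv_len(1024, 24) == 1000);	/* short reply */
	return 0;
}
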
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 56faeb049b4a..87c08ff37ddd 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
 	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
 	if (rc < 0) {
-		(rqbp->rqb_free_buffer)(phba, rqb_entry);
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"6409 Cannot post to RQ %d: %x %x\n",
 				rqb_entry->hrq->queue_id,
 				rqb_entry->hrq->host_index,
 				rqb_entry->hrq->hba_index);
+		(rqbp->rqb_free_buffer)(phba, rqb_entry);
 	} else {
 		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
 		rqbp->buffer_count++;
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index 01f08c03f2c1..c3765d29fd3f 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq)
 {
 	struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req);
 	int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc);
-	char buf[80];
+	const u8 *const cdb = READ_ONCE(cmd->cmnd);
+	char buf[80] = "(?)";
 
-	__scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len);
+	if (cdb)
+		__scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len);
 	seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf,
 		   cmd->retries, msecs / 1000, msecs % 1000);
 }
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 78d4aa8df675..449ef5adbb2b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
 };
 
 
-static const char spaces[] = "                "; /* 16 of them */
 static blist_flags_t scsi_default_dev_flags;
 static LIST_HEAD(scsi_dev_info_list);
 static char scsi_dev_flags[256];
@@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
 	size_t from_length;
 
 	from_length = strlen(from);
-	strncpy(to, from, min(to_length, from_length));
-	if (from_length < to_length) {
-		if (compatible) {
-			/*
-			 * NUL terminate the string if it is short.
-			 */
-			to[from_length] = '\0';
-		} else {
-			/*
-			 * space pad the string if it is short.
-			 */
-			strncpy(&to[from_length], spaces,
-				to_length - from_length);
-		}
+	/* This zero-pads the destination */
+	strncpy(to, from, to_length);
+	if (from_length < to_length && !compatible) {
+		/*
+		 * space pad the string if it is short.
+		 */
+		memset(&to[from_length], ' ', to_length - from_length);
 	}
 	if (from_length > to_length)
 		printk(KERN_WARNING "%s: %s string '%s' is too long\n",
@@ -458,7 +450,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
 		/*
 		 * vendor strings must be an exact match
 		 */
-		if (vmax != strlen(devinfo->vendor) ||
+		if (vmax != strnlen(devinfo->vendor,
+				    sizeof(devinfo->vendor)) ||
 		    memcmp(devinfo->vendor, vskip, vmax))
 			continue;
 
@@ -466,7 +459,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
 		 * @model specifies the full string, and
 		 * must be larger or equal to devinfo->model
 		 */
-		mlen = strlen(devinfo->model);
+		mlen = strnlen(devinfo->model, sizeof(devinfo->model));
 		if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
 			continue;
 		return devinfo;
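
Two standard-C details carry this hunk: strncpy(to, from, n) zero-fills the tail of to whenever from is shorter than n, which made the old explicit NUL-termination branch redundant, and the stored vendor/model fields are fixed-width and possibly unterminated, which is exactly the case strnlen() exists for. Both behaviors demonstrated standalone:

#include <assert.h>
#include <string.h>

int main(void)
{
	char to[8];

	/* strncpy zero-pads the whole destination when from is short */
	strncpy(to, "ab", sizeof(to));
	assert(to[2] == '\0' && to[7] == '\0');

	/* non-"compatible" mode then space-pads over those zeros
	 * (the result is deliberately not NUL-terminated) */
	size_t from_length = strlen("ab");
	memset(&to[from_length], ' ', sizeof(to) - from_length);
	assert(to[7] == ' ');

	/* a full-width field has no terminator; strlen would run off
	 * the end, strnlen stops at the buffer size */
	char vendor[4] = { 'A', 'C', 'M', 'E' };
	assert(strnlen(vendor, sizeof(vendor)) == 4);
	return 0;
}
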
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 00742c50cd44..d9ca1dfab154 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1967,6 +1967,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
out_put_device:
 	put_device(&sdev->sdev_gendev);
out:
+	if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
+		blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
 	return false;
 }
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 24fe68522716..a028ab3322a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
 static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
 	struct request *rq = SCpnt->request;
+	u8 *cmnd;
 
 	if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
 		sd_zbc_write_unlock_zone(SCpnt);
@@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 		__free_page(rq->special_vec.bv_page);
 
 	if (SCpnt->cmnd != scsi_req(rq)->cmd) {
-		mempool_free(SCpnt->cmnd, sd_cdb_pool);
+		cmnd = SCpnt->cmnd;
 		SCpnt->cmnd = NULL;
 		SCpnt->cmd_len = 0;
+		mempool_free(cmnd, sd_cdb_pool);
 	}
 }
 
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 89f4cf507be6..f2d8c3c53ea4 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -20,8 +20,8 @@
 #define AO_SEC_SOCINFO_OFFSET	AO_SEC_SD_CFG8
 
 #define SOCINFO_MAJOR	GENMASK(31, 24)
-#define SOCINFO_MINOR	GENMASK(23, 16)
-#define SOCINFO_PACK	GENMASK(15, 8)
+#define SOCINFO_PACK	GENMASK(23, 16)
+#define SOCINFO_MINOR	GENMASK(15, 8)
 #define SOCINFO_MISC	GENMASK(7, 0)
 
 static const struct meson_gx_soc_id {
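
GENMASK(h, l) names the bit range h..l and FIELD_GET() extracts it, so the fix is purely which byte lane the MINOR and PACK names point at. A portable restatement with the two kernel macros re-derived locally (the sample register value is made up):

#include <assert.h>
#include <stdint.h>

/* local re-derivations of the kernel's GENMASK()/FIELD_GET() */
#define GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_GET(m, v)	(((v) & (m)) / ((m) & ~((m) << 1)))

#define SOCINFO_MAJOR	GENMASK(31, 24)
#define SOCINFO_PACK	GENMASK(23, 16)	/* previously mislabelled MINOR */
#define SOCINFO_MINOR	GENMASK(15, 8)	/* previously mislabelled PACK */
#define SOCINFO_MISC	GENMASK(7, 0)

int main(void)
{
	uint32_t socinfo = 0x21AABB82;	/* invented register value */

	assert(FIELD_GET(SOCINFO_MAJOR, socinfo) == 0x21);
	assert(FIELD_GET(SOCINFO_PACK, socinfo) == 0xAA);
	assert(FIELD_GET(SOCINFO_MINOR, socinfo) == 0xBB);
	assert(FIELD_GET(SOCINFO_MISC, socinfo) == 0x82);
	return 0;
}
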
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c
index 1799d3f26a9e..2035835b62dc 100644
--- a/drivers/staging/ccree/ssi_hash.c
+++ b/drivers/staging/ccree/ssi_hash.c
@@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in)
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
 	struct ahash_req_ctx *state = ahash_request_ctx(req);
 	u32 tmp;
-	int rc;
+	int rc = 0;
 
 	memcpy(&tmp, in, sizeof(u32));
 	if (tmp != CC_EXPORT_MAGIC) {
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index e69a2153c999..12c9df9cddde 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi)
 
 	currentValue = READ_REG(REG_DATAMODUL);
 
-	switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define
+	switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) {
 	case DATAMODUL_MODULATION_TYPE_OOK: return OOK;
 	case DATAMODUL_MODULATION_TYPE_FSK: return FSK;
 	default: return undefined;
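
The deleted expression is a C precedence bug: >> binds tighter than &, so currentValue & MASK >> 3 parsed as currentValue & (MASK >> 3) and masked the wrong bits entirely. Demonstrated below, assuming the driver's mask value is 0x18 (bits 4:3):

#include <assert.h>
#include <stdint.h>

#define MASK_MODULATION_TYPE	0x18	/* assumed value, bits 4:3 */

int main(void)
{
	uint8_t reg = 0x08;	/* modulation field = 01, low bits clear */

	/* ">>" binds tighter than "&": the old expression masked with
	 * (0x18 >> 3) == 0x03, i.e. the wrong bits, yielding 0 here */
	assert((reg & MASK_MODULATION_TYPE >> 3) == 0x00);

	/* the fixed expression keeps the field in place */
	assert((reg & MASK_MODULATION_TYPE) == 0x08);
	return 0;
}
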
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 7952357df9c8..edb6e4e9ef3a 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -590,7 +590,6 @@ static int __init optee_driver_init(void)
 		return -ENODEV;
 
 	np = of_find_matching_node(fw_np, optee_match);
-	of_node_put(fw_np);
 	if (!np)
 		return -ENODEV;
 
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 55b198ba629b..78e92d29f8d9 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	unsigned iad_num = 0;
 
 	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
+	nintf = nintf_orig = config->desc.bNumInterfaces;
+	config->desc.bNumInterfaces = 0;	// Adjusted later
+
 	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
 	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
 	    config->desc.bLength > size) {
@@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
 	buffer += config->desc.bLength;
 	size -= config->desc.bLength;
 
-	nintf = nintf_orig = config->desc.bNumInterfaces;
 	if (nintf > USB_MAXINTERFACES) {
 		dev_warn(ddev, "config %d has too many interfaces: %d, "
 		    "using maximum allowed: %d\n",
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index f66c94130cac..31749c79045f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -537,6 +537,7 @@ struct dwc2_core_params {
  *                      2 - Internal DMA
  * @power_optimized     Are power optimizations enabled?
  * @num_dev_ep          Number of device endpoints available
+ * @num_dev_in_eps      Number of device IN endpoints available
 * @num_dev_perio_in_ep Number of device periodic IN endpoints
 *                      available
 * @dev_token_q_depth   Device Mode IN Token Sequence Learning Queue
@@ -565,6 +566,7 @@ struct dwc2_core_params {
 *                      2 - 8 or 16 bits
 * @snpsid:             Value from SNPSID register
 * @dev_ep_dirs:        Direction of device endpoints (GHWCFG1)
+ * @g_tx_fifo_size[]	Power-on values of TxFIFO sizes
 */
struct dwc2_hw_params {
	unsigned op_mode:3;
@@ -586,12 +588,14 @@ struct dwc2_hw_params {
 	unsigned fs_phy_type:2;
 	unsigned i2c_enable:1;
 	unsigned num_dev_ep:4;
+	unsigned num_dev_in_eps : 4;
 	unsigned num_dev_perio_in_ep:4;
 	unsigned total_fifo_size:16;
 	unsigned power_optimized:1;
 	unsigned utmi_phy_data_width:2;
 	u32 snpsid;
 	u32 dev_ep_dirs;
+	u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
 };
 
 /* Size of control and EP0 buffers */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 88529d092503..e4c3ce0de5de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg)
 {
 	if (hsotg->hw_params.en_multiple_tx_fifo)
 		/* In dedicated FIFO mode we need count of IN EPs */
-		return (dwc2_readl(hsotg->regs + GHWCFG4) &
-			GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT;
+		return hsotg->hw_params.num_dev_in_eps;
 	else
 		/* In shared FIFO mode we need count of Periodic IN EPs */
 		return hsotg->hw_params.num_dev_perio_in_ep;
 }
 
 /**
- * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs
- */
-static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg)
-{
-	int val = 0;
-	int i;
-	u32 ep_dirs;
-
-	/*
-	 * Don't need additional space for ep info control registers in
-	 * slave mode.
-	 */
-	if (!using_dma(hsotg)) {
-		dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n");
-		return 0;
-	}
-
-	/*
-	 * Buffer DMA mode - 1 location per endpoit
-	 * Descriptor DMA mode - 4 locations per endpoint
-	 */
-	ep_dirs = hsotg->hw_params.dev_ep_dirs;
-
-	for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) {
-		val += ep_dirs & 3 ? 1 : 2;
-		ep_dirs >>= 2;
-	}
-
-	if (using_desc_dma(hsotg))
-		val = val * 4;
-
-	return val;
-}
-
-/**
  * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for
 * device mode TX FIFOs
 */
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
{
-	int ep_info_size;
 	int addr;
 	int tx_addr_max;
 	u32 np_tx_fifo_size;
@@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
 					hsotg->params.g_np_tx_fifo_size);
 
 	/* Get Endpoint Info Control block size in DWORDs. */
-	ep_info_size = dwc2_hsotg_ep_info_size(hsotg);
-	tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size;
+	tx_addr_max = hsotg->hw_params.total_fifo_size;
 
 	addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size;
 	if (tx_addr_max <= addr)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index ef73af6e03a9..03fd20f0b496 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg)
484 } 484 }
485 485
486 for (fifo = 1; fifo <= fifo_count; fifo++) { 486 for (fifo = 1; fifo <= fifo_count; fifo++) {
487 dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) & 487 dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo];
488 FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
489 488
490 if (hsotg->params.g_tx_fifo_size[fifo] < min || 489 if (hsotg->params.g_tx_fifo_size[fifo] < min ||
491 hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) { 490 hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) {
@@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
609 struct dwc2_hw_params *hw = &hsotg->hw_params; 608 struct dwc2_hw_params *hw = &hsotg->hw_params;
610 bool forced; 609 bool forced;
611 u32 gnptxfsiz; 610 u32 gnptxfsiz;
611 int fifo, fifo_count;
612 612
613 if (hsotg->dr_mode == USB_DR_MODE_HOST) 613 if (hsotg->dr_mode == USB_DR_MODE_HOST)
614 return; 614 return;
@@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
617 617
618 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); 618 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ);
619 619
620 fifo_count = dwc2_hsotg_tx_fifo_count(hsotg);
621
622 for (fifo = 1; fifo <= fifo_count; fifo++) {
623 hw->g_tx_fifo_size[fifo] =
624 (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) &
625 FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT;
626 }
627
620 if (forced) 628 if (forced)
621 dwc2_clear_force_mode(hsotg); 629 dwc2_clear_force_mode(hsotg);
622 630
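
The loop added to dwc2_get_dev_hwparams() snapshots each DPTXFSIZN depth while the core is still forced into device mode (note the dwc2_clear_force_mode() call just below), so dwc2_check_param_tx_fifo_sizes() can later validate against the cached hw_params copy instead of re-reading the registers in whatever mode the core happens to be in. A userspace sketch of the mask/shift extraction; the raw register value is hypothetical:

    #include <stdio.h>
    #include <stdint.h>

    #define FIFOSIZE_DEPTH_MASK  (0xffffU << 16)
    #define FIFOSIZE_DEPTH_SHIFT 16

    int main(void)
    {
            uint32_t dptxfsizn = 0x03000400;  /* hypothetical DPTXFSIZN */
            uint32_t depth = (dptxfsizn & FIFOSIZE_DEPTH_MASK) >>
                             FIFOSIZE_DEPTH_SHIFT;

            printf("TX FIFO depth: %u words\n", depth);  /* 768 */
            return 0;
    }
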
@@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
661 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); 669 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4);
662 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); 670 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ);
663 671
664 /*
665 * Host specific hardware parameters. Reading these parameters
666 * requires the controller to be in host mode. The mode will
667 * be forced, if necessary, to read these values.
668 */
669 dwc2_get_host_hwparams(hsotg);
670 dwc2_get_dev_hwparams(hsotg);
671
672 /* hwcfg1 */ 672 /* hwcfg1 */
673 hw->dev_ep_dirs = hwcfg1; 673 hw->dev_ep_dirs = hwcfg1;
674 674
@@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
711 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); 711 hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
712 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> 712 hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
713 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; 713 GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
714 hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >>
715 GHWCFG4_NUM_IN_EPS_SHIFT;
714 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); 716 hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
715 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); 717 hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
716 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> 718 hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
@@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
719 /* fifo sizes */ 721 /* fifo sizes */
720 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> 722 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
721 GRXFSIZ_DEPTH_SHIFT; 723 GRXFSIZ_DEPTH_SHIFT;
724 /*
725 * Host specific hardware parameters. Reading these parameters
726 * requires the controller to be in host mode. The mode will
727 * be forced, if necessary, to read these values.
728 */
729 dwc2_get_host_hwparams(hsotg);
730 dwc2_get_dev_hwparams(hsotg);
722 731
723 return 0; 732 return 0;
724} 733}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index c4a4d7bd2766..7ae0eefc7cc7 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
51 51
52 clk = of_clk_get(np, i); 52 clk = of_clk_get(np, i);
53 if (IS_ERR(clk)) { 53 if (IS_ERR(clk)) {
54 while (--i >= 0) 54 while (--i >= 0) {
55 clk_disable_unprepare(simple->clks[i]);
55 clk_put(simple->clks[i]); 56 clk_put(simple->clks[i]);
57 }
56 return PTR_ERR(clk); 58 return PTR_ERR(clk);
57 } 59 }
58 60
@@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = {
203 .driver = { 205 .driver = {
204 .name = "dwc3-of-simple", 206 .name = "dwc3-of-simple",
205 .of_match_table = of_dwc3_simple_match, 207 .of_match_table = of_dwc3_simple_match,
208 .pm = &dwc3_of_simple_dev_pm_ops,
206 }, 209 },
207}; 210};
208 211
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 981fd986cf82..639dd1b163a0 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
259{ 259{
260 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; 260 const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
261 struct dwc3 *dwc = dep->dwc; 261 struct dwc3 *dwc = dep->dwc;
262 u32 timeout = 500; 262 u32 timeout = 1000;
263 u32 reg; 263 u32 reg;
264 264
265 int cmd_status = 0; 265 int cmd_status = 0;
@@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
912 */ 912 */
913 if (speed == USB_SPEED_HIGH) { 913 if (speed == USB_SPEED_HIGH) {
914 struct usb_ep *ep = &dep->endpoint; 914 struct usb_ep *ep = &dep->endpoint;
915 unsigned int mult = ep->mult - 1; 915 unsigned int mult = 2;
916 unsigned int maxp = usb_endpoint_maxp(ep->desc); 916 unsigned int maxp = usb_endpoint_maxp(ep->desc);
917 917
918 if (length <= (2 * maxp)) 918 if (length <= (2 * maxp))
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 0a19a76645ad..31cce7805eb2 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -508,8 +508,8 @@ choice
508 controller, and the relevant drivers for each function declared 508 controller, and the relevant drivers for each function declared
509 by the device. 509 by the device.
510 510
511endchoice
512
513source "drivers/usb/gadget/legacy/Kconfig" 511source "drivers/usb/gadget/legacy/Kconfig"
514 512
513endchoice
514
515endif # USB_GADGET 515endif # USB_GADGET
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index 9570bbeced4f..784bf86dad4f 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -13,14 +13,6 @@
13# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG). 13# both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG).
14# 14#
15 15
16menuconfig USB_GADGET_LEGACY
17 bool "Legacy USB Gadget Support"
18 help
19 Legacy USB gadgets are USB gadgets that do not use the USB gadget
20 configfs interface.
21
22if USB_GADGET_LEGACY
23
24config USB_ZERO 16config USB_ZERO
25 tristate "Gadget Zero (DEVELOPMENT)" 17 tristate "Gadget Zero (DEVELOPMENT)"
26 select USB_LIBCOMPOSITE 18 select USB_LIBCOMPOSITE
@@ -487,7 +479,7 @@ endif
487# or video class gadget drivers), or specific hardware, here. 479# or video class gadget drivers), or specific hardware, here.
488config USB_G_WEBCAM 480config USB_G_WEBCAM
489 tristate "USB Webcam Gadget" 481 tristate "USB Webcam Gadget"
490 depends on VIDEO_DEV 482 depends on VIDEO_V4L2
491 select USB_LIBCOMPOSITE 483 select USB_LIBCOMPOSITE
492 select VIDEOBUF2_VMALLOC 484 select VIDEOBUF2_VMALLOC
493 select USB_F_UVC 485 select USB_F_UVC
@@ -498,5 +490,3 @@ config USB_G_WEBCAM
498 490
499 Say "y" to link the driver statically, or "m" to build a 491 Say "y" to link the driver statically, or "m" to build a
500 dynamically linked module called "g_webcam". 492 dynamically linked module called "g_webcam".
501
502endif
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 15f7d422885f..3a29b32a3bd0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -971,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
971 return 0; 971 return 0;
972 } 972 }
973 973
974 xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 974 dev = kzalloc(sizeof(*dev), flags);
975 if (!xhci->devs[slot_id]) 975 if (!dev)
976 return 0; 976 return 0;
977 dev = xhci->devs[slot_id];
978 977
979 /* Allocate the (output) device context that will be used in the HC. */ 978 /* Allocate the (output) device context that will be used in the HC. */
980 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 979 dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
@@ -1015,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
1015 1014
1016 trace_xhci_alloc_virt_device(dev); 1015 trace_xhci_alloc_virt_device(dev);
1017 1016
1017 xhci->devs[slot_id] = dev;
1018
1018 return 1; 1019 return 1;
1019fail: 1020fail:
1020 xhci_free_virt_device(xhci, slot_id); 1021
1022 if (dev->in_ctx)
1023 xhci_free_container_ctx(xhci, dev->in_ctx);
1024 if (dev->out_ctx)
1025 xhci_free_container_ctx(xhci, dev->out_ctx);
1026 kfree(dev);
1027
1021 return 0; 1028 return 0;
1022} 1029}
1023 1030
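
The xhci-mem change is the classic publish-after-init rule: build the virt device in a local pointer and write it into xhci->devs[slot_id] only after every sub-allocation has succeeded, so the failure path frees only its own partial work and cannot race with anyone who can already see the slot. A schematic sketch with hypothetical types and sizes:

    #include <linux/slab.h>

    struct vdev {                   /* hypothetical, mirrors in/out ctx */
            void *in_ctx;
            void *out_ctx;
    };

    static struct vdev *table[256]; /* globally visible slot table */

    static int alloc_slot(int slot_id, gfp_t flags)
    {
            struct vdev *dev;

            /* Build the object locally; nobody else can see it yet. */
            dev = kzalloc(sizeof(*dev), flags);
            if (!dev)
                    return -ENOMEM;

            dev->out_ctx = kzalloc(64, flags);
            if (!dev->out_ctx)
                    goto fail;
            dev->in_ctx = kzalloc(64, flags);
            if (!dev->in_ctx)
                    goto fail;

            table[slot_id] = dev;   /* publish only when complete */
            return 0;

    fail:
            /* table[slot_id] was never written, so nothing else can
             * reach this object; freeing locally cannot double-free. */
            kfree(dev->out_ctx);
            kfree(dev->in_ctx);
            kfree(dev);
            return -ENOMEM;
    }
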
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6eb87c6e4d24..c5cbc685c691 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3112,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3112{ 3112{
3113 u32 maxp, total_packet_count; 3113 u32 maxp, total_packet_count;
3114 3114
3115 /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ 3115 /* MTK xHCI 0.96 contains some features from 1.0 */
3116 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) 3116 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3117 return ((td_total_len - transferred) >> 10); 3117 return ((td_total_len - transferred) >> 10);
3118 3118
@@ -3121,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3121 trb_buff_len == td_total_len) 3121 trb_buff_len == td_total_len)
3122 return 0; 3122 return 0;
3123 3123
 3124 /* for MTK xHCI, TD size doesn't include this TRB */ 3124 /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3125 if (xhci->quirks & XHCI_MTK_HOST) 3125 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3126 trb_buff_len = 0; 3126 trb_buff_len = 0;
3127 3127
3128 maxp = usb_endpoint_maxp(&urb->ep->desc); 3128 maxp = usb_endpoint_maxp(&urb->ep->desc);
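
In xhci 1.0 and later, the TD size field carries the number of packets still left after the current TRB; the MTK 0.96-based core counts the current TRB as well, which is what zeroing trb_buff_len achieves in the remainder computation later in the function (not shown in this hunk). A standalone sketch of that arithmetic, using hypothetical transfer values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* TD size = packets still left after this TRB (xhci 1.0 semantics). */
    static unsigned int td_remainder(unsigned int transferred,
                                     unsigned int trb_buff_len,
                                     unsigned int td_total_len,
                                     unsigned int maxp)
    {
            unsigned int total = DIV_ROUND_UP(td_total_len, maxp);

            return total - (transferred + trb_buff_len) / maxp;
    }

    int main(void)
    {
            /* Hypothetical bulk TD: 4096 bytes, 512-byte packets,
             * second 1 KiB TRB being queued. */
            printf("1.x:  %u left\n", td_remainder(1024, 1024, 4096, 512));
            /* MTK 0.96: trb_buff_len forced to 0, so this TRB counts. */
            printf("0.96: %u left\n", td_remainder(1024, 0, 4096, 512));
            return 0;
    }
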
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 0397606a211b..6c036de63272 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
284 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; 284 musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
285 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 285 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
286 del_timer(&musb->dev_timer); 286 del_timer(&musb->dev_timer);
287 } else { 287 } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
288 /*
 289 * When a babble condition happens, a drvvbus interrupt
 290 * is also generated. Ignore this drvvbus interrupt
 291 * and let the babble interrupt handler recover the
292 * controller; otherwise, the host-mode flag is lost
293 * due to the MUSB_DEV_MODE() call below and babble
294 * recovery logic will not be called.
295 */
288 musb->is_active = 0; 296 musb->is_active = 0;
289 MUSB_DEV_MODE(musb); 297 MUSB_DEV_MODE(musb);
290 otg->default_a = 0; 298 otg->default_a = 0;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 2968046e7c05..f72d045ee9ef 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,6 +2100,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
2100 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2100 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2101 US_FL_BROKEN_FUA ), 2101 US_FL_BROKEN_FUA ),
2102 2102
2103/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
2104UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
2105 "JMicron",
2106 "JMS567",
2107 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2108 US_FL_BROKEN_FUA),
2109
2103/* 2110/*
2104 * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> 2111 * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
2105 * JMicron responds to USN and several other SCSI ioctls with a 2112 * JMicron responds to USN and several other SCSI ioctls with a
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index d520374a824e..e6127fb21c12 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
129 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 129 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
130 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), 130 US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
131 131
132/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
133UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
134 "JMicron",
135 "JMS567",
136 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
137 US_FL_BROKEN_FUA),
138
132/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ 139/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
133UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, 140UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
134 "VIA", 141 "VIA",
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 536e037f541f..493ac2928391 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -322,23 +322,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
322 return priv; 322 return priv;
323} 323}
324 324
325static int get_pipe(struct stub_device *sdev, int epnum, int dir) 325static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
326{ 326{
327 struct usb_device *udev = sdev->udev; 327 struct usb_device *udev = sdev->udev;
328 struct usb_host_endpoint *ep; 328 struct usb_host_endpoint *ep;
329 struct usb_endpoint_descriptor *epd = NULL; 329 struct usb_endpoint_descriptor *epd = NULL;
330 int epnum = pdu->base.ep;
331 int dir = pdu->base.direction;
332
333 if (epnum < 0 || epnum > 15)
334 goto err_ret;
330 335
331 if (dir == USBIP_DIR_IN) 336 if (dir == USBIP_DIR_IN)
332 ep = udev->ep_in[epnum & 0x7f]; 337 ep = udev->ep_in[epnum & 0x7f];
333 else 338 else
334 ep = udev->ep_out[epnum & 0x7f]; 339 ep = udev->ep_out[epnum & 0x7f];
335 if (!ep) { 340 if (!ep)
336 dev_err(&sdev->udev->dev, "no such endpoint?, %d\n", 341 goto err_ret;
337 epnum);
338 BUG();
339 }
340 342
341 epd = &ep->desc; 343 epd = &ep->desc;
344
345 /* validate transfer_buffer_length */
346 if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
347 dev_err(&sdev->udev->dev,
348 "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
349 pdu->u.cmd_submit.transfer_buffer_length);
350 return -1;
351 }
352
342 if (usb_endpoint_xfer_control(epd)) { 353 if (usb_endpoint_xfer_control(epd)) {
343 if (dir == USBIP_DIR_OUT) 354 if (dir == USBIP_DIR_OUT)
344 return usb_sndctrlpipe(udev, epnum); 355 return usb_sndctrlpipe(udev, epnum);
@@ -361,15 +372,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
361 } 372 }
362 373
363 if (usb_endpoint_xfer_isoc(epd)) { 374 if (usb_endpoint_xfer_isoc(epd)) {
375 /* validate packet size and number of packets */
376 unsigned int maxp, packets, bytes;
377
378 maxp = usb_endpoint_maxp(epd);
379 maxp *= usb_endpoint_maxp_mult(epd);
380 bytes = pdu->u.cmd_submit.transfer_buffer_length;
381 packets = DIV_ROUND_UP(bytes, maxp);
382
383 if (pdu->u.cmd_submit.number_of_packets < 0 ||
384 pdu->u.cmd_submit.number_of_packets > packets) {
385 dev_err(&sdev->udev->dev,
386 "CMD_SUBMIT: isoc invalid num packets %d\n",
387 pdu->u.cmd_submit.number_of_packets);
388 return -1;
389 }
364 if (dir == USBIP_DIR_OUT) 390 if (dir == USBIP_DIR_OUT)
365 return usb_sndisocpipe(udev, epnum); 391 return usb_sndisocpipe(udev, epnum);
366 else 392 else
367 return usb_rcvisocpipe(udev, epnum); 393 return usb_rcvisocpipe(udev, epnum);
368 } 394 }
369 395
396err_ret:
370 /* NOT REACHED */ 397 /* NOT REACHED */
371 dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum); 398 dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
372 return 0; 399 return -1;
373} 400}
374 401
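
The isoc check added above bounds the attacker-controlled number_of_packets by the most packets the declared buffer could possibly need, DIV_ROUND_UP(length, maxp * mult). A userspace sketch of that bound with hypothetical values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int maxp = 1024 * 3;   /* maxp * mult, hypothetical */
            unsigned int bytes = 24576;     /* transfer_buffer_length */
            unsigned int packets = DIV_ROUND_UP(bytes, maxp);
            int number_of_packets = 9;      /* from the untrusted PDU */

            if (number_of_packets < 0 ||
                (unsigned int)number_of_packets > packets)
                    printf("reject: %d > %u\n", number_of_packets, packets);
            else
                    printf("accept: %d <= %u\n", number_of_packets, packets);
            return 0;
    }
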
375static void masking_bogus_flags(struct urb *urb) 402static void masking_bogus_flags(struct urb *urb)
@@ -433,7 +460,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
433 struct stub_priv *priv; 460 struct stub_priv *priv;
434 struct usbip_device *ud = &sdev->ud; 461 struct usbip_device *ud = &sdev->ud;
435 struct usb_device *udev = sdev->udev; 462 struct usb_device *udev = sdev->udev;
436 int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); 463 int pipe = get_pipe(sdev, pdu);
464
465 if (pipe == -1)
466 return;
437 467
438 priv = stub_priv_alloc(sdev, pdu); 468 priv = stub_priv_alloc(sdev, pdu);
439 if (!priv) 469 if (!priv)
@@ -452,7 +482,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
452 } 482 }
453 483
454 /* allocate urb transfer buffer, if needed */ 484 /* allocate urb transfer buffer, if needed */
455 if (pdu->u.cmd_submit.transfer_buffer_length > 0) { 485 if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
486 pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
456 priv->urb->transfer_buffer = 487 priv->urb->transfer_buffer =
457 kzalloc(pdu->u.cmd_submit.transfer_buffer_length, 488 kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
458 GFP_KERNEL); 489 GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index b18bce96c212..53172b1f6257 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
167 memset(&pdu_header, 0, sizeof(pdu_header)); 167 memset(&pdu_header, 0, sizeof(pdu_header));
168 memset(&msg, 0, sizeof(msg)); 168 memset(&msg, 0, sizeof(msg));
169 169
170 if (urb->actual_length > 0 && !urb->transfer_buffer) {
171 dev_err(&sdev->udev->dev,
172 "urb: actual_length %d transfer_buffer null\n",
173 urb->actual_length);
174 return -1;
175 }
176
170 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) 177 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
171 iovnum = 2 + urb->number_of_packets; 178 iovnum = 2 + urb->number_of_packets;
172 else 179 else
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index e5de35c8c505..473fb8a87289 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -256,6 +256,7 @@ struct usbip_device {
256 /* lock for status */ 256 /* lock for status */
257 spinlock_t lock; 257 spinlock_t lock;
258 258
259 int sockfd;
259 struct socket *tcp_socket; 260 struct socket *tcp_socket;
260 261
261 struct task_struct *tcp_rx; 262 struct task_struct *tcp_rx;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index e78f7472cac4..091f76b7196d 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -17,15 +17,20 @@
17 17
18/* 18/*
19 * output example: 19 * output example:
20 * hub port sta spd dev socket local_busid 20 * hub port sta spd dev sockfd local_busid
21 * hs 0000 004 000 00000000 c5a7bb80 1-2.3 21 * hs 0000 004 000 00000000 3 1-2.3
22 * ................................................ 22 * ................................................
23 * ss 0008 004 000 00000000 d8cee980 2-3.4 23 * ss 0008 004 000 00000000 4 2-3.4
24 * ................................................ 24 * ................................................
25 * 25 *
26 * IP address can be retrieved from a socket pointer address by looking 26 * Output includes socket fd instead of socket pointer address to avoid
 27 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a 27 * leaking a kernel memory address in:
28 * port number and its peer IP address. 28 * /sys/devices/platform/vhci_hcd.0/status and in debug output.
 29 * The socket pointer address is not otherwise used; it was only made
 30 * visible as a convenient way to find the IP address from the socket
 31 * pointer by looking up /proc/net/{tcp,tcp6}. As this opens a security
 32 * hole, sockfd is exposed instead.
33 *
29 */ 34 */
30static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) 35static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev)
31{ 36{
@@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd
39 if (vdev->ud.status == VDEV_ST_USED) { 44 if (vdev->ud.status == VDEV_ST_USED) {
40 *out += sprintf(*out, "%03u %08x ", 45 *out += sprintf(*out, "%03u %08x ",
41 vdev->speed, vdev->devid); 46 vdev->speed, vdev->devid);
42 *out += sprintf(*out, "%16p %s", 47 *out += sprintf(*out, "%u %s",
43 vdev->ud.tcp_socket, 48 vdev->ud.sockfd,
44 dev_name(&vdev->udev->dev)); 49 dev_name(&vdev->udev->dev));
45 50
46 } else { 51 } else {
@@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr,
160 char *s = out; 165 char *s = out;
161 166
162 /* 167 /*
163 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2. 168 * Half the ports are for SPEED_HIGH and half for SPEED_SUPER,
169 * thus the * 2.
164 */ 170 */
165 out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); 171 out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers);
166 return out - s; 172 return out - s;
@@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
366 372
367 vdev->devid = devid; 373 vdev->devid = devid;
368 vdev->speed = speed; 374 vdev->speed = speed;
375 vdev->ud.sockfd = sockfd;
369 vdev->ud.tcp_socket = socket; 376 vdev->ud.tcp_socket = socket;
370 vdev->ud.status = VDEV_ST_NOTASSIGNED; 377 vdev->ud.status = VDEV_ST_NOTASSIGNED;
371 378
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index a9192fe4f345..c92131edfaba 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -522,10 +522,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
522 return -EBUSY; 522 return -EBUSY;
523 523
524 vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); 524 vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
525 if (!vm_dev) { 525 if (!vm_dev)
526 rc = -ENOMEM; 526 return -ENOMEM;
527 goto free_mem;
528 }
529 527
530 vm_dev->vdev.dev.parent = &pdev->dev; 528 vm_dev->vdev.dev.parent = &pdev->dev;
531 vm_dev->vdev.dev.release = virtio_mmio_release_dev; 529 vm_dev->vdev.dev.release = virtio_mmio_release_dev;
@@ -535,17 +533,14 @@ static int virtio_mmio_probe(struct platform_device *pdev)
535 spin_lock_init(&vm_dev->lock); 533 spin_lock_init(&vm_dev->lock);
536 534
537 vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); 535 vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
538 if (vm_dev->base == NULL) { 536 if (vm_dev->base == NULL)
539 rc = -EFAULT; 537 return -EFAULT;
540 goto free_vmdev;
541 }
542 538
543 /* Check magic value */ 539 /* Check magic value */
544 magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); 540 magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
545 if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) { 541 if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
546 dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); 542 dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
547 rc = -ENODEV; 543 return -ENODEV;
548 goto unmap;
549 } 544 }
550 545
551 /* Check device version */ 546 /* Check device version */
@@ -553,8 +548,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
553 if (vm_dev->version < 1 || vm_dev->version > 2) { 548 if (vm_dev->version < 1 || vm_dev->version > 2) {
554 dev_err(&pdev->dev, "Version %ld not supported!\n", 549 dev_err(&pdev->dev, "Version %ld not supported!\n",
555 vm_dev->version); 550 vm_dev->version);
556 rc = -ENXIO; 551 return -ENXIO;
557 goto unmap;
558 } 552 }
559 553
560 vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); 554 vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -563,8 +557,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
563 * virtio-mmio device with an ID 0 is a (dummy) placeholder 557 * virtio-mmio device with an ID 0 is a (dummy) placeholder
564 * with no function. End probing now with no error reported. 558 * with no function. End probing now with no error reported.
565 */ 559 */
566 rc = -ENODEV; 560 return -ENODEV;
567 goto unmap;
568 } 561 }
569 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); 562 vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
570 563
@@ -590,33 +583,15 @@ static int virtio_mmio_probe(struct platform_device *pdev)
590 platform_set_drvdata(pdev, vm_dev); 583 platform_set_drvdata(pdev, vm_dev);
591 584
592 rc = register_virtio_device(&vm_dev->vdev); 585 rc = register_virtio_device(&vm_dev->vdev);
593 if (rc) { 586 if (rc)
594 iounmap(vm_dev->base);
595 devm_release_mem_region(&pdev->dev, mem->start,
596 resource_size(mem));
597 put_device(&vm_dev->vdev.dev); 587 put_device(&vm_dev->vdev.dev);
598 } 588
599 return rc;
600unmap:
601 iounmap(vm_dev->base);
602free_mem:
603 devm_release_mem_region(&pdev->dev, mem->start,
604 resource_size(mem));
605free_vmdev:
606 devm_kfree(&pdev->dev, vm_dev);
607 return rc; 589 return rc;
608} 590}
609 591
610static int virtio_mmio_remove(struct platform_device *pdev) 592static int virtio_mmio_remove(struct platform_device *pdev)
611{ 593{
612 struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); 594 struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
613 struct resource *mem;
614
615 iounmap(vm_dev->base);
616 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
617 if (mem)
618 devm_release_mem_region(&pdev->dev, mem->start,
619 resource_size(mem));
620 unregister_virtio_device(&vm_dev->vdev); 595 unregister_virtio_device(&vm_dev->vdev);
621 596
622 return 0; 597 return 0;
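
virtio_mmio already allocates with devm_kzalloc()/devm_ioremap(), and device-managed resources are released automatically when probe fails or the device goes away; the patch deletes the manual iounmap/devm_release_mem_region/devm_kfree unwinding that duplicated that cleanup. A minimal sketch of the idiom, with hypothetical driver state:

    #include <linux/device.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct resource *mem;
            void __iomem *base;
            void *state;

            mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!mem)
                    return -EINVAL;

            state = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
            if (!state)
                    return -ENOMEM;         /* nothing to unwind */

            base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
            if (!base)
                    return -EFAULT;         /* 'state' freed automatically */

            platform_set_drvdata(pdev, state);
            return 0;
    }
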
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d8dd54678ab7..e5d0c28372ea 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU
269 269
270config XEN_ACPI_PROCESSOR 270config XEN_ACPI_PROCESSOR
271 tristate "Xen ACPI processor" 271 tristate "Xen ACPI processor"
272 depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ 272 depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
273 default m 273 default m
274 help 274 help
275 This ACPI processor uploads Power Management information to the Xen 275 This ACPI processor uploads Power Management information to the Xen
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 8fc41705c7cd..961a12dc6dc8 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -170,7 +170,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
170 170
171 mutex_unlock(&sbi->wq_mutex); 171 mutex_unlock(&sbi->wq_mutex);
172 172
173 if (autofs4_write(sbi, pipe, &pkt, pktsz))
174 switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { 173 switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
175 case 0: 174 case 0:
176 break; 175 break;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 531e0a8645b0..1e74cf826532 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1032,14 +1032,17 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && 1032 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) { 1033 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1034 ret = btrfs_inc_ref(trans, root, buf, 1); 1034 ret = btrfs_inc_ref(trans, root, buf, 1);
1035 BUG_ON(ret); /* -ENOMEM */ 1035 if (ret)
1036 return ret;
1036 1037
1037 if (root->root_key.objectid == 1038 if (root->root_key.objectid ==
1038 BTRFS_TREE_RELOC_OBJECTID) { 1039 BTRFS_TREE_RELOC_OBJECTID) {
1039 ret = btrfs_dec_ref(trans, root, buf, 0); 1040 ret = btrfs_dec_ref(trans, root, buf, 0);
1040 BUG_ON(ret); /* -ENOMEM */ 1041 if (ret)
1042 return ret;
1041 ret = btrfs_inc_ref(trans, root, cow, 1); 1043 ret = btrfs_inc_ref(trans, root, cow, 1);
1042 BUG_ON(ret); /* -ENOMEM */ 1044 if (ret)
1045 return ret;
1043 } 1046 }
1044 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; 1047 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1045 } else { 1048 } else {
@@ -1049,7 +1052,8 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1049 ret = btrfs_inc_ref(trans, root, cow, 1); 1052 ret = btrfs_inc_ref(trans, root, cow, 1);
1050 else 1053 else
1051 ret = btrfs_inc_ref(trans, root, cow, 0); 1054 ret = btrfs_inc_ref(trans, root, cow, 0);
1052 BUG_ON(ret); /* -ENOMEM */ 1055 if (ret)
1056 return ret;
1053 } 1057 }
1054 if (new_flags != 0) { 1058 if (new_flags != 0) {
1055 int level = btrfs_header_level(buf); 1059 int level = btrfs_header_level(buf);
@@ -1068,9 +1072,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
1068 ret = btrfs_inc_ref(trans, root, cow, 1); 1072 ret = btrfs_inc_ref(trans, root, cow, 1);
1069 else 1073 else
1070 ret = btrfs_inc_ref(trans, root, cow, 0); 1074 ret = btrfs_inc_ref(trans, root, cow, 0);
1071 BUG_ON(ret); /* -ENOMEM */ 1075 if (ret)
1076 return ret;
1072 ret = btrfs_dec_ref(trans, root, buf, 1); 1077 ret = btrfs_dec_ref(trans, root, buf, 1);
1073 BUG_ON(ret); /* -ENOMEM */ 1078 if (ret)
1079 return ret;
1074 } 1080 }
1075 clean_tree_block(fs_info, buf); 1081 clean_tree_block(fs_info, buf);
1076 *last_ref = 1; 1082 *last_ref = 1;
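
Each BUG_ON(ret) here turned a failed reference update, typically just -ENOMEM, into a full kernel crash; returning the error instead lets the caller abort the transaction and take the filesystem read-only. The transformation, schematically (do_step stands in for the btrfs_inc_ref/btrfs_dec_ref calls):

    /* Before: any failure panics the machine. */
    ret = do_step(trans, root, buf);
    BUG_ON(ret);

    /* After: hand the error back; update_ref_for_cow()'s callers are
     * now responsible for checking the return value and aborting. */
    ret = do_step(trans, root, buf);
    if (ret)
            return ret;
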
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 10a2a579cc7f..a8ecccfc36de 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3231,6 +3231,7 @@ static int write_dev_supers(struct btrfs_device *device,
3231 int errors = 0; 3231 int errors = 0;
3232 u32 crc; 3232 u32 crc;
3233 u64 bytenr; 3233 u64 bytenr;
3234 int op_flags;
3234 3235
3235 if (max_mirrors == 0) 3236 if (max_mirrors == 0)
3236 max_mirrors = BTRFS_SUPER_MIRROR_MAX; 3237 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
@@ -3273,13 +3274,10 @@ static int write_dev_supers(struct btrfs_device *device,
3273 * we fua the first super. The others we allow 3274 * we fua the first super. The others we allow
3274 * to go down lazy. 3275 * to go down lazy.
3275 */ 3276 */
3276 if (i == 0) { 3277 op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
3277 ret = btrfsic_submit_bh(REQ_OP_WRITE, 3278 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3278 REQ_SYNC | REQ_FUA | REQ_META | REQ_PRIO, bh); 3279 op_flags |= REQ_FUA;
3279 } else { 3280 ret = btrfsic_submit_bh(REQ_OP_WRITE, op_flags, bh);
3280 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3281 REQ_SYNC | REQ_META | REQ_PRIO, bh);
3282 }
3283 if (ret) 3281 if (ret)
3284 errors++; 3282 errors++;
3285 } 3283 }
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4497f937e8fb..2f4328511ac8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -9206,6 +9206,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
9206 ret = btrfs_del_root(trans, fs_info, &root->root_key); 9206 ret = btrfs_del_root(trans, fs_info, &root->root_key);
9207 if (ret) { 9207 if (ret) {
9208 btrfs_abort_transaction(trans, ret); 9208 btrfs_abort_transaction(trans, ret);
9209 err = ret;
9209 goto out_end_trans; 9210 goto out_end_trans;
9210 } 9211 }
9211 9212
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 993061f83067..e1a7f3cb5be9 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3005,6 +3005,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
3005 compress_type = ordered_extent->compress_type; 3005 compress_type = ordered_extent->compress_type;
3006 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { 3006 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
3007 BUG_ON(compress_type); 3007 BUG_ON(compress_type);
3008 btrfs_qgroup_free_data(inode, NULL, ordered_extent->file_offset,
3009 ordered_extent->len);
3008 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), 3010 ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
3009 ordered_extent->file_offset, 3011 ordered_extent->file_offset,
3010 ordered_extent->file_offset + 3012 ordered_extent->file_offset +
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d748ad1c3620..2ef8acaac688 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2206,7 +2206,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
2206 if (!path) 2206 if (!path)
2207 return -ENOMEM; 2207 return -ENOMEM;
2208 2208
2209 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; 2209 ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
2210 2210
2211 key.objectid = tree_id; 2211 key.objectid = tree_id;
2212 key.type = BTRFS_ROOT_ITEM_KEY; 2212 key.type = BTRFS_ROOT_ITEM_KEY;
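
&name[BTRFS_INO_LOOKUP_PATH_MAX] points one past the end of the buffer, so seeding the backwards path builder there can let a later store land one byte out of bounds; the fix starts at the last valid byte. A compact demonstration of the off-by-one (the constant is a hypothetical stand-in):

    #include <stdio.h>
    #include <string.h>

    #define LOOKUP_PATH_MAX 16  /* hypothetical stand-in for the constant */

    int main(void)
    {
            char name[LOOKUP_PATH_MAX];
            /* &name[LOOKUP_PATH_MAX] is one past the end: storing
             * through it is out of bounds. The last addressable byte
             * is &name[LOOKUP_PATH_MAX - 1]. */
            char *ptr = &name[LOOKUP_PATH_MAX - 1];

            *ptr = '\0';            /* terminate, then build backwards */
            ptr -= 3;
            memcpy(ptr, "dir", 3);
            printf("%s\n", ptr);    /* prints "dir" */
            return 0;
    }
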
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index ab69dcb70e8a..1b468250e947 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1440,6 +1440,29 @@ static int __close_session(struct ceph_mds_client *mdsc,
1440 return request_close_session(mdsc, session); 1440 return request_close_session(mdsc, session);
1441} 1441}
1442 1442
1443static bool drop_negative_children(struct dentry *dentry)
1444{
1445 struct dentry *child;
1446 bool all_negative = true;
1447
1448 if (!d_is_dir(dentry))
1449 goto out;
1450
1451 spin_lock(&dentry->d_lock);
1452 list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1453 if (d_really_is_positive(child)) {
1454 all_negative = false;
1455 break;
1456 }
1457 }
1458 spin_unlock(&dentry->d_lock);
1459
1460 if (all_negative)
1461 shrink_dcache_parent(dentry);
1462out:
1463 return all_negative;
1464}
1465
1443/* 1466/*
1444 * Trim old(er) caps. 1467 * Trim old(er) caps.
1445 * 1468 *
@@ -1490,16 +1513,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1490 if ((used | wanted) & ~oissued & mine) 1513 if ((used | wanted) & ~oissued & mine)
1491 goto out; /* we need these caps */ 1514 goto out; /* we need these caps */
1492 1515
1493 session->s_trim_caps--;
1494 if (oissued) { 1516 if (oissued) {
1495 /* we aren't the only cap.. just remove us */ 1517 /* we aren't the only cap.. just remove us */
1496 __ceph_remove_cap(cap, true); 1518 __ceph_remove_cap(cap, true);
1519 session->s_trim_caps--;
1497 } else { 1520 } else {
1521 struct dentry *dentry;
1498 /* try dropping referring dentries */ 1522 /* try dropping referring dentries */
1499 spin_unlock(&ci->i_ceph_lock); 1523 spin_unlock(&ci->i_ceph_lock);
1500 d_prune_aliases(inode); 1524 dentry = d_find_any_alias(inode);
1501 dout("trim_caps_cb %p cap %p pruned, count now %d\n", 1525 if (dentry && drop_negative_children(dentry)) {
1502 inode, cap, atomic_read(&inode->i_count)); 1526 int count;
1527 dput(dentry);
1528 d_prune_aliases(inode);
1529 count = atomic_read(&inode->i_count);
1530 if (count == 1)
1531 session->s_trim_caps--;
1532 dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1533 inode, cap, count);
1534 } else {
1535 dput(dentry);
1536 }
1503 return 0; 1537 return 0;
1504 } 1538 }
1505 1539
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e06740436b92..ed88ab8a4774 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1406,7 +1406,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
1406 } while (rc == -EAGAIN); 1406 } while (rc == -EAGAIN);
1407 1407
1408 if (rc) { 1408 if (rc) {
1409 cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc); 1409 if (rc != -ENOENT)
1410 cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc);
1410 goto out; 1411 goto out;
1411 } 1412 }
1412 1413
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 5331631386a2..01346b8b6edb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2678,27 +2678,27 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2678 cifs_small_buf_release(req); 2678 cifs_small_buf_release(req);
2679 2679
2680 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; 2680 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
2681 shdr = get_sync_hdr(rsp);
2682 2681
2683 if (shdr->Status == STATUS_END_OF_FILE) { 2682 if (rc) {
2683 if (rc != -ENODATA) {
2684 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
2685 cifs_dbg(VFS, "Send error in read = %d\n", rc);
2686 }
2684 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 2687 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
2685 return 0; 2688 return rc == -ENODATA ? 0 : rc;
2686 } 2689 }
2687 2690
2688 if (rc) { 2691 *nbytes = le32_to_cpu(rsp->DataLength);
2689 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); 2692 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
2690 cifs_dbg(VFS, "Send error in read = %d\n", rc); 2693 (*nbytes > io_parms->length)) {
2691 } else { 2694 cifs_dbg(FYI, "bad length %d for count %d\n",
2692 *nbytes = le32_to_cpu(rsp->DataLength); 2695 *nbytes, io_parms->length);
2693 if ((*nbytes > CIFS_MAX_MSGSIZE) || 2696 rc = -EIO;
2694 (*nbytes > io_parms->length)) { 2697 *nbytes = 0;
2695 cifs_dbg(FYI, "bad length %d for count %d\n",
2696 *nbytes, io_parms->length);
2697 rc = -EIO;
2698 *nbytes = 0;
2699 }
2700 } 2698 }
2701 2699
2700 shdr = get_sync_hdr(rsp);
2701
2702 if (*buf) { 2702 if (*buf) {
2703 memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); 2703 memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes);
2704 free_rsp_buf(resp_buftype, rsp_iov.iov_base); 2704 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
diff --git a/fs/dax.c b/fs/dax.c
index 78b72c48374e..95981591977a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -627,8 +627,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
627 627
628 if (pfn != pmd_pfn(*pmdp)) 628 if (pfn != pmd_pfn(*pmdp))
629 goto unlock_pmd; 629 goto unlock_pmd;
630 if (!pmd_dirty(*pmdp) 630 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
631 && !pmd_access_permitted(*pmdp, WRITE))
632 goto unlock_pmd; 631 goto unlock_pmd;
633 632
634 flush_cache_page(vma, address, pfn); 633 flush_cache_page(vma, address, pfn);
diff --git a/fs/exec.c b/fs/exec.c
index 6be2aa0ab26f..156f56acfe8e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1216,15 +1216,14 @@ killed:
1216 return -EAGAIN; 1216 return -EAGAIN;
1217} 1217}
1218 1218
1219char *get_task_comm(char *buf, struct task_struct *tsk) 1219char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1220{ 1220{
1221 /* buf must be at least sizeof(tsk->comm) in size */
1222 task_lock(tsk); 1221 task_lock(tsk);
1223 strncpy(buf, tsk->comm, sizeof(tsk->comm)); 1222 strncpy(buf, tsk->comm, buf_size);
1224 task_unlock(tsk); 1223 task_unlock(tsk);
1225 return buf; 1224 return buf;
1226} 1225}
1227EXPORT_SYMBOL_GPL(get_task_comm); 1226EXPORT_SYMBOL_GPL(__get_task_comm);
1228 1227
1229/* 1228/*
 1230 * These functions flush out all traces of the currently running executable 1229 * These functions flush out all traces of the currently running executable
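
Taking the destination size explicitly keeps the copy bounded by the caller's buffer rather than by sizeof(tsk->comm); the matching header change (outside this hunk) can then wrap __get_task_comm() in a macro that passes sizeof(buf), so callers keep the short spelling. A userspace analogue of the sized copy, with hypothetical names:

    #include <stdio.h>
    #include <string.h>

    /* Analogue of __get_task_comm(): bounded by the caller's size. */
    static char *get_comm(char *buf, size_t buf_size, const char *comm)
    {
            strncpy(buf, comm, buf_size);
            buf[buf_size - 1] = '\0';  /* strncpy may not NUL-terminate */
            return buf;
    }

    int main(void)
    {
            char small[8];

            printf("%s\n", get_comm(small, sizeof(small), "kworker/u16:3"));
            return 0;
    }
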
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 8d6b7e35faf9..c83ece7facc5 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -150,7 +150,6 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx)
150 if (unlikely(ret < 0)) 150 if (unlikely(ret < 0))
151 goto out; 151 goto out;
152 ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; 152 ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1;
153 file->f_version = inode->i_version;
154 } 153 }
155 next_pos = ctx->pos; 154 next_pos = ctx->pos;
156 if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) { 155 if (!(de = map_pos_dirent(inode, &next_pos, &qbh))) {
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 3b834563b1f1..a4ad18afbdec 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -419,7 +419,6 @@ int hpfs_add_dirent(struct inode *i,
419 c = 1; 419 c = 1;
420 goto ret; 420 goto ret;
421 } 421 }
422 i->i_version++;
423 c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0); 422 c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0);
424 ret: 423 ret:
425 return c; 424 return c;
@@ -726,7 +725,6 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
726 return 2; 725 return 2;
727 } 726 }
728 } 727 }
729 i->i_version++;
730 for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1); 728 for_all_poss(i, hpfs_pos_del, (t = get_pos(dnode, de)) + 1, 1);
731 hpfs_delete_de(i->i_sb, dnode, de); 729 hpfs_delete_de(i->i_sb, dnode, de);
732 hpfs_mark_4buffers_dirty(qbh); 730 hpfs_mark_4buffers_dirty(qbh);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index c45a3b9b9ac7..f2c3ebcd309c 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -235,7 +235,6 @@ static struct inode *hpfs_alloc_inode(struct super_block *sb)
235 ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS); 235 ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
236 if (!ei) 236 if (!ei)
237 return NULL; 237 return NULL;
238 ei->vfs_inode.i_version = 1;
239 return &ei->vfs_inode; 238 return &ei->vfs_inode;
240} 239}
241 240
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0ac2fb1c6b63..b9129e2befea 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -291,12 +291,23 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
291 const struct sockaddr *sap = data->addr; 291 const struct sockaddr *sap = data->addr;
292 struct nfs_net *nn = net_generic(data->net, nfs_net_id); 292 struct nfs_net *nn = net_generic(data->net, nfs_net_id);
293 293
294again:
294 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { 295 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
295 const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; 296 const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
296 /* Don't match clients that failed to initialise properly */ 297 /* Don't match clients that failed to initialise properly */
297 if (clp->cl_cons_state < 0) 298 if (clp->cl_cons_state < 0)
298 continue; 299 continue;
299 300
301 /* If a client is still initializing then we need to wait */
302 if (clp->cl_cons_state > NFS_CS_READY) {
303 refcount_inc(&clp->cl_count);
304 spin_unlock(&nn->nfs_client_lock);
305 nfs_wait_client_init_complete(clp);
306 nfs_put_client(clp);
307 spin_lock(&nn->nfs_client_lock);
308 goto again;
309 }
310
300 /* Different NFS versions cannot share the same nfs_client */ 311 /* Different NFS versions cannot share the same nfs_client */
301 if (clp->rpc_ops != data->nfs_mod->rpc_ops) 312 if (clp->rpc_ops != data->nfs_mod->rpc_ops)
302 continue; 313 continue;
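
nfs_match_client() may now find an nfs_client that another mount is still initializing; it takes a reference, drops nfs_client_lock, sleeps until initialization completes, and then restarts the whole walk because the list may have changed while unlocked. A hedged sketch of that pattern with a hypothetical client type and helpers:

    #include <linux/completion.h>
    #include <linux/list.h>
    #include <linux/refcount.h>
    #include <linux/spinlock.h>

    #define READY 0

    struct client {                         /* hypothetical */
            struct list_head link;
            int cons_state;                 /* > READY: initializing */
            refcount_t count;
            struct completion init_done;
    };

    static void client_put(struct client *clp);     /* hypothetical */
    static bool client_matches(struct client *clp); /* hypothetical */

    /* Caller holds *lock; returns with *lock still held. */
    static struct client *match_client(struct list_head *head,
                                       spinlock_t *lock)
    {
            struct client *clp;

    again:
            list_for_each_entry(clp, head, link) {
                    if (clp->cons_state > READY) {
                            refcount_inc(&clp->count);  /* pin clp */
                            spin_unlock(lock);
                            wait_for_completion(&clp->init_done);
                            client_put(clp);
                            spin_lock(lock);
                            goto again; /* list may have changed */
                    }
                    if (client_matches(clp))
                            return clp;
            }
            return NULL;
    }
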
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 12bbab0becb4..65a7e5da508c 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -404,15 +404,19 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
404 if (error < 0) 404 if (error < 0)
405 goto error; 405 goto error;
406 406
407 if (!nfs4_has_session(clp))
408 nfs_mark_client_ready(clp, NFS_CS_READY);
409
410 error = nfs4_discover_server_trunking(clp, &old); 407 error = nfs4_discover_server_trunking(clp, &old);
411 if (error < 0) 408 if (error < 0)
412 goto error; 409 goto error;
413 410
414 if (clp != old) 411 if (clp != old) {
415 clp->cl_preserve_clid = true; 412 clp->cl_preserve_clid = true;
413 /*
414 * Mark the client as having failed initialization so other
415 * processes walking the nfs_client_list in nfs_match_client()
416 * won't try to use it.
417 */
418 nfs_mark_client_ready(clp, -EPERM);
419 }
416 nfs_put_client(clp); 420 nfs_put_client(clp);
417 clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags); 421 clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
418 return old; 422 return old;
@@ -539,6 +543,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
539 spin_lock(&nn->nfs_client_lock); 543 spin_lock(&nn->nfs_client_lock);
540 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { 544 list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
541 545
546 if (pos == new)
547 goto found;
548
542 status = nfs4_match_client(pos, new, &prev, nn); 549 status = nfs4_match_client(pos, new, &prev, nn);
543 if (status < 0) 550 if (status < 0)
544 goto out_unlock; 551 goto out_unlock;
@@ -559,6 +566,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
559 * way that a SETCLIENTID_CONFIRM to pos can succeed is 566 * way that a SETCLIENTID_CONFIRM to pos can succeed is
560 * if new and pos point to the same server: 567 * if new and pos point to the same server:
561 */ 568 */
569found:
562 refcount_inc(&pos->cl_count); 570 refcount_inc(&pos->cl_count);
563 spin_unlock(&nn->nfs_client_lock); 571 spin_unlock(&nn->nfs_client_lock);
564 572
@@ -572,6 +580,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
572 case 0: 580 case 0:
573 nfs4_swap_callback_idents(pos, new); 581 nfs4_swap_callback_idents(pos, new);
574 pos->cl_confirm = new->cl_confirm; 582 pos->cl_confirm = new->cl_confirm;
583 nfs_mark_client_ready(pos, NFS_CS_READY);
575 584
576 prev = NULL; 585 prev = NULL;
577 *result = pos; 586 *result = pos;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5b5f464f6f2a..4a379d7918f2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1890,6 +1890,8 @@ int nfs_commit_inode(struct inode *inode, int how)
1890 if (res) 1890 if (res)
1891 error = nfs_generic_commit_list(inode, &head, how, &cinfo); 1891 error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1892 nfs_commit_end(cinfo.mds); 1892 nfs_commit_end(cinfo.mds);
1893 if (res == 0)
1894 return res;
1893 if (error < 0) 1895 if (error < 0)
1894 goto out_error; 1896 goto out_error;
1895 if (!may_wait) 1897 if (!may_wait)
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 697f8ae7792d..f650e475d8f0 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -60,6 +60,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
60 gi->gid[i] = exp->ex_anon_gid; 60 gi->gid[i] = exp->ex_anon_gid;
61 else 61 else
62 gi->gid[i] = rqgi->gid[i]; 62 gi->gid[i] = rqgi->gid[i];
63
64 /* Each thread allocates its own gi, no race */
65 groups_sort(gi);
63 } 66 }
64 } else { 67 } else {
65 gi = get_group_info(rqgi); 68 gi = get_group_info(rqgi);
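
groups_sort() matters here because in-kernel membership checks (groups_search()) binary-search the gid array, so an unsorted group_info can wrongly deny access; the "each thread allocates its own gi" comment is why no locking is needed around the sort. A userspace demonstration of the underlying requirement, with hypothetical gids:

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp(const void *a, const void *b)
    {
            return *(const int *)a - *(const int *)b;
    }

    int main(void)
    {
            int gids[] = { 1000, 20, 4, 999 };  /* hypothetical groups */
            int key = 4;

            /* bsearch() on unsorted input gives unreliable results,
             * which for group_info means wrongly denied membership. */
            printf("unsorted: %s\n",
                   bsearch(&key, gids, 4, sizeof(int), cmp) ? "hit" : "miss");

            qsort(gids, 4, sizeof(int), cmp);   /* what groups_sort() does */
            printf("sorted:   %s\n",
                   bsearch(&key, gids, 4, sizeof(int), cmp) ? "hit" : "miss");
            return 0;
    }
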
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
index cbfc196e5dc5..5ac415466861 100644
--- a/fs/overlayfs/Kconfig
+++ b/fs/overlayfs/Kconfig
@@ -24,6 +24,16 @@ config OVERLAY_FS_REDIRECT_DIR
24 an overlay which has redirects on a kernel that doesn't support this 24 an overlay which has redirects on a kernel that doesn't support this
25 feature will have unexpected results. 25 feature will have unexpected results.
26 26
27config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW
28 bool "Overlayfs: follow redirects even if redirects are turned off"
29 default y
30 depends on OVERLAY_FS
31 help
 32 Disable this to get a possibly more secure configuration, but one
 33 that might not be backward compatible with previous kernels.
34
35 For more information, see Documentation/filesystems/overlayfs.txt
36
27config OVERLAY_FS_INDEX 37config OVERLAY_FS_INDEX
28 bool "Overlayfs: turn on inodes index feature by default" 38 bool "Overlayfs: turn on inodes index feature by default"
29 depends on OVERLAY_FS 39 depends on OVERLAY_FS
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index e13921824c70..f9788bc116a8 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -887,7 +887,8 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir)
887 spin_unlock(&dentry->d_lock); 887 spin_unlock(&dentry->d_lock);
888 } else { 888 } else {
889 kfree(redirect); 889 kfree(redirect);
890 pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); 890 pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n",
891 err);
891 /* Fall back to userspace copy-up */ 892 /* Fall back to userspace copy-up */
892 err = -EXDEV; 893 err = -EXDEV;
893 } 894 }
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 625ed8066570..beb945e1963c 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -435,7 +435,7 @@ int ovl_verify_index(struct dentry *index, struct ovl_path *lower,
435 435
436 /* Check if index is orphan and don't warn before cleaning it */ 436 /* Check if index is orphan and don't warn before cleaning it */
437 if (d_inode(index)->i_nlink == 1 && 437 if (d_inode(index)->i_nlink == 1 &&
438 ovl_get_nlink(index, origin.dentry, 0) == 0) 438 ovl_get_nlink(origin.dentry, index, 0) == 0)
439 err = -ENOENT; 439 err = -ENOENT;
440 440
441 dput(origin.dentry); 441 dput(origin.dentry);
@@ -681,6 +681,22 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
681 if (d.stop) 681 if (d.stop)
682 break; 682 break;
683 683
684 /*
685 * Following redirects can have security consequences: it's like
686 * a symlink into the lower layer without the permission checks.
 687 * This is only a problem if the upper layer is untrusted (e.g. it
 688 * comes from a USB drive). This can allow a non-readable file
689 * or directory to become readable.
690 *
 691 * Following redirects only when they are explicitly enabled closes
 692 * this attack vector when it is not needed.
693 */
694 err = -EPERM;
695 if (d.redirect && !ofs->config.redirect_follow) {
696 pr_warn_ratelimited("overlay: refusing to follow redirect for (%pd2)\n", dentry);
697 goto out_put;
698 }
699
684 if (d.redirect && d.redirect[0] == '/' && poe != roe) { 700 if (d.redirect && d.redirect[0] == '/' && poe != roe) {
685 poe = roe; 701 poe = roe;
686 702
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 13eab09a6b6f..b489099ccd49 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -180,7 +180,7 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
180static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode) 180static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode)
181{ 181{
182 struct dentry *ret = vfs_tmpfile(dentry, mode, 0); 182 struct dentry *ret = vfs_tmpfile(dentry, mode, 0);
183 int err = IS_ERR(ret) ? PTR_ERR(ret) : 0; 183 int err = PTR_ERR_OR_ZERO(ret);
184 184
185 pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err); 185 pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err);
186 return ret; 186 return ret;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 752bab645879..9d0bc03bf6e4 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -14,6 +14,8 @@ struct ovl_config {
14 char *workdir; 14 char *workdir;
15 bool default_permissions; 15 bool default_permissions;
16 bool redirect_dir; 16 bool redirect_dir;
17 bool redirect_follow;
18 const char *redirect_mode;
17 bool index; 19 bool index;
18}; 20};
19 21
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 0daa4354fec4..8c98578d27a1 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -499,7 +499,7 @@ out:
499 return err; 499 return err;
500 500
501fail: 501fail:
502 pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n", 502 pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n",
503 p->name, err); 503 p->name, err);
504 goto out; 504 goto out;
505} 505}
@@ -663,7 +663,10 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
663 return PTR_ERR(rdt.cache); 663 return PTR_ERR(rdt.cache);
664 } 664 }
665 665
666 return iterate_dir(od->realfile, &rdt.ctx); 666 err = iterate_dir(od->realfile, &rdt.ctx);
667 ctx->pos = rdt.ctx.pos;
668
669 return err;
667} 670}
668 671
669 672
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 288d20f9a55a..76440feb79f6 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -33,6 +33,13 @@ module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644);
33MODULE_PARM_DESC(ovl_redirect_dir_def, 33MODULE_PARM_DESC(ovl_redirect_dir_def,
34 "Default to on or off for the redirect_dir feature"); 34 "Default to on or off for the redirect_dir feature");
35 35
36static bool ovl_redirect_always_follow =
37 IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW);
38module_param_named(redirect_always_follow, ovl_redirect_always_follow,
39 bool, 0644);
40MODULE_PARM_DESC(ovl_redirect_always_follow,
41 "Follow redirects even if redirect_dir feature is turned off");
42
36static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX); 43static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX);
37module_param_named(index, ovl_index_def, bool, 0644); 44module_param_named(index, ovl_index_def, bool, 0644);
38MODULE_PARM_DESC(ovl_index_def, 45MODULE_PARM_DESC(ovl_index_def,
@@ -232,6 +239,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
232 kfree(ofs->config.lowerdir); 239 kfree(ofs->config.lowerdir);
233 kfree(ofs->config.upperdir); 240 kfree(ofs->config.upperdir);
234 kfree(ofs->config.workdir); 241 kfree(ofs->config.workdir);
242 kfree(ofs->config.redirect_mode);
235 if (ofs->creator_cred) 243 if (ofs->creator_cred)
236 put_cred(ofs->creator_cred); 244 put_cred(ofs->creator_cred);
237 kfree(ofs); 245 kfree(ofs);
@@ -244,6 +252,7 @@ static void ovl_put_super(struct super_block *sb)
244 ovl_free_fs(ofs); 252 ovl_free_fs(ofs);
245} 253}
246 254
255/* Sync real dirty inodes in upper filesystem (if it exists) */
247static int ovl_sync_fs(struct super_block *sb, int wait) 256static int ovl_sync_fs(struct super_block *sb, int wait)
248{ 257{
249 struct ovl_fs *ofs = sb->s_fs_info; 258 struct ovl_fs *ofs = sb->s_fs_info;
@@ -252,14 +261,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
252 261
253 if (!ofs->upper_mnt) 262 if (!ofs->upper_mnt)
254 return 0; 263 return 0;
255 upper_sb = ofs->upper_mnt->mnt_sb; 264
256 if (!upper_sb->s_op->sync_fs) 265 /*
266 * If this is a sync(2) call or an emergency sync, all the super blocks
267 * will be iterated, including upper_sb, so no need to do anything.
268 *
269 * If this is a syncfs(2) call, then we do need to call
 270 * sync_filesystem() on upper_sb, but it is enough to do so when
 271 * we are called with wait == 1.
272 */
273 if (!wait)
257 return 0; 274 return 0;
258 275
259 /* real inodes have already been synced by sync_filesystem(ovl_sb) */ 276 upper_sb = ofs->upper_mnt->mnt_sb;
277
260 down_read(&upper_sb->s_umount); 278 down_read(&upper_sb->s_umount);
261 ret = upper_sb->s_op->sync_fs(upper_sb, wait); 279 ret = sync_filesystem(upper_sb);
262 up_read(&upper_sb->s_umount); 280 up_read(&upper_sb->s_umount);
281
263 return ret; 282 return ret;
264} 283}
265 284
@@ -295,6 +314,11 @@ static bool ovl_force_readonly(struct ovl_fs *ofs)
295 return (!ofs->upper_mnt || !ofs->workdir); 314 return (!ofs->upper_mnt || !ofs->workdir);
296} 315}
297 316
317static const char *ovl_redirect_mode_def(void)
318{
319 return ovl_redirect_dir_def ? "on" : "off";
320}
321
298/** 322/**
299 * ovl_show_options 323 * ovl_show_options
300 * 324 *
@@ -313,12 +337,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
313 } 337 }
314 if (ofs->config.default_permissions) 338 if (ofs->config.default_permissions)
315 seq_puts(m, ",default_permissions"); 339 seq_puts(m, ",default_permissions");
316 if (ofs->config.redirect_dir != ovl_redirect_dir_def) 340 if (strcmp(ofs->config.redirect_mode, ovl_redirect_mode_def()) != 0)
317 seq_printf(m, ",redirect_dir=%s", 341 seq_printf(m, ",redirect_dir=%s", ofs->config.redirect_mode);
318 ofs->config.redirect_dir ? "on" : "off");
319 if (ofs->config.index != ovl_index_def) 342 if (ofs->config.index != ovl_index_def)
320 seq_printf(m, ",index=%s", 343 seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off");
321 ofs->config.index ? "on" : "off");
322 return 0; 344 return 0;
323} 345}
324 346
@@ -348,8 +370,7 @@ enum {
348 OPT_UPPERDIR, 370 OPT_UPPERDIR,
349 OPT_WORKDIR, 371 OPT_WORKDIR,
350 OPT_DEFAULT_PERMISSIONS, 372 OPT_DEFAULT_PERMISSIONS,
351 OPT_REDIRECT_DIR_ON, 373 OPT_REDIRECT_DIR,
352 OPT_REDIRECT_DIR_OFF,
353 OPT_INDEX_ON, 374 OPT_INDEX_ON,
354 OPT_INDEX_OFF, 375 OPT_INDEX_OFF,
355 OPT_ERR, 376 OPT_ERR,
@@ -360,8 +381,7 @@ static const match_table_t ovl_tokens = {
360 {OPT_UPPERDIR, "upperdir=%s"}, 381 {OPT_UPPERDIR, "upperdir=%s"},
361 {OPT_WORKDIR, "workdir=%s"}, 382 {OPT_WORKDIR, "workdir=%s"},
362 {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, 383 {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
363 {OPT_REDIRECT_DIR_ON, "redirect_dir=on"}, 384 {OPT_REDIRECT_DIR, "redirect_dir=%s"},
364 {OPT_REDIRECT_DIR_OFF, "redirect_dir=off"},
365 {OPT_INDEX_ON, "index=on"}, 385 {OPT_INDEX_ON, "index=on"},
366 {OPT_INDEX_OFF, "index=off"}, 386 {OPT_INDEX_OFF, "index=off"},
367 {OPT_ERR, NULL} 387 {OPT_ERR, NULL}
@@ -390,10 +410,37 @@ static char *ovl_next_opt(char **s)
390 return sbegin; 410 return sbegin;
391} 411}
392 412
413static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode)
414{
415 if (strcmp(mode, "on") == 0) {
416 config->redirect_dir = true;
417 /*
418 * Does not make sense to have redirect creation without
419 * redirect following.
420 */
421 config->redirect_follow = true;
422 } else if (strcmp(mode, "follow") == 0) {
423 config->redirect_follow = true;
424 } else if (strcmp(mode, "off") == 0) {
425 if (ovl_redirect_always_follow)
426 config->redirect_follow = true;
427 } else if (strcmp(mode, "nofollow") != 0) {
428 pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n",
429 mode);
430 return -EINVAL;
431 }
432
433 return 0;
434}
435
393static int ovl_parse_opt(char *opt, struct ovl_config *config) 436static int ovl_parse_opt(char *opt, struct ovl_config *config)
394{ 437{
395 char *p; 438 char *p;
396 439
440 config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
441 if (!config->redirect_mode)
442 return -ENOMEM;
443
397 while ((p = ovl_next_opt(&opt)) != NULL) { 444 while ((p = ovl_next_opt(&opt)) != NULL) {
398 int token; 445 int token;
399 substring_t args[MAX_OPT_ARGS]; 446 substring_t args[MAX_OPT_ARGS];
@@ -428,12 +475,11 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
428 config->default_permissions = true; 475 config->default_permissions = true;
429 break; 476 break;
430 477
431 case OPT_REDIRECT_DIR_ON: 478 case OPT_REDIRECT_DIR:
432 config->redirect_dir = true; 479 kfree(config->redirect_mode);
433 break; 480 config->redirect_mode = match_strdup(&args[0]);
434 481 if (!config->redirect_mode)
435 case OPT_REDIRECT_DIR_OFF: 482 return -ENOMEM;
436 config->redirect_dir = false;
437 break; 483 break;
438 484
439 case OPT_INDEX_ON: 485 case OPT_INDEX_ON:
@@ -458,7 +504,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
458 config->workdir = NULL; 504 config->workdir = NULL;
459 } 505 }
460 506
461 return 0; 507 return ovl_parse_redirect_mode(config, config->redirect_mode);
462} 508}
463 509
464#define OVL_WORKDIR_NAME "work" 510#define OVL_WORKDIR_NAME "work"
@@ -1160,7 +1206,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
1160 if (!cred) 1206 if (!cred)
1161 goto out_err; 1207 goto out_err;
1162 1208
1163 ofs->config.redirect_dir = ovl_redirect_dir_def;
1164 ofs->config.index = ovl_index_def; 1209 ofs->config.index = ovl_index_def;
1165 err = ovl_parse_opt((char *) data, &ofs->config); 1210 err = ovl_parse_opt((char *) data, &ofs->config);
1166 if (err) 1211 if (err)
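
The rewritten parser above turns redirect_dir into a four-way mode: "on" creates and follows redirects, "follow" follows existing ones without creating new ones, "nofollow" ignores them, and "off" behaves like "nofollow" unless the kernel is configured to always follow. A minimal userspace sketch (paths are placeholders) exercising one of the new mode strings via mount(2):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* "follow" honors existing redirect xattrs but never
             * creates new ones. */
            if (mount("overlay", "/mnt/merged", "overlay", 0,
                      "lowerdir=/mnt/lower,upperdir=/mnt/upper,"
                      "workdir=/mnt/work,redirect_dir=follow"))
                    perror("mount");
            return 0;
    }
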
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index de3f04a98656..3b57ef0f2f76 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -920,8 +920,7 @@ STATIC xfs_agnumber_t
920xfs_ialloc_ag_select( 920xfs_ialloc_ag_select(
921 xfs_trans_t *tp, /* transaction pointer */ 921 xfs_trans_t *tp, /* transaction pointer */
922 xfs_ino_t parent, /* parent directory inode number */ 922 xfs_ino_t parent, /* parent directory inode number */
923 umode_t mode, /* bits set to indicate file type */ 923 umode_t mode) /* bits set to indicate file type */
924 int okalloc) /* ok to allocate more space */
925{ 924{
926 xfs_agnumber_t agcount; /* number of ag's in the filesystem */ 925 xfs_agnumber_t agcount; /* number of ag's in the filesystem */
927 xfs_agnumber_t agno; /* current ag number */ 926 xfs_agnumber_t agno; /* current ag number */
@@ -978,9 +977,6 @@ xfs_ialloc_ag_select(
978 return agno; 977 return agno;
979 } 978 }
980 979
981 if (!okalloc)
982 goto nextag;
983
984 if (!pag->pagf_init) { 980 if (!pag->pagf_init) {
985 error = xfs_alloc_pagf_init(mp, tp, agno, flags); 981 error = xfs_alloc_pagf_init(mp, tp, agno, flags);
986 if (error) 982 if (error)
@@ -1680,7 +1676,6 @@ xfs_dialloc(
1680 struct xfs_trans *tp, 1676 struct xfs_trans *tp,
1681 xfs_ino_t parent, 1677 xfs_ino_t parent,
1682 umode_t mode, 1678 umode_t mode,
1683 int okalloc,
1684 struct xfs_buf **IO_agbp, 1679 struct xfs_buf **IO_agbp,
1685 xfs_ino_t *inop) 1680 xfs_ino_t *inop)
1686{ 1681{
@@ -1692,6 +1687,7 @@ xfs_dialloc(
1692 int noroom = 0; 1687 int noroom = 0;
1693 xfs_agnumber_t start_agno; 1688 xfs_agnumber_t start_agno;
1694 struct xfs_perag *pag; 1689 struct xfs_perag *pag;
1690 int okalloc = 1;
1695 1691
1696 if (*IO_agbp) { 1692 if (*IO_agbp) {
1697 /* 1693 /*
@@ -1707,7 +1703,7 @@ xfs_dialloc(
1707 * We do not have an agbp, so select an initial allocation 1703 * We do not have an agbp, so select an initial allocation
1708 * group for inode allocation. 1704 * group for inode allocation.
1709 */ 1705 */
1710 start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc); 1706 start_agno = xfs_ialloc_ag_select(tp, parent, mode);
1711 if (start_agno == NULLAGNUMBER) { 1707 if (start_agno == NULLAGNUMBER) {
1712 *inop = NULLFSINO; 1708 *inop = NULLFSINO;
1713 return 0; 1709 return 0;
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index d2bdcd5e7312..66a8de0b1caa 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -81,7 +81,6 @@ xfs_dialloc(
81 struct xfs_trans *tp, /* transaction pointer */ 81 struct xfs_trans *tp, /* transaction pointer */
82 xfs_ino_t parent, /* parent inode (directory) */ 82 xfs_ino_t parent, /* parent inode (directory) */
83 umode_t mode, /* mode bits for new inode */ 83 umode_t mode, /* mode bits for new inode */
84 int okalloc, /* ok to allocate more space */
85 struct xfs_buf **agbp, /* buf for a.g. inode header */ 84 struct xfs_buf **agbp, /* buf for a.g. inode header */
86 xfs_ino_t *inop); /* inode number allocated */ 85 xfs_ino_t *inop); /* inode number allocated */
87 86
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 9c42c4efd01e..ab3aef2ae823 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -46,7 +46,6 @@
46#include "scrub/scrub.h" 46#include "scrub/scrub.h"
47#include "scrub/common.h" 47#include "scrub/common.h"
48#include "scrub/trace.h" 48#include "scrub/trace.h"
49#include "scrub/scrub.h"
50#include "scrub/btree.h" 49#include "scrub/btree.h"
51 50
52/* 51/*
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c
index 472080e75788..86daed0e3a45 100644
--- a/fs/xfs/scrub/trace.c
+++ b/fs/xfs/scrub/trace.c
@@ -26,7 +26,6 @@
26#include "xfs_mount.h" 26#include "xfs_mount.h"
27#include "xfs_defer.h" 27#include "xfs_defer.h"
28#include "xfs_da_format.h" 28#include "xfs_da_format.h"
29#include "xfs_defer.h"
30#include "xfs_inode.h" 29#include "xfs_inode.h"
31#include "xfs_btree.h" 30#include "xfs_btree.h"
32#include "xfs_trans.h" 31#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 801274126648..b41952a4ddd8 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -749,7 +749,6 @@ xfs_ialloc(
749 xfs_nlink_t nlink, 749 xfs_nlink_t nlink,
750 dev_t rdev, 750 dev_t rdev,
751 prid_t prid, 751 prid_t prid,
752 int okalloc,
753 xfs_buf_t **ialloc_context, 752 xfs_buf_t **ialloc_context,
754 xfs_inode_t **ipp) 753 xfs_inode_t **ipp)
755{ 754{
@@ -765,7 +764,7 @@ xfs_ialloc(
765 * Call the space management code to pick 764 * Call the space management code to pick
766 * the on-disk inode to be allocated. 765 * the on-disk inode to be allocated.
767 */ 766 */
768 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, 767 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
769 ialloc_context, &ino); 768 ialloc_context, &ino);
770 if (error) 769 if (error)
771 return error; 770 return error;
@@ -957,7 +956,6 @@ xfs_dir_ialloc(
957 xfs_nlink_t nlink, 956 xfs_nlink_t nlink,
958 dev_t rdev, 957 dev_t rdev,
959 prid_t prid, /* project id */ 958 prid_t prid, /* project id */
960 int okalloc, /* ok to allocate new space */
961 xfs_inode_t **ipp, /* pointer to inode; it will be 959 xfs_inode_t **ipp, /* pointer to inode; it will be
962 locked. */ 960 locked. */
963 int *committed) 961 int *committed)
@@ -988,8 +986,8 @@ xfs_dir_ialloc(
988 * transaction commit so that no other process can steal 986 * transaction commit so that no other process can steal
989 * the inode(s) that we've just allocated. 987 * the inode(s) that we've just allocated.
990 */ 988 */
991 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc, 989 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
992 &ialloc_context, &ip); 990 &ip);
993 991
994 /* 992 /*
995 * Return an error if we were unable to allocate a new inode. 993 * Return an error if we were unable to allocate a new inode.
@@ -1061,7 +1059,7 @@ xfs_dir_ialloc(
1061 * this call should always succeed. 1059 * this call should always succeed.
1062 */ 1060 */
1063 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, 1061 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
1064 okalloc, &ialloc_context, &ip); 1062 &ialloc_context, &ip);
1065 1063
1066 /* 1064 /*
1067 * If we get an error at this point, return to the caller 1065 * If we get an error at this point, return to the caller
@@ -1182,11 +1180,6 @@ xfs_create(
1182 xfs_flush_inodes(mp); 1180 xfs_flush_inodes(mp);
1183 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 1181 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1184 } 1182 }
1185 if (error == -ENOSPC) {
1186 /* No space at all so try a "no-allocation" reservation */
1187 resblks = 0;
1188 error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
1189 }
1190 if (error) 1183 if (error)
1191 goto out_release_inode; 1184 goto out_release_inode;
1192 1185
@@ -1203,19 +1196,13 @@ xfs_create(
1203 if (error) 1196 if (error)
1204 goto out_trans_cancel; 1197 goto out_trans_cancel;
1205 1198
1206 if (!resblks) {
1207 error = xfs_dir_canenter(tp, dp, name);
1208 if (error)
1209 goto out_trans_cancel;
1210 }
1211
1212 /* 1199 /*
1213 * A newly created regular or special file just has one directory 1200 * A newly created regular or special file just has one directory
1214 * entry pointing to them, but a directory also has the "." entry 1201 * entry pointing to them, but a directory also has the "." entry
1215 * pointing to itself. 1202 * pointing to itself.
1216 */ 1203 */
1217 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, 1204 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip,
1218 prid, resblks > 0, &ip, NULL); 1205 NULL);
1219 if (error) 1206 if (error)
1220 goto out_trans_cancel; 1207 goto out_trans_cancel;
1221 1208
@@ -1340,11 +1327,6 @@ xfs_create_tmpfile(
1340 tres = &M_RES(mp)->tr_create_tmpfile; 1327 tres = &M_RES(mp)->tr_create_tmpfile;
1341 1328
1342 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); 1329 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1343 if (error == -ENOSPC) {
1344 /* No space at all so try a "no-allocation" reservation */
1345 resblks = 0;
1346 error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
1347 }
1348 if (error) 1330 if (error)
1349 goto out_release_inode; 1331 goto out_release_inode;
1350 1332
@@ -1353,8 +1335,7 @@ xfs_create_tmpfile(
1353 if (error) 1335 if (error)
1354 goto out_trans_cancel; 1336 goto out_trans_cancel;
1355 1337
1356 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, 1338 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL);
1357 prid, resblks > 0, &ip, NULL);
1358 if (error) 1339 if (error)
1359 goto out_trans_cancel; 1340 goto out_trans_cancel;
1360 1341
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index cc13c3763721..b2136af9289f 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -428,7 +428,7 @@ xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
428xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip); 428xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
429 429
430int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t, 430int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
431 xfs_nlink_t, dev_t, prid_t, int, 431 xfs_nlink_t, dev_t, prid_t,
432 struct xfs_inode **, int *); 432 struct xfs_inode **, int *);
433 433
434/* from xfs_file.c */ 434/* from xfs_file.c */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 33eb4fb2e3fd..7ab52a8bc0a9 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1213,7 +1213,7 @@ xfs_xattr_iomap_begin(
1213 1213
1214 ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); 1214 ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
1215 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, 1215 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
1216 &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK); 1216 &nimaps, XFS_BMAPI_ATTRFORK);
1217out_unlock: 1217out_unlock:
1218 xfs_iunlock(ip, lockmode); 1218 xfs_iunlock(ip, lockmode);
1219 1219
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 010a13a201aa..ec952dfad359 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -793,8 +793,8 @@ xfs_qm_qino_alloc(
793 return error; 793 return error;
794 794
795 if (need_alloc) { 795 if (need_alloc) {
796 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, 796 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip,
797 &committed); 797 &committed);
798 if (error) { 798 if (error) {
799 xfs_trans_cancel(tp); 799 xfs_trans_cancel(tp);
800 return error; 800 return error;
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index cc041a29eb70..cf7c8f81bebb 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -49,8 +49,6 @@
49#include "xfs_alloc.h" 49#include "xfs_alloc.h"
50#include "xfs_quota_defs.h" 50#include "xfs_quota_defs.h"
51#include "xfs_quota.h" 51#include "xfs_quota.h"
52#include "xfs_btree.h"
53#include "xfs_bmap_btree.h"
54#include "xfs_reflink.h" 52#include "xfs_reflink.h"
55#include "xfs_iomap.h" 53#include "xfs_iomap.h"
56#include "xfs_rmap_btree.h" 54#include "xfs_rmap_btree.h"
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 68d3ca2c4968..2e9e793a8f9d 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -232,11 +232,6 @@ xfs_symlink(
232 resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); 232 resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
233 233
234 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp); 234 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
235 if (error == -ENOSPC && fs_blocks == 0) {
236 resblks = 0;
237 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
238 &tp);
239 }
240 if (error) 235 if (error)
241 goto out_release_inode; 236 goto out_release_inode;
242 237
@@ -260,14 +255,6 @@ xfs_symlink(
260 goto out_trans_cancel; 255 goto out_trans_cancel;
261 256
262 /* 257 /*
263 * Check for ability to enter directory entry, if no space reserved.
264 */
265 if (!resblks) {
266 error = xfs_dir_canenter(tp, dp, link_name);
267 if (error)
268 goto out_trans_cancel;
269 }
270 /*
271 * Initialize the bmap freelist prior to calling either 258 * Initialize the bmap freelist prior to calling either
272 * bmapi or the directory create code. 259 * bmapi or the directory create code.
273 */ 260 */
@@ -277,7 +264,7 @@ xfs_symlink(
277 * Allocate an inode for the symlink. 264 * Allocate an inode for the symlink.
278 */ 265 */
279 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, 266 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
280 prid, resblks > 0, &ip, NULL); 267 prid, &ip, NULL);
281 if (error) 268 if (error)
282 goto out_trans_cancel; 269 goto out_trans_cancel;
283 270
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 5d95fe348294..35f3546b6af5 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -24,7 +24,6 @@
24#include "xfs_mount.h" 24#include "xfs_mount.h"
25#include "xfs_defer.h" 25#include "xfs_defer.h"
26#include "xfs_da_format.h" 26#include "xfs_da_format.h"
27#include "xfs_defer.h"
28#include "xfs_inode.h" 27#include "xfs_inode.h"
29#include "xfs_btree.h" 28#include "xfs_btree.h"
30#include "xfs_da_btree.h" 29#include "xfs_da_btree.h"
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index f0b44c16e88f..c2bae8da642c 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
82 struct ahash_instance *inst); 82 struct ahash_instance *inst);
83void ahash_free_instance(struct crypto_instance *inst); 83void ahash_free_instance(struct crypto_instance *inst);
84 84
85int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
86 unsigned int keylen);
87
88static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
89{
90 return alg->setkey != shash_no_setkey;
91}
92
85int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, 93int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
86 struct hash_alg_common *alg, 94 struct hash_alg_common *alg,
87 struct crypto_instance *inst); 95 struct crypto_instance *inst);
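
crypto_shash_alg_has_setkey() gives templates a way to tell keyed shash algorithms from keyless ones, which still point ->setkey at the shash_no_setkey stub. A hedged sketch of the kind of guard a keyed template could add (the function name is illustrative):

    #include <crypto/internal/hash.h>

    static int example_require_keyed_base(struct shash_alg *alg)
    {
            /* Keyless digests keep the shash_no_setkey stub; reject
             * them where a key-consuming base algorithm is required. */
            if (!crypto_shash_alg_has_setkey(alg))
                    return -EINVAL;
            return 0;
    }
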
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index a4649c56ca2f..5971577016a2 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -24,6 +24,7 @@
24#define __DRM_CONNECTOR_H__ 24#define __DRM_CONNECTOR_H__
25 25
26#include <linux/list.h> 26#include <linux/list.h>
27#include <linux/llist.h>
27#include <linux/ctype.h> 28#include <linux/ctype.h>
28#include <linux/hdmi.h> 29#include <linux/hdmi.h>
29#include <drm/drm_mode_object.h> 30#include <drm/drm_mode_object.h>
@@ -918,12 +919,13 @@ struct drm_connector {
918 uint16_t tile_h_size, tile_v_size; 919 uint16_t tile_h_size, tile_v_size;
919 920
920 /** 921 /**
921 * @free_work: 922 * @free_node:
922 * 923 *
923 * Work used only by &drm_connector_iter to be able to clean up a 924 * List used only by &drm_connector_iter to be able to clean up a
924 * connector from any context. 925 * connector from any context, in conjunction with
926 * &drm_mode_config.connector_free_work.
925 */ 927 */
926 struct work_struct free_work; 928 struct llist_node free_node;
927}; 929};
928 930
929#define obj_to_connector(x) container_of(x, struct drm_connector, base) 931#define obj_to_connector(x) container_of(x, struct drm_connector, base)
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 2ec41d032e56..efe6d5a8e834 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -465,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector,
465struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, 465struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
466 struct i2c_adapter *adapter); 466 struct i2c_adapter *adapter);
467struct edid *drm_edid_duplicate(const struct edid *edid); 467struct edid *drm_edid_duplicate(const struct edid *edid);
468void drm_reset_display_info(struct drm_connector *connector);
469u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
468int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 470int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
469 471
470u8 drm_match_cea_mode(const struct drm_display_mode *to_match); 472u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index b21e827c5c78..b0ce26d71296 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -27,6 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/idr.h> 28#include <linux/idr.h>
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/llist.h>
30 31
31#include <drm/drm_modeset_lock.h> 32#include <drm/drm_modeset_lock.h>
32 33
@@ -393,7 +394,7 @@ struct drm_mode_config {
393 394
394 /** 395 /**
395 * @connector_list_lock: Protects @num_connector and 396 * @connector_list_lock: Protects @num_connector and
396 * @connector_list. 397 * @connector_list and @connector_free_list.
397 */ 398 */
398 spinlock_t connector_list_lock; 399 spinlock_t connector_list_lock;
399 /** 400 /**
@@ -414,6 +415,21 @@ struct drm_mode_config {
414 */ 415 */
415 struct list_head connector_list; 416 struct list_head connector_list;
416 /** 417 /**
418 * @connector_free_list:
419 *
 420 * List of connector objects linked with &drm_connector.free_node.
 421 * Protected by @connector_list_lock. Used by
 422 * drm_for_each_connector_iter() and
 423 * &struct drm_connector_list_iter to safely free connectors using
424 * @connector_free_work.
425 */
426 struct llist_head connector_free_list;
427 /**
428 * @connector_free_work: Work to clean up @connector_free_list.
429 */
430 struct work_struct connector_free_work;
431
432 /**
417 * @num_encoder: 433 * @num_encoder:
418 * 434 *
419 * Number of encoders on this device. This is invariant over the 435 * Number of encoders on this device. This is invariant over the
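
The connector_free_list/connector_free_work pair above is the usual lock-free deferred-free idiom: any context pushes onto an llist, and a worker drains the whole list in one shot. A generic sketch under assumed names (struct obj and obj_defer_free are illustrative, not DRM API):

    #include <linux/llist.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct obj {
            struct llist_node free_node;
    };

    static LLIST_HEAD(free_list);

    static void free_work_fn(struct work_struct *work)
    {
            struct llist_node *freed = llist_del_all(&free_list);
            struct obj *o, *tmp;

            llist_for_each_entry_safe(o, tmp, freed, free_node)
                    kfree(o);
    }
    static DECLARE_WORK(free_work, free_work_fn);

    /* Safe from any context: llist_add() is lock-free. */
    static void obj_defer_free(struct obj *o)
    {
            llist_add(&o->free_node, &free_list);
            schedule_work(&free_work);
    }

That llist_add() needs no lock is what lets a connector iterator drop its last reference from atomic context.
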
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 01ee473517e2..6e45608b2399 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -93,7 +93,4 @@ void kvm_timer_init_vhe(void);
93#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) 93#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
94#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) 94#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
95 95
96void enable_el1_phys_timer_access(void);
97void disable_el1_phys_timer_access(void);
98
99#endif 96#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 188ed9f65517..52e611ab9a6c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
220/* 220/*
221 * Prevent the compiler from merging or refetching reads or writes. The 221 * Prevent the compiler from merging or refetching reads or writes. The
222 * compiler is also forbidden from reordering successive instances of 222 * compiler is also forbidden from reordering successive instances of
223 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 223 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
224 * compiler is aware of some particular ordering. One way to make the 224 * particular ordering. One way to make the compiler aware of ordering is to
225 * compiler aware of ordering is to put the two invocations of READ_ONCE, 225 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
226 * WRITE_ONCE or ACCESS_ONCE() in different C statements. 226 * statements.
227 * 227 *
228 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 228 * These two macros will also work on aggregate data types like structs or
229 * data types like structs or unions. If the size of the accessed data 229 * unions. If the size of the accessed data type exceeds the word size of
230 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 230 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
231 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at 231 * fall back to memcpy(). There's at least two memcpy()s: one for the
232 * least two memcpy()s: one for the __builtin_memcpy() and then one for 232 * __builtin_memcpy() and then one for the macro doing the copy of variable
233 * the macro doing the copy of variable - '__u' allocated on the stack. 233 * - '__u' allocated on the stack.
234 * 234 *
235 * Their two major use cases are: (1) Mediating communication between 235 * Their two major use cases are: (1) Mediating communication between
236 * process-level code and irq/NMI handlers, all running on the same CPU, 236 * process-level code and irq/NMI handlers, all running on the same CPU,
237 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise 237 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
238 * mutilate accesses that either do not require ordering or that interact 238 * mutilate accesses that either do not require ordering or that interact
239 * with an explicit memory barrier or atomic instruction that provides the 239 * with an explicit memory barrier or atomic instruction that provides the
240 * required ordering. 240 * required ordering.
@@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
327 compiletime_assert(__native_word(t), \ 327 compiletime_assert(__native_word(t), \
328 "Need native word sized stores/loads for atomicity.") 328 "Need native word sized stores/loads for atomicity.")
329 329
330/*
331 * Prevent the compiler from merging or refetching accesses. The compiler
332 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
333 * but only when the compiler is aware of some particular ordering. One way
334 * to make the compiler aware of ordering is to put the two invocations of
335 * ACCESS_ONCE() in different C statements.
336 *
337 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
338 * on a union member will work as long as the size of the member matches the
339 * size of the union and the size is smaller than word size.
340 *
341 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
342 * between process-level code and irq/NMI handlers, all running on the same CPU,
343 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
344 * mutilate accesses that either do not require ordering or that interact
345 * with an explicit memory barrier or atomic instruction that provides the
346 * required ordering.
347 *
348 * If possible use READ_ONCE()/WRITE_ONCE() instead.
349 */
350#define __ACCESS_ONCE(x) ({ \
351 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
352 (volatile typeof(x) *)&(x); })
353#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
354
355#endif /* __LINUX_COMPILER_H */ 330#endif /* __LINUX_COMPILER_H */
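
With ACCESS_ONCE() removed, conversions are mechanical: READ_ONCE()/WRITE_ONCE() replace it one-for-one and additionally handle aggregate types. A minimal sketch (flag is an illustrative variable):

    #include <linux/compiler.h>
    #include <asm/processor.h>      /* cpu_relax() */

    static int flag;

    static void producer(void)
    {
            WRITE_ONCE(flag, 1);            /* was: ACCESS_ONCE(flag) = 1; */
    }

    static void consumer(void)
    {
            while (!READ_ONCE(flag))        /* was: while (!ACCESS_ONCE(flag)) */
                    cpu_relax();
    }
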
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 0662a417febe..94a59ba7d422 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,9 +10,6 @@
10 */ 10 */
11 11
12#include <linux/wait.h> 12#include <linux/wait.h>
13#ifdef CONFIG_LOCKDEP_COMPLETIONS
14#include <linux/lockdep.h>
15#endif
16 13
17/* 14/*
18 * struct completion - structure used to maintain state for a "completion" 15 * struct completion - structure used to maintain state for a "completion"
@@ -29,58 +26,16 @@
29struct completion { 26struct completion {
30 unsigned int done; 27 unsigned int done;
31 wait_queue_head_t wait; 28 wait_queue_head_t wait;
32#ifdef CONFIG_LOCKDEP_COMPLETIONS
33 struct lockdep_map_cross map;
34#endif
35}; 29};
36 30
37#ifdef CONFIG_LOCKDEP_COMPLETIONS
38static inline void complete_acquire(struct completion *x)
39{
40 lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_);
41}
42
43static inline void complete_release(struct completion *x)
44{
45 lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_);
46}
47
48static inline void complete_release_commit(struct completion *x)
49{
50 lock_commit_crosslock((struct lockdep_map *)&x->map);
51}
52
53#define init_completion_map(x, m) \
54do { \
55 lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
56 (m)->name, (m)->key, 0); \
57 __init_completion(x); \
58} while (0)
59
60#define init_completion(x) \
61do { \
62 static struct lock_class_key __key; \
63 lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
64 "(completion)" #x, \
65 &__key, 0); \
66 __init_completion(x); \
67} while (0)
68#else
69#define init_completion_map(x, m) __init_completion(x) 31#define init_completion_map(x, m) __init_completion(x)
70#define init_completion(x) __init_completion(x) 32#define init_completion(x) __init_completion(x)
71static inline void complete_acquire(struct completion *x) {} 33static inline void complete_acquire(struct completion *x) {}
72static inline void complete_release(struct completion *x) {} 34static inline void complete_release(struct completion *x) {}
73static inline void complete_release_commit(struct completion *x) {} 35static inline void complete_release_commit(struct completion *x) {}
74#endif
75 36
76#ifdef CONFIG_LOCKDEP_COMPLETIONS
77#define COMPLETION_INITIALIZER(work) \
78 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
79 STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
80#else
81#define COMPLETION_INITIALIZER(work) \ 37#define COMPLETION_INITIALIZER(work) \
82 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } 38 { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
83#endif
84 39
85#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ 40#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
86 (*({ init_completion_map(&(work), &(map)); &(work); })) 41 (*({ init_completion_map(&(work), &(map)); &(work); }))
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 099058e1178b..631286535d0f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *);
83extern void set_groups(struct cred *, struct group_info *); 83extern void set_groups(struct cred *, struct group_info *);
84extern int groups_search(const struct group_info *, kgid_t); 84extern int groups_search(const struct group_info *, kgid_t);
85extern bool may_setgroups(void); 85extern bool may_setgroups(void);
86extern void groups_sort(struct group_info *);
86 87
87/* 88/*
88 * The security context of a task 89 * The security context of a task
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 7c3a365f7e12..fa14f834e4ed 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
15#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18#include <linux/bug.h>
18 19
19struct idr { 20struct idr {
20 struct radix_tree_root idr_rt; 21 struct radix_tree_root idr_rt;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 893d6d606cd0..6bdd4b9f6611 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -232,7 +232,7 @@ struct kvm_vcpu {
232 struct mutex mutex; 232 struct mutex mutex;
233 struct kvm_run *run; 233 struct kvm_run *run;
234 234
235 int guest_fpu_loaded, guest_xcr0_loaded; 235 int guest_xcr0_loaded;
236 struct swait_queue_head wq; 236 struct swait_queue_head wq;
237 struct pid __rcu *pid; 237 struct pid __rcu *pid;
238 int sigset_active; 238 int sigset_active;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index a842551fe044..2e75dc34bff5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -158,12 +158,6 @@ struct lockdep_map {
158 int cpu; 158 int cpu;
159 unsigned long ip; 159 unsigned long ip;
160#endif 160#endif
161#ifdef CONFIG_LOCKDEP_CROSSRELEASE
162 /*
163 * Whether it's a crosslock.
164 */
165 int cross;
166#endif
167}; 161};
168 162
169static inline void lockdep_copy_map(struct lockdep_map *to, 163static inline void lockdep_copy_map(struct lockdep_map *to,
@@ -267,96 +261,9 @@ struct held_lock {
267 unsigned int hardirqs_off:1; 261 unsigned int hardirqs_off:1;
268 unsigned int references:12; /* 32 bits */ 262 unsigned int references:12; /* 32 bits */
269 unsigned int pin_count; 263 unsigned int pin_count;
270#ifdef CONFIG_LOCKDEP_CROSSRELEASE
271 /*
272 * Generation id.
273 *
274 * A value of cross_gen_id will be stored when holding this,
275 * which is globally increased whenever each crosslock is held.
276 */
277 unsigned int gen_id;
278#endif
279};
280
281#ifdef CONFIG_LOCKDEP_CROSSRELEASE
282#define MAX_XHLOCK_TRACE_ENTRIES 5
283
284/*
285 * This is for keeping locks waiting for commit so that true dependencies
286 * can be added at commit step.
287 */
288struct hist_lock {
289 /*
290 * Id for each entry in the ring buffer. This is used to
291 * decide whether the ring buffer was overwritten or not.
292 *
293 * For example,
294 *
295 * |<----------- hist_lock ring buffer size ------->|
296 * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
297 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
298 *
299 * where 'p' represents an acquisition in process
300 * context, 'i' represents an acquisition in irq
301 * context.
302 *
303 * In this example, the ring buffer was overwritten by
304 * acquisitions in irq context, that should be detected on
305 * rollback or commit.
306 */
307 unsigned int hist_id;
308
309 /*
310 * Seperate stack_trace data. This will be used at commit step.
311 */
312 struct stack_trace trace;
313 unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES];
314
315 /*
316 * Seperate hlock instance. This will be used at commit step.
317 *
318 * TODO: Use a smaller data structure containing only necessary
319 * data. However, we should make lockdep code able to handle the
320 * smaller one first.
321 */
322 struct held_lock hlock;
323}; 264};
324 265
325/* 266/*
326 * To initialize a lock as crosslock, lockdep_init_map_crosslock() should
327 * be called instead of lockdep_init_map().
328 */
329struct cross_lock {
330 /*
331 * When more than one acquisition of crosslocks are overlapped,
332 * we have to perform commit for them based on cross_gen_id of
333 * the first acquisition, which allows us to add more true
334 * dependencies.
335 *
336 * Moreover, when no acquisition of a crosslock is in progress,
337 * we should not perform commit because the lock might not exist
338 * any more, which might cause incorrect memory access. So we
339 * have to track the number of acquisitions of a crosslock.
340 */
341 int nr_acquire;
342
343 /*
344 * Seperate hlock instance. This will be used at commit step.
345 *
346 * TODO: Use a smaller data structure containing only necessary
347 * data. However, we should make lockdep code able to handle the
348 * smaller one first.
349 */
350 struct held_lock hlock;
351};
352
353struct lockdep_map_cross {
354 struct lockdep_map map;
355 struct cross_lock xlock;
356};
357#endif
358
359/*
360 * Initialization, self-test and debugging-output methods: 267 * Initialization, self-test and debugging-output methods:
361 */ 268 */
362extern void lockdep_info(void); 269extern void lockdep_info(void);
@@ -560,37 +467,6 @@ enum xhlock_context_t {
560 XHLOCK_CTX_NR, 467 XHLOCK_CTX_NR,
561}; 468};
562 469
563#ifdef CONFIG_LOCKDEP_CROSSRELEASE
564extern void lockdep_init_map_crosslock(struct lockdep_map *lock,
565 const char *name,
566 struct lock_class_key *key,
567 int subclass);
568extern void lock_commit_crosslock(struct lockdep_map *lock);
569
570/*
571 * What we essencially have to initialize is 'nr_acquire'. Other members
572 * will be initialized in add_xlock().
573 */
574#define STATIC_CROSS_LOCK_INIT() \
575 { .nr_acquire = 0,}
576
577#define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \
578 { .map.name = (_name), .map.key = (void *)(_key), \
579 .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), }
580
581/*
582 * To initialize a lockdep_map statically use this macro.
583 * Note that _name must not be NULL.
584 */
585#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
586 { .name = (_name), .key = (void *)(_key), .cross = 0, }
587
588extern void crossrelease_hist_start(enum xhlock_context_t c);
589extern void crossrelease_hist_end(enum xhlock_context_t c);
590extern void lockdep_invariant_state(bool force);
591extern void lockdep_init_task(struct task_struct *task);
592extern void lockdep_free_task(struct task_struct *task);
593#else /* !CROSSRELEASE */
594#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) 470#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
595/* 471/*
596 * To initialize a lockdep_map statically use this macro. 472 * To initialize a lockdep_map statically use this macro.
@@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
604static inline void lockdep_invariant_state(bool force) {} 480static inline void lockdep_invariant_state(bool force) {}
605static inline void lockdep_init_task(struct task_struct *task) {} 481static inline void lockdep_init_task(struct task_struct *task) {}
606static inline void lockdep_free_task(struct task_struct *task) {} 482static inline void lockdep_free_task(struct task_struct *task) {}
607#endif /* CROSSRELEASE */
608 483
609#ifdef CONFIG_LOCK_STAT 484#ifdef CONFIG_LOCK_STAT
610 485
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 01c91d874a57..5bad038ac012 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
67} 67}
68 68
69/* 69/*
 70 * Use this helper if tsk->mm != mm and the victim mm needs special
 71 * handling. This is guaranteed to stay true once set.
72 */
73static inline bool mm_is_oom_victim(struct mm_struct *mm)
74{
75 return test_bit(MMF_OOM_VICTIM, &mm->flags);
76}
77
78/*
70 * Checks whether a page fault on the given mm is still reliable. 79 * Checks whether a page fault on the given mm is still reliable.
71 * This is no longer true if the oom reaper started to reap the 80 * This is no longer true if the oom reaper started to reap the
72 * address space which is reflected by MMF_UNSTABLE flag set in 81 * address space which is reflected by MMF_UNSTABLE flag set in
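
mm_is_oom_victim() targets the tsk->mm != mm case called out in the comment; since MMF_OOM_VICTIM is never cleared, a positive test is stable. A hedged sketch of a consumer (the function and its bail-out policy are illustrative):

    static void process_foreign_mm(struct mm_struct *mm)
    {
            /* The OOM reaper may already be unmapping this mm; once
             * the bit is set it stays set, so bailing out is safe. */
            if (mm_is_oom_victim(mm))
                    return;

            /* ... normal processing of the address space ... */
    }
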
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 483b780655bb..0314e0716c30 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1675,6 +1675,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
1675static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, 1675static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1676 unsigned int devfn) 1676 unsigned int devfn)
1677{ return NULL; } 1677{ return NULL; }
1678static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
1679 unsigned int bus, unsigned int devfn)
1680{ return NULL; }
1678 1681
1679static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1682static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1680static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 1683static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 65d39115f06d..492ed473ba7e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev);
765extern int pm_generic_poweroff(struct device *dev); 765extern int pm_generic_poweroff(struct device *dev);
766extern void pm_generic_complete(struct device *dev); 766extern void pm_generic_complete(struct device *dev);
767 767
768extern void dev_pm_skip_next_resume_phases(struct device *dev);
768extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); 769extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
769 770
770#else /* !CONFIG_PM_SLEEP */ 771#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 37b4bb2545b3..6866df4f31b5 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
101 101
102/* Note: callers invoking this in a loop must use a compiler barrier, 102/* Note: callers invoking this in a loop must use a compiler barrier,
103 * for example cpu_relax(). Callers must hold producer_lock. 103 * for example cpu_relax(). Callers must hold producer_lock.
 104 * Callers are responsible for making sure the pointer that is being queued
 105 * points to valid data.
104 */ 106 */
105static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) 107static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
106{ 108{
107 if (unlikely(!r->size) || r->queue[r->producer]) 109 if (unlikely(!r->size) || r->queue[r->producer])
108 return -ENOSPC; 110 return -ENOSPC;
109 111
 112 /* Make sure the pointer we are storing points to valid data. */
113 /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
114 smp_wmb();
115
110 r->queue[r->producer++] = ptr; 116 r->queue[r->producer++] = ptr;
111 if (unlikely(r->producer >= r->size)) 117 if (unlikely(r->producer >= r->size))
112 r->producer = 0; 118 r->producer = 0;
@@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
275 if (ptr) 281 if (ptr)
276 __ptr_ring_discard_one(r); 282 __ptr_ring_discard_one(r);
277 283
284 /* Make sure anyone accessing data through the pointer is up to date. */
285 /* Pairs with smp_wmb in __ptr_ring_produce. */
286 smp_read_barrier_depends();
278 return ptr; 287 return ptr;
279} 288}
280 289
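
The new barrier pairing means a producer's plain initializing stores are visible to any consumer that dereferences the queued pointer, without extra synchronization in the callers. A sketch of the contract from both ends (struct item and its payload are illustrative):

    #include <linux/ptr_ring.h>
    #include <linux/slab.h>

    struct item {
            int payload;
    };

    static int publish(struct ptr_ring *ring)
    {
            struct item *it = kmalloc(sizeof(*it), GFP_KERNEL);

            if (!it)
                    return -ENOMEM;
            it->payload = 42;       /* plain store, ordered by smp_wmb() */
            return ptr_ring_produce(ring, it);
    }

    static void drain_one(struct ptr_ring *ring)
    {
            struct item *it = ptr_ring_consume(ring);

            /* smp_read_barrier_depends() already ran inside consume */
            if (it) {
                    pr_info("payload=%d\n", it->payload);
                    kfree(it);
            }
    }
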
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index d574361943ea..fcbeed4053ef 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
99 struct rb_root *root); 99 struct rb_root *root);
100extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, 100extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
101 struct rb_root *root); 101 struct rb_root *root);
102extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
103 struct rb_root_cached *root);
102 104
103static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, 105static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
104 struct rb_node **rb_link) 106 struct rb_node **rb_link)
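
rb_replace_node_cached() is the cached-tree counterpart of rb_replace_node(): it also updates the leftmost-node cache when the victim happened to be the minimum. Usage sketch (the entry layout is illustrative; both nodes must sort to the same position):

    struct entry {
            struct rb_node node;
            u64 key;
    };

    static void replace_in_place(struct entry *victim, struct entry *new,
                                 struct rb_root_cached *root)
    {
            new->key = victim->key;         /* identical sort position */
            rb_replace_node_cached(&victim->node, &new->node, root);
    }
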
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index cc0072e93e36..857a72ceb794 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -10,9 +10,6 @@
10 */ 10 */
11typedef struct { 11typedef struct {
12 arch_rwlock_t raw_lock; 12 arch_rwlock_t raw_lock;
13#ifdef CONFIG_GENERIC_LOCKBREAK
14 unsigned int break_lock;
15#endif
16#ifdef CONFIG_DEBUG_SPINLOCK 13#ifdef CONFIG_DEBUG_SPINLOCK
17 unsigned int magic, owner_cpu; 14 unsigned int magic, owner_cpu;
18 void *owner; 15 void *owner;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21991d668d35..d2588263a989 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -849,17 +849,6 @@ struct task_struct {
849 struct held_lock held_locks[MAX_LOCK_DEPTH]; 849 struct held_lock held_locks[MAX_LOCK_DEPTH];
850#endif 850#endif
851 851
852#ifdef CONFIG_LOCKDEP_CROSSRELEASE
853#define MAX_XHLOCKS_NR 64UL
854 struct hist_lock *xhlocks; /* Crossrelease history locks */
855 unsigned int xhlock_idx;
856 /* For restoring at history boundaries */
857 unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
858 unsigned int hist_id;
859 /* For overwrite check at each context exit */
860 unsigned int hist_id_save[XHLOCK_CTX_NR];
861#endif
862
863#ifdef CONFIG_UBSAN 852#ifdef CONFIG_UBSAN
864 unsigned int in_ubsan; 853 unsigned int in_ubsan;
865#endif 854#endif
@@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
1503 __set_task_comm(tsk, from, false); 1492 __set_task_comm(tsk, from, false);
1504} 1493}
1505 1494
1506extern char *get_task_comm(char *to, struct task_struct *tsk); 1495extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1496#define get_task_comm(buf, tsk) ({ \
1497 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1498 __get_task_comm(buf, sizeof(buf), tsk); \
1499})
1507 1500
1508#ifdef CONFIG_SMP 1501#ifdef CONFIG_SMP
1509void scheduler_ipi(void); 1502void scheduler_ipi(void);
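
The get_task_comm() wrapper now rejects, at build time, any destination that is not a TASK_COMM_LEN-sized array, closing the door on silent truncation through bare pointers. Sketch:

    static void show_comm(void)
    {
            char comm[TASK_COMM_LEN];

            get_task_comm(comm, current);   /* sizeof(comm) == TASK_COMM_LEN */
            pr_info("comm=%s\n", comm);

            /*
             * char *buf = ...;
             * get_task_comm(buf, current); // BUILD_BUG_ON: sizeof(buf)
             *                              // is the pointer size
             */
    }
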
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 9c8847395b5e..ec912d01126f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
70#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ 70#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ 71#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ 72#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
73#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
73#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) 74#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
74 75
75#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ 76#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 10fd28b118ee..4894d322d258 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -107,16 +107,11 @@ do { \
107 107
108#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) 108#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
109 109
110#ifdef CONFIG_GENERIC_LOCKBREAK
111#define raw_spin_is_contended(lock) ((lock)->break_lock)
112#else
113
114#ifdef arch_spin_is_contended 110#ifdef arch_spin_is_contended
115#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) 111#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
116#else 112#else
117#define raw_spin_is_contended(lock) (((void)(lock), 0)) 113#define raw_spin_is_contended(lock) (((void)(lock), 0))
118#endif /*arch_spin_is_contended*/ 114#endif /*arch_spin_is_contended*/
119#endif
120 115
121/* 116/*
122 * This barrier must provide two things: 117 * This barrier must provide two things:
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 73548eb13a5d..24b4e6f2c1a2 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -19,9 +19,6 @@
19 19
20typedef struct raw_spinlock { 20typedef struct raw_spinlock {
21 arch_spinlock_t raw_lock; 21 arch_spinlock_t raw_lock;
22#ifdef CONFIG_GENERIC_LOCKBREAK
23 unsigned int break_lock;
24#endif
25#ifdef CONFIG_DEBUG_SPINLOCK 22#ifdef CONFIG_DEBUG_SPINLOCK
26 unsigned int magic, owner_cpu; 23 unsigned int magic, owner_cpu;
27 void *owner; 24 void *owner;
diff --git a/include/linux/string.h b/include/linux/string.h
index 410ecf17de3c..cfd83eb2f926 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p)
259{ 259{
260 __kernel_size_t ret; 260 __kernel_size_t ret;
261 size_t p_size = __builtin_object_size(p, 0); 261 size_t p_size = __builtin_object_size(p, 0);
262 if (p_size == (size_t)-1) 262
263 /* Work around gcc excess stack consumption issue */
264 if (p_size == (size_t)-1 ||
265 (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
263 return __builtin_strlen(p); 266 return __builtin_strlen(p);
264 ret = strnlen(p, p_size); 267 ret = strnlen(p, p_size);
265 if (p_size <= ret) 268 if (p_size <= ret)
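
The extra condition keeps gcc able to constant-fold strlen() of objects whose final byte is provably '\0', sidestepping the fortified strnlen() path that triggered the excess stack consumption. A worked example:

    static const char greeting[] = "hello";

    static size_t greeting_len(void)
    {
            /* __builtin_object_size(greeting, 0) is 6 and greeting[5]
             * is a compile-time '\0', so the wrapper takes the
             * __builtin_strlen() branch and folds to the constant 5. */
            return strlen(greeting);
    }
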
diff --git a/include/linux/trace.h b/include/linux/trace.h
index d24991c1fef3..b95ffb2188ab 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -18,7 +18,7 @@
18 */ 18 */
19struct trace_export { 19struct trace_export {
20 struct trace_export __rcu *next; 20 struct trace_export __rcu *next;
21 void (*write)(const void *, unsigned int); 21 void (*write)(struct trace_export *, const void *, unsigned int);
22}; 22};
23 23
24int register_ftrace_export(struct trace_export *export); 24int register_ftrace_export(struct trace_export *export);
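
Passing the trace_export itself to ->write() lets an implementation reach per-instance state with container_of() instead of relying on globals. A sketch under assumed names (my_export and backend_send are illustrative):

    struct my_export {
            struct trace_export export;
            void *backend;                          /* illustrative state */
    };

    static void my_write(struct trace_export *export, const void *buf,
                         unsigned int len)
    {
            struct my_export *me =
                    container_of(export, struct my_export, export);

            backend_send(me->backend, buf, len);    /* hypothetical helper */
    }

The instance would then be wired up with register_ftrace_export(&me->export).
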
diff --git a/include/net/gue.h b/include/net/gue.h
index 2fdb29ca74c2..fdad41469b65 100644
--- a/include/net/gue.h
+++ b/include/net/gue.h
@@ -44,10 +44,10 @@ struct guehdr {
44#else 44#else
45#error "Please fix <asm/byteorder.h>" 45#error "Please fix <asm/byteorder.h>"
46#endif 46#endif
47 __u8 proto_ctype; 47 __u8 proto_ctype;
48 __u16 flags; 48 __be16 flags;
49 }; 49 };
50 __u32 word; 50 __be32 word;
51 }; 51 };
52}; 52};
53 53
@@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags)
 84 * if there are unknown standard or private flags, or the options length for 84 * if there are unknown standard or private flags, or the options length for
 85 * the flags exceeds the options length specified in hlen of the GUE header. 85 * the flags exceeds the options length specified in hlen of the GUE header.
86 */ 86 */
87static inline int validate_gue_flags(struct guehdr *guehdr, 87static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen)
88 size_t optlen)
89{ 88{
89 __be16 flags = guehdr->flags;
90 size_t len; 90 size_t len;
91 __be32 flags = guehdr->flags;
92 91
93 if (flags & ~GUE_FLAGS_ALL) 92 if (flags & ~GUE_FLAGS_ALL)
94 return 1; 93 return 1;
@@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr,
101 /* Private flags are last four bytes accounted in 100 /* Private flags are last four bytes accounted in
102 * guehdr_flags_len 101 * guehdr_flags_len
103 */ 102 */
104 flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); 103 __be32 pflags = *(__be32 *)((void *)&guehdr[1] +
104 len - GUE_LEN_PRIV);
105 105
106 if (flags & ~GUE_PFLAGS_ALL) 106 if (pflags & ~GUE_PFLAGS_ALL)
107 return 1; 107 return 1;
108 108
109 len += guehdr_priv_flags_len(flags); 109 len += guehdr_priv_flags_len(pflags);
110 if (len > optlen) 110 if (len > optlen)
111 return 1; 111 return 1;
112 } 112 }
diff --git a/include/net/ip.h b/include/net/ip.h
index fc9bf1b1fe2c..746abff9ce51 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -36,6 +36,7 @@
36#include <net/netns/hash.h> 36#include <net/netns/hash.h>
37 37
38#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ 38#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
39#define IPV4_MIN_MTU 68 /* RFC 791 */
39 40
40struct sock; 41struct sock;
41 42
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 8f8c0afe529b..bc6b25faba99 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -72,6 +72,7 @@ struct Qdisc {
72 */ 72 */
73#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ 73#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
74#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */ 74#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
75#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
75 u32 limit; 76 u32 limit;
76 const struct Qdisc_ops *ops; 77 const struct Qdisc_ops *ops;
77 struct qdisc_size_table __rcu *stab; 78 struct qdisc_size_table __rcu *stab;
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index f5024c560d8f..9c4eb33c5a1d 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable,
56 56
57#include <trace/define_trace.h> 57#include <trace/define_trace.h>
58 58
59#else /* !CONFIG_PREEMPTIRQ_EVENTS */ 59#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
60 60
61#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
61#define trace_irq_enable(...) 62#define trace_irq_enable(...)
62#define trace_irq_disable(...) 63#define trace_irq_disable(...)
63#define trace_preempt_enable(...)
64#define trace_preempt_disable(...)
65#define trace_irq_enable_rcuidle(...) 64#define trace_irq_enable_rcuidle(...)
66#define trace_irq_disable_rcuidle(...) 65#define trace_irq_disable_rcuidle(...)
66#endif
67
68#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
69#define trace_preempt_enable(...)
70#define trace_preempt_disable(...)
67#define trace_preempt_enable_rcuidle(...) 71#define trace_preempt_enable_rcuidle(...)
68#define trace_preempt_disable_rcuidle(...) 72#define trace_preempt_disable_rcuidle(...)
69
70#endif 73#endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 282d7613fce8..496e59a2738b 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
630 630
631struct kvm_s390_irq_state { 631struct kvm_s390_irq_state {
632 __u64 buf; 632 __u64 buf;
633 __u32 flags; 633 __u32 flags; /* will stay unused for compatibility reasons */
634 __u32 len; 634 __u32 len;
635 __u32 reserved[4]; 635 __u32 reserved[4]; /* will stay unused for compatibility reasons */
636}; 636};
637 637
638/* for KVM_SET_GUEST_DEBUG */ 638/* for KVM_SET_GUEST_DEBUG */
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index af3cc2f4e1ad..37b5096ae97b 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -256,7 +256,6 @@ struct tc_red_qopt {
256#define TC_RED_ECN 1 256#define TC_RED_ECN 1
257#define TC_RED_HARDDROP 2 257#define TC_RED_HARDDROP 2
258#define TC_RED_ADAPTATIVE 4 258#define TC_RED_ADAPTATIVE 4
259#define TC_RED_OFFLOADED 8
260}; 259};
261 260
262struct tc_red_xstats { 261struct tc_red_xstats {
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index d8b5f80c2ea6..843e29aa3cac 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -557,6 +557,7 @@ enum {
557 TCA_PAD, 557 TCA_PAD,
558 TCA_DUMP_INVISIBLE, 558 TCA_DUMP_INVISIBLE,
559 TCA_CHAIN, 559 TCA_CHAIN,
560 TCA_HW_OFFLOAD,
560 __TCA_MAX 561 __TCA_MAX
561}; 562};
562 563
diff --git a/init/main.c b/init/main.c
index dfec3809e740..e96e3a14533c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -589,6 +589,12 @@ asmlinkage __visible void __init start_kernel(void)
589 radix_tree_init(); 589 radix_tree_init();
590 590
591 /* 591 /*
592 * Set up housekeeping before setting up workqueues to allow the unbound
 593 * workqueue to take non-housekeeping CPUs into account.
594 */
595 housekeeping_init();
596
597 /*
592 * Allow workqueue creation and work item queueing/cancelling 598 * Allow workqueue creation and work item queueing/cancelling
593 * early. Work item execution depends on kthreads and starts after 599 * early. Work item execution depends on kthreads and starts after
594 * workqueue_init(). 600 * workqueue_init().
@@ -605,7 +611,6 @@ asmlinkage __visible void __init start_kernel(void)
605 early_irq_init(); 611 early_irq_init();
606 init_IRQ(); 612 init_IRQ();
607 tick_init(); 613 tick_init();
608 housekeeping_init();
609 rcu_init_nohz(); 614 rcu_init_nohz();
610 init_timers(); 615 init_timers();
611 hrtimers_init(); 616 hrtimers_init();
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index e469e05c8e83..3905d4bc5b80 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab)
114 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), 114 pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
115 htab->map.key_size); 115 htab->map.key_size);
116 free_percpu(pptr); 116 free_percpu(pptr);
117 cond_resched();
117 } 118 }
118free_elems: 119free_elems:
119 bpf_map_area_free(htab->elems); 120 bpf_map_area_free(htab->elems);
@@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab)
159 goto free_elems; 160 goto free_elems;
160 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, 161 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
161 pptr); 162 pptr);
163 cond_resched();
162 } 164 }
163 165
164skip_percpu_elems: 166skip_percpu_elems:
diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c
index 5f780d8f6a9d..9caeda610249 100644
--- a/kernel/cgroup/debug.c
+++ b/kernel/cgroup/debug.c
@@ -50,7 +50,7 @@ static int current_css_set_read(struct seq_file *seq, void *v)
50 50
51 spin_lock_irq(&css_set_lock); 51 spin_lock_irq(&css_set_lock);
52 rcu_read_lock(); 52 rcu_read_lock();
53 cset = rcu_dereference(current->cgroups); 53 cset = task_css_set(current);
54 refcnt = refcount_read(&cset->refcount); 54 refcnt = refcount_read(&cset->refcount);
55 seq_printf(seq, "css_set %pK %d", cset, refcnt); 55 seq_printf(seq, "css_set %pK %d", cset, refcnt);
56 if (refcnt > cset->nr_tasks) 56 if (refcnt > cset->nr_tasks)
@@ -96,7 +96,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
96 96
97 spin_lock_irq(&css_set_lock); 97 spin_lock_irq(&css_set_lock);
98 rcu_read_lock(); 98 rcu_read_lock();
99 cset = rcu_dereference(current->cgroups); 99 cset = task_css_set(current);
100 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { 100 list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
101 struct cgroup *c = link->cgrp; 101 struct cgroup *c = link->cgrp;
102 102
diff --git a/kernel/cgroup/stat.c b/kernel/cgroup/stat.c
index 133b465691d6..1e111dd455c4 100644
--- a/kernel/cgroup/stat.c
+++ b/kernel/cgroup/stat.c
@@ -296,8 +296,12 @@ int cgroup_stat_init(struct cgroup *cgrp)
296 } 296 }
297 297
298 /* ->updated_children list is self terminated */ 298 /* ->updated_children list is self terminated */
299 for_each_possible_cpu(cpu) 299 for_each_possible_cpu(cpu) {
300 cgroup_cpu_stat(cgrp, cpu)->updated_children = cgrp; 300 struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu);
301
302 cstat->updated_children = cgrp;
303 u64_stats_init(&cstat->sync);
304 }
301 305
302 prev_cputime_init(&cgrp->stat.prev_cputime); 306 prev_cputime_init(&cgrp->stat.prev_cputime);
303 307
diff --git a/kernel/exit.c b/kernel/exit.c
index 6b4298a41167..df0c91d5606c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1755,3 +1755,11 @@ Efault:
1755 return -EFAULT; 1755 return -EFAULT;
1756} 1756}
1757#endif 1757#endif
1758
1759__weak void abort(void)
1760{
1761 BUG();
1762
1763 /* if that doesn't kill us, halt */
1764 panic("Oops failed to kill thread");
1765}
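
Because the definition is __weak, an architecture with a better fatal-stop primitive can supply a strong abort() and the linker will prefer it. An illustrative override (the halt sequence is a placeholder, not any real architecture's code):

    void abort(void)
    {
            local_irq_disable();
            for (;;)
                    cpu_relax();    /* placeholder for an arch halt */
    }
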
diff --git a/kernel/futex.c b/kernel/futex.c
index 76ed5921117a..57d0b3657e16 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1582,8 +1582,8 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1582{ 1582{
1583 unsigned int op = (encoded_op & 0x70000000) >> 28; 1583 unsigned int op = (encoded_op & 0x70000000) >> 28;
1584 unsigned int cmp = (encoded_op & 0x0f000000) >> 24; 1584 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
1585 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 12); 1585 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1586 int cmparg = sign_extend32(encoded_op & 0x00000fff, 12); 1586 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
1587 int oldval, ret; 1587 int oldval, ret;
1588 1588
1589 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { 1589 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
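
sign_extend32(value, index) takes the bit index of the sign bit, so a 12-bit two's-complement field sign-extends from bit 11; the old index of 12 looked one bit past the field and lost the sign. A worked example:

    #include <linux/bitops.h>

    static void sign_extend_demo(void)
    {
            int ok  = sign_extend32(0xfff, 11); /* -1: bit 11 is the sign */
            int bad = sign_extend32(0xfff, 12); /* 4095: bit 12 is 0 */

            pr_info("ok=%d bad=%d\n", ok, bad);
    }
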
diff --git a/kernel/groups.c b/kernel/groups.c
index e357bc800111..daae2f2dc6d4 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -86,11 +86,12 @@ static int gid_cmp(const void *_a, const void *_b)
86 return gid_gt(a, b) - gid_lt(a, b); 86 return gid_gt(a, b) - gid_lt(a, b);
87} 87}
88 88
89static void groups_sort(struct group_info *group_info) 89void groups_sort(struct group_info *group_info)
90{ 90{
91 sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid), 91 sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid),
92 gid_cmp, NULL); 92 gid_cmp, NULL);
93} 93}
94EXPORT_SYMBOL(groups_sort);
94 95
95/* a simple bsearch */ 96/* a simple bsearch */
96int groups_search(const struct group_info *group_info, kgid_t grp) 97int groups_search(const struct group_info *group_info, kgid_t grp)
@@ -122,7 +123,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
122void set_groups(struct cred *new, struct group_info *group_info) 123void set_groups(struct cred *new, struct group_info *group_info)
123{ 124{
124 put_group_info(new->group_info); 125 put_group_info(new->group_info);
125 groups_sort(group_info);
126 get_group_info(group_info); 126 get_group_info(group_info);
127 new->group_info = group_info; 127 new->group_info = group_info;
128} 128}
@@ -206,6 +206,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
206 return retval; 206 return retval;
207 } 207 }
208 208
209 groups_sort(group_info);
209 retval = set_current_groups(group_info); 210 retval = set_current_groups(group_info);
210 put_group_info(group_info); 211 put_group_info(group_info);
211 212
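Exporting groups_sort() and moving the call out of set_groups() closes a subtle race: a group_info can be shared via get_group_info(), so sorting it inside set_groups() could reorder a table another task was concurrently searching. The sorted-before-publish invariant exists because groups_search() is a plain binary search; a userspace sketch of that pairing:

	#include <stdio.h>
	#include <stdlib.h>

	typedef unsigned int kgid_t;

	static int gid_cmp(const void *_a, const void *_b)
	{
		kgid_t a = *(const kgid_t *)_a, b = *(const kgid_t *)_b;
		return (a > b) - (a < b);
	}

	/* A simple bsearch, like the kernel's groups_search(). */
	static int groups_search(const kgid_t *gid, int n, kgid_t grp)
	{
		int lo = 0, hi = n;

		while (lo < hi) {
			int mid = (lo + hi) / 2;

			if (grp > gid[mid])
				lo = mid + 1;
			else if (grp < gid[mid])
				hi = mid;
			else
				return 1;
		}
		return 0;
	}

	int main(void)
	{
		kgid_t gid[] = { 1000, 4, 27, 100 };

		qsort(gid, 4, sizeof(gid[0]), gid_cmp);		/* the groups_sort() step */
		printf("%d\n", groups_search(gid, 4, 27));	/* 1: found after sorting */
		return 0;
	}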
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 15f33faf4013..7594c033d98a 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -157,7 +157,7 @@ void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
157} 157}
158EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2); 158EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
159 159
160void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2) 160void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
161{ 161{
162 write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_); 162 write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
163} 163}
@@ -183,7 +183,7 @@ void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
183} 183}
184EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2); 184EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
185 185
186void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2) 186void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
187{ 187{
188 write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2, 188 write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
189 _RET_IP_); 189 _RET_IP_);
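The kcov prototype change matters because these hooks receive the raw operands of 4-byte comparisons; with u16 parameters the upper halves were silently truncated before write_comp_data() ever saw them. A userspace illustration of that implicit narrowing:

	#include <stdint.h>
	#include <stdio.h>

	static void trace_cmp4_buggy(uint16_t a, uint16_t b)	/* old prototype */
	{
		printf("logged 0x%x vs 0x%x\n", a, b);
	}

	static void trace_cmp4_fixed(uint32_t a, uint32_t b)	/* new prototype */
	{
		printf("logged 0x%x vs 0x%x\n", a, b);
	}

	int main(void)
	{
		trace_cmp4_buggy(0x12340001, 0x56780001);	/* logs 0x1 vs 0x1 */
		trace_cmp4_fixed(0x12340001, 0x56780001);	/* logs full values */
		return 0;
	}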
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 670d8d7d8087..5fa1324a4f29 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -57,10 +57,6 @@
57#define CREATE_TRACE_POINTS 57#define CREATE_TRACE_POINTS
58#include <trace/events/lock.h> 58#include <trace/events/lock.h>
59 59
60#ifdef CONFIG_LOCKDEP_CROSSRELEASE
61#include <linux/slab.h>
62#endif
63
64#ifdef CONFIG_PROVE_LOCKING 60#ifdef CONFIG_PROVE_LOCKING
65int prove_locking = 1; 61int prove_locking = 1;
66module_param(prove_locking, int, 0644); 62module_param(prove_locking, int, 0644);
@@ -75,19 +71,6 @@ module_param(lock_stat, int, 0644);
75#define lock_stat 0 71#define lock_stat 0
76#endif 72#endif
77 73
78#ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
79static int crossrelease_fullstack = 1;
80#else
81static int crossrelease_fullstack;
82#endif
83static int __init allow_crossrelease_fullstack(char *str)
84{
85 crossrelease_fullstack = 1;
86 return 0;
87}
88
89early_param("crossrelease_fullstack", allow_crossrelease_fullstack);
90
91/* 74/*
92 * lockdep_lock: protects the lockdep graph, the hashes and the 75 * lockdep_lock: protects the lockdep graph, the hashes and the
93 * class/list/hash allocators. 76 * class/list/hash allocators.
@@ -740,18 +723,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
740 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); 723 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
741} 724}
742 725
743#ifdef CONFIG_LOCKDEP_CROSSRELEASE
744static void cross_init(struct lockdep_map *lock, int cross);
745static int cross_lock(struct lockdep_map *lock);
746static int lock_acquire_crosslock(struct held_lock *hlock);
747static int lock_release_crosslock(struct lockdep_map *lock);
748#else
749static inline void cross_init(struct lockdep_map *lock, int cross) {}
750static inline int cross_lock(struct lockdep_map *lock) { return 0; }
751static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; }
752static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; }
753#endif
754
755/* 726/*
756 * Register a lock's class in the hash-table, if the class is not present 727 * Register a lock's class in the hash-table, if the class is not present
757 * yet. Otherwise we look it up. We cache the result in the lock object 728 * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1151,41 +1122,22 @@ print_circular_lock_scenario(struct held_lock *src,
1151 printk(KERN_CONT "\n\n"); 1122 printk(KERN_CONT "\n\n");
1152 } 1123 }
1153 1124
1154 if (cross_lock(tgt->instance)) { 1125 printk(" Possible unsafe locking scenario:\n\n");
1155 printk(" Possible unsafe locking scenario by crosslock:\n\n"); 1126 printk(" CPU0 CPU1\n");
1156 printk(" CPU0 CPU1\n"); 1127 printk(" ---- ----\n");
1157 printk(" ---- ----\n"); 1128 printk(" lock(");
1158 printk(" lock("); 1129 __print_lock_name(target);
1159 __print_lock_name(parent); 1130 printk(KERN_CONT ");\n");
1160 printk(KERN_CONT ");\n"); 1131 printk(" lock(");
1161 printk(" lock("); 1132 __print_lock_name(parent);
1162 __print_lock_name(target); 1133 printk(KERN_CONT ");\n");
1163 printk(KERN_CONT ");\n"); 1134 printk(" lock(");
1164 printk(" lock("); 1135 __print_lock_name(target);
1165 __print_lock_name(source); 1136 printk(KERN_CONT ");\n");
1166 printk(KERN_CONT ");\n"); 1137 printk(" lock(");
1167 printk(" unlock("); 1138 __print_lock_name(source);
1168 __print_lock_name(target); 1139 printk(KERN_CONT ");\n");
1169 printk(KERN_CONT ");\n"); 1140 printk("\n *** DEADLOCK ***\n\n");
1170 printk("\n *** DEADLOCK ***\n\n");
1171 } else {
1172 printk(" Possible unsafe locking scenario:\n\n");
1173 printk(" CPU0 CPU1\n");
1174 printk(" ---- ----\n");
1175 printk(" lock(");
1176 __print_lock_name(target);
1177 printk(KERN_CONT ");\n");
1178 printk(" lock(");
1179 __print_lock_name(parent);
1180 printk(KERN_CONT ");\n");
1181 printk(" lock(");
1182 __print_lock_name(target);
1183 printk(KERN_CONT ");\n");
1184 printk(" lock(");
1185 __print_lock_name(source);
1186 printk(KERN_CONT ");\n");
1187 printk("\n *** DEADLOCK ***\n\n");
1188 }
1189} 1141}
1190 1142
1191/* 1143/*
@@ -1211,10 +1163,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1211 curr->comm, task_pid_nr(curr)); 1163 curr->comm, task_pid_nr(curr));
1212 print_lock(check_src); 1164 print_lock(check_src);
1213 1165
1214 if (cross_lock(check_tgt->instance)) 1166 pr_warn("\nbut task is already holding lock:\n");
1215 pr_warn("\nbut now in release context of a crosslock acquired at the following:\n");
1216 else
1217 pr_warn("\nbut task is already holding lock:\n");
1218 1167
1219 print_lock(check_tgt); 1168 print_lock(check_tgt);
1220 pr_warn("\nwhich lock already depends on the new lock.\n\n"); 1169 pr_warn("\nwhich lock already depends on the new lock.\n\n");
@@ -1244,9 +1193,7 @@ static noinline int print_circular_bug(struct lock_list *this,
1244 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1193 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1245 return 0; 1194 return 0;
1246 1195
1247 if (cross_lock(check_tgt->instance)) 1196 if (!save_trace(&this->trace))
1248 this->trace = *trace;
1249 else if (!save_trace(&this->trace))
1250 return 0; 1197 return 0;
1251 1198
1252 depth = get_lock_depth(target); 1199 depth = get_lock_depth(target);
@@ -1850,9 +1797,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
1850 if (nest) 1797 if (nest)
1851 return 2; 1798 return 2;
1852 1799
1853 if (cross_lock(prev->instance))
1854 continue;
1855
1856 return print_deadlock_bug(curr, prev, next); 1800 return print_deadlock_bug(curr, prev, next);
1857 } 1801 }
1858 return 1; 1802 return 1;
@@ -2018,31 +1962,26 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
2018 for (;;) { 1962 for (;;) {
2019 int distance = curr->lockdep_depth - depth + 1; 1963 int distance = curr->lockdep_depth - depth + 1;
2020 hlock = curr->held_locks + depth - 1; 1964 hlock = curr->held_locks + depth - 1;
1965
2021 /* 1966 /*
2022 * Only non-crosslock entries get new dependencies added. 1967 * Only non-recursive-read entries get new dependencies
2023 * Crosslock entries will be added by commit later: 1968 * added:
2024 */ 1969 */
2025 if (!cross_lock(hlock->instance)) { 1970 if (hlock->read != 2 && hlock->check) {
1971 int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
1972 if (!ret)
1973 return 0;
1974
2026 /* 1975 /*
2027 * Only non-recursive-read entries get new dependencies 1976 * Stop after the first non-trylock entry,
2028 * added: 1977 * as non-trylock entries have added their
1978 * own direct dependencies already, so this
1979 * lock is connected to them indirectly:
2029 */ 1980 */
2030 if (hlock->read != 2 && hlock->check) { 1981 if (!hlock->trylock)
2031 int ret = check_prev_add(curr, hlock, next, 1982 break;
2032 distance, &trace, save_trace);
2033 if (!ret)
2034 return 0;
2035
2036 /*
2037 * Stop after the first non-trylock entry,
2038 * as non-trylock entries have added their
2039 * own direct dependencies already, so this
2040 * lock is connected to them indirectly:
2041 */
2042 if (!hlock->trylock)
2043 break;
2044 }
2045 } 1983 }
1984
2046 depth--; 1985 depth--;
2047 /* 1986 /*
2048 * End of lock-stack? 1987 * End of lock-stack?
@@ -3292,21 +3231,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3292void lockdep_init_map(struct lockdep_map *lock, const char *name, 3231void lockdep_init_map(struct lockdep_map *lock, const char *name,
3293 struct lock_class_key *key, int subclass) 3232 struct lock_class_key *key, int subclass)
3294{ 3233{
3295 cross_init(lock, 0);
3296 __lockdep_init_map(lock, name, key, subclass); 3234 __lockdep_init_map(lock, name, key, subclass);
3297} 3235}
3298EXPORT_SYMBOL_GPL(lockdep_init_map); 3236EXPORT_SYMBOL_GPL(lockdep_init_map);
3299 3237
3300#ifdef CONFIG_LOCKDEP_CROSSRELEASE
3301void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name,
3302 struct lock_class_key *key, int subclass)
3303{
3304 cross_init(lock, 1);
3305 __lockdep_init_map(lock, name, key, subclass);
3306}
3307EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock);
3308#endif
3309
3310struct lock_class_key __lockdep_no_validate__; 3238struct lock_class_key __lockdep_no_validate__;
3311EXPORT_SYMBOL_GPL(__lockdep_no_validate__); 3239EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3312 3240
@@ -3362,7 +3290,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3362 int chain_head = 0; 3290 int chain_head = 0;
3363 int class_idx; 3291 int class_idx;
3364 u64 chain_key; 3292 u64 chain_key;
3365 int ret;
3366 3293
3367 if (unlikely(!debug_locks)) 3294 if (unlikely(!debug_locks))
3368 return 0; 3295 return 0;
@@ -3411,8 +3338,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3411 3338
3412 class_idx = class - lock_classes + 1; 3339 class_idx = class - lock_classes + 1;
3413 3340
3414 /* TODO: nest_lock is not implemented for crosslock yet. */ 3341 if (depth) {
3415 if (depth && !cross_lock(lock)) {
3416 hlock = curr->held_locks + depth - 1; 3342 hlock = curr->held_locks + depth - 1;
3417 if (hlock->class_idx == class_idx && nest_lock) { 3343 if (hlock->class_idx == class_idx && nest_lock) {
3418 if (hlock->references) { 3344 if (hlock->references) {
@@ -3500,14 +3426,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3500 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) 3426 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3501 return 0; 3427 return 0;
3502 3428
3503 ret = lock_acquire_crosslock(hlock);
3504 /*
3505 * 2 means normal acquire operations are needed. Otherwise, it's
3506 * ok just to return with '0:fail, 1:success'.
3507 */
3508 if (ret != 2)
3509 return ret;
3510
3511 curr->curr_chain_key = chain_key; 3429 curr->curr_chain_key = chain_key;
3512 curr->lockdep_depth++; 3430 curr->lockdep_depth++;
3513 check_chain_key(curr); 3431 check_chain_key(curr);
@@ -3745,19 +3663,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3745 struct task_struct *curr = current; 3663 struct task_struct *curr = current;
3746 struct held_lock *hlock; 3664 struct held_lock *hlock;
3747 unsigned int depth; 3665 unsigned int depth;
3748 int ret, i; 3666 int i;
3749 3667
3750 if (unlikely(!debug_locks)) 3668 if (unlikely(!debug_locks))
3751 return 0; 3669 return 0;
3752 3670
3753 ret = lock_release_crosslock(lock);
3754 /*
3755 * 2 means normal release operations are needed. Otherwise, it's
3756 * ok just to return with '0:fail, 1:success'.
3757 */
3758 if (ret != 2)
3759 return ret;
3760
3761 depth = curr->lockdep_depth; 3671 depth = curr->lockdep_depth;
3762 /* 3672 /*
3763 * So we're all set to release this lock.. wait what lock? We don't 3673 * So we're all set to release this lock.. wait what lock? We don't
@@ -4675,495 +4585,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4675 dump_stack(); 4585 dump_stack();
4676} 4586}
4677EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 4587EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
4678
4679#ifdef CONFIG_LOCKDEP_CROSSRELEASE
4680
4681/*
4682 * Crossrelease works by recording a lock history for each thread and
4683 * connecting those historic locks that were taken after the
4684 * wait_for_completion() in the complete() context.
4685 *
4686 * Task-A Task-B
4687 *
4688 * mutex_lock(&A);
4689 * mutex_unlock(&A);
4690 *
4691 * wait_for_completion(&C);
4692 * lock_acquire_crosslock();
4693 * atomic_inc_return(&cross_gen_id);
4694 * |
4695 * | mutex_lock(&B);
4696 * | mutex_unlock(&B);
4697 * |
4698 * | complete(&C);
4699 * `-- lock_commit_crosslock();
4700 *
4701 * Which will then add a dependency between B and C.
4702 */
4703
4704#define xhlock(i) (current->xhlocks[(i) % MAX_XHLOCKS_NR])
4705
4706/*
4707 * Whenever a crosslock is held, cross_gen_id will be increased.
4708 */
4709static atomic_t cross_gen_id; /* Can be wrapped */
4710
4711/*
4712 * Make an entry of the ring buffer invalid.
4713 */
4714static inline void invalidate_xhlock(struct hist_lock *xhlock)
4715{
4716 /*
4717 * Normally, xhlock->hlock.instance must be !NULL.
4718 */
4719 xhlock->hlock.instance = NULL;
4720}
4721
4722/*
4723 * Lock history stacks; we have 2 nested lock history stacks:
4724 *
4725 * HARD(IRQ)
4726 * SOFT(IRQ)
4727 *
4728 * The thing is that once we complete a HARD/SOFT IRQ the future task locks
4729 * should not depend on any of the locks observed while running the IRQ. So
4730 * what we do is rewind the history buffer and erase all our knowledge of that
4731 * temporal event.
4732 */
4733
4734void crossrelease_hist_start(enum xhlock_context_t c)
4735{
4736 struct task_struct *cur = current;
4737
4738 if (!cur->xhlocks)
4739 return;
4740
4741 cur->xhlock_idx_hist[c] = cur->xhlock_idx;
4742 cur->hist_id_save[c] = cur->hist_id;
4743}
4744
4745void crossrelease_hist_end(enum xhlock_context_t c)
4746{
4747 struct task_struct *cur = current;
4748
4749 if (cur->xhlocks) {
4750 unsigned int idx = cur->xhlock_idx_hist[c];
4751 struct hist_lock *h = &xhlock(idx);
4752
4753 cur->xhlock_idx = idx;
4754
4755 /* Check if the ring was overwritten. */
4756 if (h->hist_id != cur->hist_id_save[c])
4757 invalidate_xhlock(h);
4758 }
4759}
4760
4761/*
4762 * lockdep_invariant_state() is used to annotate independence inside a task, to
4763 * make one task look like multiple independent 'tasks'.
4764 *
4765 * Take for instance workqueues; each work is independent of the last. The
4766 * completion of a future work does not depend on the completion of a past work
4767 * (in general). Therefore we must not carry that (lock) dependency across
4768 * works.
4769 *
4770 * This is true for many things; pretty much all kthreads fall into this
4771 * pattern, where they have an invariant state and future completions do not
 4772 * depend on past completions. It's just that since they all have the 'same'
4773 * form -- the kthread does the same over and over -- it doesn't typically
4774 * matter.
4775 *
4776 * The same is true for system-calls, once a system call is completed (we've
4777 * returned to userspace) the next system call does not depend on the lock
4778 * history of the previous system call.
4779 *
 4780 * The key property for independence, this invariant state, is that it must be
 4781 * a point where we hold no locks and have no history. Because if we were to
 4782 * hold locks, the restore at _end() would not necessarily recover its history
 4783 * entry. Similarly, independence by definition means it does not depend on
4784 * prior state.
4785 */
4786void lockdep_invariant_state(bool force)
4787{
4788 /*
4789 * We call this at an invariant point, no current state, no history.
4790 * Verify the former, enforce the latter.
4791 */
4792 WARN_ON_ONCE(!force && current->lockdep_depth);
4793 if (current->xhlocks)
4794 invalidate_xhlock(&xhlock(current->xhlock_idx));
4795}
4796
4797static int cross_lock(struct lockdep_map *lock)
4798{
4799 return lock ? lock->cross : 0;
4800}
4801
4802/*
4803 * This is needed to decide the relationship between wrapable variables.
4804 */
4805static inline int before(unsigned int a, unsigned int b)
4806{
4807 return (int)(a - b) < 0;
4808}
4809
4810static inline struct lock_class *xhlock_class(struct hist_lock *xhlock)
4811{
4812 return hlock_class(&xhlock->hlock);
4813}
4814
4815static inline struct lock_class *xlock_class(struct cross_lock *xlock)
4816{
4817 return hlock_class(&xlock->hlock);
4818}
4819
4820/*
4821 * Should we check a dependency with previous one?
4822 */
4823static inline int depend_before(struct held_lock *hlock)
4824{
4825 return hlock->read != 2 && hlock->check && !hlock->trylock;
4826}
4827
4828/*
4829 * Should we check a dependency with next one?
4830 */
4831static inline int depend_after(struct held_lock *hlock)
4832{
4833 return hlock->read != 2 && hlock->check;
4834}
4835
4836/*
4837 * Check if the xhlock is valid, which would be false if,
4838 *
 4839 * 1. Has not been used after initialization yet.
 4840 * 2. Got invalidated.
 4841 *
 4842 * Remember that hist_lock is implemented as a ring buffer.
4843 */
4844static inline int xhlock_valid(struct hist_lock *xhlock)
4845{
4846 /*
4847 * xhlock->hlock.instance must be !NULL.
4848 */
4849 return !!xhlock->hlock.instance;
4850}
4851
4852/*
4853 * Record a hist_lock entry.
4854 *
 4855 * Only IRQ disabling is required.
4856 */
4857static void add_xhlock(struct held_lock *hlock)
4858{
4859 unsigned int idx = ++current->xhlock_idx;
4860 struct hist_lock *xhlock = &xhlock(idx);
4861
4862#ifdef CONFIG_DEBUG_LOCKDEP
4863 /*
4864 * This can be done locklessly because they are all task-local
4865 * state, we must however ensure IRQs are disabled.
4866 */
4867 WARN_ON_ONCE(!irqs_disabled());
4868#endif
4869
4870 /* Initialize hist_lock's members */
4871 xhlock->hlock = *hlock;
4872 xhlock->hist_id = ++current->hist_id;
4873
4874 xhlock->trace.nr_entries = 0;
4875 xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
4876 xhlock->trace.entries = xhlock->trace_entries;
4877
4878 if (crossrelease_fullstack) {
4879 xhlock->trace.skip = 3;
4880 save_stack_trace(&xhlock->trace);
4881 } else {
4882 xhlock->trace.nr_entries = 1;
4883 xhlock->trace.entries[0] = hlock->acquire_ip;
4884 }
4885}
4886
4887static inline int same_context_xhlock(struct hist_lock *xhlock)
4888{
4889 return xhlock->hlock.irq_context == task_irq_context(current);
4890}
4891
4892/*
 4893 * This should be as lockless as possible because it is
4894 * called very frequently.
4895 */
4896static void check_add_xhlock(struct held_lock *hlock)
4897{
4898 /*
4899 * Record a hist_lock, only in case that acquisitions ahead
4900 * could depend on the held_lock. For example, if the held_lock
 4901 * is trylock then acquisitions ahead never depend on it.
4902 * In that case, we don't need to record it. Just return.
4903 */
4904 if (!current->xhlocks || !depend_before(hlock))
4905 return;
4906
4907 add_xhlock(hlock);
4908}
4909
4910/*
4911 * For crosslock.
4912 */
4913static int add_xlock(struct held_lock *hlock)
4914{
4915 struct cross_lock *xlock;
4916 unsigned int gen_id;
4917
4918 if (!graph_lock())
4919 return 0;
4920
4921 xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock;
4922
4923 /*
4924 * When acquisitions for a crosslock are overlapped, we use
4925 * nr_acquire to perform commit for them, based on cross_gen_id
 4926 * of the first acquisition, which allows additional
 4927 * dependencies to be added.
4928 *
4929 * Moreover, when no acquisition of a crosslock is in progress,
4930 * we should not perform commit because the lock might not exist
4931 * any more, which might cause incorrect memory access. So we
4932 * have to track the number of acquisitions of a crosslock.
4933 *
4934 * depend_after() is necessary to initialize only the first
4935 * valid xlock so that the xlock can be used on its commit.
4936 */
4937 if (xlock->nr_acquire++ && depend_after(&xlock->hlock))
4938 goto unlock;
4939
4940 gen_id = (unsigned int)atomic_inc_return(&cross_gen_id);
4941 xlock->hlock = *hlock;
4942 xlock->hlock.gen_id = gen_id;
4943unlock:
4944 graph_unlock();
4945 return 1;
4946}
4947
4948/*
4949 * Called for both normal and crosslock acquires. Normal locks will be
4950 * pushed on the hist_lock queue. Cross locks will record state and
4951 * stop regular lock_acquire() to avoid being placed on the held_lock
4952 * stack.
4953 *
4954 * Return: 0 - failure;
4955 * 1 - crosslock, done;
4956 * 2 - normal lock, continue to held_lock[] ops.
4957 */
4958static int lock_acquire_crosslock(struct held_lock *hlock)
4959{
4960 /*
4961 * CONTEXT 1 CONTEXT 2
4962 * --------- ---------
4963 * lock A (cross)
4964 * X = atomic_inc_return(&cross_gen_id)
4965 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
4966 * Y = atomic_read_acquire(&cross_gen_id)
4967 * lock B
4968 *
4969 * atomic_read_acquire() is for ordering between A and B,
 4970 * IOW, A happens before B, when CONTEXT 2 sees Y >= X.
4971 *
4972 * Pairs with atomic_inc_return() in add_xlock().
4973 */
4974 hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id);
4975
4976 if (cross_lock(hlock->instance))
4977 return add_xlock(hlock);
4978
4979 check_add_xhlock(hlock);
4980 return 2;
4981}
4982
4983static int copy_trace(struct stack_trace *trace)
4984{
4985 unsigned long *buf = stack_trace + nr_stack_trace_entries;
4986 unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
4987 unsigned int nr = min(max_nr, trace->nr_entries);
4988
4989 trace->nr_entries = nr;
4990 memcpy(buf, trace->entries, nr * sizeof(trace->entries[0]));
4991 trace->entries = buf;
4992 nr_stack_trace_entries += nr;
4993
4994 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
4995 if (!debug_locks_off_graph_unlock())
4996 return 0;
4997
4998 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
4999 dump_stack();
5000
5001 return 0;
5002 }
5003
5004 return 1;
5005}
5006
5007static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
5008{
5009 unsigned int xid, pid;
5010 u64 chain_key;
5011
5012 xid = xlock_class(xlock) - lock_classes;
5013 chain_key = iterate_chain_key((u64)0, xid);
5014 pid = xhlock_class(xhlock) - lock_classes;
5015 chain_key = iterate_chain_key(chain_key, pid);
5016
5017 if (lookup_chain_cache(chain_key))
5018 return 1;
5019
5020 if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context,
5021 chain_key))
5022 return 0;
5023
5024 if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1,
5025 &xhlock->trace, copy_trace))
5026 return 0;
5027
5028 return 1;
5029}
5030
5031static void commit_xhlocks(struct cross_lock *xlock)
5032{
5033 unsigned int cur = current->xhlock_idx;
5034 unsigned int prev_hist_id = xhlock(cur).hist_id;
5035 unsigned int i;
5036
5037 if (!graph_lock())
5038 return;
5039
5040 if (xlock->nr_acquire) {
5041 for (i = 0; i < MAX_XHLOCKS_NR; i++) {
5042 struct hist_lock *xhlock = &xhlock(cur - i);
5043
5044 if (!xhlock_valid(xhlock))
5045 break;
5046
5047 if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id))
5048 break;
5049
5050 if (!same_context_xhlock(xhlock))
5051 break;
5052
5053 /*
5054 * Filter out the cases where the ring buffer was
5055 * overwritten and the current entry has a bigger
5056 * hist_id than the previous one, which is impossible
5057 * otherwise:
5058 */
5059 if (unlikely(before(prev_hist_id, xhlock->hist_id)))
5060 break;
5061
5062 prev_hist_id = xhlock->hist_id;
5063
5064 /*
5065 * commit_xhlock() returns 0 with graph_lock already
5066 * released if fail.
5067 */
5068 if (!commit_xhlock(xlock, xhlock))
5069 return;
5070 }
5071 }
5072
5073 graph_unlock();
5074}
5075
5076void lock_commit_crosslock(struct lockdep_map *lock)
5077{
5078 struct cross_lock *xlock;
5079 unsigned long flags;
5080
5081 if (unlikely(!debug_locks || current->lockdep_recursion))
5082 return;
5083
5084 if (!current->xhlocks)
5085 return;
5086
5087 /*
 5088 * Commit hist_locks with the cross_lock only in case the
 5089 * cross_lock could depend on acquisitions after that.
5090 *
5091 * For example, if the cross_lock does not have the 'check' flag
5092 * then we don't need to check dependencies and commit for that.
5093 * Just skip it. In that case, of course, the cross_lock does
5094 * not depend on acquisitions ahead, either.
5095 *
5096 * WARNING: Don't do that in add_xlock() in advance. When an
5097 * acquisition context is different from the commit context,
5098 * invalid(skipped) cross_lock might be accessed.
5099 */
5100 if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock))
5101 return;
5102
5103 raw_local_irq_save(flags);
5104 check_flags(flags);
5105 current->lockdep_recursion = 1;
5106 xlock = &((struct lockdep_map_cross *)lock)->xlock;
5107 commit_xhlocks(xlock);
5108 current->lockdep_recursion = 0;
5109 raw_local_irq_restore(flags);
5110}
5111EXPORT_SYMBOL_GPL(lock_commit_crosslock);
5112
5113/*
5114 * Return: 0 - failure;
5115 * 1 - crosslock, done;
5116 * 2 - normal lock, continue to held_lock[] ops.
5117 */
5118static int lock_release_crosslock(struct lockdep_map *lock)
5119{
5120 if (cross_lock(lock)) {
5121 if (!graph_lock())
5122 return 0;
5123 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--;
5124 graph_unlock();
5125 return 1;
5126 }
5127 return 2;
5128}
5129
5130static void cross_init(struct lockdep_map *lock, int cross)
5131{
5132 if (cross)
5133 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0;
5134
5135 lock->cross = cross;
5136
5137 /*
5138 * Crossrelease assumes that the ring buffer size of xhlocks
5139 * is aligned with power of 2. So force it on build.
5140 */
5141 BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1));
5142}
5143
5144void lockdep_init_task(struct task_struct *task)
5145{
5146 int i;
5147
5148 task->xhlock_idx = UINT_MAX;
5149 task->hist_id = 0;
5150
5151 for (i = 0; i < XHLOCK_CTX_NR; i++) {
5152 task->xhlock_idx_hist[i] = UINT_MAX;
5153 task->hist_id_save[i] = 0;
5154 }
5155
5156 task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
5157 GFP_KERNEL);
5158}
5159
5160void lockdep_free_task(struct task_struct *task)
5161{
5162 if (task->xhlocks) {
5163 void *tmp = task->xhlocks;
 5164 /* Disable crossrelease for current */
5165 task->xhlocks = NULL;
5166 kfree(tmp);
5167 }
5168}
5169#endif
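For reference, the deadlock class that the removed crossrelease code targeted pairs a lock held across wait_for_completion() with a completer that needs the same lock, as the Task-A/Task-B commentary above describes. A userspace pthread analog (names are illustrative; this program deadlocks by construction, so expect it to hang):

	#include <pthread.h>

	static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
	static int done;

	static void *task_a(void *arg)	/* wait_for_completion(&C) under A */
	{
		pthread_mutex_lock(&a);
		pthread_mutex_lock(&m);
		while (!done)
			pthread_cond_wait(&c, &m);
		pthread_mutex_unlock(&m);
		pthread_mutex_unlock(&a);
		return NULL;
	}

	static void *task_b(void *arg)	/* complete(&C), but needs A first */
	{
		pthread_mutex_lock(&a);	/* blocks forever: task_a holds A */
		pthread_mutex_lock(&m);
		done = 1;
		pthread_cond_signal(&c);
		pthread_mutex_unlock(&m);
		pthread_mutex_unlock(&a);
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, task_a, NULL);
		pthread_create(&t2, NULL, task_b, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}

Ordinary lockdep cannot see this dependency because it crosses contexts: the completion is "released" by a different task than the one that blocked on it.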
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 1fd1a7543cdd..936f3d14dd6b 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
66 break; \ 66 break; \
67 preempt_enable(); \ 67 preempt_enable(); \
68 \ 68 \
69 if (!(lock)->break_lock) \ 69 arch_##op##_relax(&lock->raw_lock); \
70 (lock)->break_lock = 1; \
71 while ((lock)->break_lock) \
72 arch_##op##_relax(&lock->raw_lock); \
73 } \ 70 } \
74 (lock)->break_lock = 0; \
75} \ 71} \
76 \ 72 \
77unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ 73unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
@@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
86 local_irq_restore(flags); \ 82 local_irq_restore(flags); \
87 preempt_enable(); \ 83 preempt_enable(); \
88 \ 84 \
89 if (!(lock)->break_lock) \ 85 arch_##op##_relax(&lock->raw_lock); \
90 (lock)->break_lock = 1; \
91 while ((lock)->break_lock) \
92 arch_##op##_relax(&lock->raw_lock); \
93 } \ 86 } \
94 (lock)->break_lock = 0; \ 87 \
95 return flags; \ 88 return flags; \
96} \ 89} \
97 \ 90 \
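With break_lock gone, the generic slowpath above reduces to trylock-then-relax. A userspace sketch of that loop shape; the pause intrinsic is an x86-specific assumption, and other architectures would use their own relax/yield hint:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_spinlock_t lock;

	static void lock_with_relax(void)
	{
		while (pthread_spin_trylock(&lock) != 0)
			__builtin_ia32_pause();	/* the arch_*_relax() step on x86 */
	}

	int main(void)
	{
		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		lock_with_relax();
		puts("acquired");
		pthread_spin_unlock(&lock);
		return 0;
	}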
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 75554f366fd3..644fa2e3d993 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5097,17 +5097,6 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5097 return ret; 5097 return ret;
5098} 5098}
5099 5099
5100/**
5101 * sys_sched_rr_get_interval - return the default timeslice of a process.
5102 * @pid: pid of the process.
5103 * @interval: userspace pointer to the timeslice value.
5104 *
5105 * this syscall writes the default timeslice value of a given process
5106 * into the user-space timespec buffer. A value of '0' means infinity.
5107 *
5108 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5109 * an error code.
5110 */
5111static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) 5100static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
5112{ 5101{
5113 struct task_struct *p; 5102 struct task_struct *p;
@@ -5144,6 +5133,17 @@ out_unlock:
5144 return retval; 5133 return retval;
5145} 5134}
5146 5135
5136/**
5137 * sys_sched_rr_get_interval - return the default timeslice of a process.
5138 * @pid: pid of the process.
5139 * @interval: userspace pointer to the timeslice value.
5140 *
5141 * this syscall writes the default timeslice value of a given process
5142 * into the user-space timespec buffer. A value of '0' means infinity.
5143 *
5144 * Return: On success, 0 and the timeslice is in @interval. Otherwise,
5145 * an error code.
5146 */
5147SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, 5147SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5148 struct timespec __user *, interval) 5148 struct timespec __user *, interval)
5149{ 5149{
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 4056c19ca3f0..665ace2fc558 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2034,8 +2034,9 @@ static void pull_rt_task(struct rq *this_rq)
2034 bool resched = false; 2034 bool resched = false;
2035 struct task_struct *p; 2035 struct task_struct *p;
2036 struct rq *src_rq; 2036 struct rq *src_rq;
2037 int rt_overload_count = rt_overloaded(this_rq);
2037 2038
2038 if (likely(!rt_overloaded(this_rq))) 2039 if (likely(!rt_overload_count))
2039 return; 2040 return;
2040 2041
2041 /* 2042 /*
@@ -2044,6 +2045,11 @@ static void pull_rt_task(struct rq *this_rq)
2044 */ 2045 */
2045 smp_rmb(); 2046 smp_rmb();
2046 2047
2048 /* If we are the only overloaded CPU do nothing */
2049 if (rt_overload_count == 1 &&
2050 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2051 return;
2052
2047#ifdef HAVE_RT_PUSH_IPI 2053#ifdef HAVE_RT_PUSH_IPI
2048 if (sched_feat(RT_PUSH_IPI)) { 2054 if (sched_feat(RT_PUSH_IPI)) {
2049 tell_cpu_to_push(this_rq); 2055 tell_cpu_to_push(this_rq);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index af7dad126c13..904c952ac383 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS
164 bool "Enable trace events for preempt and irq disable/enable" 164 bool "Enable trace events for preempt and irq disable/enable"
165 select TRACE_IRQFLAGS 165 select TRACE_IRQFLAGS
166 depends on DEBUG_PREEMPT || !PROVE_LOCKING 166 depends on DEBUG_PREEMPT || !PROVE_LOCKING
167 depends on TRACING
167 default n 168 default n
168 help 169 help
169 Enable tracing of disable and enable events for preemption and irqs. 170 Enable tracing of disable and enable events for preemption and irqs.
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0ce99c379c30..40207c2a4113 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
343 .arg4_type = ARG_CONST_SIZE, 343 .arg4_type = ARG_CONST_SIZE,
344}; 344};
345 345
346static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); 346static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
347 347
348static __always_inline u64 348static __always_inline u64
349__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, 349__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
350 u64 flags, struct perf_raw_record *raw) 350 u64 flags, struct perf_sample_data *sd)
351{ 351{
352 struct bpf_array *array = container_of(map, struct bpf_array, map); 352 struct bpf_array *array = container_of(map, struct bpf_array, map);
353 struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
354 unsigned int cpu = smp_processor_id(); 353 unsigned int cpu = smp_processor_id();
355 u64 index = flags & BPF_F_INDEX_MASK; 354 u64 index = flags & BPF_F_INDEX_MASK;
356 struct bpf_event_entry *ee; 355 struct bpf_event_entry *ee;
@@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
373 if (unlikely(event->oncpu != cpu)) 372 if (unlikely(event->oncpu != cpu))
374 return -EOPNOTSUPP; 373 return -EOPNOTSUPP;
375 374
376 perf_sample_data_init(sd, 0, 0);
377 sd->raw = raw;
378 perf_event_output(event, sd, regs); 375 perf_event_output(event, sd, regs);
379 return 0; 376 return 0;
380} 377}
@@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
382BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, 379BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
383 u64, flags, void *, data, u64, size) 380 u64, flags, void *, data, u64, size)
384{ 381{
382 struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
385 struct perf_raw_record raw = { 383 struct perf_raw_record raw = {
386 .frag = { 384 .frag = {
387 .size = size, 385 .size = size,
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
392 if (unlikely(flags & ~(BPF_F_INDEX_MASK))) 390 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
393 return -EINVAL; 391 return -EINVAL;
394 392
395 return __bpf_perf_event_output(regs, map, flags, &raw); 393 perf_sample_data_init(sd, 0, 0);
394 sd->raw = &raw;
395
396 return __bpf_perf_event_output(regs, map, flags, sd);
396} 397}
397 398
398static const struct bpf_func_proto bpf_perf_event_output_proto = { 399static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
407}; 408};
408 409
409static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); 410static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
411static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
410 412
411u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, 413u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
412 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) 414 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
413{ 415{
416 struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
414 struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); 417 struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
415 struct perf_raw_frag frag = { 418 struct perf_raw_frag frag = {
416 .copy = ctx_copy, 419 .copy = ctx_copy,
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
428 }; 431 };
429 432
430 perf_fetch_caller_regs(regs); 433 perf_fetch_caller_regs(regs);
434 perf_sample_data_init(sd, 0, 0);
435 sd->raw = &raw;
431 436
432 return __bpf_perf_event_output(regs, map, flags, &raw); 437 return __bpf_perf_event_output(regs, map, flags, sd);
433} 438}
434 439
435BPF_CALL_0(bpf_get_current_task) 440BPF_CALL_0(bpf_get_current_task)
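The bpf_trace restructuring gives each entry path its own per-cpu perf_sample_data (bpf_trace_sd vs bpf_misc_sd) and initializes it in the caller, so a program firing on one path cannot trample a sample being built on the other. A hedged kernel-style sketch of the caller-owns-scratch pattern, mirroring the patch; my_path_sd and my_path_output are hypothetical names:

	static DEFINE_PER_CPU(struct perf_sample_data, my_path_sd);

	static u64 my_path_output(struct pt_regs *regs, struct bpf_map *map,
				  u64 flags, struct perf_raw_record *raw)
	{
		struct perf_sample_data *sd = this_cpu_ptr(&my_path_sd);

		/* init in the caller, on this path's private buffer */
		perf_sample_data_init(sd, 0, 0);
		sd->raw = raw;
		return __bpf_perf_event_output(regs, map, flags, sd);
	}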
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 91874a95060d..c87766c1c204 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1799,12 +1799,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1799} 1799}
1800EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); 1800EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1801 1801
1802static __always_inline void *
1803__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1804{
1805 return bpage->data + index;
1806}
1807
1808static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) 1802static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1809{ 1803{
1810 return bpage->page->data + index; 1804 return bpage->page->data + index;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 73e67b68c53b..59518b8126d0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct
362} 362}
363 363
364/** 364/**
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list 365 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
366 * @pid_list: The list to modify 366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit 367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove 368 * @task: The task to add or remove
@@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr)
925} 925}
926 926
927/** 927/**
928 * trace_snapshot - take a snapshot of the current buffer. 928 * tracing_snapshot - take a snapshot of the current buffer.
929 * 929 *
930 * This causes a swap between the snapshot buffer and the current live 930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live 931 * tracing buffer. You can use this to take snapshots of the live
@@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void)
1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); 1004EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005 1005
1006/** 1006/**
1007 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer. 1007 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008 * 1008 *
1009 * This is similar to trace_snapshot(), but it will allocate the 1009 * This is similar to tracing_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only 1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep. 1011 * where it is safe to sleep, as the allocation may sleep.
1012 * 1012 *
@@ -1303,7 +1303,7 @@ unsigned long __read_mostly tracing_thresh;
1303/* 1303/*
1304 * Copy the new maximum trace into the separate maximum-trace 1304 * Copy the new maximum trace into the separate maximum-trace
1305 * structure. (this way the maximum trace is permanently saved, 1305 * structure. (this way the maximum trace is permanently saved,
1306 * for later retrieval via /sys/kernel/debug/tracing/latency_trace) 1306 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1307 */ 1307 */
1308static void 1308static void
1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 1309__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2415,7 +2415,7 @@ trace_process_export(struct trace_export *export,
2415 2415
2416 entry = ring_buffer_event_data(event); 2416 entry = ring_buffer_event_data(event);
2417 size = ring_buffer_event_length(event); 2417 size = ring_buffer_event_length(event);
2418 export->write(entry, size); 2418 export->write(export, entry, size);
2419} 2419}
2420 2420
2421static DEFINE_MUTEX(ftrace_export_lock); 2421static DEFINE_MUTEX(ftrace_export_lock);
@@ -4178,37 +4178,30 @@ static const struct file_operations show_traces_fops = {
4178 .llseek = seq_lseek, 4178 .llseek = seq_lseek,
4179}; 4179};
4180 4180
4181/*
4182 * The tracer itself will not take this lock, but still we want
4183 * to provide a consistent cpumask to user-space:
4184 */
4185static DEFINE_MUTEX(tracing_cpumask_update_lock);
4186
4187/*
4188 * Temporary storage for the character representation of the
4189 * CPU bitmask (and one more byte for the newline):
4190 */
4191static char mask_str[NR_CPUS + 1];
4192
4193static ssize_t 4181static ssize_t
4194tracing_cpumask_read(struct file *filp, char __user *ubuf, 4182tracing_cpumask_read(struct file *filp, char __user *ubuf,
4195 size_t count, loff_t *ppos) 4183 size_t count, loff_t *ppos)
4196{ 4184{
4197 struct trace_array *tr = file_inode(filp)->i_private; 4185 struct trace_array *tr = file_inode(filp)->i_private;
4186 char *mask_str;
4198 int len; 4187 int len;
4199 4188
4200 mutex_lock(&tracing_cpumask_update_lock); 4189 len = snprintf(NULL, 0, "%*pb\n",
4190 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4191 mask_str = kmalloc(len, GFP_KERNEL);
4192 if (!mask_str)
4193 return -ENOMEM;
4201 4194
4202 len = snprintf(mask_str, count, "%*pb\n", 4195 len = snprintf(mask_str, len, "%*pb\n",
4203 cpumask_pr_args(tr->tracing_cpumask)); 4196 cpumask_pr_args(tr->tracing_cpumask));
4204 if (len >= count) { 4197 if (len >= count) {
4205 count = -EINVAL; 4198 count = -EINVAL;
4206 goto out_err; 4199 goto out_err;
4207 } 4200 }
4208 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); 4201 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4209 4202
4210out_err: 4203out_err:
4211 mutex_unlock(&tracing_cpumask_update_lock); 4204 kfree(mask_str);
4212 4205
4213 return count; 4206 return count;
4214} 4207}
@@ -4228,8 +4221,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4228 if (err) 4221 if (err)
4229 goto err_unlock; 4222 goto err_unlock;
4230 4223
4231 mutex_lock(&tracing_cpumask_update_lock);
4232
4233 local_irq_disable(); 4224 local_irq_disable();
4234 arch_spin_lock(&tr->max_lock); 4225 arch_spin_lock(&tr->max_lock);
4235 for_each_tracing_cpu(cpu) { 4226 for_each_tracing_cpu(cpu) {
@@ -4252,8 +4243,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4252 local_irq_enable(); 4243 local_irq_enable();
4253 4244
4254 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 4245 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4255
4256 mutex_unlock(&tracing_cpumask_update_lock);
4257 free_cpumask_var(tracing_cpumask_new); 4246 free_cpumask_var(tracing_cpumask_new);
4258 4247
4259 return count; 4248 return count;
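The rewrite above replaces the fixed NR_CPUS-sized mask_str (and the mutex that guarded it) with an exact, per-call allocation, using the snprintf(NULL, 0, ...) length-query idiom. A userspace refresher on that idiom; the mask string is a stand-in for the formatted cpumask:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char *mask = "ff,ffffffff";
		int len = snprintf(NULL, 0, "%s\n", mask) + 1;	/* +1 for NUL */
		char *buf = malloc(len);

		if (!buf)
			return 1;
		snprintf(buf, len, "%s\n", mask);
		fputs(buf, stdout);
		free(buf);
		return 0;
	}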
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 734accc02418..3c7bfc4bf5e9 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
209 if (__this_cpu_read(disable_stack_tracer) != 1) 209 if (__this_cpu_read(disable_stack_tracer) != 1)
210 goto out; 210 goto out;
211 211
 212 /* If rcu is not watching, then saving the stack trace can fail */
213 if (!rcu_is_watching())
214 goto out;
215
212 ip += MCOUNT_INSN_SIZE; 216 ip += MCOUNT_INSN_SIZE;
213 217
214 check_stack(ip, &stack); 218 check_stack(ip, &stack);
diff --git a/kernel/uid16.c b/kernel/uid16.c
index ce74a4901d2b..ef1da2a5f9bd 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -192,6 +192,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
192 return retval; 192 return retval;
193 } 193 }
194 194
195 groups_sort(group_info);
195 retval = set_current_groups(group_info); 196 retval = set_current_groups(group_info);
196 put_group_info(group_info); 197 put_group_info(group_info);
197 198
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8fdb710bfdd7..43d18cb46308 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -38,7 +38,6 @@
38#include <linux/hardirq.h> 38#include <linux/hardirq.h>
39#include <linux/mempolicy.h> 39#include <linux/mempolicy.h>
40#include <linux/freezer.h> 40#include <linux/freezer.h>
41#include <linux/kallsyms.h>
42#include <linux/debug_locks.h> 41#include <linux/debug_locks.h>
43#include <linux/lockdep.h> 42#include <linux/lockdep.h>
44#include <linux/idr.h> 43#include <linux/idr.h>
@@ -48,6 +47,7 @@
48#include <linux/nodemask.h> 47#include <linux/nodemask.h>
49#include <linux/moduleparam.h> 48#include <linux/moduleparam.h>
50#include <linux/uaccess.h> 49#include <linux/uaccess.h>
50#include <linux/sched/isolation.h>
51 51
52#include "workqueue_internal.h" 52#include "workqueue_internal.h"
53 53
@@ -1634,7 +1634,7 @@ static void worker_enter_idle(struct worker *worker)
1634 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); 1634 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1635 1635
1636 /* 1636 /*
1637 * Sanity check nr_running. Because wq_unbind_fn() releases 1637 * Sanity check nr_running. Because unbind_workers() releases
1638 * pool->lock between setting %WORKER_UNBOUND and zapping 1638 * pool->lock between setting %WORKER_UNBOUND and zapping
1639 * nr_running, the warning may trigger spuriously. Check iff 1639 * nr_running, the warning may trigger spuriously. Check iff
1640 * unbind is not in progress. 1640 * unbind is not in progress.
@@ -4510,9 +4510,8 @@ void show_workqueue_state(void)
4510 * cpu comes back online. 4510 * cpu comes back online.
4511 */ 4511 */
4512 4512
4513static void wq_unbind_fn(struct work_struct *work) 4513static void unbind_workers(int cpu)
4514{ 4514{
4515 int cpu = smp_processor_id();
4516 struct worker_pool *pool; 4515 struct worker_pool *pool;
4517 struct worker *worker; 4516 struct worker *worker;
4518 4517
@@ -4589,16 +4588,6 @@ static void rebind_workers(struct worker_pool *pool)
4589 4588
4590 spin_lock_irq(&pool->lock); 4589 spin_lock_irq(&pool->lock);
4591 4590
4592 /*
4593 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4594 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
4595 * being reworked and this can go away in time.
4596 */
4597 if (!(pool->flags & POOL_DISASSOCIATED)) {
4598 spin_unlock_irq(&pool->lock);
4599 return;
4600 }
4601
4602 pool->flags &= ~POOL_DISASSOCIATED; 4591 pool->flags &= ~POOL_DISASSOCIATED;
4603 4592
4604 for_each_pool_worker(worker, pool) { 4593 for_each_pool_worker(worker, pool) {
@@ -4709,12 +4698,13 @@ int workqueue_online_cpu(unsigned int cpu)
4709 4698
4710int workqueue_offline_cpu(unsigned int cpu) 4699int workqueue_offline_cpu(unsigned int cpu)
4711{ 4700{
4712 struct work_struct unbind_work;
4713 struct workqueue_struct *wq; 4701 struct workqueue_struct *wq;
4714 4702
4715 /* unbinding per-cpu workers should happen on the local CPU */ 4703 /* unbinding per-cpu workers should happen on the local CPU */
4716 INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); 4704 if (WARN_ON(cpu != smp_processor_id()))
4717 queue_work_on(cpu, system_highpri_wq, &unbind_work); 4705 return -1;
4706
4707 unbind_workers(cpu);
4718 4708
4719 /* update NUMA affinity of unbound workqueues */ 4709 /* update NUMA affinity of unbound workqueues */
4720 mutex_lock(&wq_pool_mutex); 4710 mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4712,6 @@ int workqueue_offline_cpu(unsigned int cpu)
4722 wq_update_unbound_numa(wq, cpu, false); 4712 wq_update_unbound_numa(wq, cpu, false);
4723 mutex_unlock(&wq_pool_mutex); 4713 mutex_unlock(&wq_pool_mutex);
4724 4714
4725 /* wait for per-cpu unbinding to finish */
4726 flush_work(&unbind_work);
4727 destroy_work_on_stack(&unbind_work);
4728 return 0; 4715 return 0;
4729} 4716}
4730 4717
@@ -4957,6 +4944,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
4957 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) 4944 if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
4958 return -ENOMEM; 4945 return -ENOMEM;
4959 4946
4947 /*
4948 * Not excluding isolated cpus on purpose.
4949 * If the user wishes to include them, we allow that.
4950 */
4960 cpumask_and(cpumask, cpumask, cpu_possible_mask); 4951 cpumask_and(cpumask, cpumask, cpu_possible_mask);
4961 if (!cpumask_empty(cpumask)) { 4952 if (!cpumask_empty(cpumask)) {
4962 apply_wqattrs_lock(); 4953 apply_wqattrs_lock();
@@ -5555,7 +5546,7 @@ int __init workqueue_init_early(void)
5555 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); 5546 WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5556 5547
5557 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); 5548 BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5558 cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); 5549 cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
5559 5550
5560 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); 5551 pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5561 5552
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 947d3e2ed5c2..9d5b78aad4c5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1099,8 +1099,6 @@ config PROVE_LOCKING
1099 select DEBUG_MUTEXES 1099 select DEBUG_MUTEXES
1100 select DEBUG_RT_MUTEXES if RT_MUTEXES 1100 select DEBUG_RT_MUTEXES if RT_MUTEXES
1101 select DEBUG_LOCK_ALLOC 1101 select DEBUG_LOCK_ALLOC
1102 select LOCKDEP_CROSSRELEASE
1103 select LOCKDEP_COMPLETIONS
1104 select TRACE_IRQFLAGS 1102 select TRACE_IRQFLAGS
1105 default n 1103 default n
1106 help 1104 help
@@ -1170,37 +1168,6 @@ config LOCK_STAT
1170 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. 1168 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
1171 (CONFIG_LOCKDEP defines "acquire" and "release" events.) 1169 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
1172 1170
1173config LOCKDEP_CROSSRELEASE
1174 bool
1175 help
 1176	 This makes lockdep work for crosslocks, which are locks allowed to
 1177	 be released in a different context from the acquisition context.
 1178	 Normally a lock must be released in the context that acquired it.
 1179	 However, relaxing this constraint lets synchronization primitives
 1180	 such as page locks or completions use the lock correctness
1181 detector, lockdep.
1182
1183config LOCKDEP_COMPLETIONS
1184 bool
1185 help
1186 A deadlock caused by wait_for_completion() and complete() can be
1187 detected by lockdep using crossrelease feature.
1188
1189config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK
1190 bool "Enable the boot parameter, crossrelease_fullstack"
1191 depends on LOCKDEP_CROSSRELEASE
1192 default n
1193 help
1194 The lockdep "cross-release" feature needs to record stack traces
1195 (of calling functions) for all acquisitions, for eventual later
1196 use during analysis. By default only a single caller is recorded,
1197 because the unwind operation can be very expensive with deeper
1198 stack chains.
1199
 1200	 However, a boot parameter, crossrelease_fullstack, was
1201 introduced since sometimes deeper traces are required for full
1202 analysis. This option turns on the boot parameter.
1203
1204config DEBUG_LOCKDEP 1171config DEBUG_LOCKDEP
1205 bool "Lock dependency engine debugging" 1172 bool "Lock dependency engine debugging"
1206 depends on DEBUG_KERNEL && LOCKDEP 1173 depends on DEBUG_KERNEL && LOCKDEP
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1ef0cec38d78..dc14beae2c9a 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -313,42 +313,47 @@ next_op:
313 313
314 /* Decide how to handle the operation */ 314 /* Decide how to handle the operation */
315 switch (op) { 315 switch (op) {
316 case ASN1_OP_MATCH_ANY_ACT:
317 case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
318 case ASN1_OP_COND_MATCH_ANY_ACT:
319 case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
320 ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
321 if (ret < 0)
322 return ret;
323 goto skip_data;
324
325 case ASN1_OP_MATCH_ACT:
326 case ASN1_OP_MATCH_ACT_OR_SKIP:
327 case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
328 ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
329 if (ret < 0)
330 return ret;
331 goto skip_data;
332
333 case ASN1_OP_MATCH: 316 case ASN1_OP_MATCH:
334 case ASN1_OP_MATCH_OR_SKIP: 317 case ASN1_OP_MATCH_OR_SKIP:
318 case ASN1_OP_MATCH_ACT:
319 case ASN1_OP_MATCH_ACT_OR_SKIP:
335 case ASN1_OP_MATCH_ANY: 320 case ASN1_OP_MATCH_ANY:
336 case ASN1_OP_MATCH_ANY_OR_SKIP: 321 case ASN1_OP_MATCH_ANY_OR_SKIP:
322 case ASN1_OP_MATCH_ANY_ACT:
323 case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
337 case ASN1_OP_COND_MATCH_OR_SKIP: 324 case ASN1_OP_COND_MATCH_OR_SKIP:
325 case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
338 case ASN1_OP_COND_MATCH_ANY: 326 case ASN1_OP_COND_MATCH_ANY:
339 case ASN1_OP_COND_MATCH_ANY_OR_SKIP: 327 case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
340 skip_data: 328 case ASN1_OP_COND_MATCH_ANY_ACT:
329 case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
330
341 if (!(flags & FLAG_CONS)) { 331 if (!(flags & FLAG_CONS)) {
342 if (flags & FLAG_INDEFINITE_LENGTH) { 332 if (flags & FLAG_INDEFINITE_LENGTH) {
333 size_t tmp = dp;
334
343 ret = asn1_find_indefinite_length( 335 ret = asn1_find_indefinite_length(
344 data, datalen, &dp, &len, &errmsg); 336 data, datalen, &tmp, &len, &errmsg);
345 if (ret < 0) 337 if (ret < 0)
346 goto error; 338 goto error;
347 } else {
348 dp += len;
349 } 339 }
350 pr_debug("- LEAF: %zu\n", len); 340 pr_debug("- LEAF: %zu\n", len);
351 } 341 }
342
343 if (op & ASN1_OP_MATCH__ACT) {
344 unsigned char act;
345
346 if (op & ASN1_OP_MATCH__ANY)
347 act = machine[pc + 1];
348 else
349 act = machine[pc + 2];
350 ret = actions[act](context, hdr, tag, data + dp, len);
351 if (ret < 0)
352 return ret;
353 }
354
355 if (!(flags & FLAG_CONS))
356 dp += len;
352 pc += asn1_op_lengths[op]; 357 pc += asn1_op_lengths[op];
353 goto next_op; 358 goto next_op;
354 359
@@ -434,6 +439,8 @@ next_op:
434 else 439 else
435 act = machine[pc + 1]; 440 act = machine[pc + 1];
436 ret = actions[act](context, hdr, 0, data + tdp, len); 441 ret = actions[act](context, hdr, 0, data + tdp, len);
442 if (ret < 0)
443 return ret;
437 } 444 }
438 pc += asn1_op_lengths[op]; 445 pc += asn1_op_lengths[op];
439 goto next_op; 446 goto next_op;
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index 41b9e50711a7..0bcac6ccb1b2 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -116,14 +116,14 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
116 int count; 116 int count;
117 117
118 if (v >= end) 118 if (v >= end)
119 return -EBADMSG; 119 goto bad;
120 120
121 n = *v++; 121 n = *v++;
122 ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); 122 ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
123 if (count >= bufsize)
124 return -ENOBUFS;
123 buffer += count; 125 buffer += count;
124 bufsize -= count; 126 bufsize -= count;
125 if (bufsize == 0)
126 return -ENOBUFS;
127 127
128 while (v < end) { 128 while (v < end) {
129 num = 0; 129 num = 0;
@@ -134,20 +134,24 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
134 num = n & 0x7f; 134 num = n & 0x7f;
135 do { 135 do {
136 if (v >= end) 136 if (v >= end)
137 return -EBADMSG; 137 goto bad;
138 n = *v++; 138 n = *v++;
139 num <<= 7; 139 num <<= 7;
140 num |= n & 0x7f; 140 num |= n & 0x7f;
141 } while (n & 0x80); 141 } while (n & 0x80);
142 } 142 }
143 ret += count = snprintf(buffer, bufsize, ".%lu", num); 143 ret += count = snprintf(buffer, bufsize, ".%lu", num);
144 buffer += count; 144 if (count >= bufsize)
145 if (bufsize <= count)
146 return -ENOBUFS; 145 return -ENOBUFS;
146 buffer += count;
147 bufsize -= count; 147 bufsize -= count;
148 } 148 }
149 149
150 return ret; 150 return ret;
151
152bad:
153 snprintf(buffer, bufsize, "(bad)");
154 return -EBADMSG;
151} 155}
152EXPORT_SYMBOL_GPL(sprint_oid); 156EXPORT_SYMBOL_GPL(sprint_oid);
153 157
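Both sprint_oid() fixes lean on the same C99 snprintf() property: the return value is the length the output would have had, so count >= bufsize detects truncation before the cursor advances, whereas the old code advanced buffer and bufsize by the would-be length and could run past the end. A small demonstration:

	#include <stdio.h>

	int main(void)
	{
		char buf[4];
		int count = snprintf(buf, sizeof(buf), "%u.%u", 1, 23);

		/* count == 4 ("1.23"), but only "1.2" plus NUL fit */
		if (count >= (int)sizeof(buf))
			printf("truncated: needed %d bytes\n", count);
		return 0;
	}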
diff --git a/lib/rbtree.c b/lib/rbtree.c
index ba4a9d165f1b..d3ff682fd4b8 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
603} 603}
604EXPORT_SYMBOL(rb_replace_node); 604EXPORT_SYMBOL(rb_replace_node);
605 605
606void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
607 struct rb_root_cached *root)
608{
609 rb_replace_node(victim, new, &root->rb_root);
610
611 if (root->rb_leftmost == victim)
612 root->rb_leftmost = new;
613}
614EXPORT_SYMBOL(rb_replace_node_cached);
615
606void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, 616void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
607 struct rb_root *root) 617 struct rb_root *root)
608{ 618{
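The new rb_replace_node_cached() extends the existing replace helper to the cached-tree variant, patching rb_leftmost when the victim was the leftmost node. A hedged caller sketch; my_item and its key field are hypothetical, and replacement is only valid when the new node sorts identically to the victim:

	struct my_item {
		struct rb_node node;
		u64 key;
	};

	static void swap_in_place(struct my_item *old, struct my_item *new_item,
				  struct rb_root_cached *root)
	{
		new_item->key = old->key;	/* keep tree order unchanged */
		rb_replace_node_cached(&old->node, &new_item->node, root);
	}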
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index d04ac1ec0559..1826f191e72c 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -111,7 +111,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
111 enum fixed_addresses idx; 111 enum fixed_addresses idx;
112 int i, slot; 112 int i, slot;
113 113
114 WARN_ON(system_state != SYSTEM_BOOTING); 114 WARN_ON(system_state >= SYSTEM_RUNNING);
115 115
116 slot = -1; 116 slot = -1;
117 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { 117 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 297c7238f7d4..c64dca6e27c2 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -62,8 +62,10 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
62 * get_user_pages_longterm() and disallow it for filesystem-dax 62 * get_user_pages_longterm() and disallow it for filesystem-dax
63 * mappings. 63 * mappings.
64 */ 64 */
65 if (vma_is_fsdax(vma)) 65 if (vma_is_fsdax(vma)) {
66 return -EOPNOTSUPP; 66 ret = -EOPNOTSUPP;
67 goto out;
68 }
67 69
68 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { 70 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
69 vec->got_ref = true; 71 vec->got_ref = true;
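The frame_vector.c hunk converts a bare "return -EOPNOTSUPP" into "ret = ...; goto out;" so the early exit still runs the function's common cleanup at out: (in context, dropping mmap_sem). A generic sketch of the single-exit idiom, with a pthread mutex standing in for the semaphore and all names hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire a lock, then funnel every exit through one unlock site. */
static int do_work(int unsupported)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (unsupported) {
                ret = -EOPNOTSUPP;
                goto out;       /* early exit still releases the lock */
        }
        /* ... the normal path would do the real work here ... */
out:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d\n", do_work(1));
        printf("%d\n", do_work(0));
        return 0;
}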
diff --git a/mm/gup.c b/mm/gup.c
index d3fb60e5bfac..e0d82b6706d7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
66 */ 66 */
67static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) 67static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
68{ 68{
69 return pte_access_permitted(pte, WRITE) || 69 return pte_write(pte) ||
70 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); 70 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
71} 71}
72 72
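The revert restores the explicit GUP rule: following a PTE for write is allowed when the PTE is writable, or when FOLL_FORCE and FOLL_COW are both set and the PTE is dirty (i.e. COW has already been broken for it). A compilable rendering of just that predicate, with made-up flag values and a fake PTE type:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's FOLL_* flags and pte bits. */
#define FOLL_FORCE 0x1
#define FOLL_COW   0x2

struct fake_pte { bool write; bool dirty; };

/* Writable outright, or forced follow of a COW-broken (dirty) page. */
static bool can_follow_write(struct fake_pte pte, unsigned int flags)
{
        return pte.write ||
               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte.dirty);
}

int main(void)
{
        struct fake_pte ro_dirty = { .write = false, .dirty = true };

        printf("%d\n", can_follow_write(ro_dirty, 0));                     /* 0 */
        printf("%d\n", can_follow_write(ro_dirty, FOLL_FORCE | FOLL_COW)); /* 1 */
        return 0;
}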
diff --git a/mm/hmm.c b/mm/hmm.c
index 3a5c172af560..ea19742a5d60 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -391,11 +391,11 @@ again:
391 if (pmd_protnone(pmd)) 391 if (pmd_protnone(pmd))
392 return hmm_vma_walk_clear(start, end, walk); 392 return hmm_vma_walk_clear(start, end, walk);
393 393
394 if (!pmd_access_permitted(pmd, write_fault)) 394 if (write_fault && !pmd_write(pmd))
395 return hmm_vma_walk_clear(start, end, walk); 395 return hmm_vma_walk_clear(start, end, walk);
396 396
397 pfn = pmd_pfn(pmd) + pte_index(addr); 397 pfn = pmd_pfn(pmd) + pte_index(addr);
398 flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0; 398 flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
399 for (; addr < end; addr += PAGE_SIZE, i++, pfn++) 399 for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
400 pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag; 400 pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
401 return 0; 401 return 0;
@@ -456,11 +456,11 @@ again:
456 continue; 456 continue;
457 } 457 }
458 458
459 if (!pte_access_permitted(pte, write_fault)) 459 if (write_fault && !pte_write(pte))
460 goto fault; 460 goto fault;
461 461
462 pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag; 462 pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
463 pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0; 463 pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
464 continue; 464 continue;
465 465
466fault: 466fault:
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2f2f5e774902..0e7ded98d114 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -870,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
870 */ 870 */
871 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); 871 WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
872 872
873 if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE)) 873 if (flags & FOLL_WRITE && !pmd_write(*pmd))
874 return NULL; 874 return NULL;
875 875
876 if (pmd_present(*pmd) && pmd_devmap(*pmd)) 876 if (pmd_present(*pmd) && pmd_devmap(*pmd))
@@ -1012,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1012 1012
1013 assert_spin_locked(pud_lockptr(mm, pud)); 1013 assert_spin_locked(pud_lockptr(mm, pud));
1014 1014
1015 if (!pud_access_permitted(*pud, flags & FOLL_WRITE)) 1015 if (flags & FOLL_WRITE && !pud_write(*pud))
1016 return NULL; 1016 return NULL;
1017 1017
1018 if (pud_present(*pud) && pud_devmap(*pud)) 1018 if (pud_present(*pud) && pud_devmap(*pud))
@@ -1386,7 +1386,7 @@ out_unlock:
1386 */ 1386 */
1387static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) 1387static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1388{ 1388{
1389 return pmd_access_permitted(pmd, WRITE) || 1389 return pmd_write(pmd) ||
1390 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); 1390 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1391} 1391}
1392 1392
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 3d4781756d50..d73c14294f3a 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1523,7 +1523,7 @@ static void kmemleak_scan(void)
1523 if (page_count(page) == 0) 1523 if (page_count(page) == 0)
1524 continue; 1524 continue;
1525 scan_block(page, page + 1, NULL); 1525 scan_block(page, page + 1, NULL);
1526 if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) 1526 if (!(pfn & 63))
1527 cond_resched(); 1527 cond_resched();
1528 } 1528 }
1529 } 1529 }
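"pfn % (MAX_SCAN_SIZE / sizeof(*page))" rescheduled at a period tied to an unrelated constant; "pfn & 63" gives a fixed cond_resched() every 64 frames and costs a single AND, which works because 64 is a power of two. A trivial demonstration of mask-as-modulus:

#include <stdio.h>

int main(void)
{
        unsigned long pfn, yields = 0;

        /* "x & 63" == "x % 64" for unsigned x, since 64 is a power of two. */
        for (pfn = 0; pfn < 1000; pfn++) {
                /* scan_block(...) would run here */
                if (!(pfn & 63))
                        yields++;       /* stand-in for cond_resched() */
        }
        printf("yield points: %lu\n", yields);  /* 16: pfn 0, 64, ..., 960 */
        return 0;
}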
diff --git a/mm/memory.c b/mm/memory.c
index 5eb3d2524bdc..ca5674cbaff2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3831,7 +3831,8 @@ static inline int create_huge_pmd(struct vm_fault *vmf)
3831 return VM_FAULT_FALLBACK; 3831 return VM_FAULT_FALLBACK;
3832} 3832}
3833 3833
3834static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) 3834/* `inline' is required to avoid gcc 4.1.2 build error */
3835static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
3835{ 3836{
3836 if (vma_is_anonymous(vmf->vma)) 3837 if (vma_is_anonymous(vmf->vma))
3837 return do_huge_pmd_wp_page(vmf, orig_pmd); 3838 return do_huge_pmd_wp_page(vmf, orig_pmd);
@@ -3948,7 +3949,7 @@ static int handle_pte_fault(struct vm_fault *vmf)
3948 if (unlikely(!pte_same(*vmf->pte, entry))) 3949 if (unlikely(!pte_same(*vmf->pte, entry)))
3949 goto unlock; 3950 goto unlock;
3950 if (vmf->flags & FAULT_FLAG_WRITE) { 3951 if (vmf->flags & FAULT_FLAG_WRITE) {
3951 if (!pte_access_permitted(entry, WRITE)) 3952 if (!pte_write(entry))
3952 return do_wp_page(vmf); 3953 return do_wp_page(vmf);
3953 entry = pte_mkdirty(entry); 3954 entry = pte_mkdirty(entry);
3954 } 3955 }
@@ -4013,7 +4014,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4013 4014
4014 /* NUMA case for anonymous PUDs would go here */ 4015 /* NUMA case for anonymous PUDs would go here */
4015 4016
4016 if (dirty && !pud_access_permitted(orig_pud, WRITE)) { 4017 if (dirty && !pud_write(orig_pud)) {
4017 ret = wp_huge_pud(&vmf, orig_pud); 4018 ret = wp_huge_pud(&vmf, orig_pud);
4018 if (!(ret & VM_FAULT_FALLBACK)) 4019 if (!(ret & VM_FAULT_FALLBACK))
4019 return ret; 4020 return ret;
@@ -4046,7 +4047,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4046 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) 4047 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
4047 return do_huge_pmd_numa_page(&vmf, orig_pmd); 4048 return do_huge_pmd_numa_page(&vmf, orig_pmd);
4048 4049
4049 if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) { 4050 if (dirty && !pmd_write(orig_pmd)) {
4050 ret = wp_huge_pmd(&vmf, orig_pmd); 4051 ret = wp_huge_pmd(&vmf, orig_pmd);
4051 if (!(ret & VM_FAULT_FALLBACK)) 4052 if (!(ret & VM_FAULT_FALLBACK))
4052 return ret; 4053 return ret;
@@ -4336,7 +4337,7 @@ int follow_phys(struct vm_area_struct *vma,
4336 goto out; 4337 goto out;
4337 pte = *ptep; 4338 pte = *ptep;
4338 4339
4339 if (!pte_access_permitted(pte, flags & FOLL_WRITE)) 4340 if ((flags & FOLL_WRITE) && !pte_write(pte))
4340 goto unlock; 4341 goto unlock;
4341 4342
4342 *prot = pgprot_val(pte_pgprot(pte)); 4343 *prot = pgprot_val(pte_pgprot(pte));
diff --git a/mm/mmap.c b/mm/mmap.c
index a4d546821214..9efdc021ad22 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3019,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm)
3019 /* Use -1 here to ensure all VMAs in the mm are unmapped */ 3019 /* Use -1 here to ensure all VMAs in the mm are unmapped */
3020 unmap_vmas(&tlb, vma, 0, -1); 3020 unmap_vmas(&tlb, vma, 0, -1);
3021 3021
3022 set_bit(MMF_OOM_SKIP, &mm->flags); 3022 if (unlikely(mm_is_oom_victim(mm))) {
3023 if (unlikely(tsk_is_oom_victim(current))) {
3024 /* 3023 /*
3025 * Wait for oom_reap_task() to stop working on this 3024 * Wait for oom_reap_task() to stop working on this
3026 * mm. Because MMF_OOM_SKIP is already set before 3025 * mm. Because MMF_OOM_SKIP is already set before
3027 * calling down_read(), oom_reap_task() will not run 3026 * calling down_read(), oom_reap_task() will not run
3028 * on this "mm" post up_write(). 3027 * on this "mm" post up_write().
3029 * 3028 *
3030 * tsk_is_oom_victim() cannot be set from under us 3029 * mm_is_oom_victim() cannot be set from under us
3031 * either because current->mm is already set to NULL 3030 * either because victim->mm is already set to NULL
3032 * under task_lock before calling mmput and oom_mm is 3031 * under task_lock before calling mmput and oom_mm is
3033 * set not NULL by the OOM killer only if current->mm 3032 * set not NULL by the OOM killer only if victim->mm
3034 * is found not NULL while holding the task_lock. 3033 * is found not NULL while holding the task_lock.
3035 */ 3034 */
3035 set_bit(MMF_OOM_SKIP, &mm->flags);
3036 down_write(&mm->mmap_sem); 3036 down_write(&mm->mmap_sem);
3037 up_write(&mm->mmap_sem); 3037 up_write(&mm->mmap_sem);
3038 } 3038 }
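The exit_mmap() hunk narrows MMF_OOM_SKIP to actual OOM victims and relies on a lock handshake: publish the flag, then take and drop mmap_sem once, after which any reaper that sampled the flag too early must have left its critical section. A generic pthread sketch of the flag-then-handshake shape, with an rwlock standing in for mmap_sem and all names hypothetical (a sketch of the pattern, not a memory-model proof):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t sem = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int skip;

/* Reaper side: sample the flag and work under the read lock. */
static void *reaper(void *arg)
{
        (void)arg;
        pthread_rwlock_rdlock(&sem);
        if (!atomic_load(&skip))
                puts("reaper: working under read lock");
        pthread_rwlock_unlock(&sem);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reaper, NULL);

        /* Exit side: publish the flag, then take and drop the write
         * lock; once we own it, no reader that missed the flag can
         * still be inside its critical section. */
        atomic_store(&skip, 1);
        pthread_rwlock_wrlock(&sem);
        pthread_rwlock_unlock(&sem);
        puts("exit path: teardown is now safe");

        pthread_join(t, NULL);
        return 0;
}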
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c957be32b27a..29f855551efe 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -683,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk)
683 return; 683 return;
684 684
685 /* oom_mm is bound to the signal struct life time. */ 685 /* oom_mm is bound to the signal struct life time. */
686 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) 686 if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
687 mmgrab(tsk->signal->oom_mm); 687 mmgrab(tsk->signal->oom_mm);
688 set_bit(MMF_OOM_VICTIM, &mm->flags);
689 }
688 690
689 /* 691 /*
690 * Make sure that the task is woken up from uninterruptible sleep 692 * Make sure that the task is woken up from uninterruptible sleep
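Here the cmpxchg() that claims signal->oom_mm succeeds exactly once, so the mmgrab() and the new MMF_OOM_VICTIM bit are guaranteed to run a single time even if mark_oom_victim() races against itself. The same claim-once shape in portable C11 atomics (hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) oom_mm;
static int grabs;

/* Claim the slot exactly once; only the winner does the one-time
 * setup (the kernel pairs mmgrab() and a flag set in that branch). */
static void mark_victim(void *mm)
{
        void *expected = NULL;

        if (atomic_compare_exchange_strong(&oom_mm, &expected, mm))
                grabs++;        /* one-time work runs on the winner only */
}

int main(void)
{
        int a = 0, b = 0;

        mark_victim(&a);
        mark_victim(&b);        /* loses the race: slot already claimed */
        printf("one-time setup ran %d time(s)\n", grabs);       /* 1 */
        return 0;
}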
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73f5d4556b3d..7e5e775e97f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
2684{ 2684{
2685 struct page *page, *next; 2685 struct page *page, *next;
2686 unsigned long flags, pfn; 2686 unsigned long flags, pfn;
2687 int batch_count = 0;
2687 2688
2688 /* Prepare pages for freeing */ 2689 /* Prepare pages for freeing */
2689 list_for_each_entry_safe(page, next, list, lru) { 2690 list_for_each_entry_safe(page, next, list, lru) {
@@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
2700 set_page_private(page, 0); 2701 set_page_private(page, 0);
2701 trace_mm_page_free_batched(page); 2702 trace_mm_page_free_batched(page);
2702 free_unref_page_commit(page, pfn); 2703 free_unref_page_commit(page, pfn);
2704
2705 /*
2706 * Guard against excessive IRQ disabled times when we get
2707 * a large list of pages to free.
2708 */
2709 if (++batch_count == SWAP_CLUSTER_MAX) {
2710 local_irq_restore(flags);
2711 batch_count = 0;
2712 local_irq_save(flags);
2713 }
2703 } 2714 }
2704 local_irq_restore(flags); 2715 local_irq_restore(flags);
2705} 2716}
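free_unref_page_list() now re-enables interrupts every SWAP_CLUSTER_MAX pages, bounding the worst-case IRQ-off latency no matter how long the caller's list is. A userspace sketch of the batching pattern, with a mutex standing in for local_irq_save()/local_irq_restore() and a made-up batch size:

#include <pthread.h>
#include <stdio.h>

#define BATCH 32        /* stand-in for SWAP_CLUSTER_MAX */

static pthread_mutex_t irq = PTHREAD_MUTEX_INITIALIZER;

/* Process n items, opening a window for other lockers every BATCH. */
static void free_list(int n)
{
        int batch_count = 0, windows = 0;

        pthread_mutex_lock(&irq);
        for (int i = 0; i < n; i++) {
                /* free_unref_page_commit(...) would run here */
                if (++batch_count == BATCH) {
                        pthread_mutex_unlock(&irq); /* let pending work in */
                        batch_count = 0;
                        windows++;
                        pthread_mutex_lock(&irq);
                }
        }
        pthread_mutex_unlock(&irq);
        printf("opened %d windows for %d items\n", windows, n);
}

int main(void)
{
        free_list(100);         /* 3 windows: after items 32, 64, 96 */
        return 0;
}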
diff --git a/mm/percpu.c b/mm/percpu.c
index 79e3549cab0f..50e7fdf84055 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2719,7 +2719,11 @@ void __init setup_per_cpu_areas(void)
2719 2719
2720 if (pcpu_setup_first_chunk(ai, fc) < 0) 2720 if (pcpu_setup_first_chunk(ai, fc) < 0)
2721 panic("Failed to initialize percpu areas."); 2721 panic("Failed to initialize percpu areas.");
2722#ifdef CONFIG_CRIS
2723#warning "the CRIS architecture has physical and virtual addresses confused"
2724#else
2722 pcpu_free_alloc_info(ai); 2725 pcpu_free_alloc_info(ai);
2726#endif
2723} 2727}
2724 2728
2725#endif /* CONFIG_SMP */ 2729#endif /* CONFIG_SMP */
diff --git a/mm/slab.c b/mm/slab.c
index 183e996dde5f..4e51ef954026 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1584,11 +1584,8 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1584 *dbg_redzone2(cachep, objp)); 1584 *dbg_redzone2(cachep, objp));
1585 } 1585 }
1586 1586
1587 if (cachep->flags & SLAB_STORE_USER) { 1587 if (cachep->flags & SLAB_STORE_USER)
1588 pr_err("Last user: [<%p>](%pSR)\n", 1588 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp));
1589 *dbg_userword(cachep, objp),
1590 *dbg_userword(cachep, objp));
1591 }
1592 realobj = (char *)objp + obj_offset(cachep); 1589 realobj = (char *)objp + obj_offset(cachep);
1593 size = cachep->object_size; 1590 size = cachep->object_size;
1594 for (i = 0; i < size && lines; i += 16, lines--) { 1591 for (i = 0; i < size && lines; i += 16, lines--) {
@@ -1621,7 +1618,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1621 /* Mismatch ! */ 1618 /* Mismatch ! */
1622 /* Print header */ 1619 /* Print header */
1623 if (lines == 0) { 1620 if (lines == 0) {
1624 pr_err("Slab corruption (%s): %s start=%p, len=%d\n", 1621 pr_err("Slab corruption (%s): %s start=%px, len=%d\n",
1625 print_tainted(), cachep->name, 1622 print_tainted(), cachep->name,
1626 realobj, size); 1623 realobj, size);
1627 print_objinfo(cachep, objp, 0); 1624 print_objinfo(cachep, objp, 0);
@@ -1650,13 +1647,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1650 if (objnr) { 1647 if (objnr) {
1651 objp = index_to_obj(cachep, page, objnr - 1); 1648 objp = index_to_obj(cachep, page, objnr - 1);
1652 realobj = (char *)objp + obj_offset(cachep); 1649 realobj = (char *)objp + obj_offset(cachep);
1653 pr_err("Prev obj: start=%p, len=%d\n", realobj, size); 1650 pr_err("Prev obj: start=%px, len=%d\n", realobj, size);
1654 print_objinfo(cachep, objp, 2); 1651 print_objinfo(cachep, objp, 2);
1655 } 1652 }
1656 if (objnr + 1 < cachep->num) { 1653 if (objnr + 1 < cachep->num) {
1657 objp = index_to_obj(cachep, page, objnr + 1); 1654 objp = index_to_obj(cachep, page, objnr + 1);
1658 realobj = (char *)objp + obj_offset(cachep); 1655 realobj = (char *)objp + obj_offset(cachep);
1659 pr_err("Next obj: start=%p, len=%d\n", realobj, size); 1656 pr_err("Next obj: start=%px, len=%d\n", realobj, size);
1660 print_objinfo(cachep, objp, 2); 1657 print_objinfo(cachep, objp, 2);
1661 } 1658 }
1662 } 1659 }
@@ -2608,7 +2605,7 @@ static void slab_put_obj(struct kmem_cache *cachep,
2608 /* Verify double free bug */ 2605 /* Verify double free bug */
2609 for (i = page->active; i < cachep->num; i++) { 2606 for (i = page->active; i < cachep->num; i++) {
2610 if (get_free_obj(page, i) == objnr) { 2607 if (get_free_obj(page, i) == objnr) {
2611 pr_err("slab: double free detected in cache '%s', objp %p\n", 2608 pr_err("slab: double free detected in cache '%s', objp %px\n",
2612 cachep->name, objp); 2609 cachep->name, objp);
2613 BUG(); 2610 BUG();
2614 } 2611 }
@@ -2772,7 +2769,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2772 else 2769 else
2773 slab_error(cache, "memory outside object was overwritten"); 2770 slab_error(cache, "memory outside object was overwritten");
2774 2771
2775 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 2772 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
2776 obj, redzone1, redzone2); 2773 obj, redzone1, redzone2);
2777} 2774}
2778 2775
@@ -3078,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3078 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || 3075 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3079 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { 3076 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3080 slab_error(cachep, "double free, or memory outside object was overwritten"); 3077 slab_error(cachep, "double free, or memory outside object was overwritten");
3081 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", 3078 pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n",
3082 objp, *dbg_redzone1(cachep, objp), 3079 objp, *dbg_redzone1(cachep, objp),
3083 *dbg_redzone2(cachep, objp)); 3080 *dbg_redzone2(cachep, objp));
3084 } 3081 }
@@ -3091,7 +3088,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3091 cachep->ctor(objp); 3088 cachep->ctor(objp);
3092 if (ARCH_SLAB_MINALIGN && 3089 if (ARCH_SLAB_MINALIGN &&
3093 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { 3090 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
3094 pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", 3091 pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3095 objp, (int)ARCH_SLAB_MINALIGN); 3092 objp, (int)ARCH_SLAB_MINALIGN);
3096 } 3093 }
3097 return objp; 3094 return objp;
@@ -4283,7 +4280,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
4283 return; 4280 return;
4284 } 4281 }
4285#endif 4282#endif
4286 seq_printf(m, "%p", (void *)address); 4283 seq_printf(m, "%px", (void *)address);
4287} 4284}
4288 4285
4289static int leaks_show(struct seq_file *m, void *p) 4286static int leaks_show(struct seq_file *m, void *p)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 1b659ab652fb..bbe8414b6ee7 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1214,7 +1214,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
1214 orig_node->last_seen = jiffies; 1214 orig_node->last_seen = jiffies;
1215 1215
1216 /* find packet count of corresponding one hop neighbor */ 1216 /* find packet count of corresponding one hop neighbor */
1217 spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); 1217 spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
1218 if_num = if_incoming->if_num; 1218 if_num = if_incoming->if_num;
1219 orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num]; 1219 orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
1220 neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); 1220 neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1224,7 +1224,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
1224 } else { 1224 } else {
1225 neigh_rq_count = 0; 1225 neigh_rq_count = 0;
1226 } 1226 }
1227 spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); 1227 spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
1228 1228
1229 /* pay attention to not get a value bigger than 100 % */ 1229 /* pay attention to not get a value bigger than 100 % */
1230 if (orig_eq_count > neigh_rq_count) 1230 if (orig_eq_count > neigh_rq_count)
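The counters being read here (bcast_own_sum and friends) live in orig_neigh_node, so it is that node's ogm_cnt_lock which must be held; taking orig_node's lock serialized against the wrong writers entirely. The per-object locking rule in miniature, with hypothetical types:

#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t lock;   /* protects this node's counters only */
        int own_sum;
};

/* Read a neighbour's counter under the *neighbour's* lock; holding
 * some other node's lock would not exclude this node's writers. */
static int read_own_sum(struct node *neigh)
{
        pthread_mutex_lock(&neigh->lock);
        int v = neigh->own_sum;
        pthread_mutex_unlock(&neigh->lock);
        return v;
}

int main(void)
{
        struct node neigh = { PTHREAD_MUTEX_INITIALIZER, 42 };

        printf("own_sum = %d\n", read_own_sum(&neigh));
        return 0;
}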
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 341ceab8338d..e0e2bfcd6b3e 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -814,7 +814,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
814 } 814 }
815 815
816 orig_gw = batadv_gw_node_get(bat_priv, orig_node); 816 orig_gw = batadv_gw_node_get(bat_priv, orig_node);
817 if (!orig_node) 817 if (!orig_gw)
818 goto out; 818 goto out;
819 819
820 if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0) 820 if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index a98cf1104a30..ebe6e38934e4 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -499,6 +499,8 @@ int batadv_frag_send_packet(struct sk_buff *skb,
499 */ 499 */
500 if (skb->priority >= 256 && skb->priority <= 263) 500 if (skb->priority >= 256 && skb->priority <= 263)
501 frag_header.priority = skb->priority - 256; 501 frag_header.priority = skb->priority - 256;
502 else
503 frag_header.priority = 0;
502 504
503 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr); 505 ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
504 ether_addr_copy(frag_header.dest, orig_node->orig); 506 ether_addr_copy(frag_header.dest, orig_node->orig);
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 15cd2139381e..ebc4e2241c77 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -482,7 +482,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
482 482
483/** 483/**
484 * batadv_tp_sender_timeout - timer that fires in case of packet loss 484 * batadv_tp_sender_timeout - timer that fires in case of packet loss
485 * @arg: address of the related tp_vars 485 * @t: address to timer_list inside tp_vars
486 * 486 *
487 * If fired it means that there was packet loss. 487 * If fired it means that there was packet loss.
488 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and 488 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
@@ -1106,7 +1106,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
1106/** 1106/**
1107 * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is 1107 * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
1108 * reached without received ack 1108 * reached without received ack
1109 * @arg: address of the related tp_vars 1109 * @t: address to timer_list inside tp_vars
1110 */ 1110 */
1111static void batadv_tp_receiver_shutdown(struct timer_list *t) 1111static void batadv_tp_receiver_shutdown(struct timer_list *t)
1112{ 1112{
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 1c4810919a0a..b9057478d69c 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/module.h>
18#include <linux/string.h> 17#include <linux/string.h>
19#include <linux/errno.h> 18#include <linux/errno.h>
20#include <linux/skbuff.h> 19#include <linux/skbuff.h>
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6b0ff396fa9d..a592ca025fc4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4293,7 +4293,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
4293 struct sock *sk = skb->sk; 4293 struct sock *sk = skb->sk;
4294 4294
4295 if (!skb_may_tx_timestamp(sk, false)) 4295 if (!skb_may_tx_timestamp(sk, false))
4296 return; 4296 goto err;
4297 4297
4298 /* Take a reference to prevent skb_orphan() from freeing the socket, 4298 /* Take a reference to prevent skb_orphan() from freeing the socket,
4299 * but only if the socket refcount is not zero. 4299 * but only if the socket refcount is not zero.
@@ -4302,7 +4302,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
4302 *skb_hwtstamps(skb) = *hwtstamps; 4302 *skb_hwtstamps(skb) = *hwtstamps;
4303 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 4303 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4304 sock_put(sk); 4304 sock_put(sk);
4305 return;
4305 } 4306 }
4307
4308err:
4309 kfree_skb(skb);
4306} 4310}
4307EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 4311EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4308 4312
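skb_complete_tx_timestamp() owns the skb it is handed, so the old early "return" leaked one skb per rejected timestamp; every failure path now funnels through "err: kfree_skb(skb)", and the success path returns before the label. The ownership rule sketched with malloc()/free() in place of skbs (hypothetical names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* This function takes ownership of buf: every path must release it. */
static void complete(char *buf, int deliverable)
{
        if (!deliverable)
                goto err;       /* previously a bare return: a leak */

        printf("delivered: %s\n", buf);
        free(buf);              /* consumed on the success path */
        return;

err:
        free(buf);              /* consumed on the failure path too */
}

int main(void)
{
        complete(strdup("payload"), 1);
        complete(strdup("payload"), 0); /* freed, not leaked */
        return 0;
}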
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 5d6475a6cc5d..f52307296de4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -16,7 +16,6 @@
16#include <linux/of_net.h> 16#include <linux/of_net.h>
17#include <linux/of_mdio.h> 17#include <linux/of_mdio.h>
18#include <linux/mdio.h> 18#include <linux/mdio.h>
19#include <linux/list.h>
20#include <net/rtnetlink.h> 19#include <net/rtnetlink.h>
21#include <net/pkt_cls.h> 20#include <net/pkt_cls.h>
22#include <net/tc_act/tc_mirred.h> 21#include <net/tc_act/tc_mirred.h>
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a4573bccd6da..7a93359fbc72 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1428,7 +1428,7 @@ skip:
1428 1428
1429static bool inetdev_valid_mtu(unsigned int mtu) 1429static bool inetdev_valid_mtu(unsigned int mtu)
1430{ 1430{
1431 return mtu >= 68; 1431 return mtu >= IPV4_MIN_MTU;
1432} 1432}
1433 1433
1434static void inetdev_send_gratuitous_arp(struct net_device *dev, 1434static void inetdev_send_gratuitous_arp(struct net_device *dev,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d1f8f302dbf3..726f6b608274 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -89,6 +89,7 @@
89#include <linux/rtnetlink.h> 89#include <linux/rtnetlink.h>
90#include <linux/times.h> 90#include <linux/times.h>
91#include <linux/pkt_sched.h> 91#include <linux/pkt_sched.h>
92#include <linux/byteorder/generic.h>
92 93
93#include <net/net_namespace.h> 94#include <net/net_namespace.h>
94#include <net/arp.h> 95#include <net/arp.h>
@@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
321 return scount; 322 return scount;
322} 323}
323 324
325/* source address selection per RFC 3376 section 4.2.13 */
326static __be32 igmpv3_get_srcaddr(struct net_device *dev,
327 const struct flowi4 *fl4)
328{
329 struct in_device *in_dev = __in_dev_get_rcu(dev);
330
331 if (!in_dev)
332 return htonl(INADDR_ANY);
333
334 for_ifa(in_dev) {
335 if (inet_ifa_match(fl4->saddr, ifa))
336 return fl4->saddr;
337 } endfor_ifa(in_dev);
338
339 return htonl(INADDR_ANY);
340}
341
324static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) 342static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
325{ 343{
326 struct sk_buff *skb; 344 struct sk_buff *skb;
@@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
368 pip->frag_off = htons(IP_DF); 386 pip->frag_off = htons(IP_DF);
369 pip->ttl = 1; 387 pip->ttl = 1;
370 pip->daddr = fl4.daddr; 388 pip->daddr = fl4.daddr;
371 pip->saddr = fl4.saddr; 389 pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
372 pip->protocol = IPPROTO_IGMP; 390 pip->protocol = IPPROTO_IGMP;
373 pip->tot_len = 0; /* filled in later */ 391 pip->tot_len = 0; /* filled in later */
374 ip_select_ident(net, skb, NULL); 392 ip_select_ident(net, skb, NULL);
@@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
404} 422}
405 423
406static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, 424static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
407 int type, struct igmpv3_grec **ppgr) 425 int type, struct igmpv3_grec **ppgr, unsigned int mtu)
408{ 426{
409 struct net_device *dev = pmc->interface->dev; 427 struct net_device *dev = pmc->interface->dev;
410 struct igmpv3_report *pih; 428 struct igmpv3_report *pih;
411 struct igmpv3_grec *pgr; 429 struct igmpv3_grec *pgr;
412 430
413 if (!skb) 431 if (!skb) {
414 skb = igmpv3_newpack(dev, dev->mtu); 432 skb = igmpv3_newpack(dev, mtu);
415 if (!skb) 433 if (!skb)
416 return NULL; 434 return NULL;
435 }
417 pgr = skb_put(skb, sizeof(struct igmpv3_grec)); 436 pgr = skb_put(skb, sizeof(struct igmpv3_grec));
418 pgr->grec_type = type; 437 pgr->grec_type = type;
419 pgr->grec_auxwords = 0; 438 pgr->grec_auxwords = 0;
@@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
436 struct igmpv3_grec *pgr = NULL; 455 struct igmpv3_grec *pgr = NULL;
437 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; 456 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
438 int scount, stotal, first, isquery, truncate; 457 int scount, stotal, first, isquery, truncate;
458 unsigned int mtu;
439 459
440 if (pmc->multiaddr == IGMP_ALL_HOSTS) 460 if (pmc->multiaddr == IGMP_ALL_HOSTS)
441 return skb; 461 return skb;
442 if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) 462 if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
443 return skb; 463 return skb;
444 464
465 mtu = READ_ONCE(dev->mtu);
466 if (mtu < IPV4_MIN_MTU)
467 return skb;
468
445 isquery = type == IGMPV3_MODE_IS_INCLUDE || 469 isquery = type == IGMPV3_MODE_IS_INCLUDE ||
446 type == IGMPV3_MODE_IS_EXCLUDE; 470 type == IGMPV3_MODE_IS_EXCLUDE;
447 truncate = type == IGMPV3_MODE_IS_EXCLUDE || 471 truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
@@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
462 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 486 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
463 if (skb) 487 if (skb)
464 igmpv3_sendpack(skb); 488 igmpv3_sendpack(skb);
465 skb = igmpv3_newpack(dev, dev->mtu); 489 skb = igmpv3_newpack(dev, mtu);
466 } 490 }
467 } 491 }
468 first = 1; 492 first = 1;
@@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
498 pgr->grec_nsrcs = htons(scount); 522 pgr->grec_nsrcs = htons(scount);
499 if (skb) 523 if (skb)
500 igmpv3_sendpack(skb); 524 igmpv3_sendpack(skb);
501 skb = igmpv3_newpack(dev, dev->mtu); 525 skb = igmpv3_newpack(dev, mtu);
502 first = 1; 526 first = 1;
503 scount = 0; 527 scount = 0;
504 } 528 }
505 if (first) { 529 if (first) {
506 skb = add_grhead(skb, pmc, type, &pgr); 530 skb = add_grhead(skb, pmc, type, &pgr, mtu);
507 first = 0; 531 first = 0;
508 } 532 }
509 if (!skb) 533 if (!skb)
@@ -538,7 +562,7 @@ empty_source:
538 igmpv3_sendpack(skb); 562 igmpv3_sendpack(skb);
539 skb = NULL; /* add_grhead will get a new one */ 563 skb = NULL; /* add_grhead will get a new one */
540 } 564 }
541 skb = add_grhead(skb, pmc, type, &pgr); 565 skb = add_grhead(skb, pmc, type, &pgr, mtu);
542 } 566 }
543 } 567 }
544 if (pgr) 568 if (pgr)
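dev->mtu can change underneath a running report, so add_grec() now takes a single READ_ONCE() snapshot, rejects anything below IPV4_MIN_MTU up front, and threads that one value through every igmpv3_newpack()/add_grhead() call (the net/ipv6/mcast.c hunks below apply the same treatment). The snapshot idiom, with C11 atomics standing in for READ_ONCE() and hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

#define MIN_MTU 68

static _Atomic unsigned int dev_mtu = 1500;

/* Snapshot the shared value once; every later decision in this call
 * chain uses the same number even if a writer changes dev_mtu. */
static int build_report(void)
{
        unsigned int mtu = atomic_load_explicit(&dev_mtu,
                                                memory_order_relaxed);

        if (mtu < MIN_MTU)
                return -1;      /* reject a bogus size up front */

        /* newpack(mtu); add_grhead(..., mtu): same snapshot for both */
        printf("building with mtu=%u\n", mtu);
        return 0;
}

int main(void)
{
        build_report();
        atomic_store(&dev_mtu, 9000);   /* a racing change, observed whole */
        build_report();
        return 0;
}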
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 004800b923c6..9a80d84fc182 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -269,7 +269,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
269 269
270 /* Check based hdr len */ 270 /* Check based hdr len */
271 if (unlikely(!pskb_may_pull(skb, len))) 271 if (unlikely(!pskb_may_pull(skb, len)))
272 return -ENOMEM; 272 return PACKET_REJECT;
273 273
274 iph = ip_hdr(skb); 274 iph = ip_hdr(skb);
275 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len); 275 ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index fe6fee728ce4..5ddb1cb52bd4 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -349,8 +349,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
349 dev->needed_headroom = t_hlen + hlen; 349 dev->needed_headroom = t_hlen + hlen;
350 mtu -= (dev->hard_header_len + t_hlen); 350 mtu -= (dev->hard_header_len + t_hlen);
351 351
352 if (mtu < 68) 352 if (mtu < IPV4_MIN_MTU)
353 mtu = 68; 353 mtu = IPV4_MIN_MTU;
354 354
355 return mtu; 355 return mtu;
356} 356}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f88221aebc9d..0c3c944a7b72 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -373,7 +373,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
373 if (!xt_find_jump_offset(offsets, newpos, 373 if (!xt_find_jump_offset(offsets, newpos,
374 newinfo->number)) 374 newinfo->number))
375 return 0; 375 return 0;
376 e = entry0 + newpos;
377 } else { 376 } else {
378 /* ... this is a fallthru */ 377 /* ... this is a fallthru */
379 newpos = pos + e->next_offset; 378 newpos = pos + e->next_offset;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 4cbe5e80f3bf..2e0d339028bb 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -439,7 +439,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
439 if (!xt_find_jump_offset(offsets, newpos, 439 if (!xt_find_jump_offset(offsets, newpos,
440 newinfo->number)) 440 newinfo->number))
441 return 0; 441 return 0;
442 e = entry0 + newpos;
443 } else { 442 } else {
444 /* ... this is a fallthru */ 443 /* ... this is a fallthru */
445 newpos = pos + e->next_offset; 444 newpos = pos + e->next_offset;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 17b4ca562944..69060e3abe85 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -813,12 +813,13 @@ static int clusterip_net_init(struct net *net)
813 813
814static void clusterip_net_exit(struct net *net) 814static void clusterip_net_exit(struct net *net)
815{ 815{
816#ifdef CONFIG_PROC_FS
817 struct clusterip_net *cn = net_generic(net, clusterip_net_id); 816 struct clusterip_net *cn = net_generic(net, clusterip_net_id);
817#ifdef CONFIG_PROC_FS
818 proc_remove(cn->procdir); 818 proc_remove(cn->procdir);
819 cn->procdir = NULL; 819 cn->procdir = NULL;
820#endif 820#endif
821 nf_unregister_net_hook(net, &cip_arp_ops); 821 nf_unregister_net_hook(net, &cip_arp_ops);
822 WARN_ON_ONCE(!list_empty(&cn->configs));
822} 823}
823 824
824static struct pernet_operations clusterip_net_ops = { 825static struct pernet_operations clusterip_net_ops = {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 33b70bfd1122..125c1eab3eaa 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -513,11 +513,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
513 int err; 513 int err;
514 struct ip_options_data opt_copy; 514 struct ip_options_data opt_copy;
515 struct raw_frag_vec rfv; 515 struct raw_frag_vec rfv;
516 int hdrincl;
516 517
517 err = -EMSGSIZE; 518 err = -EMSGSIZE;
518 if (len > 0xFFFF) 519 if (len > 0xFFFF)
519 goto out; 520 goto out;
520 521
522 /* hdrincl should be READ_ONCE(inet->hdrincl)
523 * but READ_ONCE() doesn't work with bit fields
524 */
525 hdrincl = inet->hdrincl;
521 /* 526 /*
522 * Check the flags. 527 * Check the flags.
523 */ 528 */
@@ -593,7 +598,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
593 /* Linux does not mangle headers on raw sockets, 598 /* Linux does not mangle headers on raw sockets,
594 * so that IP options + IP_HDRINCL is non-sense. 599 * so that IP options + IP_HDRINCL is non-sense.
595 */ 600 */
596 if (inet->hdrincl) 601 if (hdrincl)
597 goto done; 602 goto done;
598 if (ipc.opt->opt.srr) { 603 if (ipc.opt->opt.srr) {
599 if (!daddr) 604 if (!daddr)
@@ -615,12 +620,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
615 620
616 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 621 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
617 RT_SCOPE_UNIVERSE, 622 RT_SCOPE_UNIVERSE,
618 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 623 hdrincl ? IPPROTO_RAW : sk->sk_protocol,
619 inet_sk_flowi_flags(sk) | 624 inet_sk_flowi_flags(sk) |
620 (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), 625 (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
621 daddr, saddr, 0, 0, sk->sk_uid); 626 daddr, saddr, 0, 0, sk->sk_uid);
622 627
623 if (!inet->hdrincl) { 628 if (!hdrincl) {
624 rfv.msg = msg; 629 rfv.msg = msg;
625 rfv.hlen = 0; 630 rfv.hlen = 0;
626 631
@@ -645,7 +650,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
645 goto do_confirm; 650 goto do_confirm;
646back_from_confirm: 651back_from_confirm:
647 652
648 if (inet->hdrincl) 653 if (hdrincl)
649 err = raw_send_hdrinc(sk, &fl4, msg, len, 654 err = raw_send_hdrinc(sk, &fl4, msg, len,
650 &rt, msg->msg_flags, &ipc.sockc); 655 &rt, msg->msg_flags, &ipc.sockc);
651 656
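As the added comment notes, inet->hdrincl is a C bit-field, so READ_ONCE() cannot be applied to it; the fix latches it into a plain int once at entry so the flowi flag computation and the send path cannot observe two different values. A sketch of the latch-once shape (hypothetical struct; a plain bit-field read is not formally tear-proof, which is exactly the caveat the comment records):

#include <stdio.h>

struct sock_opts { unsigned hdrincl : 1; unsigned other : 7; };

/* Latch the bit once; every later branch tests the local copy, so a
 * concurrent setsockopt-style flip cannot split our decisions. */
static void sendmsg_path(struct sock_opts *inet)
{
        int hdrincl = inet->hdrincl;    /* one read, used throughout */

        printf("route flags for %s\n",
               hdrincl ? "raw header" : "kernel header");
        printf("send via %s\n", hdrincl ? "hdrinc path" : "normal path");
}

int main(void)
{
        struct sock_opts o = { .hdrincl = 1 };

        sendmsg_path(&o);
        return 0;
}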
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fefb46c16de7..4d55c4b338ee 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -508,9 +508,6 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
508 u32 new_sample = tp->rcv_rtt_est.rtt_us; 508 u32 new_sample = tp->rcv_rtt_est.rtt_us;
509 long m = sample; 509 long m = sample;
510 510
511 if (m == 0)
512 m = 1;
513
514 if (new_sample != 0) { 511 if (new_sample != 0) {
515 /* If we sample in larger samples in the non-timestamp 512 /* If we sample in larger samples in the non-timestamp
516 * case, we could grossly overestimate the RTT especially 513 * case, we could grossly overestimate the RTT especially
@@ -547,6 +544,8 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
547 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 544 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
548 return; 545 return;
549 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); 546 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
547 if (!delta_us)
548 delta_us = 1;
550 tcp_rcv_rtt_update(tp, delta_us, 1); 549 tcp_rcv_rtt_update(tp, delta_us, 1);
551 550
552new_measure: 551new_measure:
@@ -563,8 +562,11 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
563 (TCP_SKB_CB(skb)->end_seq - 562 (TCP_SKB_CB(skb)->end_seq -
564 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { 563 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
565 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 564 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
566 u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 565 u32 delta_us;
567 566
567 if (!delta)
568 delta = 1;
569 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
568 tcp_rcv_rtt_update(tp, delta_us, 0); 570 tcp_rcv_rtt_update(tp, delta_us, 0);
569 } 571 }
570} 572}
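tcp_rcv_rtt_update() treats a sample of 0 as "no sample", so instead of mangling zeros inside the updater, the callers now clamp a genuinely measured zero delta up to 1 before passing it down. The clamp in isolation (hypothetical helper):

#include <stdio.h>

/* Round a measured-but-zero delta up to the smallest nonzero unit so
 * a consumer that treats 0 as "no sample" still records it. */
static unsigned long rtt_sample(unsigned long t_now, unsigned long t_then)
{
        unsigned long delta = t_now - t_then;

        return delta ? delta : 1;
}

int main(void)
{
        printf("%lu\n", rtt_sample(1000, 1000));        /* 1, not 0 */
        printf("%lu\n", rtt_sample(1005, 1000));        /* 5 */
        return 0;
}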
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 77ea45da0fe9..94e28350f420 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -848,7 +848,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
848 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, 848 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
849 req->ts_recent, 849 req->ts_recent,
850 0, 850 0,
851 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, 851 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
852 AF_INET), 852 AF_INET),
853 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, 853 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
854 ip_hdr(skb)->tos); 854 ip_hdr(skb)->tos);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c9a63417af48..6db3124cdbda 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,6 +249,7 @@ void tcp_delack_timer_handler(struct sock *sk)
249 icsk->icsk_ack.pingpong = 0; 249 icsk->icsk_ack.pingpong = 0;
250 icsk->icsk_ack.ato = TCP_ATO_MIN; 250 icsk->icsk_ack.ato = TCP_ATO_MIN;
251 } 251 }
252 tcp_mstamp_refresh(tcp_sk(sk));
252 tcp_send_ack(sk); 253 tcp_send_ack(sk);
253 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); 254 __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
254 } 255 }
@@ -617,6 +618,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
617 goto out; 618 goto out;
618 } 619 }
619 620
621 tcp_mstamp_refresh(tp);
620 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { 622 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
621 if (tp->linger2 >= 0) { 623 if (tp->linger2 >= 0) {
622 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; 624 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index fc6d7d143f2c..844642682b83 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1682} 1682}
1683 1683
1684static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, 1684static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1685 int type, struct mld2_grec **ppgr) 1685 int type, struct mld2_grec **ppgr, unsigned int mtu)
1686{ 1686{
1687 struct net_device *dev = pmc->idev->dev;
1688 struct mld2_report *pmr; 1687 struct mld2_report *pmr;
1689 struct mld2_grec *pgr; 1688 struct mld2_grec *pgr;
1690 1689
1691 if (!skb) 1690 if (!skb) {
1692 skb = mld_newpack(pmc->idev, dev->mtu); 1691 skb = mld_newpack(pmc->idev, mtu);
1693 if (!skb) 1692 if (!skb)
1694 return NULL; 1693 return NULL;
1694 }
1695 pgr = skb_put(skb, sizeof(struct mld2_grec)); 1695 pgr = skb_put(skb, sizeof(struct mld2_grec));
1696 pgr->grec_type = type; 1696 pgr->grec_type = type;
1697 pgr->grec_auxwords = 0; 1697 pgr->grec_auxwords = 0;
@@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1714 struct mld2_grec *pgr = NULL; 1714 struct mld2_grec *pgr = NULL;
1715 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; 1715 struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1716 int scount, stotal, first, isquery, truncate; 1716 int scount, stotal, first, isquery, truncate;
1717 unsigned int mtu;
1717 1718
1718 if (pmc->mca_flags & MAF_NOREPORT) 1719 if (pmc->mca_flags & MAF_NOREPORT)
1719 return skb; 1720 return skb;
1720 1721
1722 mtu = READ_ONCE(dev->mtu);
1723 if (mtu < IPV6_MIN_MTU)
1724 return skb;
1725
1721 isquery = type == MLD2_MODE_IS_INCLUDE || 1726 isquery = type == MLD2_MODE_IS_INCLUDE ||
1722 type == MLD2_MODE_IS_EXCLUDE; 1727 type == MLD2_MODE_IS_EXCLUDE;
1723 truncate = type == MLD2_MODE_IS_EXCLUDE || 1728 truncate = type == MLD2_MODE_IS_EXCLUDE ||
@@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1738 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { 1743 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1739 if (skb) 1744 if (skb)
1740 mld_sendpack(skb); 1745 mld_sendpack(skb);
1741 skb = mld_newpack(idev, dev->mtu); 1746 skb = mld_newpack(idev, mtu);
1742 } 1747 }
1743 } 1748 }
1744 first = 1; 1749 first = 1;
@@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1774 pgr->grec_nsrcs = htons(scount); 1779 pgr->grec_nsrcs = htons(scount);
1775 if (skb) 1780 if (skb)
1776 mld_sendpack(skb); 1781 mld_sendpack(skb);
1777 skb = mld_newpack(idev, dev->mtu); 1782 skb = mld_newpack(idev, mtu);
1778 first = 1; 1783 first = 1;
1779 scount = 0; 1784 scount = 0;
1780 } 1785 }
1781 if (first) { 1786 if (first) {
1782 skb = add_grhead(skb, pmc, type, &pgr); 1787 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1783 first = 0; 1788 first = 0;
1784 } 1789 }
1785 if (!skb) 1790 if (!skb)
@@ -1814,7 +1819,7 @@ empty_source:
1814 mld_sendpack(skb); 1819 mld_sendpack(skb);
1815 skb = NULL; /* add_grhead will get a new one */ 1820 skb = NULL; /* add_grhead will get a new one */
1816 } 1821 }
1817 skb = add_grhead(skb, pmc, type, &pgr); 1822 skb = add_grhead(skb, pmc, type, &pgr, mtu);
1818 } 1823 }
1819 } 1824 }
1820 if (pgr) 1825 if (pgr)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index f06e25065a34..1d7ae9366335 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -458,7 +458,6 @@ mark_source_chains(const struct xt_table_info *newinfo,
458 if (!xt_find_jump_offset(offsets, newpos, 458 if (!xt_find_jump_offset(offsets, newpos,
459 newinfo->number)) 459 newinfo->number))
460 return 0; 460 return 0;
461 e = entry0 + newpos;
462 } else { 461 } else {
463 /* ... this is a fallthru */ 462 /* ... this is a fallthru */
464 newpos = pos + e->next_offset; 463 newpos = pos + e->next_offset;
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 2b1a15846f9a..92c0047e7e33 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -33,13 +33,19 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
33 33
34 if (range->flags & NF_NAT_RANGE_MAP_IPS) 34 if (range->flags & NF_NAT_RANGE_MAP_IPS)
35 return -EINVAL; 35 return -EINVAL;
36 return 0; 36 return nf_ct_netns_get(par->net, par->family);
37}
38
39static void masquerade_tg6_destroy(const struct xt_tgdtor_param *par)
40{
41 nf_ct_netns_put(par->net, par->family);
37} 42}
38 43
39static struct xt_target masquerade_tg6_reg __read_mostly = { 44static struct xt_target masquerade_tg6_reg __read_mostly = {
40 .name = "MASQUERADE", 45 .name = "MASQUERADE",
41 .family = NFPROTO_IPV6, 46 .family = NFPROTO_IPV6,
42 .checkentry = masquerade_tg6_checkentry, 47 .checkentry = masquerade_tg6_checkentry,
48 .destroy = masquerade_tg6_destroy,
43 .target = masquerade_tg6, 49 .target = masquerade_tg6,
44 .targetsize = sizeof(struct nf_nat_range), 50 .targetsize = sizeof(struct nf_nat_range),
45 .table = "nat", 51 .table = "nat",
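masquerade_tg6_checkentry() now takes a per-netns conntrack reference, which obliges the new .destroy hook to drop it: a get in the constructor must pair with a put in the destructor. The pairing reduced to a counter, with hypothetical stand-ins for nf_ct_netns_get()/nf_ct_netns_put():

#include <stdio.h>

static int refs;

static int checkentry(void)
{
        refs++;                 /* nf_ct_netns_get() stand-in */
        return 0;
}

static void destroy(void)
{
        refs--;                 /* nf_ct_netns_put() pairs the get */
}

int main(void)
{
        checkentry();
        destroy();
        printf("refs after teardown: %d\n", refs);      /* 0: balanced */
        return 0;
}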
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1f04ec0e4a7a..7178476b3d2f 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -994,7 +994,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
994 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, 994 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
995 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, 995 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
996 req->ts_recent, sk->sk_bound_dev_if, 996 req->ts_recent, sk->sk_bound_dev_if,
997 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 997 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
998 0, 0); 998 0, 0);
999} 999}
1000 1000
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 167f83b853e6..1621b6ab17ba 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -291,16 +291,15 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
291 int i; 291 int i;
292 292
293 mutex_lock(&sta->ampdu_mlme.mtx); 293 mutex_lock(&sta->ampdu_mlme.mtx);
294 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 294 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
295 ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 295 ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
296 WLAN_REASON_QSTA_LEAVE_QBSS, 296 WLAN_REASON_QSTA_LEAVE_QBSS,
297 reason != AGG_STOP_DESTROY_STA && 297 reason != AGG_STOP_DESTROY_STA &&
298 reason != AGG_STOP_PEER_REQUEST); 298 reason != AGG_STOP_PEER_REQUEST);
299 }
300 mutex_unlock(&sta->ampdu_mlme.mtx);
301 299
302 for (i = 0; i < IEEE80211_NUM_TIDS; i++) 300 for (i = 0; i < IEEE80211_NUM_TIDS; i++)
303 ___ieee80211_stop_tx_ba_session(sta, i, reason); 301 ___ieee80211_stop_tx_ba_session(sta, i, reason);
302 mutex_unlock(&sta->ampdu_mlme.mtx);
304 303
305 /* stopping might queue the work again - so cancel only afterwards */ 304 /* stopping might queue the work again - so cancel only afterwards */
306 cancel_work_sync(&sta->ampdu_mlme.work); 305 cancel_work_sync(&sta->ampdu_mlme.work);
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index cf1bf2605c10..dc6347342e34 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -103,7 +103,6 @@ struct bitstr {
103#define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;} 103#define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;}
104#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;} 104#define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;}
105#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;} 105#define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;}
106#define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND)
107static unsigned int get_len(struct bitstr *bs); 106static unsigned int get_len(struct bitstr *bs);
108static unsigned int get_bit(struct bitstr *bs); 107static unsigned int get_bit(struct bitstr *bs);
109static unsigned int get_bits(struct bitstr *bs, unsigned int b); 108static unsigned int get_bits(struct bitstr *bs, unsigned int b);
@@ -165,6 +164,19 @@ static unsigned int get_len(struct bitstr *bs)
165 return v; 164 return v;
166} 165}
167 166
167static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
168{
169 bits += bs->bit;
170 bytes += bits / BITS_PER_BYTE;
171 if (bits % BITS_PER_BYTE > 0)
172 bytes++;
173
174 if (*bs->cur + bytes > *bs->end)
175 return 1;
176
177 return 0;
178}
179
168/****************************************************************************/ 180/****************************************************************************/
169static unsigned int get_bit(struct bitstr *bs) 181static unsigned int get_bit(struct bitstr *bs)
170{ 182{
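nf_h323_error_boundary() replaces the byte-only CHECK_BOUND() macro with bit-granular accounting: fold in the stream's current sub-byte offset, convert bits to bytes, and round a partial trailing byte up before comparing against the end pointer. The arithmetic in standalone form (hypothetical function; not the conntrack helper itself):

#include <stdio.h>

#define BITS_PER_BYTE 8

/* How many whole bytes must remain to satisfy a read of @bytes bytes
 * plus @bits bits, starting @bit_off bits into the current byte? */
static unsigned int bytes_needed(unsigned int bit_off, unsigned int bytes,
                                 unsigned int bits)
{
        bits += bit_off;
        bytes += bits / BITS_PER_BYTE;
        if (bits % BITS_PER_BYTE)
                bytes++;        /* partial trailing byte rounds up */
        return bytes;
}

int main(void)
{
        printf("%u\n", bytes_needed(0, 2, 0));  /* 2 */
        printf("%u\n", bytes_needed(7, 0, 2));  /* 2: 9 bits span 2 bytes */
        printf("%u\n", bytes_needed(3, 1, 0));  /* 2: 1 byte from bit 3 */
        return 0;
}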
@@ -279,8 +291,8 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f,
279 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 291 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
280 292
281 INC_BIT(bs); 293 INC_BIT(bs);
282 294 if (nf_h323_error_boundary(bs, 0, 0))
283 CHECK_BOUND(bs, 0); 295 return H323_ERROR_BOUND;
284 return H323_ERROR_NONE; 296 return H323_ERROR_NONE;
285} 297}
286 298
@@ -293,11 +305,14 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f,
293 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 305 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
294 306
295 BYTE_ALIGN(bs); 307 BYTE_ALIGN(bs);
296 CHECK_BOUND(bs, 1); 308 if (nf_h323_error_boundary(bs, 1, 0))
309 return H323_ERROR_BOUND;
310
297 len = *bs->cur++; 311 len = *bs->cur++;
298 bs->cur += len; 312 bs->cur += len;
313 if (nf_h323_error_boundary(bs, 0, 0))
314 return H323_ERROR_BOUND;
299 315
300 CHECK_BOUND(bs, 0);
301 return H323_ERROR_NONE; 316 return H323_ERROR_NONE;
302} 317}
303 318
@@ -319,6 +334,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
319 bs->cur += 2; 334 bs->cur += 2;
320 break; 335 break;
321 case CONS: /* 64K < Range < 4G */ 336 case CONS: /* 64K < Range < 4G */
337 if (nf_h323_error_boundary(bs, 0, 2))
338 return H323_ERROR_BOUND;
322 len = get_bits(bs, 2) + 1; 339 len = get_bits(bs, 2) + 1;
323 BYTE_ALIGN(bs); 340 BYTE_ALIGN(bs);
324 if (base && (f->attr & DECODE)) { /* timeToLive */ 341 if (base && (f->attr & DECODE)) { /* timeToLive */
@@ -330,7 +347,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
330 break; 347 break;
331 case UNCO: 348 case UNCO:
332 BYTE_ALIGN(bs); 349 BYTE_ALIGN(bs);
333 CHECK_BOUND(bs, 2); 350 if (nf_h323_error_boundary(bs, 2, 0))
351 return H323_ERROR_BOUND;
334 len = get_len(bs); 352 len = get_len(bs);
335 bs->cur += len; 353 bs->cur += len;
336 break; 354 break;
@@ -341,7 +359,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
341 359
342 PRINT("\n"); 360 PRINT("\n");
343 361
344 CHECK_BOUND(bs, 0); 362 if (nf_h323_error_boundary(bs, 0, 0))
363 return H323_ERROR_BOUND;
345 return H323_ERROR_NONE; 364 return H323_ERROR_NONE;
346} 365}
347 366
@@ -357,7 +376,8 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f,
357 INC_BITS(bs, f->sz); 376 INC_BITS(bs, f->sz);
358 } 377 }
359 378
360 CHECK_BOUND(bs, 0); 379 if (nf_h323_error_boundary(bs, 0, 0))
380 return H323_ERROR_BOUND;
361 return H323_ERROR_NONE; 381 return H323_ERROR_NONE;
362} 382}
363 383
@@ -375,12 +395,14 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
375 len = f->lb; 395 len = f->lb;
376 break; 396 break;
377 case WORD: /* 2-byte length */ 397 case WORD: /* 2-byte length */
378 CHECK_BOUND(bs, 2); 398 if (nf_h323_error_boundary(bs, 2, 0))
399 return H323_ERROR_BOUND;
379 len = (*bs->cur++) << 8; 400 len = (*bs->cur++) << 8;
380 len += (*bs->cur++) + f->lb; 401 len += (*bs->cur++) + f->lb;
381 break; 402 break;
382 case SEMI: 403 case SEMI:
383 CHECK_BOUND(bs, 2); 404 if (nf_h323_error_boundary(bs, 2, 0))
405 return H323_ERROR_BOUND;
384 len = get_len(bs); 406 len = get_len(bs);
385 break; 407 break;
386 default: 408 default:
@@ -391,7 +413,8 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
391 bs->cur += len >> 3; 413 bs->cur += len >> 3;
392 bs->bit = len & 7; 414 bs->bit = len & 7;
393 415
394 CHECK_BOUND(bs, 0); 416 if (nf_h323_error_boundary(bs, 0, 0))
417 return H323_ERROR_BOUND;
395 return H323_ERROR_NONE; 418 return H323_ERROR_NONE;
396} 419}
397 420
@@ -404,12 +427,15 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f,
404 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); 427 PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name);
405 428
406 /* 2 <= Range <= 255 */ 429 /* 2 <= Range <= 255 */
430 if (nf_h323_error_boundary(bs, 0, f->sz))
431 return H323_ERROR_BOUND;
407 len = get_bits(bs, f->sz) + f->lb; 432 len = get_bits(bs, f->sz) + f->lb;
408 433
409 BYTE_ALIGN(bs); 434 BYTE_ALIGN(bs);
410 INC_BITS(bs, (len << 2)); 435 INC_BITS(bs, (len << 2));
411 436
412 CHECK_BOUND(bs, 0); 437 if (nf_h323_error_boundary(bs, 0, 0))
438 return H323_ERROR_BOUND;
413 return H323_ERROR_NONE; 439 return H323_ERROR_NONE;
414} 440}
415 441
@@ -440,15 +466,19 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
440 break; 466 break;
441 case BYTE: /* Range == 256 */ 467 case BYTE: /* Range == 256 */
442 BYTE_ALIGN(bs); 468 BYTE_ALIGN(bs);
443 CHECK_BOUND(bs, 1); 469 if (nf_h323_error_boundary(bs, 1, 0))
470 return H323_ERROR_BOUND;
444 len = (*bs->cur++) + f->lb; 471 len = (*bs->cur++) + f->lb;
445 break; 472 break;
446 case SEMI: 473 case SEMI:
447 BYTE_ALIGN(bs); 474 BYTE_ALIGN(bs);
448 CHECK_BOUND(bs, 2); 475 if (nf_h323_error_boundary(bs, 2, 0))
476 return H323_ERROR_BOUND;
449 len = get_len(bs) + f->lb; 477 len = get_len(bs) + f->lb;
450 break; 478 break;
451 default: /* 2 <= Range <= 255 */ 479 default: /* 2 <= Range <= 255 */
480 if (nf_h323_error_boundary(bs, 0, f->sz))
481 return H323_ERROR_BOUND;
452 len = get_bits(bs, f->sz) + f->lb; 482 len = get_bits(bs, f->sz) + f->lb;
453 BYTE_ALIGN(bs); 483 BYTE_ALIGN(bs);
454 break; 484 break;
@@ -458,7 +488,8 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
458 488
459 PRINT("\n"); 489 PRINT("\n");
460 490
461 CHECK_BOUND(bs, 0); 491 if (nf_h323_error_boundary(bs, 0, 0))
492 return H323_ERROR_BOUND;
462 return H323_ERROR_NONE; 493 return H323_ERROR_NONE;
463} 494}
464 495
@@ -473,10 +504,13 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
473 switch (f->sz) { 504 switch (f->sz) {
474 case BYTE: /* Range == 256 */ 505 case BYTE: /* Range == 256 */
475 BYTE_ALIGN(bs); 506 BYTE_ALIGN(bs);
476 CHECK_BOUND(bs, 1); 507 if (nf_h323_error_boundary(bs, 1, 0))
508 return H323_ERROR_BOUND;
477 len = (*bs->cur++) + f->lb; 509 len = (*bs->cur++) + f->lb;
478 break; 510 break;
479 default: /* 2 <= Range <= 255 */ 511 default: /* 2 <= Range <= 255 */
512 if (nf_h323_error_boundary(bs, 0, f->sz))
513 return H323_ERROR_BOUND;
480 len = get_bits(bs, f->sz) + f->lb; 514 len = get_bits(bs, f->sz) + f->lb;
481 BYTE_ALIGN(bs); 515 BYTE_ALIGN(bs);
482 break; 516 break;
@@ -484,7 +518,8 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
484 518
485 bs->cur += len << 1; 519 bs->cur += len << 1;
486 520
487 CHECK_BOUND(bs, 0); 521 if (nf_h323_error_boundary(bs, 0, 0))
522 return H323_ERROR_BOUND;
488 return H323_ERROR_NONE; 523 return H323_ERROR_NONE;
489} 524}
490 525
@@ -503,9 +538,13 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
503 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; 538 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
504 539
505 /* Extensible? */ 540 /* Extensible? */
541 if (nf_h323_error_boundary(bs, 0, 1))
542 return H323_ERROR_BOUND;
506 ext = (f->attr & EXT) ? get_bit(bs) : 0; 543 ext = (f->attr & EXT) ? get_bit(bs) : 0;
507 544
508 /* Get fields bitmap */ 545 /* Get fields bitmap */
546 if (nf_h323_error_boundary(bs, 0, f->sz))
547 return H323_ERROR_BOUND;
509 bmp = get_bitmap(bs, f->sz); 548 bmp = get_bitmap(bs, f->sz);
510 if (base) 549 if (base)
511 *(unsigned int *)base = bmp; 550 *(unsigned int *)base = bmp;
@@ -525,9 +564,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
525 564
526 /* Decode */ 565 /* Decode */
527 if (son->attr & OPEN) { /* Open field */ 566 if (son->attr & OPEN) { /* Open field */
528 CHECK_BOUND(bs, 2); 567 if (nf_h323_error_boundary(bs, 2, 0))
568 return H323_ERROR_BOUND;
529 len = get_len(bs); 569 len = get_len(bs);
530 CHECK_BOUND(bs, len); 570 if (nf_h323_error_boundary(bs, len, 0))
571 return H323_ERROR_BOUND;
531 if (!base || !(son->attr & DECODE)) { 572 if (!base || !(son->attr & DECODE)) {
532 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, 573 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
533 " ", son->name); 574 " ", son->name);
@@ -555,8 +596,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
555 return H323_ERROR_NONE; 596 return H323_ERROR_NONE;
556 597
557 /* Get the extension bitmap */ 598 /* Get the extension bitmap */
599 if (nf_h323_error_boundary(bs, 0, 7))
600 return H323_ERROR_BOUND;
558 bmp2_len = get_bits(bs, 7) + 1; 601 bmp2_len = get_bits(bs, 7) + 1;
559 CHECK_BOUND(bs, (bmp2_len + 7) >> 3); 602 if (nf_h323_error_boundary(bs, 0, bmp2_len))
603 return H323_ERROR_BOUND;
560 bmp2 = get_bitmap(bs, bmp2_len); 604 bmp2 = get_bitmap(bs, bmp2_len);
561 bmp |= bmp2 >> f->sz; 605 bmp |= bmp2 >> f->sz;
562 if (base) 606 if (base)
@@ -567,9 +611,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
567 for (opt = 0; opt < bmp2_len; opt++, i++, son++) { 611 for (opt = 0; opt < bmp2_len; opt++, i++, son++) {
568 /* Check Range */ 612 /* Check Range */
569 if (i >= f->ub) { /* Newer Version? */ 613 if (i >= f->ub) { /* Newer Version? */
570 CHECK_BOUND(bs, 2); 614 if (nf_h323_error_boundary(bs, 2, 0))
615 return H323_ERROR_BOUND;
571 len = get_len(bs); 616 len = get_len(bs);
572 CHECK_BOUND(bs, len); 617 if (nf_h323_error_boundary(bs, len, 0))
618 return H323_ERROR_BOUND;
573 bs->cur += len; 619 bs->cur += len;
574 continue; 620 continue;
575 } 621 }
@@ -583,9 +629,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
583 if (!((0x80000000 >> opt) & bmp2)) /* Not present */ 629 if (!((0x80000000 >> opt) & bmp2)) /* Not present */
584 continue; 630 continue;
585 631
586 CHECK_BOUND(bs, 2); 632 if (nf_h323_error_boundary(bs, 2, 0))
633 return H323_ERROR_BOUND;
587 len = get_len(bs); 634 len = get_len(bs);
588 CHECK_BOUND(bs, len); 635 if (nf_h323_error_boundary(bs, len, 0))
636 return H323_ERROR_BOUND;
589 if (!base || !(son->attr & DECODE)) { 637 if (!base || !(son->attr & DECODE)) {
590 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 638 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
591 son->name); 639 son->name);
@@ -623,22 +671,27 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
623 switch (f->sz) { 671 switch (f->sz) {
624 case BYTE: 672 case BYTE:
625 BYTE_ALIGN(bs); 673 BYTE_ALIGN(bs);
626 CHECK_BOUND(bs, 1); 674 if (nf_h323_error_boundary(bs, 1, 0))
675 return H323_ERROR_BOUND;
627 count = *bs->cur++; 676 count = *bs->cur++;
628 break; 677 break;
629 case WORD: 678 case WORD:
630 BYTE_ALIGN(bs); 679 BYTE_ALIGN(bs);
631 CHECK_BOUND(bs, 2); 680 if (nf_h323_error_boundary(bs, 2, 0))
681 return H323_ERROR_BOUND;
632 count = *bs->cur++; 682 count = *bs->cur++;
633 count <<= 8; 683 count <<= 8;
634 count += *bs->cur++; 684 count += *bs->cur++;
635 break; 685 break;
636 case SEMI: 686 case SEMI:
637 BYTE_ALIGN(bs); 687 BYTE_ALIGN(bs);
638 CHECK_BOUND(bs, 2); 688 if (nf_h323_error_boundary(bs, 2, 0))
689 return H323_ERROR_BOUND;
639 count = get_len(bs); 690 count = get_len(bs);
640 break; 691 break;
641 default: 692 default:
693 if (nf_h323_error_boundary(bs, 0, f->sz))
694 return H323_ERROR_BOUND;
642 count = get_bits(bs, f->sz); 695 count = get_bits(bs, f->sz);
643 break; 696 break;
644 } 697 }
@@ -658,8 +711,11 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
658 for (i = 0; i < count; i++) { 711 for (i = 0; i < count; i++) {
659 if (son->attr & OPEN) { 712 if (son->attr & OPEN) {
660 BYTE_ALIGN(bs); 713 BYTE_ALIGN(bs);
714 if (nf_h323_error_boundary(bs, 2, 0))
715 return H323_ERROR_BOUND;
661 len = get_len(bs); 716 len = get_len(bs);
662 CHECK_BOUND(bs, len); 717 if (nf_h323_error_boundary(bs, len, 0))
718 return H323_ERROR_BOUND;
663 if (!base || !(son->attr & DECODE)) { 719 if (!base || !(son->attr & DECODE)) {
664 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, 720 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE,
665 " ", son->name); 721 " ", son->name);
@@ -710,11 +766,17 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
710 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; 766 base = (base && (f->attr & DECODE)) ? base + f->offset : NULL;
711 767
712 /* Decode the choice index number */ 768 /* Decode the choice index number */
769 if (nf_h323_error_boundary(bs, 0, 1))
770 return H323_ERROR_BOUND;
713 if ((f->attr & EXT) && get_bit(bs)) { 771 if ((f->attr & EXT) && get_bit(bs)) {
714 ext = 1; 772 ext = 1;
773 if (nf_h323_error_boundary(bs, 0, 7))
774 return H323_ERROR_BOUND;
715 type = get_bits(bs, 7) + f->lb; 775 type = get_bits(bs, 7) + f->lb;
716 } else { 776 } else {
717 ext = 0; 777 ext = 0;
778 if (nf_h323_error_boundary(bs, 0, f->sz))
779 return H323_ERROR_BOUND;
718 type = get_bits(bs, f->sz); 780 type = get_bits(bs, f->sz);
719 if (type >= f->lb) 781 if (type >= f->lb)
720 return H323_ERROR_RANGE; 782 return H323_ERROR_RANGE;
@@ -727,8 +789,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
727 /* Check Range */ 789 /* Check Range */
728 if (type >= f->ub) { /* Newer version? */ 790 if (type >= f->ub) { /* Newer version? */
729 BYTE_ALIGN(bs); 791 BYTE_ALIGN(bs);
792 if (nf_h323_error_boundary(bs, 2, 0))
793 return H323_ERROR_BOUND;
730 len = get_len(bs); 794 len = get_len(bs);
731 CHECK_BOUND(bs, len); 795 if (nf_h323_error_boundary(bs, len, 0))
796 return H323_ERROR_BOUND;
732 bs->cur += len; 797 bs->cur += len;
733 return H323_ERROR_NONE; 798 return H323_ERROR_NONE;
734 } 799 }
@@ -742,8 +807,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
742 807
743 if (ext || (son->attr & OPEN)) { 808 if (ext || (son->attr & OPEN)) {
744 BYTE_ALIGN(bs); 809 BYTE_ALIGN(bs);
810 if (nf_h323_error_boundary(bs, len, 0))
811 return H323_ERROR_BOUND;
745 len = get_len(bs); 812 len = get_len(bs);
746 CHECK_BOUND(bs, len); 813 if (nf_h323_error_boundary(bs, len, 0))
814 return H323_ERROR_BOUND;
747 if (!base || !(son->attr & DECODE)) { 815 if (!base || !(son->attr & DECODE)) {
748 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", 816 PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ",
749 son->name); 817 son->name);
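
[Editorial sketch] The hunks above replace the byte-granular CHECK_BOUND() macro with nf_h323_error_boundary(), which takes both a byte count and a bit count so that reads issued through get_bit()/get_bits() are also covered. A minimal userspace sketch of such a check; the bitstr layout and helper name are simplified assumptions, not the kernel's exact definitions:

#include <stddef.h>
#include <stdint.h>

struct bitstr {
	const uint8_t *cur;  /* current read position */
	const uint8_t *end;  /* one past the last valid byte */
	size_t bit;          /* bit offset within *cur, 0..7 */
};

/* Nonzero if reading `bytes` whole bytes plus `bits` further bits would
 * run past the end of the buffer. */
static int error_boundary(const struct bitstr *bs, size_t bytes, size_t bits)
{
	size_t avail;

	if (bs->cur > bs->end)
		return 1;                        /* already overrun */
	avail = (size_t)(bs->end - bs->cur) * 8;
	if (avail < bs->bit)
		return 1;
	return bytes * 8 + bits > avail - bs->bit;
}
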
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 59c08997bfdf..382d49792f42 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,7 +45,6 @@
45#include <net/netfilter/nf_conntrack_zones.h> 45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h> 46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_labels.h> 47#include <net/netfilter/nf_conntrack_labels.h>
48#include <net/netfilter/nf_conntrack_seqadj.h>
49#include <net/netfilter/nf_conntrack_synproxy.h> 48#include <net/netfilter/nf_conntrack_synproxy.h>
50#ifdef CONFIG_NF_NAT_NEEDED 49#ifdef CONFIG_NF_NAT_NEEDED
51#include <net/netfilter/nf_nat_core.h> 50#include <net/netfilter/nf_nat_core.h>
@@ -1566,9 +1565,11 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
1566static int ctnetlink_change_timeout(struct nf_conn *ct, 1565static int ctnetlink_change_timeout(struct nf_conn *ct,
1567 const struct nlattr * const cda[]) 1566 const struct nlattr * const cda[])
1568{ 1567{
1569 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); 1568 u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1570 1569
1571 ct->timeout = nfct_time_stamp + timeout * HZ; 1570 if (timeout > INT_MAX)
1571 timeout = INT_MAX;
1572 ct->timeout = nfct_time_stamp + (u32)timeout;
1572 1573
1573 if (test_bit(IPS_DYING_BIT, &ct->status)) 1574 if (test_bit(IPS_DYING_BIT, &ct->status))
1574 return -ETIME; 1575 return -ETIME;
@@ -1768,6 +1769,7 @@ ctnetlink_create_conntrack(struct net *net,
1768 int err = -EINVAL; 1769 int err = -EINVAL;
1769 struct nf_conntrack_helper *helper; 1770 struct nf_conntrack_helper *helper;
1770 struct nf_conn_tstamp *tstamp; 1771 struct nf_conn_tstamp *tstamp;
1772 u64 timeout;
1771 1773
1772 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); 1774 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
1773 if (IS_ERR(ct)) 1775 if (IS_ERR(ct))
@@ -1776,7 +1778,10 @@ ctnetlink_create_conntrack(struct net *net,
1776 if (!cda[CTA_TIMEOUT]) 1778 if (!cda[CTA_TIMEOUT])
1777 goto err1; 1779 goto err1;
1778 1780
1779 ct->timeout = nfct_time_stamp + ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 1781 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
1782 if (timeout > INT_MAX)
1783 timeout = INT_MAX;
1784 ct->timeout = (u32)timeout + nfct_time_stamp;
1780 1785
1781 rcu_read_lock(); 1786 rcu_read_lock();
1782 if (cda[CTA_HELP]) { 1787 if (cda[CTA_HELP]) {
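
[Editorial sketch] Both ctnetlink hunks above apply the same overflow-safe pattern: widen the user-supplied seconds value to 64 bits before multiplying by HZ, clamp to INT_MAX, and only then truncate back to the 32-bit timeout field. A self-contained sketch (the HZ value is assumed for illustration):

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define HZ 250  /* assumed tick rate, for illustration only */

static uint32_t timeout_to_jiffies(uint32_t seconds)
{
	uint64_t t = (uint64_t)seconds * HZ;  /* widen before multiplying */

	if (t > INT_MAX)                      /* clamp instead of wrapping */
		t = INT_MAX;
	return (uint32_t)t;
}

int main(void)
{
	/* Without the clamp, 0xFFFFFFFF * HZ wraps in 32 bits and could
	 * yield a timeout near zero or far in the past. */
	printf("%u\n", timeout_to_jiffies(0xFFFFFFFFu));
	return 0;
}
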
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index b12fc07111d0..37ef35b861f2 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1039,6 +1039,9 @@ static int tcp_packet(struct nf_conn *ct,
1039 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && 1039 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1040 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) 1040 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1041 timeout = timeouts[TCP_CONNTRACK_UNACK]; 1041 timeout = timeouts[TCP_CONNTRACK_UNACK];
1042 else if (ct->proto.tcp.last_win == 0 &&
1043 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1044 timeout = timeouts[TCP_CONNTRACK_RETRANS];
1042 else 1045 else
1043 timeout = timeouts[new_state]; 1046 timeout = timeouts[new_state];
1044 spin_unlock_bh(&ct->lock); 1047 spin_unlock_bh(&ct->lock);
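
[Editorial sketch] The hunk above keeps a zero-window peer from holding a conntrack entry for the full per-state timeout by capping it at the shorter retransmission timeout. The selection logic, restated as a small helper with illustrative parameter names:

#include <stdint.h>

static unsigned int effective_timeout(unsigned int state_timeout,
				      unsigned int retrans_timeout,
				      uint16_t last_win)
{
	/* A peer advertising a zero window makes no forward progress,
	 * so do not let the entry outlive the retransmission timeout. */
	if (last_win == 0 && state_timeout > retrans_timeout)
		return retrans_timeout;
	return state_timeout;
}
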
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d8327b43e4dc..10798b357481 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5847,6 +5847,12 @@ static int __net_init nf_tables_init_net(struct net *net)
5847 return 0; 5847 return 0;
5848} 5848}
5849 5849
5850static void __net_exit nf_tables_exit_net(struct net *net)
5851{
5852 WARN_ON_ONCE(!list_empty(&net->nft.af_info));
5853 WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
5854}
5855
5850int __nft_release_basechain(struct nft_ctx *ctx) 5856int __nft_release_basechain(struct nft_ctx *ctx)
5851{ 5857{
5852 struct nft_rule *rule, *nr; 5858 struct nft_rule *rule, *nr;
@@ -5917,6 +5923,7 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
5917 5923
5918static struct pernet_operations nf_tables_net_ops = { 5924static struct pernet_operations nf_tables_net_ops = {
5919 .init = nf_tables_init_net, 5925 .init = nf_tables_init_net,
5926 .exit = nf_tables_exit_net,
5920}; 5927};
5921 5928
5922static int __init nf_tables_module_init(void) 5929static int __init nf_tables_module_init(void)
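
[Editorial sketch] This exit handler, and the matching ones added to nfnetlink_log, nfnetlink_queue and x_tables below, all follow one pattern: by the time a network namespace is torn down its per-net lists must already be empty, so warn once if anything leaked. A userspace sketch of the pattern with stand-ins for the kernel's list primitives and WARN_ON_ONCE():

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

#define WARN_ON_ONCE(cond) do {                          \
	static int warned;                               \
	if ((cond) && !warned) {                         \
		warned = 1;                              \
		fprintf(stderr, "WARNING: %s\n", #cond); \
	}                                                \
} while (0)

static void exit_net_check(const struct list_head *objects)
{
	WARN_ON_ONCE(!list_empty(objects)); /* leaked entries survived exit */
}
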
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 41628b393673..d33ce6d5ebce 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -17,6 +17,7 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/capability.h>
20#include <net/netlink.h> 21#include <net/netlink.h>
21#include <net/sock.h> 22#include <net/sock.h>
22 23
@@ -407,6 +408,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
407 struct nfnl_cthelper *nlcth; 408 struct nfnl_cthelper *nlcth;
408 int ret = 0; 409 int ret = 0;
409 410
411 if (!capable(CAP_NET_ADMIN))
412 return -EPERM;
413
410 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) 414 if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
411 return -EINVAL; 415 return -EINVAL;
412 416
@@ -611,6 +615,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
611 struct nfnl_cthelper *nlcth; 615 struct nfnl_cthelper *nlcth;
612 bool tuple_set = false; 616 bool tuple_set = false;
613 617
618 if (!capable(CAP_NET_ADMIN))
619 return -EPERM;
620
614 if (nlh->nlmsg_flags & NLM_F_DUMP) { 621 if (nlh->nlmsg_flags & NLM_F_DUMP) {
615 struct netlink_dump_control c = { 622 struct netlink_dump_control c = {
616 .dump = nfnl_cthelper_dump_table, 623 .dump = nfnl_cthelper_dump_table,
@@ -678,6 +685,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
678 struct nfnl_cthelper *nlcth, *n; 685 struct nfnl_cthelper *nlcth, *n;
679 int j = 0, ret; 686 int j = 0, ret;
680 687
688 if (!capable(CAP_NET_ADMIN))
689 return -EPERM;
690
681 if (tb[NFCTH_NAME]) 691 if (tb[NFCTH_NAME])
682 helper_name = nla_data(tb[NFCTH_NAME]); 692 helper_name = nla_data(tb[NFCTH_NAME]);
683 693
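
[Editorial sketch] All three nfnl_cthelper entry points above gain the same early gate: refuse the operation before any attribute is parsed unless the caller holds CAP_NET_ADMIN. The shape of the check, with a stub standing in for the kernel's capable():

#include <errno.h>
#include <stdbool.h>

static bool caller_has_net_admin;  /* stub for capable(CAP_NET_ADMIN) */

static int nfnl_cthelper_op(void)
{
	if (!caller_has_net_admin)
		return -EPERM;      /* reject before touching attributes */
	/* ... attribute parsing and the actual operation follow ... */
	return 0;
}
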
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e5afab86381c..e955bec0acc6 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1093,10 +1093,15 @@ static int __net_init nfnl_log_net_init(struct net *net)
1093 1093
1094static void __net_exit nfnl_log_net_exit(struct net *net) 1094static void __net_exit nfnl_log_net_exit(struct net *net)
1095{ 1095{
1096 struct nfnl_log_net *log = nfnl_log_pernet(net);
1097 unsigned int i;
1098
1096#ifdef CONFIG_PROC_FS 1099#ifdef CONFIG_PROC_FS
1097 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); 1100 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
1098#endif 1101#endif
1099 nf_log_unset(net, &nfulnl_logger); 1102 nf_log_unset(net, &nfulnl_logger);
1103 for (i = 0; i < INSTANCE_BUCKETS; i++)
1104 WARN_ON_ONCE(!hlist_empty(&log->instance_table[i]));
1100} 1105}
1101 1106
1102static struct pernet_operations nfnl_log_net_ops = { 1107static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a16356cacec3..c09b36755ed7 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1512,10 +1512,15 @@ static int __net_init nfnl_queue_net_init(struct net *net)
1512 1512
1513static void __net_exit nfnl_queue_net_exit(struct net *net) 1513static void __net_exit nfnl_queue_net_exit(struct net *net)
1514{ 1514{
1515 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1516 unsigned int i;
1517
1515 nf_unregister_queue_handler(net); 1518 nf_unregister_queue_handler(net);
1516#ifdef CONFIG_PROC_FS 1519#ifdef CONFIG_PROC_FS
1517 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); 1520 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1518#endif 1521#endif
1522 for (i = 0; i < INSTANCE_BUCKETS; i++)
1523 WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
1519} 1524}
1520 1525
1521static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) 1526static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a0a93d987a3b..47ec1046ad11 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -214,6 +214,8 @@ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
214 [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, 214 [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
215 [NFTA_EXTHDR_LEN] = { .type = NLA_U32 }, 215 [NFTA_EXTHDR_LEN] = { .type = NLA_U32 },
216 [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, 216 [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 },
217 [NFTA_EXTHDR_OP] = { .type = NLA_U32 },
218 [NFTA_EXTHDR_SREG] = { .type = NLA_U32 },
217}; 219};
218 220
219static int nft_exthdr_init(const struct nft_ctx *ctx, 221static int nft_exthdr_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a77dd514297c..55802e97f906 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1729,8 +1729,17 @@ static int __net_init xt_net_init(struct net *net)
1729 return 0; 1729 return 0;
1730} 1730}
1731 1731
1732static void __net_exit xt_net_exit(struct net *net)
1733{
1734 int i;
1735
1736 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1737 WARN_ON_ONCE(!list_empty(&net->xt.tables[i]));
1738}
1739
1732static struct pernet_operations xt_net_ops = { 1740static struct pernet_operations xt_net_ops = {
1733 .init = xt_net_init, 1741 .init = xt_net_init,
1742 .exit = xt_net_exit,
1734}; 1743};
1735 1744
1736static int __init xt_init(void) 1745static int __init xt_init(void)
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index 041da0d9c06f..1f7fbd3c7e5a 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -27,6 +27,9 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
27{ 27{
28 struct sock_fprog_kern program; 28 struct sock_fprog_kern program;
29 29
30 if (len > XT_BPF_MAX_NUM_INSTR)
31 return -EINVAL;
32
30 program.len = len; 33 program.len = len;
31 program.filter = insns; 34 program.filter = insns;
32 35
@@ -55,6 +58,9 @@ static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
55 mm_segment_t oldfs = get_fs(); 58 mm_segment_t oldfs = get_fs();
56 int retval, fd; 59 int retval, fd;
57 60
61 if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
62 return -EINVAL;
63
58 set_fs(KERNEL_DS); 64 set_fs(KERNEL_DS);
59 fd = bpf_obj_get_user(path, 0); 65 fd = bpf_obj_get_user(path, 0);
60 set_fs(oldfs); 66 set_fs(oldfs);
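
[Editorial sketch] Both xt_bpf checks above bound attacker-supplied input before use: a classic-BPF program may not exceed the instruction limit, and a pinned-object path that fills its whole buffer has no NUL terminator and is rejected. A sketch of the same two checks; the limit values here are illustrative assumptions, not quotes of the kernel headers:

#include <errno.h>
#include <string.h>

#define MAX_NUM_INSTR 64   /* assumed instruction cap */
#define PATH_MAX_LEN  512  /* assumed path buffer size */

static int check_bytecode_len(unsigned short len)
{
	return len > MAX_NUM_INSTR ? -EINVAL : 0;
}

static int check_path(const char *path)
{
	/* strnlen() == cap means no terminator inside the buffer */
	if (strnlen(path, PATH_MAX_LEN) == PATH_MAX_LEN)
		return -EINVAL;
	return 0;
}
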
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 36e14b1f061d..a34f314a8c23 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21 21
22#include <linux/capability.h>
22#include <linux/if.h> 23#include <linux/if.h>
23#include <linux/inetdevice.h> 24#include <linux/inetdevice.h>
24#include <linux/ip.h> 25#include <linux/ip.h>
@@ -70,6 +71,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
70 struct xt_osf_finger *kf = NULL, *sf; 71 struct xt_osf_finger *kf = NULL, *sf;
71 int err = 0; 72 int err = 0;
72 73
74 if (!capable(CAP_NET_ADMIN))
75 return -EPERM;
76
73 if (!osf_attrs[OSF_ATTR_FINGER]) 77 if (!osf_attrs[OSF_ATTR_FINGER])
74 return -EINVAL; 78 return -EINVAL;
75 79
@@ -115,6 +119,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
115 struct xt_osf_finger *sf; 119 struct xt_osf_finger *sf;
116 int err = -ENOENT; 120 int err = -ENOENT;
117 121
122 if (!capable(CAP_NET_ADMIN))
123 return -EPERM;
124
118 if (!osf_attrs[OSF_ATTR_FINGER]) 125 if (!osf_attrs[OSF_ATTR_FINGER])
119 return -EINVAL; 126 return -EINVAL;
120 127
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b0fe1fb12b99..972bfe113043 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -284,6 +284,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
284 struct sock *sk = skb->sk; 284 struct sock *sk = skb->sk;
285 int ret = -ENOMEM; 285 int ret = -ENOMEM;
286 286
287 if (!net_eq(dev_net(dev), sock_net(sk)))
288 return 0;
289
287 dev_hold(dev); 290 dev_hold(dev);
288 291
289 if (is_vmalloc_addr(skb->head)) 292 if (is_vmalloc_addr(skb->head))
diff --git a/net/sched/act_meta_mark.c b/net/sched/act_meta_mark.c
index 1e3f10e5da99..6445184b2759 100644
--- a/net/sched/act_meta_mark.c
+++ b/net/sched/act_meta_mark.c
@@ -22,7 +22,6 @@
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <uapi/linux/tc_act/tc_ife.h> 23#include <uapi/linux/tc_act/tc_ife.h>
24#include <net/tc_act/tc_ife.h> 24#include <net/tc_act/tc_ife.h>
25#include <linux/rtnetlink.h>
26 25
27static int skbmark_encode(struct sk_buff *skb, void *skbdata, 26static int skbmark_encode(struct sk_buff *skb, void *skbdata,
28 struct tcf_meta_info *e) 27 struct tcf_meta_info *e)
diff --git a/net/sched/act_meta_skbtcindex.c b/net/sched/act_meta_skbtcindex.c
index 2ea1f26c9e96..7221437ca3a6 100644
--- a/net/sched/act_meta_skbtcindex.c
+++ b/net/sched/act_meta_skbtcindex.c
@@ -22,7 +22,6 @@
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <uapi/linux/tc_act/tc_ife.h> 23#include <uapi/linux/tc_act/tc_ife.h>
24#include <net/tc_act/tc_ife.h> 24#include <net/tc_act/tc_ife.h>
25#include <linux/rtnetlink.h>
26 25
27static int skbtcindex_encode(struct sk_buff *skb, void *skbdata, 26static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
28 struct tcf_meta_info *e) 27 struct tcf_meta_info *e)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5b9b8a61e8c4..32b1ea7cf863 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -23,7 +23,6 @@
23#include <linux/skbuff.h> 23#include <linux/skbuff.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/kmod.h> 25#include <linux/kmod.h>
26#include <linux/err.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28#include <net/net_namespace.h> 27#include <net/net_namespace.h>
29#include <net/sock.h> 28#include <net/sock.h>
@@ -345,6 +344,8 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
345 /* Hold a refcnt for all chains, so that they don't disappear 344 /* Hold a refcnt for all chains, so that they don't disappear
346 * while we are iterating. 345 * while we are iterating.
347 */ 346 */
347 if (!block)
348 return;
348 list_for_each_entry(chain, &block->chain_list, list) 349 list_for_each_entry(chain, &block->chain_list, list)
349 tcf_chain_hold(chain); 350 tcf_chain_hold(chain);
350 351
@@ -367,8 +368,6 @@ void tcf_block_put(struct tcf_block *block)
367{ 368{
368 struct tcf_block_ext_info ei = {0, }; 369 struct tcf_block_ext_info ei = {0, };
369 370
370 if (!block)
371 return;
372 tcf_block_put_ext(block, block->q, &ei); 371 tcf_block_put_ext(block, block->q, &ei);
373} 372}
374 373
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index ac152b4f4247..507859cdd1cb 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -45,7 +45,6 @@
45#include <net/netlink.h> 45#include <net/netlink.h>
46#include <net/act_api.h> 46#include <net/act_api.h>
47#include <net/pkt_cls.h> 47#include <net/pkt_cls.h>
48#include <linux/netdevice.h>
49#include <linux/idr.h> 48#include <linux/idr.h>
50 49
51struct tc_u_knode { 50struct tc_u_knode {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a904276b657d..74c22b4e365e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -795,6 +795,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
795 tcm->tcm_info = refcount_read(&q->refcnt); 795 tcm->tcm_info = refcount_read(&q->refcnt);
796 if (nla_put_string(skb, TCA_KIND, q->ops->id)) 796 if (nla_put_string(skb, TCA_KIND, q->ops->id))
797 goto nla_put_failure; 797 goto nla_put_failure;
798 if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
799 goto nla_put_failure;
798 if (q->ops->dump && q->ops->dump(q, skb) < 0) 800 if (q->ops->dump && q->ops->dump(q, skb) < 0)
799 goto nla_put_failure; 801 goto nla_put_failure;
800 802
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 5ecc38f35d47..fc1286f499c1 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -68,6 +68,8 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
68 struct net_device *dev = qdisc_dev(sch); 68 struct net_device *dev = qdisc_dev(sch);
69 int err; 69 int err;
70 70
71 net_inc_ingress_queue();
72
71 mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); 73 mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
72 74
73 q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; 75 q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
@@ -78,7 +80,6 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
78 if (err) 80 if (err)
79 return err; 81 return err;
80 82
81 net_inc_ingress_queue();
82 sch->flags |= TCQ_F_CPUSTATS; 83 sch->flags |= TCQ_F_CPUSTATS;
83 84
84 return 0; 85 return 0;
@@ -172,6 +173,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
172 struct net_device *dev = qdisc_dev(sch); 173 struct net_device *dev = qdisc_dev(sch);
173 int err; 174 int err;
174 175
176 net_inc_ingress_queue();
177 net_inc_egress_queue();
178
175 mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress); 179 mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
176 180
177 q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; 181 q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
@@ -190,18 +194,11 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
190 194
191 err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info); 195 err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
192 if (err) 196 if (err)
193 goto err_egress_block_get; 197 return err;
194
195 net_inc_ingress_queue();
196 net_inc_egress_queue();
197 198
198 sch->flags |= TCQ_F_CPUSTATS; 199 sch->flags |= TCQ_F_CPUSTATS;
199 200
200 return 0; 201 return 0;
201
202err_egress_block_get:
203 tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
204 return err;
205} 202}
206 203
207static void clsact_destroy(struct Qdisc *sch) 204static void clsact_destroy(struct Qdisc *sch)
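
[Editorial sketch] The two sch_ingress hunks above move the static-key increments to the very top of ->init(). The apparent motivation is symmetry with ->destroy(), which decrements unconditionally and runs even when ->init() fails partway through; incrementing first keeps the counters balanced on every path. A minimal sketch of that pairing discipline:

static int refs;  /* stands in for the ingress/egress static keys */

static void destroy(void)
{
	refs--;               /* always runs, so init must always inc */
}

static int init(int setup_ok)
{
	refs++;               /* unconditional: mirrors destroy() */
	if (!setup_ok)
		return -1;    /* caller still invokes destroy() */
	return 0;
}
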
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 9d874e60e032..f0747eb87dc4 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -157,6 +157,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
157 .handle = sch->handle, 157 .handle = sch->handle,
158 .parent = sch->parent, 158 .parent = sch->parent,
159 }; 159 };
160 int err;
160 161
161 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) 162 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
162 return -EOPNOTSUPP; 163 return -EOPNOTSUPP;
@@ -171,7 +172,14 @@ static int red_offload(struct Qdisc *sch, bool enable)
171 opt.command = TC_RED_DESTROY; 172 opt.command = TC_RED_DESTROY;
172 } 173 }
173 174
174 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); 175 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
176
177 if (!err && enable)
178 sch->flags |= TCQ_F_OFFLOADED;
179 else
180 sch->flags &= ~TCQ_F_OFFLOADED;
181
182 return err;
175} 183}
176 184
177static void red_destroy(struct Qdisc *sch) 185static void red_destroy(struct Qdisc *sch)
@@ -274,7 +282,7 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt)
274 return red_change(sch, opt); 282 return red_change(sch, opt);
275} 283}
276 284
277static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt) 285static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
278{ 286{
279 struct net_device *dev = qdisc_dev(sch); 287 struct net_device *dev = qdisc_dev(sch);
280 struct tc_red_qopt_offload hw_stats = { 288 struct tc_red_qopt_offload hw_stats = {
@@ -286,21 +294,12 @@ static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt)
286 .stats.qstats = &sch->qstats, 294 .stats.qstats = &sch->qstats,
287 }, 295 },
288 }; 296 };
289 int err;
290 297
291 opt->flags &= ~TC_RED_OFFLOADED; 298 if (!(sch->flags & TCQ_F_OFFLOADED))
292 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
293 return 0;
294
295 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
296 &hw_stats);
297 if (err == -EOPNOTSUPP)
298 return 0; 299 return 0;
299 300
300 if (!err) 301 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
301 opt->flags |= TC_RED_OFFLOADED; 302 &hw_stats);
302
303 return err;
304} 303}
305 304
306static int red_dump(struct Qdisc *sch, struct sk_buff *skb) 305static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -319,7 +318,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
319 int err; 318 int err;
320 319
321 sch->qstats.backlog = q->qdisc->qstats.backlog; 320 sch->qstats.backlog = q->qdisc->qstats.backlog;
322 err = red_dump_offload(sch, &opt); 321 err = red_dump_offload_stats(sch, &opt);
323 if (err) 322 if (err)
324 goto nla_put_failure; 323 goto nla_put_failure;
325 324
@@ -347,7 +346,7 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
347 .marked = q->stats.prob_mark + q->stats.forced_mark, 346 .marked = q->stats.prob_mark + q->stats.forced_mark,
348 }; 347 };
349 348
350 if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc) { 349 if (sch->flags & TCQ_F_OFFLOADED) {
351 struct red_stats hw_stats = {0}; 350 struct red_stats hw_stats = {0};
352 struct tc_red_qopt_offload hw_stats_request = { 351 struct tc_red_qopt_offload hw_stats_request = {
353 .command = TC_RED_XSTATS, 352 .command = TC_RED_XSTATS,
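
[Editorial sketch] The sch_red rework above records the offload outcome in a qdisc flag at configuration time instead of re-probing the driver from every dump path. A sketch of the flag-caching idea; the flag's bit value here is an illustrative assumption:

#include <stdbool.h>

#define TCQ_F_OFFLOADED 0x200u  /* bit value assumed for illustration */

struct qdisc { unsigned int flags; };

static void record_offload(struct qdisc *sch, bool enable, int setup_err)
{
	if (!setup_err && enable)
		sch->flags |= TCQ_F_OFFLOADED;
	else
		sch->flags &= ~TCQ_F_OFFLOADED;
}

/* Dump paths then reduce to a cheap flag test: */
static bool is_offloaded(const struct qdisc *sch)
{
	return sch->flags & TCQ_F_OFFLOADED;
}
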
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7eec0a0b7f79..5e4100df7bae 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3924,13 +3924,17 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
3924 struct sctp_association *asoc; 3924 struct sctp_association *asoc;
3925 int retval = -EINVAL; 3925 int retval = -EINVAL;
3926 3926
3927 if (optlen < sizeof(struct sctp_reset_streams)) 3927 if (optlen < sizeof(*params))
3928 return -EINVAL; 3928 return -EINVAL;
3929 3929
3930 params = memdup_user(optval, optlen); 3930 params = memdup_user(optval, optlen);
3931 if (IS_ERR(params)) 3931 if (IS_ERR(params))
3932 return PTR_ERR(params); 3932 return PTR_ERR(params);
3933 3933
3934 if (params->srs_number_streams * sizeof(__u16) >
3935 optlen - sizeof(*params))
3936 goto out;
3937
3934 asoc = sctp_id2assoc(sk, params->srs_assoc_id); 3938 asoc = sctp_id2assoc(sk, params->srs_assoc_id);
3935 if (!asoc) 3939 if (!asoc)
3936 goto out; 3940 goto out;
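
[Editorial sketch] The sctp hunk above validates a variable-length structure copied from userspace: after checking the fixed header, it verifies that the declared stream count actually fits in the bytes the caller supplied. A sketch with an illustrative stand-in for struct sctp_reset_streams:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct reset_streams {              /* illustrative layout */
	uint32_t srs_assoc_id;
	uint16_t srs_flags;
	uint16_t srs_number_streams;
	uint16_t srs_stream_list[];  /* srs_number_streams entries */
};

static int validate(const struct reset_streams *p, size_t optlen)
{
	if (optlen < sizeof(*p))
		return -EINVAL;      /* header itself must fit */
	if ((size_t)p->srs_number_streams * sizeof(uint16_t) >
	    optlen - sizeof(*p))
		return -EINVAL;      /* declared count exceeds payload */
	return 0;
}
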
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index c4778cae58ef..444380f968f1 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
231 goto out_free_groups; 231 goto out_free_groups;
232 creds->cr_group_info->gid[i] = kgid; 232 creds->cr_group_info->gid[i] = kgid;
233 } 233 }
234 groups_sort(creds->cr_group_info);
234 235
235 return 0; 236 return 0;
236out_free_groups: 237out_free_groups:
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 5dd4e6c9fef2..26531193fce4 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
481 goto out; 481 goto out;
482 rsci.cred.cr_group_info->gid[i] = kgid; 482 rsci.cred.cr_group_info->gid[i] = kgid;
483 } 483 }
484 groups_sort(rsci.cred.cr_group_info);
484 485
485 /* mech name */ 486 /* mech name */
486 len = qword_get(&mesg, buf, mlen); 487 len = qword_get(&mesg, buf, mlen);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 740b67d5a733..af7f28fb8102 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
520 ug.gi->gid[i] = kgid; 520 ug.gi->gid[i] = kgid;
521 } 521 }
522 522
523 groups_sort(ug.gi);
523 ugp = unix_gid_lookup(cd, uid); 524 ugp = unix_gid_lookup(cd, uid);
524 if (ugp) { 525 if (ugp) {
525 struct cache_head *ch; 526 struct cache_head *ch;
@@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
819 kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); 820 kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
820 cred->cr_group_info->gid[i] = kgid; 821 cred->cr_group_info->gid[i] = kgid;
821 } 822 }
823 groups_sort(cred->cr_group_info);
822 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 824 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
823 *authp = rpc_autherr_badverf; 825 *authp = rpc_autherr_badverf;
824 return SVC_DENIED; 826 return SVC_DENIED;
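
[Editorial sketch] The sunrpc hunks above (gss_rpc_xdr.c, svcauth_gss.c and svcauth_unix.c) all insert groups_sort() once the gid array has been filled: group-membership lookups binary-search the list, so it must be sorted before it is published. A userspace equivalent built on qsort():

#include <stdlib.h>

static int gid_cmp(const void *a, const void *b)
{
	unsigned int x = *(const unsigned int *)a;
	unsigned int y = *(const unsigned int *)b;

	return (x > y) - (x < y);   /* overflow-safe three-way compare */
}

static void groups_sort(unsigned int *gids, size_t n)
{
	qsort(gids, n, sizeof(*gids), gid_cmp);
}
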
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 333b9d697ae5..33b74fd84051 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1001,6 +1001,7 @@ void xprt_transmit(struct rpc_task *task)
1001{ 1001{
1002 struct rpc_rqst *req = task->tk_rqstp; 1002 struct rpc_rqst *req = task->tk_rqstp;
1003 struct rpc_xprt *xprt = req->rq_xprt; 1003 struct rpc_xprt *xprt = req->rq_xprt;
1004 unsigned int connect_cookie;
1004 int status, numreqs; 1005 int status, numreqs;
1005 1006
1006 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); 1007 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
@@ -1024,6 +1025,7 @@ void xprt_transmit(struct rpc_task *task)
1024 } else if (!req->rq_bytes_sent) 1025 } else if (!req->rq_bytes_sent)
1025 return; 1026 return;
1026 1027
1028 connect_cookie = xprt->connect_cookie;
1027 req->rq_xtime = ktime_get(); 1029 req->rq_xtime = ktime_get();
1028 status = xprt->ops->send_request(task); 1030 status = xprt->ops->send_request(task);
1029 trace_xprt_transmit(xprt, req->rq_xid, status); 1031 trace_xprt_transmit(xprt, req->rq_xid, status);
@@ -1047,20 +1049,28 @@ void xprt_transmit(struct rpc_task *task)
1047 xprt->stat.bklog_u += xprt->backlog.qlen; 1049 xprt->stat.bklog_u += xprt->backlog.qlen;
1048 xprt->stat.sending_u += xprt->sending.qlen; 1050 xprt->stat.sending_u += xprt->sending.qlen;
1049 xprt->stat.pending_u += xprt->pending.qlen; 1051 xprt->stat.pending_u += xprt->pending.qlen;
1052 spin_unlock_bh(&xprt->transport_lock);
1050 1053
1051 /* Don't race with disconnect */ 1054 req->rq_connect_cookie = connect_cookie;
1052 if (!xprt_connected(xprt)) 1055 if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
1053 task->tk_status = -ENOTCONN;
1054 else {
1055 /* 1056 /*
1056 * Sleep on the pending queue since 1057 * Sleep on the pending queue if we're expecting a reply.
1057 * we're expecting a reply. 1058 * The spinlock ensures atomicity between the test of
1059 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1058 */ 1060 */
1059 if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) 1061 spin_lock(&xprt->recv_lock);
1062 if (!req->rq_reply_bytes_recvd) {
1060 rpc_sleep_on(&xprt->pending, task, xprt_timer); 1063 rpc_sleep_on(&xprt->pending, task, xprt_timer);
1061 req->rq_connect_cookie = xprt->connect_cookie; 1064 /*
1065 * Send an extra queue wakeup call if the
1066 * connection was dropped in case the call to
1067 * rpc_sleep_on() raced.
1068 */
1069 if (!xprt_connected(xprt))
1070 xprt_wake_pending_tasks(xprt, -ENOTCONN);
1071 }
1072 spin_unlock(&xprt->recv_lock);
1062 } 1073 }
1063 spin_unlock_bh(&xprt->transport_lock);
1064} 1074}
1065 1075
1066static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) 1076static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index ed34dc0f144c..a3f2ab283aeb 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1408,11 +1408,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
1408 dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n", 1408 dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
1409 __func__, rep, req, be32_to_cpu(rep->rr_xid)); 1409 __func__, rep, req, be32_to_cpu(rep->rr_xid));
1410 1410
1411 if (list_empty(&req->rl_registered) && 1411 queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
1412 !test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags))
1413 rpcrdma_complete_rqst(rep);
1414 else
1415 queue_work(rpcrdma_receive_wq, &rep->rr_work);
1416 return; 1412 return;
1417 1413
1418out_badstatus: 1414out_badstatus:
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 646c24494ea7..6ee1ad8978f3 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -52,6 +52,7 @@
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/seq_file.h> 53#include <linux/seq_file.h>
54#include <linux/sunrpc/addr.h> 54#include <linux/sunrpc/addr.h>
55#include <linux/smp.h>
55 56
56#include "xprt_rdma.h" 57#include "xprt_rdma.h"
57 58
@@ -656,6 +657,7 @@ xprt_rdma_allocate(struct rpc_task *task)
656 task->tk_pid, __func__, rqst->rq_callsize, 657 task->tk_pid, __func__, rqst->rq_callsize,
657 rqst->rq_rcvsize, req); 658 rqst->rq_rcvsize, req);
658 659
660 req->rl_cpu = smp_processor_id();
659 req->rl_connect_cookie = 0; /* our reserved value */ 661 req->rl_connect_cookie = 0; /* our reserved value */
660 rpcrdma_set_xprtdata(rqst, req); 662 rpcrdma_set_xprtdata(rqst, req);
661 rqst->rq_buffer = req->rl_sendbuf->rg_base; 663 rqst->rq_buffer = req->rl_sendbuf->rg_base;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 710b3f77db82..8607c029c0dd 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -83,7 +83,7 @@ rpcrdma_alloc_wq(void)
83 struct workqueue_struct *recv_wq; 83 struct workqueue_struct *recv_wq;
84 84
85 recv_wq = alloc_workqueue("xprtrdma_receive", 85 recv_wq = alloc_workqueue("xprtrdma_receive",
86 WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI, 86 WQ_MEM_RECLAIM | WQ_HIGHPRI,
87 0); 87 0);
88 if (!recv_wq) 88 if (!recv_wq)
89 return -ENOMEM; 89 return -ENOMEM;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 51686d9eac5f..1342f743f1c4 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -342,6 +342,7 @@ enum {
342struct rpcrdma_buffer; 342struct rpcrdma_buffer;
343struct rpcrdma_req { 343struct rpcrdma_req {
344 struct list_head rl_list; 344 struct list_head rl_list;
345 int rl_cpu;
345 unsigned int rl_connect_cookie; 346 unsigned int rl_connect_cookie;
346 struct rpcrdma_buffer *rl_buffer; 347 struct rpcrdma_buffer *rl_buffer;
347 struct rpcrdma_rep *rl_reply; 348 struct rpcrdma_rep *rl_reply;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 22c4fd8a9dfe..0cdf5c2ad881 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1140,7 +1140,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1140 __skb_dequeue(arrvq); 1140 __skb_dequeue(arrvq);
1141 __skb_queue_tail(inputq, skb); 1141 __skb_queue_tail(inputq, skb);
1142 } 1142 }
1143 refcount_dec(&skb->users); 1143 kfree_skb(skb);
1144 spin_unlock_bh(&inputq->lock); 1144 spin_unlock_bh(&inputq->lock);
1145 continue; 1145 continue;
1146 } 1146 }
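
[Editorial sketch] The tipc hunk above replaces a bare refcount_dec() with kfree_skb(): merely decrementing the user count leaks the buffer when the dropped reference was the last one, whereas kfree_skb() frees on zero. The difference, with illustrative types:

#include <stdlib.h>

struct buf { int users; };

static void buf_put(struct buf *b)   /* kfree_skb()-style drop */
{
	if (--b->users == 0)
		free(b);              /* last reference: release */
}

static void buf_dec(struct buf *b)   /* bare decrement: can leak */
{
	--b->users;
}
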
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 278d979c211a..d7d6cb00c47b 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -25,17 +25,45 @@ endif
25 25
26$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509) 26$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509)
27 @$(kecho) " GEN $@" 27 @$(kecho) " GEN $@"
28 @echo '#include "reg.h"' > $@ 28 @(set -e; \
29 @echo 'const u8 shipped_regdb_certs[] = {' >> $@ 29 allf=""; \
30 @for f in $^ ; do hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ ; done 30 for f in $^ ; do \
31 @echo '};' >> $@ 31 # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
32 @echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);' >> $@ 32 thisf=$$(od -An -v -tx1 < $$f | \
33 sed -e 's/ /\n/g' | \
34 sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
35 sed -e 's/^/0x/;s/$$/,/'); \
36 # file should not be empty - maybe command substitution failed? \
37 test ! -z "$$thisf";\
38 allf=$$allf$$thisf;\
39 done; \
40 ( \
41 echo '#include "reg.h"'; \
42 echo 'const u8 shipped_regdb_certs[] = {'; \
43 echo "$$allf"; \
44 echo '};'; \
45 echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
46 ) >> $@)
33 47
34$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ 48$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
35 $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509) 49 $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
36 @$(kecho) " GEN $@" 50 @$(kecho) " GEN $@"
37 @echo '#include "reg.h"' > $@ 51 @(set -e; \
38 @echo 'const u8 extra_regdb_certs[] = {' >> $@ 52 allf=""; \
39 @for f in $^ ; do test -f $$f && hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ || true ; done 53 for f in $^ ; do \
40 @echo '};' >> $@ 54 # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
41 @echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);' >> $@ 55 thisf=$$(od -An -v -tx1 < $$f | \
56 sed -e 's/ /\n/g' | \
57 sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
58 sed -e 's/^/0x/;s/$$/,/'); \
59 # file should not be empty - maybe command substitution failed? \
60 test ! -z "$$thisf";\
61 allf=$$allf$$thisf;\
62 done; \
63 ( \
64 echo '#include "reg.h"'; \
65 echo 'const u8 extra_regdb_certs[] = {'; \
66 echo "$$allf"; \
67 echo '};'; \
68 echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
69 ) >> $@)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 040aa79e1d9d..31031f10fe56 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6233,28 +6233,6 @@ sub process {
6233 } 6233 }
6234 } 6234 }
6235 6235
6236# whine about ACCESS_ONCE
6237 if ($^V && $^V ge 5.10.0 &&
6238 $line =~ /\bACCESS_ONCE\s*$balanced_parens\s*(=(?!=))?\s*($FuncArg)?/) {
6239 my $par = $1;
6240 my $eq = $2;
6241 my $fun = $3;
6242 $par =~ s/^\(\s*(.*)\s*\)$/$1/;
6243 if (defined($eq)) {
6244 if (WARN("PREFER_WRITE_ONCE",
6245 "Prefer WRITE_ONCE(<FOO>, <BAR>) over ACCESS_ONCE(<FOO>) = <BAR>\n" . $herecurr) &&
6246 $fix) {
6247 $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)\s*$eq\s*\Q$fun\E/WRITE_ONCE($par, $fun)/;
6248 }
6249 } else {
6250 if (WARN("PREFER_READ_ONCE",
6251 "Prefer READ_ONCE(<FOO>) over ACCESS_ONCE(<FOO>)\n" . $herecurr) &&
6252 $fix) {
6253 $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)/READ_ONCE($par)/;
6254 }
6255 }
6256 }
6257
6258# check for mutex_trylock_recursive usage 6236# check for mutex_trylock_recursive usage
6259 if ($line =~ /mutex_trylock_recursive/) { 6237 if ($line =~ /mutex_trylock_recursive/) {
6260 ERROR("LOCKING", 6238 ERROR("LOCKING",
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 39e07d8574dd..7721d5b2b0c0 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -44,10 +44,10 @@
44set -o errexit 44set -o errexit
45set -o nounset 45set -o nounset
46 46
47READELF="${CROSS_COMPILE}readelf" 47READELF="${CROSS_COMPILE:-}readelf"
48ADDR2LINE="${CROSS_COMPILE}addr2line" 48ADDR2LINE="${CROSS_COMPILE:-}addr2line"
49SIZE="${CROSS_COMPILE}size" 49SIZE="${CROSS_COMPILE:-}size"
50NM="${CROSS_COMPILE}nm" 50NM="${CROSS_COMPILE:-}nm"
51 51
52command -v awk >/dev/null 2>&1 || die "awk isn't installed" 52command -v awk >/dev/null 2>&1 || die "awk isn't installed"
53command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" 53command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
diff --git a/security/keys/key.c b/security/keys/key.c
index 66049183ad89..d97c9394b5dd 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -833,7 +833,6 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
833 833
834 key_check(keyring); 834 key_check(keyring);
835 835
836 key_ref = ERR_PTR(-EPERM);
837 if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION)) 836 if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
838 restrict_link = keyring->restrict_link; 837 restrict_link = keyring->restrict_link;
839 838
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 76d22f726ae4..1ffe60bb2845 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1588,9 +1588,8 @@ error_keyring:
1588 * The caller must have Setattr permission to change keyring restrictions. 1588 * The caller must have Setattr permission to change keyring restrictions.
1589 * 1589 *
1590 * The requested type name may be a NULL pointer to reject all attempts 1590 * The requested type name may be a NULL pointer to reject all attempts
1591 * to link to the keyring. If _type is non-NULL, _restriction can be 1591 * to link to the keyring. In this case, _restriction must also be NULL.
1592 * NULL or a pointer to a string describing the restriction. If _type is 1592 * Otherwise, both _type and _restriction must be non-NULL.
1593 * NULL, _restriction must also be NULL.
1594 * 1593 *
1595 * Returns 0 if successful. 1594 * Returns 0 if successful.
1596 */ 1595 */
@@ -1598,7 +1597,6 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
1598 const char __user *_restriction) 1597 const char __user *_restriction)
1599{ 1598{
1600 key_ref_t key_ref; 1599 key_ref_t key_ref;
1601 bool link_reject = !_type;
1602 char type[32]; 1600 char type[32];
1603 char *restriction = NULL; 1601 char *restriction = NULL;
1604 long ret; 1602 long ret;
@@ -1607,31 +1605,29 @@ long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
1607 if (IS_ERR(key_ref)) 1605 if (IS_ERR(key_ref))
1608 return PTR_ERR(key_ref); 1606 return PTR_ERR(key_ref);
1609 1607
1608 ret = -EINVAL;
1610 if (_type) { 1609 if (_type) {
1611 ret = key_get_type_from_user(type, _type, sizeof(type)); 1610 if (!_restriction)
1612 if (ret < 0)
1613 goto error; 1611 goto error;
1614 }
1615 1612
1616 if (_restriction) { 1613 ret = key_get_type_from_user(type, _type, sizeof(type));
1617 if (!_type) { 1614 if (ret < 0)
1618 ret = -EINVAL;
1619 goto error; 1615 goto error;
1620 }
1621 1616
1622 restriction = strndup_user(_restriction, PAGE_SIZE); 1617 restriction = strndup_user(_restriction, PAGE_SIZE);
1623 if (IS_ERR(restriction)) { 1618 if (IS_ERR(restriction)) {
1624 ret = PTR_ERR(restriction); 1619 ret = PTR_ERR(restriction);
1625 goto error; 1620 goto error;
1626 } 1621 }
1622 } else {
1623 if (_restriction)
1624 goto error;
1627 } 1625 }
1628 1626
1629 ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction); 1627 ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
1630 kfree(restriction); 1628 kfree(restriction);
1631
1632error: 1629error:
1633 key_ref_put(key_ref); 1630 key_ref_put(key_ref);
1634
1635 return ret; 1631 return ret;
1636} 1632}
1637 1633
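
[Editorial sketch] The keyctl rewrite above reduces keyctl_restrict_keyring() to a single invariant: _type and _restriction must be both NULL or both non-NULL. That invariant as a standalone predicate:

#include <errno.h>
#include <stddef.h>

static int check_type_restriction_pair(const char *type,
				       const char *restriction)
{
	if ((type == NULL) != (restriction == NULL))
		return -EINVAL;  /* exactly one supplied: reject */
	return 0;
}
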
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index e8036cd0ad54..114f7408feee 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -251,11 +251,12 @@ static int construct_key(struct key *key, const void *callout_info,
251 * The keyring selected is returned with an extra reference upon it which the 251 * The keyring selected is returned with an extra reference upon it which the
252 * caller must release. 252 * caller must release.
253 */ 253 */
254static void construct_get_dest_keyring(struct key **_dest_keyring) 254static int construct_get_dest_keyring(struct key **_dest_keyring)
255{ 255{
256 struct request_key_auth *rka; 256 struct request_key_auth *rka;
257 const struct cred *cred = current_cred(); 257 const struct cred *cred = current_cred();
258 struct key *dest_keyring = *_dest_keyring, *authkey; 258 struct key *dest_keyring = *_dest_keyring, *authkey;
259 int ret;
259 260
260 kenter("%p", dest_keyring); 261 kenter("%p", dest_keyring);
261 262
@@ -264,6 +265,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
264 /* the caller supplied one */ 265 /* the caller supplied one */
265 key_get(dest_keyring); 266 key_get(dest_keyring);
266 } else { 267 } else {
268 bool do_perm_check = true;
269
267 /* use a default keyring; falling through the cases until we 270 /* use a default keyring; falling through the cases until we
268 * find one that we actually have */ 271 * find one that we actually have */
269 switch (cred->jit_keyring) { 272 switch (cred->jit_keyring) {
@@ -278,8 +281,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
278 dest_keyring = 281 dest_keyring =
279 key_get(rka->dest_keyring); 282 key_get(rka->dest_keyring);
280 up_read(&authkey->sem); 283 up_read(&authkey->sem);
281 if (dest_keyring) 284 if (dest_keyring) {
285 do_perm_check = false;
282 break; 286 break;
287 }
283 } 288 }
284 289
285 case KEY_REQKEY_DEFL_THREAD_KEYRING: 290 case KEY_REQKEY_DEFL_THREAD_KEYRING:
@@ -314,11 +319,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
314 default: 319 default:
315 BUG(); 320 BUG();
316 } 321 }
322
323 /*
324 * Require Write permission on the keyring. This is essential
325 * because the default keyring may be the session keyring, and
326 * joining a keyring only requires Search permission.
327 *
328 * However, this check is skipped for the "requestor keyring" so
329 * that /sbin/request-key can itself use request_key() to add
330 * keys to the original requestor's destination keyring.
331 */
332 if (dest_keyring && do_perm_check) {
333 ret = key_permission(make_key_ref(dest_keyring, 1),
334 KEY_NEED_WRITE);
335 if (ret) {
336 key_put(dest_keyring);
337 return ret;
338 }
339 }
317 } 340 }
318 341
319 *_dest_keyring = dest_keyring; 342 *_dest_keyring = dest_keyring;
320 kleave(" [dk %d]", key_serial(dest_keyring)); 343 kleave(" [dk %d]", key_serial(dest_keyring));
321 return; 344 return 0;
322} 345}
323 346
324/* 347/*
@@ -444,11 +467,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
444 if (ctx->index_key.type == &key_type_keyring) 467 if (ctx->index_key.type == &key_type_keyring)
445 return ERR_PTR(-EPERM); 468 return ERR_PTR(-EPERM);
446 469
447 user = key_user_lookup(current_fsuid()); 470 ret = construct_get_dest_keyring(&dest_keyring);
448 if (!user) 471 if (ret)
449 return ERR_PTR(-ENOMEM); 472 goto error;
450 473
451 construct_get_dest_keyring(&dest_keyring); 474 user = key_user_lookup(current_fsuid());
475 if (!user) {
476 ret = -ENOMEM;
477 goto error_put_dest_keyring;
478 }
452 479
453 ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key); 480 ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
454 key_user_put(user); 481 key_user_put(user);
@@ -463,7 +490,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
463 } else if (ret == -EINPROGRESS) { 490 } else if (ret == -EINPROGRESS) {
464 ret = 0; 491 ret = 0;
465 } else { 492 } else {
466 goto couldnt_alloc_key; 493 goto error_put_dest_keyring;
467 } 494 }
468 495
469 key_put(dest_keyring); 496 key_put(dest_keyring);
@@ -473,8 +500,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
473construction_failed: 500construction_failed:
474 key_negate_and_link(key, key_negative_timeout, NULL, NULL); 501 key_negate_and_link(key, key_negative_timeout, NULL, NULL);
475 key_put(key); 502 key_put(key);
476couldnt_alloc_key: 503error_put_dest_keyring:
477 key_put(dest_keyring); 504 key_put(dest_keyring);
505error:
478 kleave(" = %d", ret); 506 kleave(" = %d", ret);
479 return ERR_PTR(ret); 507 return ERR_PTR(ret);
480} 508}
@@ -546,9 +574,7 @@ struct key *request_key_and_link(struct key_type *type,
546 if (!IS_ERR(key_ref)) { 574 if (!IS_ERR(key_ref)) {
547 key = key_ref_to_ptr(key_ref); 575 key = key_ref_to_ptr(key_ref);
548 if (dest_keyring) { 576 if (dest_keyring) {
549 construct_get_dest_keyring(&dest_keyring);
550 ret = key_link(dest_keyring, key); 577 ret = key_link(dest_keyring, key);
551 key_put(dest_keyring);
552 if (ret < 0) { 578 if (ret < 0) {
553 key_put(key); 579 key_put(key);
554 key = ERR_PTR(ret); 580 key = ERR_PTR(ret);
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index c0b0e9e8aa66..800104c8a3ed 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -266,6 +266,7 @@
266/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ 266/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
267#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ 267#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
268#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 268#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
269#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
269 270
270/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ 271/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
271#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 272#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 07fd03c74a77..04e32f965ad7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -84,8 +84,6 @@
84 84
85#define uninitialized_var(x) x = *(&(x)) 85#define uninitialized_var(x) x = *(&(x))
86 86
87#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
88
89#include <linux/types.h> 87#include <linux/types.h>
90 88
91/* 89/*
@@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
135/* 133/*
136 * Prevent the compiler from merging or refetching reads or writes. The 134 * Prevent the compiler from merging or refetching reads or writes. The
137 * compiler is also forbidden from reordering successive instances of 135 * compiler is also forbidden from reordering successive instances of
138 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the 136 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
139 * compiler is aware of some particular ordering. One way to make the 137 * particular ordering. One way to make the compiler aware of ordering is to
140 * compiler aware of ordering is to put the two invocations of READ_ONCE, 138 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
141 * WRITE_ONCE or ACCESS_ONCE() in different C statements. 139 * statements.
142 * 140 *
143 * In contrast to ACCESS_ONCE these two macros will also work on aggregate 141 * These two macros will also work on aggregate data types like structs or
144 * data types like structs or unions. If the size of the accessed data 142 * unions. If the size of the accessed data type exceeds the word size of
145 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) 143 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
146 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a 144 * fall back to memcpy and print a compile-time warning.
147 * compile-time warning.
148 * 145 *
149 * Their two major use cases are: (1) Mediating communication between 146 * Their two major use cases are: (1) Mediating communication between
150 * process-level code and irq/NMI handlers, all running on the same CPU, 147 * process-level code and irq/NMI handlers, all running on the same CPU,
151 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise 148 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
152 * mutilate accesses that either do not require ordering or that interact 149 * mutilate accesses that either do not require ordering or that interact
153 * with an explicit memory barrier or atomic instruction that provides the 150 * with an explicit memory barrier or atomic instruction that provides the
154 * required ordering. 151 * required ordering.
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index 940c1b075659..6b0c36a58fcb 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -48,6 +48,7 @@ static inline int debug_locks_off(void)
48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) 48#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) 49#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
50#define pr_warn pr_err 50#define pr_warn pr_err
51#define pr_cont pr_err
51 52
52#define list_del_rcu list_del 53#define list_del_rcu list_del
53 54
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..13a58531e6fa
--- /dev/null
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,7 @@
1#if defined(__aarch64__)
2#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
3#elif defined(__s390__)
4#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
5#else
6#include <uapi/asm-generic/bpf_perf_event.h>
7#endif
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 282d7613fce8..496e59a2738b 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -630,9 +630,9 @@ struct kvm_s390_irq {
630 630
631struct kvm_s390_irq_state { 631struct kvm_s390_irq_state {
632 __u64 buf; 632 __u64 buf;
633 __u32 flags; 633 __u32 flags; /* will stay unused for compatibility reasons */
634 __u32 len; 634 __u32 len;
635 __u32 reserved[4]; 635 __u32 reserved[4]; /* will stay unused for compatibility reasons */
636}; 636};
637 637
638/* for KVM_SET_GUEST_DEBUG */ 638/* for KVM_SET_GUEST_DEBUG */
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
index 12e377184ee4..e0b85930dd77 100644
--- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) 607fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) 608fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) 609fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
610ff: 610ff: UD0
611EndTable 611EndTable
612 612
613Table: 3-byte opcode 1 (0x0f 0x38) 613Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
7177e: vpermt2d/q Vx,Hx,Wx (66),(ev) 7177e: vpermt2d/q Vx,Hx,Wx (66),(ev)
7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev) 7187f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
71980: INVEPT Gy,Mdq (66) 71980: INVEPT Gy,Mdq (66)
72081: INVPID Gy,Mdq (66) 72081: INVVPID Gy,Mdq (66)
72182: INVPCID Gy,Mdq (66) 72182: INVPCID Gy,Mdq (66)
72283: vpmultishiftqb Vx,Hx,Wx (66),(ev) 72283: vpmultishiftqb Vx,Hx,Wx (66),(ev)
72388: vexpandps/d Vpd,Wpd (66),(ev) 72388: vexpandps/d Vpd,Wpd (66),(ev)
@@ -896,7 +896,7 @@ EndTable
896 896
897GrpTable: Grp3_1 897GrpTable: Grp3_1
8980: TEST Eb,Ib 8980: TEST Eb,Ib
8991: 8991: TEST Eb,Ib
9002: NOT Eb 9002: NOT Eb
9013: NEG Eb 9013: NEG Eb
9024: MUL AL,Eb 9024: MUL AL,Eb
@@ -970,6 +970,15 @@ GrpTable: Grp9
970EndTable 970EndTable
971 971
972GrpTable: Grp10 972GrpTable: Grp10
973# all are UD1
9740: UD1
9751: UD1
9762: UD1
9773: UD1
9784: UD1
9795: UD1
9806: UD1
9817: UD1
973EndTable 982EndTable
974 983
975# Grp11A and Grp11B are expressed as Grp11 in Intel SDM 984# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
index c4d55919fac1..e0b85930dd77 100644
--- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
 fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
 fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
 fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
+ff: UD0
 EndTable
 
 Table: 3-byte opcode 1 (0x0f 0x38)
@@ -717,7 +717,7 @@ AVXcode: 2
 7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
 80: INVEPT Gy,Mdq (66)
-81: INVPID Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
 82: INVPCID Gy,Mdq (66)
 83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
 88: vexpandps/d Vpd,Wpd (66),(ev)
@@ -970,6 +970,15 @@ GrpTable: Grp9
 EndTable
 
 GrpTable: Grp10
+# all are UD1
+0: UD1
+1: UD1
+2: UD1
+3: UD1
+4: UD1
+5: UD1
+6: UD1
+7: UD1
 EndTable
 
 # Grp11A and Grp11B are expressed as Grp11 in Intel SDM
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index efd78b827b05..3a5cb5a6e94a 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md);
 static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
 {
 	struct perf_event_mmap_page *pc = mm->base;
-	u64 head = ACCESS_ONCE(pc->data_head);
+	u64 head = READ_ONCE(pc->data_head);
 	rmb();
 	return head;
 }
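
The mmap.h hunk above swaps the deprecated ACCESS_ONCE() for READ_ONCE() when sampling the ring-buffer head. As a rough userspace illustration only (not the kernel's actual macro, which also copes with non-scalar types), a volatile access is what forces the compiler to emit exactly one, untorn load before the read barrier; the helper name my_read_once below is made up for this sketch:

#include <stdio.h>

/* Hypothetical stand-in for READ_ONCE(): the volatile qualifier stops the
 * compiler from re-reading, caching, or tearing the load. */
#define my_read_once(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned long data_head;

int main(void)
{
	unsigned long head = my_read_once(data_head);

	__sync_synchronize();	/* stand-in for the kernel's rmb() */
	printf("head=%lu\n", head);
	return 0;
}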
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index f309ab9afd9b..4a9fb8fb445f 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,19 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-endif
-include $(srctree)/tools/scripts/Makefile.arch
-
-$(call detected_var,SRCARCH)
-
 LIBDIR := ../../../lib
 BPFDIR := $(LIBDIR)/bpf
 APIDIR := ../../../include/uapi
-ASMDIR:= ../../../arch/$(ARCH)/include/uapi
 GENDIR := ../../../../include/generated
 GENHDR := $(GENDIR)/autoconf.h
 
@@ -21,7 +10,7 @@ ifneq ($(wildcard $(GENHDR)),)
 	GENFLAGS := -DHAVE_GENHDR
 endif
 
-CFLAGS += -Wall -O2 -I$(APIDIR) -I$(ASMDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
+CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
 LDLIBS += -lcap -lelf
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index 8a1cd1616de4..c9c81614a66a 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -50,14 +50,14 @@ static int parse_status(const char *value)
 
 	while (*c != '\0') {
 		int port, status, speed, devid;
-		unsigned long socket;
+		int sockfd;
 		char lbusid[SYSFS_BUS_ID_SIZE];
 		struct usbip_imported_device *idev;
 		char hub[3];
 
-		ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n",
+		ret = sscanf(c, "%2s %d %d %d %x %u %31s\n",
 			     hub, &port, &status, &speed,
-			     &devid, &socket, lbusid);
+			     &devid, &sockfd, lbusid);
 
 		if (ret < 5) {
 			dbg("sscanf failed: %d", ret);
@@ -66,7 +66,7 @@ static int parse_status(const char *value)
 
 		dbg("hub %s port %d status %d speed %d devid %x",
 		    hub, port, status, speed, devid);
-		dbg("socket %lx lbusid %s", socket, lbusid);
+		dbg("sockfd %u lbusid %s", sockfd, lbusid);
 
 		/* if a device is connected, look at it */
 		idev = &vhci_driver->idev[port];
@@ -106,7 +106,7 @@ static int parse_status(const char *value)
 	return 0;
 }
 
-#define MAX_STATUS_NAME 16
+#define MAX_STATUS_NAME 18
 
 static int refresh_imported_device_list(void)
 {
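
The vhci_driver.c fix pairs the new %u conversion with an int-sized argument; the old code scanned the kernel's decimal sockfd column with %lx into an unsigned long, which both mis-parses the base and consumes 8 bytes on LP64 targets. A standalone sketch of the format/argument matching rule, with a made-up status line:

#include <stdio.h>

int main(void)
{
	int sockfd;
	char busid[32];

	/* "%u" consumes an unsigned-int-sized slot, matching the int here
	 * (as the patch does); the old "%lx" + unsigned long pairing read a
	 * hex 8-byte value on 64-bit and mis-parsed the decimal fd. */
	if (sscanf("3 1-1", "%u %31s", &sockfd, busid) == 2)
		printf("sockfd=%d busid=%s\n", sockfd, busid);
	return 0;
}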
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c
index 38bb171aceba..e6e81305ef46 100644
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -16,24 +16,41 @@
 #define unlikely(x) (__builtin_expect(!!(x), 0))
 #define likely(x) (__builtin_expect(!!(x), 1))
 #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
+#define SIZE_MAX (~(size_t)0)
+
 typedef pthread_spinlock_t spinlock_t;
 
 typedef int gfp_t;
-static void *kmalloc(unsigned size, gfp_t gfp)
-{
-	return memalign(64, size);
-}
+#define __GFP_ZERO 0x1
 
-static void *kzalloc(unsigned size, gfp_t gfp)
+static void *kmalloc(unsigned size, gfp_t gfp)
 {
 	void *p = memalign(64, size);
 	if (!p)
 		return p;
-	memset(p, 0, size);
 
+	if (gfp & __GFP_ZERO)
+		memset(p, 0, size);
 	return p;
 }
 
+static inline void *kzalloc(unsigned size, gfp_t flags)
+{
+	return kmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+	if (size != 0 && n > SIZE_MAX / size)
+		return NULL;
+	return kmalloc(n * size, flags);
+}
+
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+	return kmalloc_array(n, size, flags | __GFP_ZERO);
+}
+
 static void kfree(void *p)
 {
 	if (p)
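
The kmalloc_array() shim added above follows the kernel's pattern: reject any n * size product that would wrap around SIZE_MAX instead of silently returning a too-small buffer. A minimal userspace demonstration of the same guard (checked_array_alloc is a hypothetical name for this sketch):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *checked_array_alloc(size_t n, size_t size)
{
	/* Same guard as the patch: fail early if n * size would overflow. */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	size_t n = SIZE_MAX / 4 + 1;	/* n * 4 wraps past SIZE_MAX */
	void *p = checked_array_alloc(n, 4);

	printf("overflowing request -> %s\n", p ? "allocated!?" : "rejected");
	free(p);
	return 0;
}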
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh
index 35b039864b77..0cf28aa6f21c 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/vm/slabinfo-gnuplot.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 
 # Sergey Senozhatsky, 2015
 # sergey.senozhatsky.work@gmail.com
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 4151250ce8da..f9555b1e7f15 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -479,9 +479,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 
 	vtimer_restore_state(vcpu);
 
-	if (has_vhe())
-		disable_el1_phys_timer_access();
-
 	/* Set the background timer for the physical timer emulation. */
 	phys_timer_emulate(vcpu);
 }
@@ -510,9 +507,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	if (unlikely(!timer->enabled))
 		return;
 
-	if (has_vhe())
-		enable_el1_phys_timer_access();
-
 	vtimer_save_state(vcpu);
 
 	/*
@@ -841,7 +835,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	kvm_timer_vcpu_load_vgic(vcpu);
+	if (!irqchip_in_kernel(vcpu->kvm))
+		kvm_timer_vcpu_load_user(vcpu);
+	else
+		kvm_timer_vcpu_load_vgic(vcpu);
 	preempt_enable();
 
 	return 0;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a67c106d73f5..6b60c98a6e22 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -188,6 +188,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 			kvm->vcpus[i] = NULL;
 		}
 	}
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -296,7 +297,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
-	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
@@ -627,6 +627,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 		if (ret)
 			return ret;
+		if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+			return 0;
+
 	}
 
 	if (run->immediate_exit)
@@ -1502,7 +1505,7 @@ int kvm_arch_init(void *opaque)
 	bool in_hyp_mode;
 
 	if (!is_hyp_mode_available()) {
-		kvm_err("HYP mode not available\n");
+		kvm_info("HYP mode not available\n");
 		return -ENODEV;
 	}
 
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index f39861639f08..f24404b3c8df 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -27,42 +27,34 @@ void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
 	write_sysreg(cntvoff, cntvoff_el2);
 }
 
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * We don't need to do this for VHE since the host kernel runs in EL2
 	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
 	 */
-	if (!has_vhe())
-		enable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
 
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-	if (!has_vhe())
-		disable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index a3f18d362366..d7fd46fe9efb 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -34,11 +34,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 	else
 		elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index b7baf581611a..99e026d2dade 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
 	u32 nr = dist->nr_spis;
 	int i, ret;
 
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
 
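
The vgic-irqfd.c change is the usual kernel style cleanup: allocating with sizeof(*entries) instead of spelling out the struct type, so the element size can never fall out of sync with the pointer's type. A small userspace sketch of the idiom, using calloc() and a made-up routing_entry struct:

#include <stdio.h>
#include <stdlib.h>

struct routing_entry {	/* hypothetical stand-in for the kernel struct */
	unsigned int gsi;
	unsigned int type;
};

int main(void)
{
	struct routing_entry *entries;
	unsigned int nr = 8;

	/* sizeof(*entries) tracks the pointee type automatically; retyping
	 * 'entries' later cannot desynchronize the element size. */
	entries = calloc(nr, sizeof(*entries));
	if (!entries)
		return 1;
	printf("allocated %u entries of %zu bytes\n", nr, sizeof(*entries));
	free(entries);
	return 0;
}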
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 1f761a9991e7..8e633bd9cc1e 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -421,6 +421,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 	u32 *intids;
 	int nr_irqs, i;
 	unsigned long flags;
+	u8 pendmask;
 
 	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
 	if (nr_irqs < 0)
@@ -428,7 +429,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
@@ -821,6 +821,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
 		return E_ITS_MAPC_COLLECTION_OOR;
 
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 2f05f732d3fd..f47e8481fa45 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -327,13 +327,13 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
 	int last_byte_offset = -1;
 	struct vgic_irq *irq;
 	int ret;
+	u8 val;
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		int byte_offset, bit_nr;
 		struct kvm_vcpu *vcpu;
 		gpa_t pendbase, ptr;
 		bool stored;
-		u8 val;
 
 		vcpu = irq->target_vcpu;
 		if (!vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 53c324aa44ef..4a37292855bc 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -337,8 +337,10 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
 		goto out;
 
 	WARN_ON(!(irq->hw && irq->host_irq == virq));
-	irq->hw = false;
-	ret = its_unmap_vlpi(virq);
+	if (irq->hw) {
+		irq->hw = false;
+		ret = its_unmap_vlpi(virq);
+	}
 
 out:
 	mutex_unlock(&its->its_lock);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index b168a328a9e0..ecb8e25f5fe5 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -492,6 +492,7 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
 	struct vgic_irq *irq;
+	unsigned long flags;
 	int ret = 0;
 
 	if (!vgic_initialized(vcpu->kvm))
@@ -502,12 +503,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 		return -EINVAL;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return ret;
 }
@@ -823,13 +824,14 @@ void vgic_kick_vcpus(struct kvm *kvm)
 
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	struct vgic_irq *irq;
 	bool map_is_active;
 	unsigned long flags;
 
 	if (!vgic_initialized(vcpu->kvm))
 		return false;
 
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
 	spin_unlock_irqrestore(&irq->irq_lock, flags);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c422c10cd1dd..210bf820385a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -135,6 +135,11 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
@@ -360,6 +365,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
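
The kvm_main.c hunks add an empty __weak default for kvm_arch_mmu_notifier_invalidate_range() so that architectures which need to react to the invalidate (arm/arm64 here) can supply a strong override while every other architecture links against the no-op. A single-file sketch of the weak-symbol mechanism, using the raw GCC/clang attribute that the kernel's __weak macro expands to; arch_hook is a made-up name for this illustration:

#include <stdio.h>

/* Weak default: any strong definition of arch_hook elsewhere in the final
 * link silently replaces this no-op, just like an arch override replaces
 * the new __weak stub in kvm_main.c. */
__attribute__((weak)) void arch_hook(unsigned long start, unsigned long end)
{
	/* default: do nothing */
}

int main(void)
{
	arch_hook(0x1000, 0x2000);	/* calls the override if one is linked */
	puts("hook returned");
	return 0;
}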