aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDmitry Torokhov <dmitry.torokhov@gmail.com>2018-08-16 14:10:56 -0400
committerDmitry Torokhov <dmitry.torokhov@gmail.com>2018-08-16 14:10:56 -0400
commit13fe7056bebb4015c6231a07a1be4d3aebbfe979 (patch)
tree8aefa59a61c081c402bc85f2b47c17e1374eabdd
parented9800100f1a70154c11cfa0ccc0b9ff51e3436a (diff)
parent100294cee9a98bfd4d6cb2d1c8a8aef0e959b0c4 (diff)
Merge branch 'next' into for-linus
Prepare input updates for 4.19 merge window.
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt45
-rw-r--r--Documentation/devicetree/bindings/input/keys.txt8
-rw-r--r--Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt10
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/bu21029.txt35
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/eeti.txt30
-rw-r--r--Documentation/devicetree/bindings/net/dsa/b53.txt1
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz90x1.txt7
-rw-r--r--Documentation/i2c/busses/i2c-ocores2
-rw-r--r--Documentation/networking/ppp_generic.txt6
-rw-r--r--Documentation/userspace-api/index.rst1
-rw-r--r--Documentation/userspace-api/spec_ctrl.rst94
-rw-r--r--MAINTAINERS30
-rw-r--r--Makefile11
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/include/asm/dma-mapping.h8
-rw-r--r--arch/alpha/kernel/io.c14
-rw-r--r--arch/alpha/kernel/pci-noop.c33
-rw-r--r--arch/alpha/kernel/pci_iommu.c4
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi6
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-one.dts1
-rw-r--r--arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts2
-rw-r--r--arch/arm/mach-ep93xx/core.c2
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c2
-rw-r--r--arch/arm/mach-pxa/palmz72.c2
-rw-r--r--arch/arm/mach-pxa/viper.c4
-rw-r--r--arch/arm/mach-sa1100/simpad.c2
-rw-r--r--arch/arm/mm/dma-mapping.c16
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts1
-rw-r--r--arch/arm64/include/asm/atomic_lse.h24
-rw-r--r--arch/arm64/kernel/arm64ksyms.c8
-rw-r--r--arch/arm64/lib/tishift.S15
-rw-r--r--arch/arm64/mm/fault.c51
-rw-r--r--arch/arm64/mm/mmu.c16
-rw-r--r--arch/mips/boot/compressed/uart-16550.c6
-rw-r--r--arch/mips/boot/dts/xilfpga/Makefile2
-rw-r--r--arch/mips/generic/Platform1
-rw-r--r--arch/mips/kernel/process.c4
-rw-r--r--arch/mips/kernel/ptrace.c24
-rw-r--r--arch/mips/kernel/ptrace32.c6
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/mm/c-r4k.c9
-rw-r--r--arch/nds32/Kconfig7
-rw-r--r--arch/nds32/Kconfig.cpu5
-rw-r--r--arch/nds32/Makefile7
-rw-r--r--arch/nds32/include/asm/Kbuild2
-rw-r--r--arch/nds32/include/asm/bitfield.h3
-rw-r--r--arch/nds32/include/asm/cacheflush.h2
-rw-r--r--arch/nds32/include/asm/io.h2
-rw-r--r--arch/nds32/include/asm/page.h3
-rw-r--r--arch/nds32/include/asm/pgtable.h1
-rw-r--r--arch/nds32/kernel/ex-entry.S2
-rw-r--r--arch/nds32/kernel/head.S28
-rw-r--r--arch/nds32/kernel/setup.c3
-rw-r--r--arch/nds32/kernel/stacktrace.c2
-rw-r--r--arch/nds32/kernel/vdso.c10
-rw-r--r--arch/nds32/lib/copy_page.S3
-rw-r--r--arch/nds32/mm/alignment.c9
-rw-r--r--arch/nds32/mm/cacheflush.c74
-rw-r--r--arch/nds32/mm/init.c1
-rw-r--r--arch/powerpc/include/asm/exception-64s.h29
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h19
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h1
-rw-r--r--arch/powerpc/include/asm/security_features.h11
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S6
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S19
-rw-r--r--arch/powerpc/kernel/security.c149
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S14
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S97
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c108
-rw-r--r--arch/powerpc/lib/feature-fixups.c115
-rw-r--r--arch/powerpc/platforms/powernv/setup.c1
-rw-r--r--arch/powerpc/platforms/pseries/setup.c1
-rw-r--r--arch/s390/kvm/vsie.c2
-rw-r--r--arch/s390/purgatory/Makefile2
-rw-r--r--arch/x86/include/asm/cpufeatures.h20
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/msr-index.h9
-rw-r--r--arch/x86/include/asm/nospec-branch.h43
-rw-r--r--arch/x86/include/asm/spec-ctrl.h80
-rw-r--r--arch/x86/include/asm/thread_info.h4
-rw-r--r--arch/x86/kernel/cpu/amd.c22
-rw-r--r--arch/x86/kernel/cpu/bugs.c397
-rw-r--r--arch/x86/kernel/cpu/common.c67
-rw-r--r--arch/x86/kernel/cpu/cpu.h2
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/process.c146
-rw-r--r--arch/x86/kernel/smpboot.c5
-rw-r--r--arch/x86/kvm/cpuid.c26
-rw-r--r--arch/x86/kvm/hyperv.c19
-rw-r--r--arch/x86/kvm/lapic.c16
-rw-r--r--arch/x86/kvm/svm.c66
-rw-r--r--arch/x86/kvm/vmx.c31
-rw-r--r--arch/x86/kvm/x86.c30
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/libata-core.c9
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/node.c5
-rw-r--r--drivers/base/power/main.c7
-rw-r--r--drivers/bcma/driver_mips.c2
-rw-r--r--drivers/block/loop.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel.c4
-rw-r--r--drivers/firmware/qcom_scm-32.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c44
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c15
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c1
-rw-r--r--drivers/gpu/drm/i915/i915_query.c15
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c51
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c48
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.h25
-rw-r--r--drivers/hwtracing/intel_th/msu.c6
-rw-r--r--drivers/hwtracing/stm/core.c7
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/iio/adc/Kconfig1
-rw-r--r--drivers/iio/adc/ad7793.c75
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c41
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c17
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c2
-rw-r--r--drivers/iio/buffer/kfifo_buf.c11
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c8
-rw-r--r--drivers/infiniband/core/cache.c2
-rw-r--r--drivers/infiniband/core/umem.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c55
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c94
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c61
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h3
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c7
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h1
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c18
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c60
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c10
-rw-r--r--drivers/input/evbug.c4
-rw-r--r--drivers/input/evdev.c16
-rw-r--r--drivers/input/gameport/emu10k1-gp.c4
-rw-r--r--drivers/input/gameport/lightning.c4
-rw-r--r--drivers/input/gameport/ns558.c4
-rw-r--r--drivers/input/input.c16
-rw-r--r--drivers/input/joystick/a3d.c4
-rw-r--r--drivers/input/joystick/adi.c4
-rw-r--r--drivers/input/joystick/amijoy.c4
-rw-r--r--drivers/input/joystick/analog.c4
-rw-r--r--drivers/input/joystick/cobra.c4
-rw-r--r--drivers/input/joystick/db9.c9
-rw-r--r--drivers/input/joystick/gamecon.c4
-rw-r--r--drivers/input/joystick/gf2k.c4
-rw-r--r--drivers/input/joystick/grip.c4
-rw-r--r--drivers/input/joystick/guillemot.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-ff.c8
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c30
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c16
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c4
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c4
-rw-r--r--drivers/input/joystick/iforce/iforce.h6
-rw-r--r--drivers/input/joystick/interact.c4
-rw-r--r--drivers/input/joystick/joydump.c4
-rw-r--r--drivers/input/joystick/magellan.c4
-rw-r--r--drivers/input/joystick/pxrc.c166
-rw-r--r--drivers/input/joystick/sidewinder.c4
-rw-r--r--drivers/input/joystick/spaceball.c4
-rw-r--r--drivers/input/joystick/spaceorb.c4
-rw-r--r--drivers/input/joystick/stinger.c4
-rw-r--r--drivers/input/joystick/tmdc.c4
-rw-r--r--drivers/input/joystick/turbografx.c4
-rw-r--r--drivers/input/joystick/warrior.c4
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1
-rw-r--r--drivers/input/keyboard/amikbd.c4
-rw-r--r--drivers/input/keyboard/atakbd.c4
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c69
-rw-r--r--drivers/input/keyboard/gpio_keys.c8
-rw-r--r--drivers/input/keyboard/imx_keypad.c12
-rw-r--r--drivers/input/keyboard/newtonkbd.c4
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c15
-rw-r--r--drivers/input/keyboard/stowaway.c4
-rw-r--r--drivers/input/keyboard/sunkbd.c4
-rw-r--r--drivers/input/keyboard/xtkbd.c4
-rw-r--r--drivers/input/misc/keyspan_remote.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c70
-rw-r--r--drivers/input/misc/powermate.c2
-rw-r--r--drivers/input/misc/xen-kbdfront.c183
-rw-r--r--drivers/input/misc/yealink.c4
-rw-r--r--drivers/input/mouse/appletouch.c7
-rw-r--r--drivers/input/mouse/cyapa_gen5.c1
-rw-r--r--drivers/input/mouse/cyapa_gen6.c1
-rw-r--r--drivers/input/mouse/elantech.c2
-rw-r--r--drivers/input/mouse/inport.c4
-rw-r--r--drivers/input/mouse/logibm.c4
-rw-r--r--drivers/input/mouse/pc110pad.c4
-rw-r--r--drivers/input/mouse/sermouse.c8
-rw-r--r--drivers/input/serio/ct82c710.c4
-rw-r--r--drivers/input/serio/i8042.c3
-rw-r--r--drivers/input/serio/q40kbd.c4
-rw-r--r--drivers/input/serio/rpckbd.c4
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c223
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c484
-rw-r--r--drivers/input/touchscreen/eeti_ts.c37
-rw-r--r--drivers/input/touchscreen/egalax_ts.c5
-rw-r--r--drivers/input/touchscreen/elo.c1
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c21
-rw-r--r--drivers/input/touchscreen/gunze.c4
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c14
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c4
-rw-r--r--drivers/input/touchscreen/wdt87xx_i2c.c2
-rw-r--r--drivers/isdn/hardware/eicon/diva.c22
-rw-r--r--drivers/isdn/hardware/eicon/diva.h5
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c18
-rw-r--r--drivers/md/dm-raid.c6
-rw-r--r--drivers/md/md-bitmap.c305
-rw-r--r--drivers/md/md-bitmap.h60
-rw-r--r--drivers/md/md-cluster.c18
-rw-r--r--drivers/md/md.c44
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c20
-rw-r--r--drivers/md/raid1.c35
-rw-r--r--drivers/md/raid10.c52
-rw-r--r--drivers/md/raid5-cache.c8
-rw-r--r--drivers/md/raid5.c44
-rw-r--r--drivers/mfd/cros_ec_spi.c24
-rw-r--r--drivers/mmc/core/block.c2
-rw-r--r--drivers/mmc/host/sdhci-iproc.c33
-rw-r--r--drivers/net/dsa/b53/b53_common.c13
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c5
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c36
-rw-r--r--drivers/net/ethernet/3com/3c59x.c104
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c88
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c14
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c5
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c61
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c22
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/phy/bcm-cygnus.c6
-rw-r--r--drivers/net/phy/bcm-phy-lib.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.h7
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/phy/micrel.c31
-rw-r--r--drivers/net/ppp/ppp_generic.c27
-rw-r--r--drivers/net/tun.c61
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c21
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c72
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c7
-rw-r--r--drivers/nvme/host/core.c2
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c23
-rw-r--r--drivers/s390/block/dasd.c7
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c23
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c14
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/scsi_transport_srp.c22
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/sr_ioctl.c10
-rw-r--r--drivers/soc/lantiq/gphy.c36
-rw-r--r--drivers/ssb/Kconfig4
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c25
-rw-r--r--drivers/vhost/net.c37
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--fs/affs/namei.c15
-rw-r--r--fs/afs/security.c10
-rw-r--r--fs/afs/vlclient.c19
-rw-r--r--fs/aio.c7
-rw-r--r--fs/befs/linuxvfs.c17
-rw-r--r--fs/btrfs/inode.c19
-rw-r--r--fs/cachefiles/namei.c10
-rw-r--r--fs/cramfs/inode.c2
-rw-r--r--fs/dcache.c22
-rw-r--r--fs/ecryptfs/inode.c3
-rw-r--r--fs/ext2/inode.c10
-rw-r--r--fs/ext2/namei.c6
-rw-r--r--fs/ext4/namei.c6
-rw-r--r--fs/f2fs/namei.c12
-rw-r--r--fs/inode.c1
-rw-r--r--fs/internal.h1
-rw-r--r--fs/jffs2/dir.c12
-rw-r--r--fs/jfs/namei.c12
-rw-r--r--fs/kernfs/mount.c1
-rw-r--r--fs/namei.c7
-rw-r--r--fs/nfsd/vfs.c22
-rw-r--r--fs/nilfs2/namei.c6
-rw-r--r--fs/ocfs2/cluster/heartbeat.c11
-rw-r--r--fs/open.c44
-rw-r--r--fs/orangefs/namei.c9
-rw-r--r--fs/proc/array.c25
-rw-r--r--fs/reiserfs/namei.c12
-rw-r--r--fs/seq_file.c5
-rw-r--r--fs/super.c30
-rw-r--r--fs/sysfs/mount.c6
-rw-r--r--fs/udf/namei.c6
-rw-r--r--fs/ufs/namei.c6
-rw-r--r--include/drm/bridge/dw_hdmi.h2
-rw-r--r--include/linux/bitmap.h8
-rw-r--r--include/linux/bpf_verifier.h3
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/iio/buffer_impl.h6
-rw-r--r--include/linux/joystick.h4
-rw-r--r--include/linux/memory_hotplug.h3
-rw-r--r--include/linux/mlx5/driver.h12
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/node.h8
-rw-r--r--include/linux/nospec.h10
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/seccomp.h5
-rw-r--r--include/net/netfilter/nf_tables.h5
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/tls.h3
-rw-r--r--include/rdma/ib_umem.h1
-rw-r--r--include/rdma/uverbs_ioctl.h10
-rw-r--r--include/trace/events/sched.h4
-rw-r--r--include/uapi/linux/bpf.h2
-rw-r--r--include/uapi/linux/joystick.h4
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_tcp.h3
-rw-r--r--include/uapi/linux/nl80211.h2
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/prctl.h12
-rw-r--r--include/uapi/linux/seccomp.h5
-rw-r--r--include/xen/interface/io/displif.h8
-rw-r--r--include/xen/interface/io/kbdif.h78
-rw-r--r--include/xen/interface/io/sndif.h10
-rw-r--r--init/main.c1
-rw-r--r--ipc/shm.c19
-rw-r--r--kernel/bpf/core.c100
-rw-r--r--kernel/bpf/sockmap.c18
-rw-r--r--kernel/bpf/verifier.c145
-rw-r--r--kernel/kthread.c6
-rw-r--r--kernel/sched/core.c45
-rw-r--r--kernel/sched/deadline.c6
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/sched/topology.c2
-rw-r--r--kernel/seccomp.c21
-rw-r--r--kernel/sys.c28
-rw-r--r--kernel/trace/trace.c12
-rw-r--r--kernel/trace/trace.h11
-rw-r--r--kernel/trace/trace_events_trigger.c15
-rw-r--r--lib/bitmap.c20
-rw-r--r--lib/iov_iter.c4
-rw-r--r--lib/radix-tree.c4
-rw-r--r--mm/cma.c83
-rw-r--r--mm/compaction.c4
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/internal.h4
-rw-r--r--mm/kasan/kasan.c66
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/page_alloc.c99
-rw-r--r--mm/swapfile.c7
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/batman-adv/multicast.c2
-rw-r--r--net/batman-adv/translation-table.c84
-rw-r--r--net/bridge/netfilter/ebt_stp.c4
-rw-r--r--net/bridge/netfilter/ebtables.c3
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/filter.c11
-rw-r--r--net/core/net-sysfs.c6
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dsa/dsa2.c9
-rw-r--r--net/ipv4/fib_frontend.c9
-rw-r--r--net/ipv4/ip_gre.c4
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/ipmr_base.c5
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/netfilter/ipt_rpfilter.c2
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/tcp_output.c7
-rw-r--r--net/ipv6/ip6_gre.c286
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/ip6_tunnel.c11
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/seg6_iptunnel.c4
-rw-r--r--net/ipv6/sit.c5
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/kcm/kcmsock.c2
-rw-r--r--net/mac80211/mesh_plink.c8
-rw-r--r--net/ncsi/ncsi-netlink.c2
-rw-r--r--net/netfilter/core.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c11
-rw-r--r--net/netfilter/nf_tables_api.c85
-rw-r--r--net/netfilter/nf_tables_core.c17
-rw-r--r--net/netfilter/nfnetlink_acct.c2
-rw-r--r--net/netfilter/nfnetlink_cthelper.c7
-rw-r--r--net/netfilter/nft_compat.c201
-rw-r--r--net/netfilter/nft_ct.c20
-rw-r--r--net/netfilter/nft_immediate.c15
-rw-r--r--net/netfilter/nft_limit.c38
-rw-r--r--net/netfilter/nft_meta.c14
-rw-r--r--net/netfilter/x_tables.c6
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/sched/act_vlan.c2
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/sch_red.c5
-rw-r--r--net/sched/sch_tbf.c5
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/socket.c51
-rw-r--r--net/smc/smc_pnet.c71
-rw-r--r--net/tls/tls_sw.c9
-rw-r--r--net/wireless/nl80211.c3
-rw-r--r--net/wireless/reg.c3
-rw-r--r--net/xfrm/xfrm_policy.c5
-rwxr-xr-xscripts/checkpatch.pl2
-rw-r--r--security/selinux/hooks.c20
-rw-r--r--security/selinux/ss/services.c2
-rw-r--r--sound/core/timer.c4
-rw-r--r--sound/pci/hda/hda_local.h6
-rw-r--r--tools/include/uapi/linux/bpf.h2
-rw-r--r--tools/lib/bpf/libbpf.c2
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt10
-rw-r--r--tools/perf/tests/topology.c30
-rw-r--r--tools/perf/util/bpf-loader.c6
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c12
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/parse-events.c130
-rw-r--r--tools/perf/util/parse-events.h7
-rw-r--r--tools/perf/util/parse-events.y8
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/testing/radix-tree/idr-test.c7
-rw-r--r--tools/testing/selftests/bpf/config2
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c5
-rw-r--r--tools/testing/selftests/net/config5
-rw-r--r--tools/testing/selftests/net/reuseport_bpf_numa.c4
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c22
488 files changed, 6367 insertions, 2894 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 025b7cf3768d..bd4975e132d3 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -478,6 +478,7 @@ What: /sys/devices/system/cpu/vulnerabilities
478 /sys/devices/system/cpu/vulnerabilities/meltdown 478 /sys/devices/system/cpu/vulnerabilities/meltdown
479 /sys/devices/system/cpu/vulnerabilities/spectre_v1 479 /sys/devices/system/cpu/vulnerabilities/spectre_v1
480 /sys/devices/system/cpu/vulnerabilities/spectre_v2 480 /sys/devices/system/cpu/vulnerabilities/spectre_v2
481 /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
481Date: January 2018 482Date: January 2018
482Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> 483Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
483Description: Information about CPU vulnerabilities 484Description: Information about CPU vulnerabilities
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 11fc28ecdb6d..f2040d46f095 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2680,6 +2680,9 @@
2680 allow data leaks with this option, which is equivalent 2680 allow data leaks with this option, which is equivalent
2681 to spectre_v2=off. 2681 to spectre_v2=off.
2682 2682
2683 nospec_store_bypass_disable
2684 [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
2685
2683 noxsave [BUGS=X86] Disables x86 extended register state save 2686 noxsave [BUGS=X86] Disables x86 extended register state save
2684 and restore using xsave. The kernel will fallback to 2687 and restore using xsave. The kernel will fallback to
2685 enabling legacy floating-point and sse state. 2688 enabling legacy floating-point and sse state.
@@ -4025,6 +4028,48 @@
4025 Not specifying this option is equivalent to 4028 Not specifying this option is equivalent to
4026 spectre_v2=auto. 4029 spectre_v2=auto.
4027 4030
4031 spec_store_bypass_disable=
4032 [HW] Control Speculative Store Bypass (SSB) Disable mitigation
4033 (Speculative Store Bypass vulnerability)
4034
4035 Certain CPUs are vulnerable to an exploit against a
4036 a common industry wide performance optimization known
4037 as "Speculative Store Bypass" in which recent stores
4038 to the same memory location may not be observed by
4039 later loads during speculative execution. The idea
4040 is that such stores are unlikely and that they can
4041 be detected prior to instruction retirement at the
4042 end of a particular speculation execution window.
4043
4044 In vulnerable processors, the speculatively forwarded
4045 store can be used in a cache side channel attack, for
4046 example to read memory to which the attacker does not
4047 directly have access (e.g. inside sandboxed code).
4048
4049 This parameter controls whether the Speculative Store
4050 Bypass optimization is used.
4051
4052 on - Unconditionally disable Speculative Store Bypass
4053 off - Unconditionally enable Speculative Store Bypass
4054 auto - Kernel detects whether the CPU model contains an
4055 implementation of Speculative Store Bypass and
4056 picks the most appropriate mitigation. If the
4057 CPU is not vulnerable, "off" is selected. If the
4058 CPU is vulnerable the default mitigation is
4059 architecture and Kconfig dependent. See below.
4060 prctl - Control Speculative Store Bypass per thread
4061 via prctl. Speculative Store Bypass is enabled
4062 for a process by default. The state of the control
4063 is inherited on fork.
4064 seccomp - Same as "prctl" above, but all seccomp threads
4065 will disable SSB unless they explicitly opt out.
4066
4067 Not specifying this option is equivalent to
4068 spec_store_bypass_disable=auto.
4069
4070 Default mitigations:
4071 X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
4072
4028 spia_io_base= [HW,MTD] 4073 spia_io_base= [HW,MTD]
4029 spia_fio_base= 4074 spia_fio_base=
4030 spia_pedr= 4075 spia_pedr=
diff --git a/Documentation/devicetree/bindings/input/keys.txt b/Documentation/devicetree/bindings/input/keys.txt
new file mode 100644
index 000000000000..f5a5ddde53f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/keys.txt
@@ -0,0 +1,8 @@
1General Keys Properties:
2
3Optional properties for Keys:
4- power-off-time-sec: Duration in seconds which the key should be kept
5 pressed for device to power off automatically. Device with key pressed
6 shutdown feature can specify this property.
7- linux,keycodes: Specifies the numeric keycode values to be used for
8 reporting key presses.
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
index 07bf55f6e0b9..34ab5763f494 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
+++ b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
@@ -7,6 +7,7 @@ PROPERTIES
7 Value type: <string> 7 Value type: <string>
8 Definition: must be one of: 8 Definition: must be one of:
9 "qcom,pm8941-pwrkey" 9 "qcom,pm8941-pwrkey"
10 "qcom,pm8941-resin"
10 11
11- reg: 12- reg:
12 Usage: required 13 Usage: required
@@ -32,6 +33,14 @@ PROPERTIES
32 Definition: presence of this property indicates that the KPDPWR_N pin 33 Definition: presence of this property indicates that the KPDPWR_N pin
33 should be configured for pull up. 34 should be configured for pull up.
34 35
36- linux,code:
37 Usage: optional
38 Value type: <u32>
39 Definition: The input key-code associated with the power key.
40 Use the linux event codes defined in
41 include/dt-bindings/input/linux-event-codes.h
42 When property is omitted KEY_POWER is assumed.
43
35EXAMPLE 44EXAMPLE
36 45
37 pwrkey@800 { 46 pwrkey@800 {
@@ -40,4 +49,5 @@ EXAMPLE
40 interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>; 49 interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
41 debounce = <15625>; 50 debounce = <15625>;
42 bias-pull-up; 51 bias-pull-up;
52 linux,code = <KEY_POWER>;
43 }; 53 };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/bu21029.txt b/Documentation/devicetree/bindings/input/touchscreen/bu21029.txt
new file mode 100644
index 000000000000..8daa0e868a8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/bu21029.txt
@@ -0,0 +1,35 @@
1* Rohm BU21029 Touch Screen Controller
2
3Required properties:
4 - compatible : must be "rohm,bu21029"
5 - reg : i2c device address of the chip (0x40 or 0x41)
6 - interrupt-parent : the phandle for the gpio controller
7 - interrupts : (gpio) interrupt to which the chip is connected
8 - rohm,x-plate-ohms : x-plate resistance in Ohm
9
10Optional properties:
11 - reset-gpios : gpio pin to reset the chip (active low)
12 - touchscreen-size-x : horizontal resolution of touchscreen (in pixels)
13 - touchscreen-size-y : vertical resolution of touchscreen (in pixels)
14 - touchscreen-max-pressure: maximum pressure value
15 - vdd-supply : power supply for the controller
16
17Example:
18
19 &i2c1 {
20 /* ... */
21
22 bu21029: bu21029@40 {
23 compatible = "rohm,bu21029";
24 reg = <0x40>;
25 interrupt-parent = <&gpio1>;
26 interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
27 reset-gpios = <&gpio6 16 GPIO_ACTIVE_LOW>;
28 rohm,x-plate-ohms = <600>;
29 touchscreen-size-x = <800>;
30 touchscreen-size-y = <480>;
31 touchscreen-max-pressure = <4095>;
32 };
33
34 /* ... */
35 };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/eeti.txt b/Documentation/devicetree/bindings/input/touchscreen/eeti.txt
new file mode 100644
index 000000000000..32b3712c916e
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/eeti.txt
@@ -0,0 +1,30 @@
1Bindings for EETI touchscreen controller
2
3Required properties:
4- compatible: should be "eeti,exc3000-i2c"
5- reg: I2C address of the chip. Should be set to <0xa>
6- interrupts: interrupt to which the chip is connected
7
8Optional properties:
9- attn-gpios: A handle to a GPIO to check whether interrupt is still
10 latched. This is necessary for platforms that lack
11 support for level-triggered IRQs.
12
13The following optional properties described in touchscreen.txt are
14also supported:
15
16- touchscreen-inverted-x
17- touchscreen-inverted-y
18- touchscreen-swapped-x-y
19
20Example:
21
22i2c-master {
23 touchscreen@a {
24 compatible = "eeti,exc3000-i2c";
25 reg = <0xa>;
26 interrupt-parent = <&gpio>;
27 interrupts = <123 IRQ_TYPE_EDGE_RISING>;
28 attn-gpios = <&gpio 123 GPIO_ACTIVE_HIGH>;
29 };
30};
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index 8acf51a4dfa8..47a6a7fe0b86 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -10,6 +10,7 @@ Required properties:
10 "brcm,bcm53128" 10 "brcm,bcm53128"
11 "brcm,bcm5365" 11 "brcm,bcm5365"
12 "brcm,bcm5395" 12 "brcm,bcm5395"
13 "brcm,bcm5389"
13 "brcm,bcm5397" 14 "brcm,bcm5397"
14 "brcm,bcm5398" 15 "brcm,bcm5398"
15 16
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
index 42a248301615..e22d8cfea687 100644
--- a/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
@@ -57,6 +57,13 @@ KSZ9031:
57 - txd2-skew-ps : Skew control of TX data 2 pad 57 - txd2-skew-ps : Skew control of TX data 2 pad
58 - txd3-skew-ps : Skew control of TX data 3 pad 58 - txd3-skew-ps : Skew control of TX data 3 pad
59 59
60 - micrel,force-master:
61 Boolean, force phy to master mode. Only set this option if the phy
62 reference clock provided at CLK125_NDO pin is used as MAC reference
63 clock because the clock jitter in slave mode is to high (errata#2).
64 Attention: The link partner must be configurable as slave otherwise
65 no link will be established.
66
60Examples: 67Examples:
61 68
62 mdio { 69 mdio {
diff --git a/Documentation/i2c/busses/i2c-ocores b/Documentation/i2c/busses/i2c-ocores
index c269aaa2f26a..9e1dfe7553ad 100644
--- a/Documentation/i2c/busses/i2c-ocores
+++ b/Documentation/i2c/busses/i2c-ocores
@@ -2,7 +2,7 @@ Kernel driver i2c-ocores
2 2
3Supported adapters: 3Supported adapters:
4 * OpenCores.org I2C controller by Richard Herveille (see datasheet link) 4 * OpenCores.org I2C controller by Richard Herveille (see datasheet link)
5 Datasheet: http://www.opencores.org/projects.cgi/web/i2c/overview 5 https://opencores.org/project/i2c/overview
6 6
7Author: Peter Korsgaard <jacmet@sunsite.dk> 7Author: Peter Korsgaard <jacmet@sunsite.dk>
8 8
diff --git a/Documentation/networking/ppp_generic.txt b/Documentation/networking/ppp_generic.txt
index 091d20273dcb..61daf4b39600 100644
--- a/Documentation/networking/ppp_generic.txt
+++ b/Documentation/networking/ppp_generic.txt
@@ -300,12 +300,6 @@ unattached instance are:
300The ioctl calls available on an instance of /dev/ppp attached to a 300The ioctl calls available on an instance of /dev/ppp attached to a
301channel are: 301channel are:
302 302
303* PPPIOCDETACH detaches the instance from the channel. This ioctl is
304 deprecated since the same effect can be achieved by closing the
305 instance. In order to prevent possible races this ioctl will fail
306 with an EINVAL error if more than one file descriptor refers to this
307 instance (i.e. as a result of dup(), dup2() or fork()).
308
309* PPPIOCCONNECT connects this channel to a PPP interface. The 303* PPPIOCCONNECT connects this channel to a PPP interface. The
310 argument should point to an int containing the interface unit 304 argument should point to an int containing the interface unit
311 number. It will return an EINVAL error if the channel is already 305 number. It will return an EINVAL error if the channel is already
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index 7b2eb1b7d4ca..a3233da7fa88 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -19,6 +19,7 @@ place where this information is gathered.
19 no_new_privs 19 no_new_privs
20 seccomp_filter 20 seccomp_filter
21 unshare 21 unshare
22 spec_ctrl
22 23
23.. only:: subproject and html 24.. only:: subproject and html
24 25
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
new file mode 100644
index 000000000000..32f3d55c54b7
--- /dev/null
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -0,0 +1,94 @@
1===================
2Speculation Control
3===================
4
5Quite some CPUs have speculation-related misfeatures which are in
6fact vulnerabilities causing data leaks in various forms even across
7privilege domains.
8
9The kernel provides mitigation for such vulnerabilities in various
10forms. Some of these mitigations are compile-time configurable and some
11can be supplied on the kernel command line.
12
13There is also a class of mitigations which are very expensive, but they can
14be restricted to a certain set of processes or tasks in controlled
15environments. The mechanism to control these mitigations is via
16:manpage:`prctl(2)`.
17
18There are two prctl options which are related to this:
19
20 * PR_GET_SPECULATION_CTRL
21
22 * PR_SET_SPECULATION_CTRL
23
24PR_GET_SPECULATION_CTRL
25-----------------------
26
27PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
28which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
29the following meaning:
30
31==== ===================== ===================================================
32Bit Define Description
33==== ===================== ===================================================
340 PR_SPEC_PRCTL Mitigation can be controlled per task by
35 PR_SET_SPECULATION_CTRL.
361 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
37 disabled.
382 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
39 enabled.
403 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
41 subsequent prctl(..., PR_SPEC_ENABLE) will fail.
42==== ===================== ===================================================
43
44If all bits are 0 the CPU is not affected by the speculation misfeature.
45
46If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
47available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
48misfeature will fail.
49
50PR_SET_SPECULATION_CTRL
51-----------------------
52
53PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
54is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
55in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
56PR_SPEC_FORCE_DISABLE.
57
58Common error codes
59------------------
60======= =================================================================
61Value Meaning
62======= =================================================================
63EINVAL The prctl is not implemented by the architecture or unused
64 prctl(2) arguments are not 0.
65
66ENODEV arg2 is selecting a not supported speculation misfeature.
67======= =================================================================
68
69PR_SET_SPECULATION_CTRL error codes
70-----------------------------------
71======= =================================================================
72Value Meaning
73======= =================================================================
740 Success
75
76ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
77 PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
78
79ENXIO Control of the selected speculation misfeature is not possible.
80 See PR_GET_SPECULATION_CTRL.
81
82EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
83 tried to enable it again.
84======= =================================================================
85
86Speculation misfeature controls
87-------------------------------
88- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
89
90 Invocations:
91 * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
92 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
93 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
94 * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
diff --git a/MAINTAINERS b/MAINTAINERS
index 1cb49e9f11dc..57eba2829a6c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2332,7 +2332,7 @@ F: drivers/gpio/gpio-ath79.c
2332F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt 2332F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt
2333 2333
2334ATHEROS ATH GENERIC UTILITIES 2334ATHEROS ATH GENERIC UTILITIES
2335M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com> 2335M: Kalle Valo <kvalo@codeaurora.org>
2336L: linux-wireless@vger.kernel.org 2336L: linux-wireless@vger.kernel.org
2337S: Supported 2337S: Supported
2338F: drivers/net/wireless/ath/* 2338F: drivers/net/wireless/ath/*
@@ -2347,7 +2347,7 @@ S: Maintained
2347F: drivers/net/wireless/ath/ath5k/ 2347F: drivers/net/wireless/ath/ath5k/
2348 2348
2349ATHEROS ATH6KL WIRELESS DRIVER 2349ATHEROS ATH6KL WIRELESS DRIVER
2350M: Kalle Valo <kvalo@qca.qualcomm.com> 2350M: Kalle Valo <kvalo@codeaurora.org>
2351L: linux-wireless@vger.kernel.org 2351L: linux-wireless@vger.kernel.org
2352W: http://wireless.kernel.org/en/users/Drivers/ath6kl 2352W: http://wireless.kernel.org/en/users/Drivers/ath6kl
2353T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git 2353T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -5394,7 +5394,6 @@ S: Maintained
5394F: drivers/iommu/exynos-iommu.c 5394F: drivers/iommu/exynos-iommu.c
5395 5395
5396EZchip NPS platform support 5396EZchip NPS platform support
5397M: Elad Kanfi <eladkan@mellanox.com>
5398M: Vineet Gupta <vgupta@synopsys.com> 5397M: Vineet Gupta <vgupta@synopsys.com>
5399S: Supported 5398S: Supported
5400F: arch/arc/plat-eznps 5399F: arch/arc/plat-eznps
@@ -6510,9 +6509,15 @@ F: Documentation/networking/hinic.txt
6510F: drivers/net/ethernet/huawei/hinic/ 6509F: drivers/net/ethernet/huawei/hinic/
6511 6510
6512HUGETLB FILESYSTEM 6511HUGETLB FILESYSTEM
6513M: Nadia Yvette Chambers <nyc@holomorphy.com> 6512M: Mike Kravetz <mike.kravetz@oracle.com>
6513L: linux-mm@kvack.org
6514S: Maintained 6514S: Maintained
6515F: fs/hugetlbfs/ 6515F: fs/hugetlbfs/
6516F: mm/hugetlb.c
6517F: include/linux/hugetlb.h
6518F: Documentation/admin-guide/mm/hugetlbpage.rst
6519F: Documentation/vm/hugetlbfs_reserv.rst
6520F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
6516 6521
6517HVA ST MEDIA DRIVER 6522HVA ST MEDIA DRIVER
6518M: Jean-Christophe Trotin <jean-christophe.trotin@st.com> 6523M: Jean-Christophe Trotin <jean-christophe.trotin@st.com>
@@ -9028,7 +9033,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
9028F: drivers/net/ethernet/mellanox/mlx5/core/en_* 9033F: drivers/net/ethernet/mellanox/mlx5/core/en_*
9029 9034
9030MELLANOX ETHERNET INNOVA DRIVER 9035MELLANOX ETHERNET INNOVA DRIVER
9031M: Ilan Tayari <ilant@mellanox.com>
9032R: Boris Pismenny <borisp@mellanox.com> 9036R: Boris Pismenny <borisp@mellanox.com>
9033L: netdev@vger.kernel.org 9037L: netdev@vger.kernel.org
9034S: Supported 9038S: Supported
@@ -9038,7 +9042,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
9038F: include/linux/mlx5/mlx5_ifc_fpga.h 9042F: include/linux/mlx5/mlx5_ifc_fpga.h
9039 9043
9040MELLANOX ETHERNET INNOVA IPSEC DRIVER 9044MELLANOX ETHERNET INNOVA IPSEC DRIVER
9041M: Ilan Tayari <ilant@mellanox.com>
9042R: Boris Pismenny <borisp@mellanox.com> 9045R: Boris Pismenny <borisp@mellanox.com>
9043L: netdev@vger.kernel.org 9046L: netdev@vger.kernel.org
9044S: Supported 9047S: Supported
@@ -9094,7 +9097,6 @@ F: include/uapi/rdma/mlx4-abi.h
9094 9097
9095MELLANOX MLX5 core VPI driver 9098MELLANOX MLX5 core VPI driver
9096M: Saeed Mahameed <saeedm@mellanox.com> 9099M: Saeed Mahameed <saeedm@mellanox.com>
9097M: Matan Barak <matanb@mellanox.com>
9098M: Leon Romanovsky <leonro@mellanox.com> 9100M: Leon Romanovsky <leonro@mellanox.com>
9099L: netdev@vger.kernel.org 9101L: netdev@vger.kernel.org
9100L: linux-rdma@vger.kernel.org 9102L: linux-rdma@vger.kernel.org
@@ -9105,7 +9107,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/
9105F: include/linux/mlx5/ 9107F: include/linux/mlx5/
9106 9108
9107MELLANOX MLX5 IB driver 9109MELLANOX MLX5 IB driver
9108M: Matan Barak <matanb@mellanox.com>
9109M: Leon Romanovsky <leonro@mellanox.com> 9110M: Leon Romanovsky <leonro@mellanox.com>
9110L: linux-rdma@vger.kernel.org 9111L: linux-rdma@vger.kernel.org
9111W: http://www.mellanox.com 9112W: http://www.mellanox.com
@@ -9839,7 +9840,6 @@ F: net/netfilter/xt_CONNSECMARK.c
9839F: net/netfilter/xt_SECMARK.c 9840F: net/netfilter/xt_SECMARK.c
9840 9841
9841NETWORKING [TLS] 9842NETWORKING [TLS]
9842M: Ilya Lesokhin <ilyal@mellanox.com>
9843M: Aviad Yehezkel <aviadye@mellanox.com> 9843M: Aviad Yehezkel <aviadye@mellanox.com>
9844M: Dave Watson <davejwatson@fb.com> 9844M: Dave Watson <davejwatson@fb.com>
9845L: netdev@vger.kernel.org 9845L: netdev@vger.kernel.org
@@ -11646,7 +11646,7 @@ S: Maintained
11646F: drivers/media/tuners/qt1010* 11646F: drivers/media/tuners/qt1010*
11647 11647
11648QUALCOMM ATHEROS ATH10K WIRELESS DRIVER 11648QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
11649M: Kalle Valo <kvalo@qca.qualcomm.com> 11649M: Kalle Valo <kvalo@codeaurora.org>
11650L: ath10k@lists.infradead.org 11650L: ath10k@lists.infradead.org
11651W: http://wireless.kernel.org/en/users/Drivers/ath10k 11651W: http://wireless.kernel.org/en/users/Drivers/ath10k
11652T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git 11652T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -11697,7 +11697,7 @@ S: Maintained
11697F: drivers/media/platform/qcom/venus/ 11697F: drivers/media/platform/qcom/venus/
11698 11698
11699QUALCOMM WCN36XX WIRELESS DRIVER 11699QUALCOMM WCN36XX WIRELESS DRIVER
11700M: Eugene Krasnikov <k.eugene.e@gmail.com> 11700M: Kalle Valo <kvalo@codeaurora.org>
11701L: wcn36xx@lists.infradead.org 11701L: wcn36xx@lists.infradead.org
11702W: http://wireless.kernel.org/en/users/Drivers/wcn36xx 11702W: http://wireless.kernel.org/en/users/Drivers/wcn36xx
11703T: git git://github.com/KrasnikovEugene/wcn36xx.git 11703T: git git://github.com/KrasnikovEugene/wcn36xx.git
@@ -15527,6 +15527,14 @@ L: linux-kernel@vger.kernel.org
15527S: Supported 15527S: Supported
15528F: drivers/char/xillybus/ 15528F: drivers/char/xillybus/
15529 15529
15530XLP9XX I2C DRIVER
15531M: George Cherian <george.cherian@cavium.com>
15532M: Jan Glauber <jglauber@cavium.com>
15533L: linux-i2c@vger.kernel.org
15534W: http://www.cavium.com
15535S: Supported
15536F: drivers/i2c/busses/i2c-xlp9xx.c
15537
15530XRA1403 GPIO EXPANDER 15538XRA1403 GPIO EXPANDER
15531M: Nandor Han <nandor.han@ge.com> 15539M: Nandor Han <nandor.han@ge.com>
15532M: Semi Malinen <semi.malinen@ge.com> 15540M: Semi Malinen <semi.malinen@ge.com>
diff --git a/Makefile b/Makefile
index ec6f45928fd4..554dcaddbce4 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 4 2VERSION = 4
3PATCHLEVEL = 17 3PATCHLEVEL = 17
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc6 5EXTRAVERSION =
6NAME = Merciless Moray 6NAME = Merciless Moray
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -500,6 +500,9 @@ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
500RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) 500RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
501export RETPOLINE_CFLAGS 501export RETPOLINE_CFLAGS
502 502
503KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
504KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
505
503# check for 'asm goto' 506# check for 'asm goto'
504ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) 507ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
505 CC_HAVE_ASM_GOTO := 1 508 CC_HAVE_ASM_GOTO := 1
@@ -621,9 +624,9 @@ endif # $(dot-config)
621# Defaults to vmlinux, but the arch makefile usually adds further targets 624# Defaults to vmlinux, but the arch makefile usually adds further targets
622all: vmlinux 625all: vmlinux
623 626
624KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 627CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \
625KBUILD_AFLAGS += $(call cc-option,-fno-PIE) 628 $(call cc-option,-fno-tree-loop-im) \
626CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) 629 $(call cc-disable-warning,maybe-uninitialized,)
627export CFLAGS_GCOV CFLAGS_KCOV 630export CFLAGS_GCOV CFLAGS_KCOV
628 631
629# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default 632# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b2022885ced8..f19dc31288c8 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -211,6 +211,7 @@ config ALPHA_EIGER
211config ALPHA_JENSEN 211config ALPHA_JENSEN
212 bool "Jensen" 212 bool "Jensen"
213 depends on BROKEN 213 depends on BROKEN
214 select DMA_DIRECT_OPS
214 help 215 help
215 DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one 216 DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
216 of the first-generation Alpha systems. A number of these systems 217 of the first-generation Alpha systems. A number of these systems
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index b78f61f20796..8beeafd4f68e 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -2,11 +2,15 @@
2#ifndef _ALPHA_DMA_MAPPING_H 2#ifndef _ALPHA_DMA_MAPPING_H
3#define _ALPHA_DMA_MAPPING_H 3#define _ALPHA_DMA_MAPPING_H
4 4
5extern const struct dma_map_ops *dma_ops; 5extern const struct dma_map_ops alpha_pci_ops;
6 6
7static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 7static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
8{ 8{
9 return dma_ops; 9#ifdef CONFIG_ALPHA_JENSEN
10 return &dma_direct_ops;
11#else
12 return &alpha_pci_ops;
13#endif
10} 14}
11 15
12#endif /* _ALPHA_DMA_MAPPING_H */ 16#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
index 3e3d49c254c5..c025a3e5e357 100644
--- a/arch/alpha/kernel/io.c
+++ b/arch/alpha/kernel/io.c
@@ -37,20 +37,20 @@ unsigned int ioread32(void __iomem *addr)
37 37
38void iowrite8(u8 b, void __iomem *addr) 38void iowrite8(u8 b, void __iomem *addr)
39{ 39{
40 IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
41 mb(); 40 mb();
41 IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
42} 42}
43 43
44void iowrite16(u16 b, void __iomem *addr) 44void iowrite16(u16 b, void __iomem *addr)
45{ 45{
46 IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
47 mb(); 46 mb();
47 IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
48} 48}
49 49
50void iowrite32(u32 b, void __iomem *addr) 50void iowrite32(u32 b, void __iomem *addr)
51{ 51{
52 IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
53 mb(); 52 mb();
53 IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
54} 54}
55 55
56EXPORT_SYMBOL(ioread8); 56EXPORT_SYMBOL(ioread8);
@@ -176,26 +176,26 @@ u64 readq(const volatile void __iomem *addr)
176 176
177void writeb(u8 b, volatile void __iomem *addr) 177void writeb(u8 b, volatile void __iomem *addr)
178{ 178{
179 __raw_writeb(b, addr);
180 mb(); 179 mb();
180 __raw_writeb(b, addr);
181} 181}
182 182
183void writew(u16 b, volatile void __iomem *addr) 183void writew(u16 b, volatile void __iomem *addr)
184{ 184{
185 __raw_writew(b, addr);
186 mb(); 185 mb();
186 __raw_writew(b, addr);
187} 187}
188 188
189void writel(u32 b, volatile void __iomem *addr) 189void writel(u32 b, volatile void __iomem *addr)
190{ 190{
191 __raw_writel(b, addr);
192 mb(); 191 mb();
192 __raw_writel(b, addr);
193} 193}
194 194
195void writeq(u64 b, volatile void __iomem *addr) 195void writeq(u64 b, volatile void __iomem *addr)
196{ 196{
197 __raw_writeq(b, addr);
198 mb(); 197 mb();
198 __raw_writeq(b, addr);
199} 199}
200 200
201EXPORT_SYMBOL(readb); 201EXPORT_SYMBOL(readb);
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index b6ebb65127a8..c7c5879869d3 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -102,36 +102,3 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
102 else 102 else
103 return -ENODEV; 103 return -ENODEV;
104} 104}
105
106static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
107 dma_addr_t *dma_handle, gfp_t gfp,
108 unsigned long attrs)
109{
110 void *ret;
111
112 if (!dev || *dev->dma_mask >= 0xffffffffUL)
113 gfp &= ~GFP_DMA;
114 ret = (void *)__get_free_pages(gfp, get_order(size));
115 if (ret) {
116 memset(ret, 0, size);
117 *dma_handle = virt_to_phys(ret);
118 }
119 return ret;
120}
121
122static int alpha_noop_supported(struct device *dev, u64 mask)
123{
124 return mask < 0x00ffffffUL ? 0 : 1;
125}
126
127const struct dma_map_ops alpha_noop_ops = {
128 .alloc = alpha_noop_alloc_coherent,
129 .free = dma_noop_free_coherent,
130 .map_page = dma_noop_map_page,
131 .map_sg = dma_noop_map_sg,
132 .mapping_error = dma_noop_mapping_error,
133 .dma_supported = alpha_noop_supported,
134};
135
136const struct dma_map_ops *dma_ops = &alpha_noop_ops;
137EXPORT_SYMBOL(dma_ops);
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 83b34b9188ea..6923b0d9c1e1 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -950,6 +950,4 @@ const struct dma_map_ops alpha_pci_ops = {
950 .mapping_error = alpha_pci_mapping_error, 950 .mapping_error = alpha_pci_mapping_error,
951 .dma_supported = alpha_pci_supported, 951 .dma_supported = alpha_pci_supported,
952}; 952};
953 953EXPORT_SYMBOL(alpha_pci_ops);
954const struct dma_map_ops *dma_ops = &alpha_pci_ops;
955EXPORT_SYMBOL(dma_ops);
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 77e8436beed4..3a1c6b45c9a1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -76,7 +76,7 @@
76 allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi"; 76 allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
77 clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>, 77 clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>,
78 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 78 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
79 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 79 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
80 <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>, 80 <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>,
81 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; 81 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
82 status = "disabled"; 82 status = "disabled";
@@ -88,7 +88,7 @@
88 allwinner,pipeline = "de_fe0-de_be0-lcd0"; 88 allwinner,pipeline = "de_fe0-de_be0-lcd0";
89 clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>, 89 clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>,
90 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>, 90 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>,
91 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>, 91 <&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>,
92 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; 92 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
93 status = "disabled"; 93 status = "disabled";
94 }; 94 };
@@ -99,7 +99,7 @@
99 allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0"; 99 allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
100 clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>, 100 clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>,
101 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 101 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
102 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>, 102 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
103 <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>, 103 <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>,
104 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>; 104 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
105 status = "disabled"; 105 status = "disabled";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 3328fe583c9b..232f124ce62c 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -117,6 +117,7 @@
117 phy-handle = <&int_mii_phy>; 117 phy-handle = <&int_mii_phy>;
118 phy-mode = "mii"; 118 phy-mode = "mii";
119 allwinner,leds-active-low; 119 allwinner,leds-active-low;
120 status = "okay";
120}; 121};
121 122
122&hdmi { 123&hdmi {
diff --git a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
index d1311098ea45..ad173605b1b8 100644
--- a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
+++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero-dock.dts
@@ -51,7 +51,7 @@
51 51
52 leds { 52 leds {
53 /* The LEDs use PG0~2 pins, which conflict with MMC1 */ 53 /* The LEDs use PG0~2 pins, which conflict with MMC1 */
54 status = "disbaled"; 54 status = "disabled";
55 }; 55 };
56}; 56};
57 57
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index e70feec6fad5..0581ffbedddd 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -323,7 +323,7 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
323 323
324/* All EP93xx devices use the same two GPIO pins for I2C bit-banging */ 324/* All EP93xx devices use the same two GPIO pins for I2C bit-banging */
325static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = { 325static struct gpiod_lookup_table ep93xx_i2c_gpiod_table = {
326 .dev_id = "i2c-gpio", 326 .dev_id = "i2c-gpio.0",
327 .table = { 327 .table = {
328 /* Use local offsets on gpiochip/port "G" */ 328 /* Use local offsets on gpiochip/port "G" */
329 GPIO_LOOKUP_IDX("G", 1, NULL, 0, 329 GPIO_LOOKUP_IDX("G", 1, NULL, 0,
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 77def6169f50..44cbbce6bda6 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -51,7 +51,7 @@ static struct platform_device avila_flash = {
51}; 51};
52 52
53static struct gpiod_lookup_table avila_i2c_gpiod_table = { 53static struct gpiod_lookup_table avila_i2c_gpiod_table = {
54 .dev_id = "i2c-gpio", 54 .dev_id = "i2c-gpio.0",
55 .table = { 55 .table = {
56 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN, 56 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", AVILA_SDA_PIN,
57 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 57 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 0f5c99941a7d..397190f3a8da 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -70,7 +70,7 @@ static struct platform_device dsmg600_flash = {
70}; 70};
71 71
72static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = { 72static struct gpiod_lookup_table dsmg600_i2c_gpiod_table = {
73 .dev_id = "i2c-gpio", 73 .dev_id = "i2c-gpio.0",
74 .table = { 74 .table = {
75 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN, 75 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", DSMG600_SDA_PIN,
76 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 76 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 033f79b35d51..f0a152e365b1 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -56,7 +56,7 @@ static struct platform_device fsg_flash = {
56}; 56};
57 57
58static struct gpiod_lookup_table fsg_i2c_gpiod_table = { 58static struct gpiod_lookup_table fsg_i2c_gpiod_table = {
59 .dev_id = "i2c-gpio", 59 .dev_id = "i2c-gpio.0",
60 .table = { 60 .table = {
61 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN, 61 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", FSG_SDA_PIN,
62 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 62 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index b168e2fbdbeb..3ec829d52cdd 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -124,7 +124,7 @@ static struct platform_device ixdp425_flash_nand = {
124#endif /* CONFIG_MTD_NAND_PLATFORM */ 124#endif /* CONFIG_MTD_NAND_PLATFORM */
125 125
126static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = { 126static struct gpiod_lookup_table ixdp425_i2c_gpiod_table = {
127 .dev_id = "i2c-gpio", 127 .dev_id = "i2c-gpio.0",
128 .table = { 128 .table = {
129 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN, 129 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", IXDP425_SDA_PIN,
130 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 130 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 76dfff03cb71..4138d6aa4c52 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -102,7 +102,7 @@ static struct platform_device nas100d_leds = {
102}; 102};
103 103
104static struct gpiod_lookup_table nas100d_i2c_gpiod_table = { 104static struct gpiod_lookup_table nas100d_i2c_gpiod_table = {
105 .dev_id = "i2c-gpio", 105 .dev_id = "i2c-gpio.0",
106 .table = { 106 .table = {
107 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN, 107 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NAS100D_SDA_PIN,
108 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 108 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index 91da63a7d7b5..341b263482ef 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -70,7 +70,7 @@ static struct platform_device nslu2_flash = {
70}; 70};
71 71
72static struct gpiod_lookup_table nslu2_i2c_gpiod_table = { 72static struct gpiod_lookup_table nslu2_i2c_gpiod_table = {
73 .dev_id = "i2c-gpio", 73 .dev_id = "i2c-gpio.0",
74 .table = { 74 .table = {
75 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN, 75 GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", NSLU2_SDA_PIN,
76 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 76 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 5877e547cecd..0adb1bd6208e 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -322,7 +322,7 @@ static struct soc_camera_link palmz72_iclink = {
322}; 322};
323 323
324static struct gpiod_lookup_table palmz72_i2c_gpiod_table = { 324static struct gpiod_lookup_table palmz72_i2c_gpiod_table = {
325 .dev_id = "i2c-gpio", 325 .dev_id = "i2c-gpio.0",
326 .table = { 326 .table = {
327 GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0, 327 GPIO_LOOKUP_IDX("gpio-pxa", 118, NULL, 0,
328 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 328 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 90d0f277de55..207dcc2e94e7 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -460,7 +460,7 @@ static struct platform_device smc91x_device = {
460 460
461/* i2c */ 461/* i2c */
462static struct gpiod_lookup_table viper_i2c_gpiod_table = { 462static struct gpiod_lookup_table viper_i2c_gpiod_table = {
463 .dev_id = "i2c-gpio", 463 .dev_id = "i2c-gpio.1",
464 .table = { 464 .table = {
465 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO, 465 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_RTC_I2C_SDA_GPIO,
466 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 466 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -789,7 +789,7 @@ static int __init viper_tpm_setup(char *str)
789__setup("tpm=", viper_tpm_setup); 789__setup("tpm=", viper_tpm_setup);
790 790
791struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = { 791struct gpiod_lookup_table viper_tpm_i2c_gpiod_table = {
792 .dev_id = "i2c-gpio", 792 .dev_id = "i2c-gpio.2",
793 .table = { 793 .table = {
794 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO, 794 GPIO_LOOKUP_IDX("gpio-pxa", VIPER_TPM_I2C_SDA_GPIO,
795 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 795 NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index ace010479eb6..f45aed2519ba 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -327,7 +327,7 @@ static struct platform_device simpad_gpio_leds = {
327 * i2c 327 * i2c
328 */ 328 */
329static struct gpiod_lookup_table simpad_i2c_gpiod_table = { 329static struct gpiod_lookup_table simpad_i2c_gpiod_table = {
330 .dev_id = "i2c-gpio", 330 .dev_id = "i2c-gpio.0",
331 .table = { 331 .table = {
332 GPIO_LOOKUP_IDX("gpio", 21, NULL, 0, 332 GPIO_LOOKUP_IDX("gpio", 21, NULL, 0,
333 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 333 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 8c398fedbbb6..ada8eb206a90 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -466,12 +466,6 @@ void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
466void __init dma_contiguous_remap(void) 466void __init dma_contiguous_remap(void)
467{ 467{
468 int i; 468 int i;
469
470 if (!dma_mmu_remap_num)
471 return;
472
473 /* call flush_cache_all() since CMA area would be large enough */
474 flush_cache_all();
475 for (i = 0; i < dma_mmu_remap_num; i++) { 469 for (i = 0; i < dma_mmu_remap_num; i++) {
476 phys_addr_t start = dma_mmu_remap[i].base; 470 phys_addr_t start = dma_mmu_remap[i].base;
477 phys_addr_t end = start + dma_mmu_remap[i].size; 471 phys_addr_t end = start + dma_mmu_remap[i].size;
@@ -504,15 +498,7 @@ void __init dma_contiguous_remap(void)
504 flush_tlb_kernel_range(__phys_to_virt(start), 498 flush_tlb_kernel_range(__phys_to_virt(start),
505 __phys_to_virt(end)); 499 __phys_to_virt(end));
506 500
507 /* 501 iotable_init(&map, 1);
508 * All the memory in CMA region will be on ZONE_MOVABLE.
509 * If that zone is considered as highmem, the memory in CMA
510 * region is also considered as highmem even if it's
511 * physical address belong to lowmem. In this case,
512 * re-mapping isn't required.
513 */
514 if (!is_highmem_idx(ZONE_MOVABLE))
515 iotable_init(&map, 1);
516 } 502 }
517} 503}
518 504
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 724a0d3b7683..edb4ee0b8896 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -299,7 +299,6 @@
299 /* GPIO blocks 16 thru 19 do not appear to be routed to pins */ 299 /* GPIO blocks 16 thru 19 do not appear to be routed to pins */
300 300
301 dwmmc_0: dwmmc0@f723d000 { 301 dwmmc_0: dwmmc0@f723d000 {
302 max-frequency = <150000000>;
303 cap-mmc-highspeed; 302 cap-mmc-highspeed;
304 mmc-hs200-1_8v; 303 mmc-hs200-1_8v;
305 non-removable; 304 non-removable;
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 9ef0797380cb..f9b0b09153e0 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
117 /* LSE atomics */ 117 /* LSE atomics */
118 " mvn %w[i], %w[i]\n" 118 " mvn %w[i], %w[i]\n"
119 " stclr %w[i], %[v]") 119 " stclr %w[i], %[v]")
120 : [i] "+r" (w0), [v] "+Q" (v->counter) 120 : [i] "+&r" (w0), [v] "+Q" (v->counter)
121 : "r" (x1) 121 : "r" (x1)
122 : __LL_SC_CLOBBERS); 122 : __LL_SC_CLOBBERS);
123} 123}
@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
135 /* LSE atomics */ \ 135 /* LSE atomics */ \
136 " mvn %w[i], %w[i]\n" \ 136 " mvn %w[i], %w[i]\n" \
137 " ldclr" #mb " %w[i], %w[i], %[v]") \ 137 " ldclr" #mb " %w[i], %w[i], %[v]") \
138 : [i] "+r" (w0), [v] "+Q" (v->counter) \ 138 : [i] "+&r" (w0), [v] "+Q" (v->counter) \
139 : "r" (x1) \ 139 : "r" (x1) \
140 : __LL_SC_CLOBBERS, ##cl); \ 140 : __LL_SC_CLOBBERS, ##cl); \
141 \ 141 \
@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
161 /* LSE atomics */ 161 /* LSE atomics */
162 " neg %w[i], %w[i]\n" 162 " neg %w[i], %w[i]\n"
163 " stadd %w[i], %[v]") 163 " stadd %w[i], %[v]")
164 : [i] "+r" (w0), [v] "+Q" (v->counter) 164 : [i] "+&r" (w0), [v] "+Q" (v->counter)
165 : "r" (x1) 165 : "r" (x1)
166 : __LL_SC_CLOBBERS); 166 : __LL_SC_CLOBBERS);
167} 167}
@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
180 " neg %w[i], %w[i]\n" \ 180 " neg %w[i], %w[i]\n" \
181 " ldadd" #mb " %w[i], w30, %[v]\n" \ 181 " ldadd" #mb " %w[i], w30, %[v]\n" \
182 " add %w[i], %w[i], w30") \ 182 " add %w[i], %w[i], w30") \
183 : [i] "+r" (w0), [v] "+Q" (v->counter) \ 183 : [i] "+&r" (w0), [v] "+Q" (v->counter) \
184 : "r" (x1) \ 184 : "r" (x1) \
185 : __LL_SC_CLOBBERS , ##cl); \ 185 : __LL_SC_CLOBBERS , ##cl); \
186 \ 186 \
@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
207 /* LSE atomics */ \ 207 /* LSE atomics */ \
208 " neg %w[i], %w[i]\n" \ 208 " neg %w[i], %w[i]\n" \
209 " ldadd" #mb " %w[i], %w[i], %[v]") \ 209 " ldadd" #mb " %w[i], %w[i], %[v]") \
210 : [i] "+r" (w0), [v] "+Q" (v->counter) \ 210 : [i] "+&r" (w0), [v] "+Q" (v->counter) \
211 : "r" (x1) \ 211 : "r" (x1) \
212 : __LL_SC_CLOBBERS, ##cl); \ 212 : __LL_SC_CLOBBERS, ##cl); \
213 \ 213 \
@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
314 /* LSE atomics */ 314 /* LSE atomics */
315 " mvn %[i], %[i]\n" 315 " mvn %[i], %[i]\n"
316 " stclr %[i], %[v]") 316 " stclr %[i], %[v]")
317 : [i] "+r" (x0), [v] "+Q" (v->counter) 317 : [i] "+&r" (x0), [v] "+Q" (v->counter)
318 : "r" (x1) 318 : "r" (x1)
319 : __LL_SC_CLOBBERS); 319 : __LL_SC_CLOBBERS);
320} 320}
@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
332 /* LSE atomics */ \ 332 /* LSE atomics */ \
333 " mvn %[i], %[i]\n" \ 333 " mvn %[i], %[i]\n" \
334 " ldclr" #mb " %[i], %[i], %[v]") \ 334 " ldclr" #mb " %[i], %[i], %[v]") \
335 : [i] "+r" (x0), [v] "+Q" (v->counter) \ 335 : [i] "+&r" (x0), [v] "+Q" (v->counter) \
336 : "r" (x1) \ 336 : "r" (x1) \
337 : __LL_SC_CLOBBERS, ##cl); \ 337 : __LL_SC_CLOBBERS, ##cl); \
338 \ 338 \
@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
358 /* LSE atomics */ 358 /* LSE atomics */
359 " neg %[i], %[i]\n" 359 " neg %[i], %[i]\n"
360 " stadd %[i], %[v]") 360 " stadd %[i], %[v]")
361 : [i] "+r" (x0), [v] "+Q" (v->counter) 361 : [i] "+&r" (x0), [v] "+Q" (v->counter)
362 : "r" (x1) 362 : "r" (x1)
363 : __LL_SC_CLOBBERS); 363 : __LL_SC_CLOBBERS);
364} 364}
@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
377 " neg %[i], %[i]\n" \ 377 " neg %[i], %[i]\n" \
378 " ldadd" #mb " %[i], x30, %[v]\n" \ 378 " ldadd" #mb " %[i], x30, %[v]\n" \
379 " add %[i], %[i], x30") \ 379 " add %[i], %[i], x30") \
380 : [i] "+r" (x0), [v] "+Q" (v->counter) \ 380 : [i] "+&r" (x0), [v] "+Q" (v->counter) \
381 : "r" (x1) \ 381 : "r" (x1) \
382 : __LL_SC_CLOBBERS, ##cl); \ 382 : __LL_SC_CLOBBERS, ##cl); \
383 \ 383 \
@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
404 /* LSE atomics */ \ 404 /* LSE atomics */ \
405 " neg %[i], %[i]\n" \ 405 " neg %[i], %[i]\n" \
406 " ldadd" #mb " %[i], %[i], %[v]") \ 406 " ldadd" #mb " %[i], %[i], %[v]") \
407 : [i] "+r" (x0), [v] "+Q" (v->counter) \ 407 : [i] "+&r" (x0), [v] "+Q" (v->counter) \
408 : "r" (x1) \ 408 : "r" (x1) \
409 : __LL_SC_CLOBBERS, ##cl); \ 409 : __LL_SC_CLOBBERS, ##cl); \
410 \ 410 \
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
435 " sub x30, x30, %[ret]\n" 435 " sub x30, x30, %[ret]\n"
436 " cbnz x30, 1b\n" 436 " cbnz x30, 1b\n"
437 "2:") 437 "2:")
438 : [ret] "+r" (x0), [v] "+Q" (v->counter) 438 : [ret] "+&r" (x0), [v] "+Q" (v->counter)
439 : 439 :
440 : __LL_SC_CLOBBERS, "cc", "memory"); 440 : __LL_SC_CLOBBERS, "cc", "memory");
441 441
@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
516 " eor %[old1], %[old1], %[oldval1]\n" \ 516 " eor %[old1], %[old1], %[oldval1]\n" \
517 " eor %[old2], %[old2], %[oldval2]\n" \ 517 " eor %[old2], %[old2], %[oldval2]\n" \
518 " orr %[old1], %[old1], %[old2]") \ 518 " orr %[old1], %[old1], %[old2]") \
519 : [old1] "+r" (x0), [old2] "+r" (x1), \ 519 : [old1] "+&r" (x0), [old2] "+&r" (x1), \
520 [v] "+Q" (*(unsigned long *)ptr) \ 520 [v] "+Q" (*(unsigned long *)ptr) \
521 : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ 521 : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
522 [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ 522 [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 66be504edb6c..d894a20b70b2 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
75 /* arm-smccc */ 75 /* arm-smccc */
76EXPORT_SYMBOL(__arm_smccc_smc); 76EXPORT_SYMBOL(__arm_smccc_smc);
77EXPORT_SYMBOL(__arm_smccc_hvc); 77EXPORT_SYMBOL(__arm_smccc_hvc);
78
79 /* tishift.S */
80extern long long __ashlti3(long long a, int b);
81EXPORT_SYMBOL(__ashlti3);
82extern long long __ashrti3(long long a, int b);
83EXPORT_SYMBOL(__ashrti3);
84extern long long __lshrti3(long long a, int b);
85EXPORT_SYMBOL(__lshrti3);
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index d3db9b2cd479..0fdff97794de 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -1,17 +1,6 @@
1/* 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
3 * 2 *
4 * This program is free software; you can redistribute it and/or modify 3 * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 4 */
16 5
17#include <linux/linkage.h> 6#include <linux/linkage.h>
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4165485e8b6e..2af3dd89bcdb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -293,6 +293,57 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
293static void __do_user_fault(struct siginfo *info, unsigned int esr) 293static void __do_user_fault(struct siginfo *info, unsigned int esr)
294{ 294{
295 current->thread.fault_address = (unsigned long)info->si_addr; 295 current->thread.fault_address = (unsigned long)info->si_addr;
296
297 /*
298 * If the faulting address is in the kernel, we must sanitize the ESR.
299 * From userspace's point of view, kernel-only mappings don't exist
300 * at all, so we report them as level 0 translation faults.
301 * (This is not quite the way that "no mapping there at all" behaves:
302 * an alignment fault not caused by the memory type would take
303 * precedence over translation fault for a real access to empty
304 * space. Unfortunately we can't easily distinguish "alignment fault
305 * not caused by memory type" from "alignment fault caused by memory
306 * type", so we ignore this wrinkle and just return the translation
307 * fault.)
308 */
309 if (current->thread.fault_address >= TASK_SIZE) {
310 switch (ESR_ELx_EC(esr)) {
311 case ESR_ELx_EC_DABT_LOW:
312 /*
313 * These bits provide only information about the
314 * faulting instruction, which userspace knows already.
315 * We explicitly clear bits which are architecturally
316 * RES0 in case they are given meanings in future.
317 * We always report the ESR as if the fault was taken
318 * to EL1 and so ISV and the bits in ISS[23:14] are
319 * clear. (In fact it always will be a fault to EL1.)
320 */
321 esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
322 ESR_ELx_CM | ESR_ELx_WNR;
323 esr |= ESR_ELx_FSC_FAULT;
324 break;
325 case ESR_ELx_EC_IABT_LOW:
326 /*
327 * Claim a level 0 translation fault.
328 * All other bits are architecturally RES0 for faults
329 * reported with that DFSC value, so we clear them.
330 */
331 esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
332 esr |= ESR_ELx_FSC_FAULT;
333 break;
334 default:
335 /*
336 * This should never happen (entry.S only brings us
337 * into this code for insn and data aborts from a lower
338 * exception level). Fail safe by not providing an ESR
339 * context record at all.
340 */
341 WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
342 esr = 0;
343 break;
344 }
345 }
346
296 current->thread.fault_code = esr; 347 current->thread.fault_code = esr;
297 arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current); 348 arm64_force_sig_info(info, esr_to_fault_info(esr)->name, current);
298} 349}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2dbb2c9f1ec1..493ff75670ff 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -933,13 +933,15 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
933{ 933{
934 pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT | 934 pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
935 pgprot_val(mk_sect_prot(prot))); 935 pgprot_val(mk_sect_prot(prot)));
936 pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);
936 937
937 /* ioremap_page_range doesn't honour BBM */ 938 /* Only allow permission changes for now */
938 if (pud_present(READ_ONCE(*pudp))) 939 if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
940 pud_val(new_pud)))
939 return 0; 941 return 0;
940 942
941 BUG_ON(phys & ~PUD_MASK); 943 BUG_ON(phys & ~PUD_MASK);
942 set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot)); 944 set_pud(pudp, new_pud);
943 return 1; 945 return 1;
944} 946}
945 947
@@ -947,13 +949,15 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
947{ 949{
948 pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT | 950 pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
949 pgprot_val(mk_sect_prot(prot))); 951 pgprot_val(mk_sect_prot(prot)));
952 pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);
950 953
951 /* ioremap_page_range doesn't honour BBM */ 954 /* Only allow permission changes for now */
952 if (pmd_present(READ_ONCE(*pmdp))) 955 if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
956 pmd_val(new_pmd)))
953 return 0; 957 return 0;
954 958
955 BUG_ON(phys & ~PMD_MASK); 959 BUG_ON(phys & ~PMD_MASK);
956 set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot)); 960 set_pmd(pmdp, new_pmd);
957 return 1; 961 return 1;
958} 962}
959 963
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
index b3043c08f769..aee8d7b8f091 100644
--- a/arch/mips/boot/compressed/uart-16550.c
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -18,9 +18,9 @@
18#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset)) 18#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
19#endif 19#endif
20 20
21#if defined(CONFIG_MACH_JZ4740) || defined(CONFIG_MACH_JZ4780) 21#ifdef CONFIG_MACH_INGENIC
22#include <asm/mach-jz4740/base.h> 22#define INGENIC_UART0_BASE_ADDR 0x10030000
23#define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset)) 23#define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset))
24#endif 24#endif
25 25
26#ifdef CONFIG_CPU_XLR 26#ifdef CONFIG_CPU_XLR
diff --git a/arch/mips/boot/dts/xilfpga/Makefile b/arch/mips/boot/dts/xilfpga/Makefile
index 9987e0e378c5..69ca00590b8d 100644
--- a/arch/mips/boot/dts/xilfpga/Makefile
+++ b/arch/mips/boot/dts/xilfpga/Makefile
@@ -1,4 +1,2 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb 2dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb
3
4obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
index b51432dd10b6..0dd0d5d460a5 100644
--- a/arch/mips/generic/Platform
+++ b/arch/mips/generic/Platform
@@ -16,3 +16,4 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
16its-y := vmlinux.its.S 16its-y := vmlinux.its.S
17its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S 17its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
18its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S 18its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
19its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b9e9bf628849..3775a8d694fb 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
721 if (value & ~known_bits) 721 if (value & ~known_bits)
722 return -EOPNOTSUPP; 722 return -EOPNOTSUPP;
723 723
724 /* Setting FRE without FR is not supported. */
725 if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
726 return -EOPNOTSUPP;
727
724 /* Avoid inadvertently triggering emulation */ 728 /* Avoid inadvertently triggering emulation */
725 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && 729 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
726 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) 730 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0b23b1ad99e6..0c0c23c9c9f5 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -463,7 +463,7 @@ static int fpr_get_msa(struct task_struct *target,
463/* 463/*
464 * Copy the floating-point context to the supplied NT_PRFPREG buffer. 464 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
465 * Choose the appropriate helper for general registers, and then copy 465 * Choose the appropriate helper for general registers, and then copy
466 * the FCSR register separately. 466 * the FCSR and FIR registers separately.
467 */ 467 */
468static int fpr_get(struct task_struct *target, 468static int fpr_get(struct task_struct *target,
469 const struct user_regset *regset, 469 const struct user_regset *regset,
@@ -471,6 +471,7 @@ static int fpr_get(struct task_struct *target,
471 void *kbuf, void __user *ubuf) 471 void *kbuf, void __user *ubuf)
472{ 472{
473 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); 473 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
474 const int fir_pos = fcr31_pos + sizeof(u32);
474 int err; 475 int err;
475 476
476 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) 477 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
@@ -483,6 +484,12 @@ static int fpr_get(struct task_struct *target,
483 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf, 484 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
484 &target->thread.fpu.fcr31, 485 &target->thread.fpu.fcr31,
485 fcr31_pos, fcr31_pos + sizeof(u32)); 486 fcr31_pos, fcr31_pos + sizeof(u32));
487 if (err)
488 return err;
489
490 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
491 &boot_cpu_data.fpu_id,
492 fir_pos, fir_pos + sizeof(u32));
486 493
487 return err; 494 return err;
488} 495}
@@ -531,7 +538,8 @@ static int fpr_set_msa(struct task_struct *target,
531/* 538/*
532 * Copy the supplied NT_PRFPREG buffer to the floating-point context. 539 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
533 * Choose the appropriate helper for general registers, and then copy 540 * Choose the appropriate helper for general registers, and then copy
534 * the FCSR register separately. 541 * the FCSR register separately. Ignore the incoming FIR register
542 * contents though, as the register is read-only.
535 * 543 *
536 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', 544 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
537 * which is supposed to have been guaranteed by the kernel before 545 * which is supposed to have been guaranteed by the kernel before
@@ -545,6 +553,7 @@ static int fpr_set(struct task_struct *target,
545 const void *kbuf, const void __user *ubuf) 553 const void *kbuf, const void __user *ubuf)
546{ 554{
547 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); 555 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
556 const int fir_pos = fcr31_pos + sizeof(u32);
548 u32 fcr31; 557 u32 fcr31;
549 int err; 558 int err;
550 559
@@ -572,6 +581,11 @@ static int fpr_set(struct task_struct *target,
572 ptrace_setfcr31(target, fcr31); 581 ptrace_setfcr31(target, fcr31);
573 } 582 }
574 583
584 if (count > 0)
585 err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
586 fir_pos,
587 fir_pos + sizeof(u32));
588
575 return err; 589 return err;
576} 590}
577 591
@@ -793,7 +807,7 @@ long arch_ptrace(struct task_struct *child, long request,
793 fregs = get_fpu_regs(child); 807 fregs = get_fpu_regs(child);
794 808
795#ifdef CONFIG_32BIT 809#ifdef CONFIG_32BIT
796 if (test_thread_flag(TIF_32BIT_FPREGS)) { 810 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
797 /* 811 /*
798 * The odd registers are actually the high 812 * The odd registers are actually the high
799 * order bits of the values stored in the even 813 * order bits of the values stored in the even
@@ -804,7 +818,7 @@ long arch_ptrace(struct task_struct *child, long request,
804 break; 818 break;
805 } 819 }
806#endif 820#endif
807 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); 821 tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
808 break; 822 break;
809 case PC: 823 case PC:
810 tmp = regs->cp0_epc; 824 tmp = regs->cp0_epc;
@@ -888,7 +902,7 @@ long arch_ptrace(struct task_struct *child, long request,
888 902
889 init_fp_ctx(child); 903 init_fp_ctx(child);
890#ifdef CONFIG_32BIT 904#ifdef CONFIG_32BIT
891 if (test_thread_flag(TIF_32BIT_FPREGS)) { 905 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
892 /* 906 /*
893 * The odd registers are actually the high 907 * The odd registers are actually the high
894 * order bits of the values stored in the even 908 * order bits of the values stored in the even
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 2b9260f92ccd..f30c381d3e1c 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -99,7 +99,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
99 break; 99 break;
100 } 100 }
101 fregs = get_fpu_regs(child); 101 fregs = get_fpu_regs(child);
102 if (test_thread_flag(TIF_32BIT_FPREGS)) { 102 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
103 /* 103 /*
104 * The odd registers are actually the high 104 * The odd registers are actually the high
105 * order bits of the values stored in the even 105 * order bits of the values stored in the even
@@ -109,7 +109,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
109 addr & 1); 109 addr & 1);
110 break; 110 break;
111 } 111 }
112 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0); 112 tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
113 break; 113 break;
114 case PC: 114 case PC:
115 tmp = regs->cp0_epc; 115 tmp = regs->cp0_epc;
@@ -212,7 +212,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
212 sizeof(child->thread.fpu)); 212 sizeof(child->thread.fpu));
213 child->thread.fpu.fcr31 = 0; 213 child->thread.fpu.fcr31 = 0;
214 } 214 }
215 if (test_thread_flag(TIF_32BIT_FPREGS)) { 215 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
216 /* 216 /*
217 * The odd registers are actually the high 217 * The odd registers are actually the high
218 * order bits of the values stored in the even 218 * order bits of the values stored in the even
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 2549fdd27ee1..0f725e9cee8f 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
45 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, 45 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
46 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, 46 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
47 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, 47 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
48 { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, 48 { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
49 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, 49 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
50 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, 50 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
51 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, 51 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6f534b209971..e12dfa48b478 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
851 /* 851 /*
852 * Either no secondary cache or the available caches don't have the 852 * Either no secondary cache or the available caches don't have the
853 * subset property so we have to flush the primary caches 853 * subset property so we have to flush the primary caches
854 * explicitly 854 * explicitly.
855 * If we would need IPI to perform an INDEX-type operation, then
856 * we have to use the HIT-type alternative as IPI cannot be used
857 * here due to interrupts possibly being disabled.
855 */ 858 */
856 if (size >= dcache_size) { 859 if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
857 r4k_blast_dcache(); 860 r4k_blast_dcache();
858 } else { 861 } else {
859 R4600_HIT_CACHEOP_WAR_IMPL; 862 R4600_HIT_CACHEOP_WAR_IMPL;
@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
890 return; 893 return;
891 } 894 }
892 895
893 if (size >= dcache_size) { 896 if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
894 r4k_blast_dcache(); 897 r4k_blast_dcache();
895 } else { 898 } else {
896 R4600_HIT_CACHEOP_WAR_IMPL; 899 R4600_HIT_CACHEOP_WAR_IMPL;
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 249f38d3388f..b7404f2dcf5b 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -9,6 +9,12 @@ config NDS32
9 select CLKSRC_MMIO 9 select CLKSRC_MMIO
10 select CLONE_BACKWARDS 10 select CLONE_BACKWARDS
11 select COMMON_CLK 11 select COMMON_CLK
12 select GENERIC_ASHLDI3
13 select GENERIC_ASHRDI3
14 select GENERIC_LSHRDI3
15 select GENERIC_CMPDI2
16 select GENERIC_MULDI3
17 select GENERIC_UCMPDI2
12 select GENERIC_ATOMIC64 18 select GENERIC_ATOMIC64
13 select GENERIC_CPU_DEVICES 19 select GENERIC_CPU_DEVICES
14 select GENERIC_CLOCKEVENTS 20 select GENERIC_CLOCKEVENTS
@@ -82,6 +88,7 @@ endmenu
82 88
83menu "Kernel Features" 89menu "Kernel Features"
84source "kernel/Kconfig.preempt" 90source "kernel/Kconfig.preempt"
91source "kernel/Kconfig.freezer"
85source "mm/Kconfig" 92source "mm/Kconfig"
86source "kernel/Kconfig.hz" 93source "kernel/Kconfig.hz"
87endmenu 94endmenu
diff --git a/arch/nds32/Kconfig.cpu b/arch/nds32/Kconfig.cpu
index ba44cc539da9..b8c8984d1456 100644
--- a/arch/nds32/Kconfig.cpu
+++ b/arch/nds32/Kconfig.cpu
@@ -1,10 +1,11 @@
1comment "Processor Features" 1comment "Processor Features"
2 2
3config CPU_BIG_ENDIAN 3config CPU_BIG_ENDIAN
4 bool "Big endian" 4 def_bool !CPU_LITTLE_ENDIAN
5 5
6config CPU_LITTLE_ENDIAN 6config CPU_LITTLE_ENDIAN
7 def_bool !CPU_BIG_ENDIAN 7 bool "Little endian"
8 default y
8 9
9config HWZOL 10config HWZOL
10 bool "hardware zero overhead loop support" 11 bool "hardware zero overhead loop support"
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 91f933d5a962..513bb2e9baf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -23,9 +23,6 @@ export TEXTADDR
23# If we have a machine-specific directory, then include it in the build. 23# If we have a machine-specific directory, then include it in the build.
24core-y += arch/nds32/kernel/ arch/nds32/mm/ 24core-y += arch/nds32/kernel/ arch/nds32/mm/
25libs-y += arch/nds32/lib/ 25libs-y += arch/nds32/lib/
26LIBGCC_PATH := \
27 $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
28libs-y += $(LIBGCC_PATH)
29 26
30ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""' 27ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
31BUILTIN_DTB := y 28BUILTIN_DTB := y
@@ -35,8 +32,12 @@ endif
35 32
36ifdef CONFIG_CPU_LITTLE_ENDIAN 33ifdef CONFIG_CPU_LITTLE_ENDIAN
37KBUILD_CFLAGS += $(call cc-option, -EL) 34KBUILD_CFLAGS += $(call cc-option, -EL)
35KBUILD_AFLAGS += $(call cc-option, -EL)
36LDFLAGS += $(call cc-option, -EL)
38else 37else
39KBUILD_CFLAGS += $(call cc-option, -EB) 38KBUILD_CFLAGS += $(call cc-option, -EB)
39KBUILD_AFLAGS += $(call cc-option, -EB)
40LDFLAGS += $(call cc-option, -EB)
40endif 41endif
41 42
42boot := arch/nds32/boot 43boot := arch/nds32/boot
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 06bdf8167f5a..142e612aa639 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += dma.h
16generic-y += emergency-restart.h 16generic-y += emergency-restart.h
17generic-y += errno.h 17generic-y += errno.h
18generic-y += exec.h 18generic-y += exec.h
19generic-y += export.h
19generic-y += fb.h 20generic-y += fb.h
20generic-y += fcntl.h 21generic-y += fcntl.h
21generic-y += ftrace.h 22generic-y += ftrace.h
@@ -49,6 +50,7 @@ generic-y += switch_to.h
49generic-y += timex.h 50generic-y += timex.h
50generic-y += topology.h 51generic-y += topology.h
51generic-y += trace_clock.h 52generic-y += trace_clock.h
53generic-y += xor.h
52generic-y += unaligned.h 54generic-y += unaligned.h
53generic-y += user.h 55generic-y += user.h
54generic-y += vga.h 56generic-y += vga.h
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
index c73f71d67744..8e84fc385b94 100644
--- a/arch/nds32/include/asm/bitfield.h
+++ b/arch/nds32/include/asm/bitfield.h
@@ -336,7 +336,7 @@
336#define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE ) 336#define INT_MASK_mskIDIVZE ( 0x1 << INT_MASK_offIDIVZE )
337#define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM ) 337#define INT_MASK_mskDSSIM ( 0x1 << INT_MASK_offDSSIM )
338 338
339#define INT_MASK_INITAIAL_VAL 0x10003 339#define INT_MASK_INITAIAL_VAL (INT_MASK_mskDSSIM|INT_MASK_mskIDIVZE)
340 340
341/****************************************************************************** 341/******************************************************************************
342 * ir15: INT_PEND (Interrupt Pending Register) 342 * ir15: INT_PEND (Interrupt Pending Register)
@@ -396,6 +396,7 @@
396#define MMU_CTL_D8KB 1 396#define MMU_CTL_D8KB 1
397#define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA ) 397#define MMU_CTL_UNA ( 0x1 << MMU_CTL_offUNA )
398 398
399#define MMU_CTL_CACHEABLE_NON 0
399#define MMU_CTL_CACHEABLE_WB 2 400#define MMU_CTL_CACHEABLE_WB 2
400#define MMU_CTL_CACHEABLE_WT 3 401#define MMU_CTL_CACHEABLE_WT 3
401 402
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 1240f148ec0f..10b48f0d8e85 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -32,6 +32,8 @@ void flush_anon_page(struct vm_area_struct *vma,
32 32
33#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 33#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
34void flush_kernel_dcache_page(struct page *page); 34void flush_kernel_dcache_page(struct page *page);
35void flush_kernel_vmap_range(void *addr, int size);
36void invalidate_kernel_vmap_range(void *addr, int size);
35void flush_icache_range(unsigned long start, unsigned long end); 37void flush_icache_range(unsigned long start, unsigned long end);
36void flush_icache_page(struct vm_area_struct *vma, struct page *page); 38void flush_icache_page(struct vm_area_struct *vma, struct page *page);
37#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages) 39#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 966e71b3c960..71cd226d6863 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -4,6 +4,8 @@
4#ifndef __ASM_NDS32_IO_H 4#ifndef __ASM_NDS32_IO_H
5#define __ASM_NDS32_IO_H 5#define __ASM_NDS32_IO_H
6 6
7#include <linux/types.h>
8
7extern void iounmap(volatile void __iomem *addr); 9extern void iounmap(volatile void __iomem *addr);
8#define __raw_writeb __raw_writeb 10#define __raw_writeb __raw_writeb
9static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 11static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h
index e27365c097b6..947f0491c9a7 100644
--- a/arch/nds32/include/asm/page.h
+++ b/arch/nds32/include/asm/page.h
@@ -27,6 +27,9 @@ extern void copy_user_highpage(struct page *to, struct page *from,
27 unsigned long vaddr, struct vm_area_struct *vma); 27 unsigned long vaddr, struct vm_area_struct *vma);
28extern void clear_user_highpage(struct page *page, unsigned long vaddr); 28extern void clear_user_highpage(struct page *page, unsigned long vaddr);
29 29
30void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
31 struct page *to);
32void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
30#define __HAVE_ARCH_COPY_USER_HIGHPAGE 33#define __HAVE_ARCH_COPY_USER_HIGHPAGE
31#define clear_user_highpage clear_user_highpage 34#define clear_user_highpage clear_user_highpage
32#else 35#else
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 6783937edbeb..d3e19a55cf53 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -152,6 +152,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
152#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE) 152#define PAGE_CACHE_L1 __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE)
153#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) 153#define PAGE_MEMORY __pgprot(_HAVE_PAGE_L | _PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
154#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD) 154#define PAGE_KERNEL __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_E | _PAGE_G | _PAGE_CACHE_SHRD)
155#define PAGE_SHARED __pgprot(_PAGE_V | _PAGE_M_URW_KRW | _PAGE_D | _PAGE_CACHE_SHRD)
155#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV) 156#define PAGE_DEVICE __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D | _PAGE_G | _PAGE_C_DEV)
156#endif /* __ASSEMBLY__ */ 157#endif /* __ASSEMBLY__ */
157 158
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
index a72e83d804f5..b8ae4e9a6b93 100644
--- a/arch/nds32/kernel/ex-entry.S
+++ b/arch/nds32/kernel/ex-entry.S
@@ -118,7 +118,7 @@ common_exception_handler:
118 /* interrupt */ 118 /* interrupt */
1192: 1192:
120#ifdef CONFIG_TRACE_IRQFLAGS 120#ifdef CONFIG_TRACE_IRQFLAGS
121 jal arch_trace_hardirqs_off 121 jal trace_hardirqs_off
122#endif 122#endif
123 move $r0, $sp 123 move $r0, $sp
124 sethi $lp, hi20(ret_from_intr) 124 sethi $lp, hi20(ret_from_intr)
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index 71f57bd70f3b..c5fdae174ced 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -57,14 +57,32 @@ _nodtb:
57 isb 57 isb
58 mtsr $r4, $L1_PPTB ! load page table pointer\n" 58 mtsr $r4, $L1_PPTB ! load page table pointer\n"
59 59
60/* set NTC0 cacheable/writeback, mutliple page size in use */ 60#ifdef CONFIG_CPU_DCACHE_DISABLE
61 #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON
62#else
63 #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
64 #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT
65 #else
66 #define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB
67 #endif
68#endif
69
70/* set NTC cacheability, mutliple page size in use */
61 mfsr $r3, $MMU_CTL 71 mfsr $r3, $MMU_CTL
62 li $r0, #~MMU_CTL_mskNTC0 72#if CONFIG_MEMORY_START >= 0xc0000000
63 and $r3, $r3, $r0 73 ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3)
74#elif CONFIG_MEMORY_START >= 0x80000000
75 ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2)
76#elif CONFIG_MEMORY_START >= 0x40000000
77 ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1)
78#else
79 ori $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0)
80#endif
81
64#ifdef CONFIG_ANDES_PAGE_SIZE_4KB 82#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
65 ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)) 83 ori $r3, $r3, #(MMU_CTL_mskMPZIU)
66#else 84#else
67 ori $r3, $r3, #(MMU_CTL_mskMPZIU|(MMU_CTL_CACHEABLE_WB << MMU_CTL_offNTC0)|MMU_CTL_D8KB) 85 ori $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB)
68#endif 86#endif
69#ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS 87#ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS
70 li $r0, #MMU_CTL_UNA 88 li $r0, #MMU_CTL_UNA
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index ba910e9e4ecb..2f5b2ccebe47 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -293,6 +293,9 @@ void __init setup_arch(char **cmdline_p)
293 /* paging_init() sets up the MMU and marks all pages as reserved */ 293 /* paging_init() sets up the MMU and marks all pages as reserved */
294 paging_init(); 294 paging_init();
295 295
296 /* invalidate all TLB entries because the new mapping is created */
297 __nds32__tlbop_flua();
298
296 /* use generic way to parse */ 299 /* use generic way to parse */
297 parse_early_param(); 300 parse_early_param();
298 301
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c
index bc70113c0e84..8b231e910ea6 100644
--- a/arch/nds32/kernel/stacktrace.c
+++ b/arch/nds32/kernel/stacktrace.c
@@ -9,6 +9,7 @@ void save_stack_trace(struct stack_trace *trace)
9{ 9{
10 save_stack_trace_tsk(current, trace); 10 save_stack_trace_tsk(current, trace);
11} 11}
12EXPORT_SYMBOL_GPL(save_stack_trace);
12 13
13void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 14void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
14{ 15{
@@ -45,3 +46,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
45 fpn = (unsigned long *)fpp; 46 fpn = (unsigned long *)fpp;
46 } 47 }
47} 48}
49EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index f1198d7a5654..016f15891f6d 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -23,7 +23,7 @@
23#include <asm/vdso_timer_info.h> 23#include <asm/vdso_timer_info.h>
24#include <asm/cache_info.h> 24#include <asm/cache_info.h>
25extern struct cache_info L1_cache_info[2]; 25extern struct cache_info L1_cache_info[2];
26extern char vdso_start, vdso_end; 26extern char vdso_start[], vdso_end[];
27static unsigned long vdso_pages __ro_after_init; 27static unsigned long vdso_pages __ro_after_init;
28static unsigned long timer_mapping_base; 28static unsigned long timer_mapping_base;
29 29
@@ -66,16 +66,16 @@ static int __init vdso_init(void)
66 int i; 66 int i;
67 struct page **vdso_pagelist; 67 struct page **vdso_pagelist;
68 68
69 if (memcmp(&vdso_start, "\177ELF", 4)) { 69 if (memcmp(vdso_start, "\177ELF", 4)) {
70 pr_err("vDSO is not a valid ELF object!\n"); 70 pr_err("vDSO is not a valid ELF object!\n");
71 return -EINVAL; 71 return -EINVAL;
72 } 72 }
73 /* Creat a timer io mapping to get clock cycles counter */ 73 /* Creat a timer io mapping to get clock cycles counter */
74 get_timer_node_info(); 74 get_timer_node_info();
75 75
76 vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; 76 vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
77 pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", 77 pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
78 vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); 78 vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
79 79
80 /* Allocate the vDSO pagelist */ 80 /* Allocate the vDSO pagelist */
81 vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL); 81 vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
@@ -83,7 +83,7 @@ static int __init vdso_init(void)
83 return -ENOMEM; 83 return -ENOMEM;
84 84
85 for (i = 0; i < vdso_pages; i++) 85 for (i = 0; i < vdso_pages; i++)
86 vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE); 86 vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE);
87 vdso_spec[1].pages = &vdso_pagelist[0]; 87 vdso_spec[1].pages = &vdso_pagelist[0];
88 88
89 return 0; 89 return 0;
diff --git a/arch/nds32/lib/copy_page.S b/arch/nds32/lib/copy_page.S
index 4a2ff85f17ee..f8701ed161a8 100644
--- a/arch/nds32/lib/copy_page.S
+++ b/arch/nds32/lib/copy_page.S
@@ -2,6 +2,7 @@
2// Copyright (C) 2005-2017 Andes Technology Corporation 2// Copyright (C) 2005-2017 Andes Technology Corporation
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <asm/export.h>
5#include <asm/page.h> 6#include <asm/page.h>
6 7
7 .text 8 .text
@@ -16,6 +17,7 @@ ENTRY(copy_page)
16 popm $r2, $r10 17 popm $r2, $r10
17 ret 18 ret
18ENDPROC(copy_page) 19ENDPROC(copy_page)
20EXPORT_SYMBOL(copy_page)
19 21
20ENTRY(clear_page) 22ENTRY(clear_page)
21 pushm $r1, $r9 23 pushm $r1, $r9
@@ -35,3 +37,4 @@ ENTRY(clear_page)
35 popm $r1, $r9 37 popm $r1, $r9
36 ret 38 ret
37ENDPROC(clear_page) 39ENDPROC(clear_page)
40EXPORT_SYMBOL(clear_page)
diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c
index b96a01b10ca7..e1aed9dc692d 100644
--- a/arch/nds32/mm/alignment.c
+++ b/arch/nds32/mm/alignment.c
@@ -19,7 +19,7 @@
19#define RA(inst) (((inst) >> 15) & 0x1FUL) 19#define RA(inst) (((inst) >> 15) & 0x1FUL)
20#define RB(inst) (((inst) >> 10) & 0x1FUL) 20#define RB(inst) (((inst) >> 10) & 0x1FUL)
21#define SV(inst) (((inst) >> 8) & 0x3UL) 21#define SV(inst) (((inst) >> 8) & 0x3UL)
22#define IMM(inst) (((inst) >> 0) & 0x3FFFUL) 22#define IMM(inst) (((inst) >> 0) & 0x7FFFUL)
23 23
24#define RA3(inst) (((inst) >> 3) & 0x7UL) 24#define RA3(inst) (((inst) >> 3) & 0x7UL)
25#define RT3(inst) (((inst) >> 6) & 0x7UL) 25#define RT3(inst) (((inst) >> 6) & 0x7UL)
@@ -28,6 +28,9 @@
28#define RA5(inst) (((inst) >> 0) & 0x1FUL) 28#define RA5(inst) (((inst) >> 0) & 0x1FUL)
29#define RT4(inst) (((inst) >> 5) & 0xFUL) 29#define RT4(inst) (((inst) >> 5) & 0xFUL)
30 30
31#define GET_IMMSVAL(imm_value) \
32 (((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value)
33
31#define __get8_data(val,addr,err) \ 34#define __get8_data(val,addr,err) \
32 __asm__( \ 35 __asm__( \
33 "1: lbi.bi %1, [%2], #1\n" \ 36 "1: lbi.bi %1, [%2], #1\n" \
@@ -467,7 +470,7 @@ static inline int do_32(unsigned long inst, struct pt_regs *regs)
467 } 470 }
468 471
469 if (imm) 472 if (imm)
470 shift = IMM(inst) * len; 473 shift = GET_IMMSVAL(IMM(inst)) * len;
471 else 474 else
472 shift = *idx_to_addr(regs, RB(inst)) << SV(inst); 475 shift = *idx_to_addr(regs, RB(inst)) << SV(inst);
473 476
@@ -552,7 +555,7 @@ static struct ctl_table alignment_tbl[3] = {
552 555
553static struct ctl_table nds32_sysctl_table[2] = { 556static struct ctl_table nds32_sysctl_table[2] = {
554 { 557 {
555 .procname = "unaligned_acess", 558 .procname = "unaligned_access",
556 .mode = 0555, 559 .mode = 0555,
557 .child = alignment_tbl}, 560 .child = alignment_tbl},
558 {} 561 {}
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index 6eb786a399a2..ce8fd34497bf 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -147,6 +147,25 @@ void flush_cache_vunmap(unsigned long start, unsigned long end)
147 cpu_icache_inval_all(); 147 cpu_icache_inval_all();
148} 148}
149 149
150void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
151 struct page *to)
152{
153 cpu_dcache_wbinval_page((unsigned long)vaddr);
154 cpu_icache_inval_page((unsigned long)vaddr);
155 copy_page(vto, vfrom);
156 cpu_dcache_wbinval_page((unsigned long)vto);
157 cpu_icache_inval_page((unsigned long)vto);
158}
159
160void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
161{
162 cpu_dcache_wbinval_page((unsigned long)vaddr);
163 cpu_icache_inval_page((unsigned long)vaddr);
164 clear_page(addr);
165 cpu_dcache_wbinval_page((unsigned long)addr);
166 cpu_icache_inval_page((unsigned long)addr);
167}
168
150void copy_user_highpage(struct page *to, struct page *from, 169void copy_user_highpage(struct page *to, struct page *from,
151 unsigned long vaddr, struct vm_area_struct *vma) 170 unsigned long vaddr, struct vm_area_struct *vma)
152{ 171{
@@ -156,11 +175,9 @@ void copy_user_highpage(struct page *to, struct page *from,
156 pto = page_to_phys(to); 175 pto = page_to_phys(to);
157 pfrom = page_to_phys(from); 176 pfrom = page_to_phys(from);
158 177
178 local_irq_save(flags);
159 if (aliasing(vaddr, (unsigned long)kfrom)) 179 if (aliasing(vaddr, (unsigned long)kfrom))
160 cpu_dcache_wb_page((unsigned long)kfrom); 180 cpu_dcache_wb_page((unsigned long)kfrom);
161 if (aliasing(vaddr, (unsigned long)kto))
162 cpu_dcache_inval_page((unsigned long)kto);
163 local_irq_save(flags);
164 vto = kremap0(vaddr, pto); 181 vto = kremap0(vaddr, pto);
165 vfrom = kremap1(vaddr, pfrom); 182 vfrom = kremap1(vaddr, pfrom);
166 copy_page((void *)vto, (void *)vfrom); 183 copy_page((void *)vto, (void *)vfrom);
@@ -198,21 +215,25 @@ void flush_dcache_page(struct page *page)
198 if (mapping && !mapping_mapped(mapping)) 215 if (mapping && !mapping_mapped(mapping))
199 set_bit(PG_dcache_dirty, &page->flags); 216 set_bit(PG_dcache_dirty, &page->flags);
200 else { 217 else {
201 int i, pc; 218 unsigned long kaddr, flags;
202 unsigned long vto, kaddr, flags; 219
203 kaddr = (unsigned long)page_address(page); 220 kaddr = (unsigned long)page_address(page);
204 cpu_dcache_wbinval_page(kaddr);
205 pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
206 local_irq_save(flags); 221 local_irq_save(flags);
207 for (i = 0; i < pc; i++) { 222 cpu_dcache_wbinval_page(kaddr);
208 vto = 223 if (mapping) {
209 kremap0(kaddr + i * PAGE_SIZE, page_to_phys(page)); 224 unsigned long vaddr, kto;
210 cpu_dcache_wbinval_page(vto); 225
211 kunmap01(vto); 226 vaddr = page->index << PAGE_SHIFT;
227 if (aliasing(vaddr, kaddr)) {
228 kto = kremap0(vaddr, page_to_phys(page));
229 cpu_dcache_wbinval_page(kto);
230 kunmap01(kto);
231 }
212 } 232 }
213 local_irq_restore(flags); 233 local_irq_restore(flags);
214 } 234 }
215} 235}
236EXPORT_SYMBOL(flush_dcache_page);
216 237
217void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 238void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
218 unsigned long vaddr, void *dst, void *src, int len) 239 unsigned long vaddr, void *dst, void *src, int len)
@@ -251,7 +272,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
251void flush_anon_page(struct vm_area_struct *vma, 272void flush_anon_page(struct vm_area_struct *vma,
252 struct page *page, unsigned long vaddr) 273 struct page *page, unsigned long vaddr)
253{ 274{
254 unsigned long flags; 275 unsigned long kaddr, flags, ktmp;
255 if (!PageAnon(page)) 276 if (!PageAnon(page))
256 return; 277 return;
257 278
@@ -261,7 +282,12 @@ void flush_anon_page(struct vm_area_struct *vma,
261 local_irq_save(flags); 282 local_irq_save(flags);
262 if (vma->vm_flags & VM_EXEC) 283 if (vma->vm_flags & VM_EXEC)
263 cpu_icache_inval_page(vaddr & PAGE_MASK); 284 cpu_icache_inval_page(vaddr & PAGE_MASK);
264 cpu_dcache_wbinval_page((unsigned long)page_address(page)); 285 kaddr = (unsigned long)page_address(page);
286 if (aliasing(vaddr, kaddr)) {
287 ktmp = kremap0(vaddr, page_to_phys(page));
288 cpu_dcache_wbinval_page(ktmp);
289 kunmap01(ktmp);
290 }
265 local_irq_restore(flags); 291 local_irq_restore(flags);
266} 292}
267 293
@@ -272,6 +298,25 @@ void flush_kernel_dcache_page(struct page *page)
272 cpu_dcache_wbinval_page((unsigned long)page_address(page)); 298 cpu_dcache_wbinval_page((unsigned long)page_address(page));
273 local_irq_restore(flags); 299 local_irq_restore(flags);
274} 300}
301EXPORT_SYMBOL(flush_kernel_dcache_page);
302
303void flush_kernel_vmap_range(void *addr, int size)
304{
305 unsigned long flags;
306 local_irq_save(flags);
307 cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
308 local_irq_restore(flags);
309}
310EXPORT_SYMBOL(flush_kernel_vmap_range);
311
312void invalidate_kernel_vmap_range(void *addr, int size)
313{
314 unsigned long flags;
315 local_irq_save(flags);
316 cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
317 local_irq_restore(flags);
318}
319EXPORT_SYMBOL(invalidate_kernel_vmap_range);
275 320
276void flush_icache_range(unsigned long start, unsigned long end) 321void flush_icache_range(unsigned long start, unsigned long end)
277{ 322{
@@ -283,6 +328,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
283 cpu_cache_wbinval_range(start, end, 1); 328 cpu_cache_wbinval_range(start, end, 1);
284 local_irq_restore(flags); 329 local_irq_restore(flags);
285} 330}
331EXPORT_SYMBOL(flush_icache_range);
286 332
287void flush_icache_page(struct vm_area_struct *vma, struct page *page) 333void flush_icache_page(struct vm_area_struct *vma, struct page *page)
288{ 334{
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 93ee0160720b..c713d2ad55dc 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -30,6 +30,7 @@ extern unsigned long phys_initrd_size;
30 * zero-initialized data and COW. 30 * zero-initialized data and COW.
31 */ 31 */
32struct page *empty_zero_page; 32struct page *empty_zero_page;
33EXPORT_SYMBOL(empty_zero_page);
33 34
34static void __init zone_sizes_init(void) 35static void __init zone_sizes_init(void)
35{ 36{
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 471b2274fbeb..c40b4380951c 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -74,6 +74,27 @@
74 */ 74 */
75#define EX_R3 EX_DAR 75#define EX_R3 EX_DAR
76 76
77#define STF_ENTRY_BARRIER_SLOT \
78 STF_ENTRY_BARRIER_FIXUP_SECTION; \
79 nop; \
80 nop; \
81 nop
82
83#define STF_EXIT_BARRIER_SLOT \
84 STF_EXIT_BARRIER_FIXUP_SECTION; \
85 nop; \
86 nop; \
87 nop; \
88 nop; \
89 nop; \
90 nop
91
92/*
93 * r10 must be free to use, r13 must be paca
94 */
95#define INTERRUPT_TO_KERNEL \
96 STF_ENTRY_BARRIER_SLOT
97
77/* 98/*
78 * Macros for annotating the expected destination of (h)rfid 99 * Macros for annotating the expected destination of (h)rfid
79 * 100 *
@@ -90,16 +111,19 @@
90 rfid 111 rfid
91 112
92#define RFI_TO_USER \ 113#define RFI_TO_USER \
114 STF_EXIT_BARRIER_SLOT; \
93 RFI_FLUSH_SLOT; \ 115 RFI_FLUSH_SLOT; \
94 rfid; \ 116 rfid; \
95 b rfi_flush_fallback 117 b rfi_flush_fallback
96 118
97#define RFI_TO_USER_OR_KERNEL \ 119#define RFI_TO_USER_OR_KERNEL \
120 STF_EXIT_BARRIER_SLOT; \
98 RFI_FLUSH_SLOT; \ 121 RFI_FLUSH_SLOT; \
99 rfid; \ 122 rfid; \
100 b rfi_flush_fallback 123 b rfi_flush_fallback
101 124
102#define RFI_TO_GUEST \ 125#define RFI_TO_GUEST \
126 STF_EXIT_BARRIER_SLOT; \
103 RFI_FLUSH_SLOT; \ 127 RFI_FLUSH_SLOT; \
104 rfid; \ 128 rfid; \
105 b rfi_flush_fallback 129 b rfi_flush_fallback
@@ -108,21 +132,25 @@
108 hrfid 132 hrfid
109 133
110#define HRFI_TO_USER \ 134#define HRFI_TO_USER \
135 STF_EXIT_BARRIER_SLOT; \
111 RFI_FLUSH_SLOT; \ 136 RFI_FLUSH_SLOT; \
112 hrfid; \ 137 hrfid; \
113 b hrfi_flush_fallback 138 b hrfi_flush_fallback
114 139
115#define HRFI_TO_USER_OR_KERNEL \ 140#define HRFI_TO_USER_OR_KERNEL \
141 STF_EXIT_BARRIER_SLOT; \
116 RFI_FLUSH_SLOT; \ 142 RFI_FLUSH_SLOT; \
117 hrfid; \ 143 hrfid; \
118 b hrfi_flush_fallback 144 b hrfi_flush_fallback
119 145
120#define HRFI_TO_GUEST \ 146#define HRFI_TO_GUEST \
147 STF_EXIT_BARRIER_SLOT; \
121 RFI_FLUSH_SLOT; \ 148 RFI_FLUSH_SLOT; \
122 hrfid; \ 149 hrfid; \
123 b hrfi_flush_fallback 150 b hrfi_flush_fallback
124 151
125#define HRFI_TO_UNKNOWN \ 152#define HRFI_TO_UNKNOWN \
153 STF_EXIT_BARRIER_SLOT; \
126 RFI_FLUSH_SLOT; \ 154 RFI_FLUSH_SLOT; \
127 hrfid; \ 155 hrfid; \
128 b hrfi_flush_fallback 156 b hrfi_flush_fallback
@@ -254,6 +282,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
254#define __EXCEPTION_PROLOG_1_PRE(area) \ 282#define __EXCEPTION_PROLOG_1_PRE(area) \
255 OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ 283 OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
256 OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ 284 OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
285 INTERRUPT_TO_KERNEL; \
257 SAVE_CTR(r10, area); \ 286 SAVE_CTR(r10, area); \
258 mfcr r9; 287 mfcr r9;
259 288
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 1e82eb3caabd..a9b64df34e2a 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -187,6 +187,22 @@ label##3: \
187 FTR_ENTRY_OFFSET label##1b-label##3b; \ 187 FTR_ENTRY_OFFSET label##1b-label##3b; \
188 .popsection; 188 .popsection;
189 189
190#define STF_ENTRY_BARRIER_FIXUP_SECTION \
191953: \
192 .pushsection __stf_entry_barrier_fixup,"a"; \
193 .align 2; \
194954: \
195 FTR_ENTRY_OFFSET 953b-954b; \
196 .popsection;
197
198#define STF_EXIT_BARRIER_FIXUP_SECTION \
199955: \
200 .pushsection __stf_exit_barrier_fixup,"a"; \
201 .align 2; \
202956: \
203 FTR_ENTRY_OFFSET 955b-956b; \
204 .popsection;
205
190#define RFI_FLUSH_FIXUP_SECTION \ 206#define RFI_FLUSH_FIXUP_SECTION \
191951: \ 207951: \
192 .pushsection __rfi_flush_fixup,"a"; \ 208 .pushsection __rfi_flush_fixup,"a"; \
@@ -199,6 +215,9 @@ label##3: \
199#ifndef __ASSEMBLY__ 215#ifndef __ASSEMBLY__
200#include <linux/types.h> 216#include <linux/types.h>
201 217
218extern long stf_barrier_fallback;
219extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
220extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
202extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; 221extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
203 222
204void apply_feature_fixups(void); 223void apply_feature_fixups(void);
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4c02a7378d06..e7377b73cfec 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -96,6 +96,7 @@ struct kvmppc_vcore {
96 struct kvm_vcpu *runner; 96 struct kvm_vcpu *runner;
97 struct kvm *kvm; 97 struct kvm *kvm;
98 u64 tb_offset; /* guest timebase - host timebase */ 98 u64 tb_offset; /* guest timebase - host timebase */
99 u64 tb_offset_applied; /* timebase offset currently in force */
99 ulong lpcr; 100 ulong lpcr;
100 u32 arch_compat; 101 u32 arch_compat;
101 ulong pcr; 102 ulong pcr;
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index fa4d2e1cf772..44989b22383c 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -12,6 +12,17 @@
12extern unsigned long powerpc_security_features; 12extern unsigned long powerpc_security_features;
13extern bool rfi_flush; 13extern bool rfi_flush;
14 14
15/* These are bit flags */
16enum stf_barrier_type {
17 STF_BARRIER_NONE = 0x1,
18 STF_BARRIER_FALLBACK = 0x2,
19 STF_BARRIER_EIEIO = 0x4,
20 STF_BARRIER_SYNC_ORI = 0x8,
21};
22
23void setup_stf_barrier(void);
24void do_stf_barrier_fixups(enum stf_barrier_type types);
25
15static inline void security_ftr_set(unsigned long feature) 26static inline void security_ftr_set(unsigned long feature)
16{ 27{
17 powerpc_security_features |= feature; 28 powerpc_security_features |= feature;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 6bee65f3cfd3..373dc1d6ef44 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -562,6 +562,7 @@ int main(void)
562 OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads); 562 OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
563 OFFSET(VCORE_KVM, kvmppc_vcore, kvm); 563 OFFSET(VCORE_KVM, kvmppc_vcore, kvm);
564 OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset); 564 OFFSET(VCORE_TB_OFFSET, kvmppc_vcore, tb_offset);
565 OFFSET(VCORE_TB_OFFSET_APPL, kvmppc_vcore, tb_offset_applied);
565 OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr); 566 OFFSET(VCORE_LPCR, kvmppc_vcore, lpcr);
566 OFFSET(VCORE_PCR, kvmppc_vcore, pcr); 567 OFFSET(VCORE_PCR, kvmppc_vcore, pcr);
567 OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes); 568 OFFSET(VCORE_DPDES, kvmppc_vcore, dpdes);
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 3f30c994e931..458b928dbd84 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
28 beqlr 28 beqlr
29 li r0,0 29 li r0,0
30 mtspr SPRN_LPID,r0 30 mtspr SPRN_LPID,r0
31 mtspr SPRN_PCR,r0
31 mfspr r3,SPRN_LPCR 32 mfspr r3,SPRN_LPCR
32 li r4,(LPCR_LPES1 >> LPCR_LPES_SH) 33 li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
33 bl __init_LPCR_ISA206 34 bl __init_LPCR_ISA206
@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
41 beqlr 42 beqlr
42 li r0,0 43 li r0,0
43 mtspr SPRN_LPID,r0 44 mtspr SPRN_LPID,r0
45 mtspr SPRN_PCR,r0
44 mfspr r3,SPRN_LPCR 46 mfspr r3,SPRN_LPCR
45 li r4,(LPCR_LPES1 >> LPCR_LPES_SH) 47 li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
46 bl __init_LPCR_ISA206 48 bl __init_LPCR_ISA206
@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
57 beqlr 59 beqlr
58 li r0,0 60 li r0,0
59 mtspr SPRN_LPID,r0 61 mtspr SPRN_LPID,r0
62 mtspr SPRN_PCR,r0
60 mfspr r3,SPRN_LPCR 63 mfspr r3,SPRN_LPCR
61 ori r3, r3, LPCR_PECEDH 64 ori r3, r3, LPCR_PECEDH
62 li r4,0 /* LPES = 0 */ 65 li r4,0 /* LPES = 0 */
@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
78 beqlr 81 beqlr
79 li r0,0 82 li r0,0
80 mtspr SPRN_LPID,r0 83 mtspr SPRN_LPID,r0
84 mtspr SPRN_PCR,r0
81 mfspr r3,SPRN_LPCR 85 mfspr r3,SPRN_LPCR
82 ori r3, r3, LPCR_PECEDH 86 ori r3, r3, LPCR_PECEDH
83 li r4,0 /* LPES = 0 */ 87 li r4,0 /* LPES = 0 */
@@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9)
99 mtspr SPRN_PSSCR,r0 103 mtspr SPRN_PSSCR,r0
100 mtspr SPRN_LPID,r0 104 mtspr SPRN_LPID,r0
101 mtspr SPRN_PID,r0 105 mtspr SPRN_PID,r0
106 mtspr SPRN_PCR,r0
102 mfspr r3,SPRN_LPCR 107 mfspr r3,SPRN_LPCR
103 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 108 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
104 or r3, r3, r4 109 or r3, r3, r4
@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
123 mtspr SPRN_PSSCR,r0 128 mtspr SPRN_PSSCR,r0
124 mtspr SPRN_LPID,r0 129 mtspr SPRN_LPID,r0
125 mtspr SPRN_PID,r0 130 mtspr SPRN_PID,r0
131 mtspr SPRN_PCR,r0
126 mfspr r3,SPRN_LPCR 132 mfspr r3,SPRN_LPCR
127 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) 133 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
128 or r3, r3, r4 134 or r3, r3, r4
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 8ab51f6ca03a..c904477abaf3 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -101,6 +101,7 @@ static void __restore_cpu_cpufeatures(void)
101 if (hv_mode) { 101 if (hv_mode) {
102 mtspr(SPRN_LPID, 0); 102 mtspr(SPRN_LPID, 0);
103 mtspr(SPRN_HFSCR, system_registers.hfscr); 103 mtspr(SPRN_HFSCR, system_registers.hfscr);
104 mtspr(SPRN_PCR, 0);
104 } 105 }
105 mtspr(SPRN_FSCR, system_registers.fscr); 106 mtspr(SPRN_FSCR, system_registers.fscr);
106 107
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ae6a849db60b..f283958129f2 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -885,7 +885,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
885#endif 885#endif
886 886
887 887
888EXC_REAL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED) 888EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
889EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED) 889EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
890TRAMP_KVM(PACA_EXGEN, 0x900) 890TRAMP_KVM(PACA_EXGEN, 0x900)
891EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt) 891EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
@@ -961,6 +961,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
961 mtctr r13; \ 961 mtctr r13; \
962 GET_PACA(r13); \ 962 GET_PACA(r13); \
963 std r10,PACA_EXGEN+EX_R10(r13); \ 963 std r10,PACA_EXGEN+EX_R10(r13); \
964 INTERRUPT_TO_KERNEL; \
964 KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \ 965 KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
965 HMT_MEDIUM; \ 966 HMT_MEDIUM; \
966 mfctr r9; 967 mfctr r9;
@@ -969,7 +970,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
969#define SYSCALL_KVMTEST \ 970#define SYSCALL_KVMTEST \
970 HMT_MEDIUM; \ 971 HMT_MEDIUM; \
971 mr r9,r13; \ 972 mr r9,r13; \
972 GET_PACA(r13); 973 GET_PACA(r13); \
974 INTERRUPT_TO_KERNEL;
973#endif 975#endif
974 976
975#define LOAD_SYSCALL_HANDLER(reg) \ 977#define LOAD_SYSCALL_HANDLER(reg) \
@@ -1507,6 +1509,19 @@ masked_##_H##interrupt: \
1507 b .; \ 1509 b .; \
1508 MASKED_DEC_HANDLER(_H) 1510 MASKED_DEC_HANDLER(_H)
1509 1511
1512TRAMP_REAL_BEGIN(stf_barrier_fallback)
1513 std r9,PACA_EXRFI+EX_R9(r13)
1514 std r10,PACA_EXRFI+EX_R10(r13)
1515 sync
1516 ld r9,PACA_EXRFI+EX_R9(r13)
1517 ld r10,PACA_EXRFI+EX_R10(r13)
1518 ori 31,31,0
1519 .rept 14
1520 b 1f
15211:
1522 .endr
1523 blr
1524
1510TRAMP_REAL_BEGIN(rfi_flush_fallback) 1525TRAMP_REAL_BEGIN(rfi_flush_fallback)
1511 SET_SCRATCH0(r13); 1526 SET_SCRATCH0(r13);
1512 GET_PACA(r13); 1527 GET_PACA(r13);
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index bab5a27ea805..b98a722da915 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -8,6 +8,7 @@
8#include <linux/device.h> 8#include <linux/device.h>
9#include <linux/seq_buf.h> 9#include <linux/seq_buf.h>
10 10
11#include <asm/debugfs.h>
11#include <asm/security_features.h> 12#include <asm/security_features.h>
12 13
13 14
@@ -86,3 +87,151 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
86 87
87 return s.len; 88 return s.len;
88} 89}
90
91/*
92 * Store-forwarding barrier support.
93 */
94
95static enum stf_barrier_type stf_enabled_flush_types;
96static bool no_stf_barrier;
97bool stf_barrier;
98
99static int __init handle_no_stf_barrier(char *p)
100{
101 pr_info("stf-barrier: disabled on command line.");
102 no_stf_barrier = true;
103 return 0;
104}
105
106early_param("no_stf_barrier", handle_no_stf_barrier);
107
108/* This is the generic flag used by other architectures */
109static int __init handle_ssbd(char *p)
110{
111 if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
112 /* Until firmware tells us, we have the barrier with auto */
113 return 0;
114 } else if (strncmp(p, "off", 3) == 0) {
115 handle_no_stf_barrier(NULL);
116 return 0;
117 } else
118 return 1;
119
120 return 0;
121}
122early_param("spec_store_bypass_disable", handle_ssbd);
123
124/* This is the generic flag used by other architectures */
125static int __init handle_no_ssbd(char *p)
126{
127 handle_no_stf_barrier(NULL);
128 return 0;
129}
130early_param("nospec_store_bypass_disable", handle_no_ssbd);
131
132static void stf_barrier_enable(bool enable)
133{
134 if (enable)
135 do_stf_barrier_fixups(stf_enabled_flush_types);
136 else
137 do_stf_barrier_fixups(STF_BARRIER_NONE);
138
139 stf_barrier = enable;
140}
141
142void setup_stf_barrier(void)
143{
144 enum stf_barrier_type type;
145 bool enable, hv;
146
147 hv = cpu_has_feature(CPU_FTR_HVMODE);
148
149 /* Default to fallback in case fw-features are not available */
150 if (cpu_has_feature(CPU_FTR_ARCH_300))
151 type = STF_BARRIER_EIEIO;
152 else if (cpu_has_feature(CPU_FTR_ARCH_207S))
153 type = STF_BARRIER_SYNC_ORI;
154 else if (cpu_has_feature(CPU_FTR_ARCH_206))
155 type = STF_BARRIER_FALLBACK;
156 else
157 type = STF_BARRIER_NONE;
158
159 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
160 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
161 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
162
163 if (type == STF_BARRIER_FALLBACK) {
164 pr_info("stf-barrier: fallback barrier available\n");
165 } else if (type == STF_BARRIER_SYNC_ORI) {
166 pr_info("stf-barrier: hwsync barrier available\n");
167 } else if (type == STF_BARRIER_EIEIO) {
168 pr_info("stf-barrier: eieio barrier available\n");
169 }
170
171 stf_enabled_flush_types = type;
172
173 if (!no_stf_barrier)
174 stf_barrier_enable(enable);
175}
176
177ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
178{
179 if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
180 const char *type;
181 switch (stf_enabled_flush_types) {
182 case STF_BARRIER_EIEIO:
183 type = "eieio";
184 break;
185 case STF_BARRIER_SYNC_ORI:
186 type = "hwsync";
187 break;
188 case STF_BARRIER_FALLBACK:
189 type = "fallback";
190 break;
191 default:
192 type = "unknown";
193 }
194 return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
195 }
196
197 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
198 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
199 return sprintf(buf, "Not affected\n");
200
201 return sprintf(buf, "Vulnerable\n");
202}
203
204#ifdef CONFIG_DEBUG_FS
205static int stf_barrier_set(void *data, u64 val)
206{
207 bool enable;
208
209 if (val == 1)
210 enable = true;
211 else if (val == 0)
212 enable = false;
213 else
214 return -EINVAL;
215
216 /* Only do anything if we're changing state */
217 if (enable != stf_barrier)
218 stf_barrier_enable(enable);
219
220 return 0;
221}
222
223static int stf_barrier_get(void *data, u64 *val)
224{
225 *val = stf_barrier ? 1 : 0;
226 return 0;
227}
228
229DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
230
231static __init int stf_barrier_debugfs_init(void)
232{
233 debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
234 return 0;
235}
236device_initcall(stf_barrier_debugfs_init);
237#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index c8af90ff49f0..b8d82678f8b4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -134,6 +134,20 @@ SECTIONS
134 134
135#ifdef CONFIG_PPC64 135#ifdef CONFIG_PPC64
136 . = ALIGN(8); 136 . = ALIGN(8);
137 __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
138 __start___stf_entry_barrier_fixup = .;
139 *(__stf_entry_barrier_fixup)
140 __stop___stf_entry_barrier_fixup = .;
141 }
142
143 . = ALIGN(8);
144 __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
145 __start___stf_exit_barrier_fixup = .;
146 *(__stf_exit_barrier_fixup)
147 __stop___stf_exit_barrier_fixup = .;
148 }
149
150 . = ALIGN(8);
137 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { 151 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
138 __start___rfi_flush_fixup = .; 152 __start___rfi_flush_fixup = .;
139 *(__rfi_flush_fixup) 153 *(__rfi_flush_fixup)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index a57eafec4dc2..361f42c8c73e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -162,7 +162,7 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
162 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) 162 if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
163 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1) 163 asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
164 : : "r" (addr), "r" (kvm->arch.lpid) : "memory"); 164 : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
165 asm volatile("ptesync": : :"memory"); 165 asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
166} 166}
167 167
168static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr) 168static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
@@ -173,7 +173,7 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
173 /* RIC=1 PRS=0 R=1 IS=2 */ 173 /* RIC=1 PRS=0 R=1 IS=2 */
174 asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1) 174 asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
175 : : "r" (rb), "r" (kvm->arch.lpid) : "memory"); 175 : : "r" (rb), "r" (kvm->arch.lpid) : "memory");
176 asm volatile("ptesync": : :"memory"); 176 asm volatile("eieio ; tlbsync ; ptesync": : :"memory");
177} 177}
178 178
179unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, 179unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
@@ -584,7 +584,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
584 584
585 ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift); 585 ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
586 if (ptep && pte_present(*ptep)) { 586 if (ptep && pte_present(*ptep)) {
587 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0, 587 old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
588 gpa, shift); 588 gpa, shift);
589 kvmppc_radix_tlbie_page(kvm, gpa, shift); 589 kvmppc_radix_tlbie_page(kvm, gpa, shift);
590 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) { 590 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 4d07fca5121c..9963f65c212b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2441,6 +2441,7 @@ static void init_vcore_to_run(struct kvmppc_vcore *vc)
2441 vc->in_guest = 0; 2441 vc->in_guest = 0;
2442 vc->napping_threads = 0; 2442 vc->napping_threads = 0;
2443 vc->conferring_threads = 0; 2443 vc->conferring_threads = 0;
2444 vc->tb_offset_applied = 0;
2444} 2445}
2445 2446
2446static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) 2447static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bd63fa8a08b5..07ca1b2a7966 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -692,6 +692,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
69222: ld r8,VCORE_TB_OFFSET(r5) 69222: ld r8,VCORE_TB_OFFSET(r5)
693 cmpdi r8,0 693 cmpdi r8,0
694 beq 37f 694 beq 37f
695 std r8, VCORE_TB_OFFSET_APPL(r5)
695 mftb r6 /* current host timebase */ 696 mftb r6 /* current host timebase */
696 add r8,r8,r6 697 add r8,r8,r6
697 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 698 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
@@ -940,18 +941,6 @@ FTR_SECTION_ELSE
940ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) 941ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
9418: 9428:
942 943
943 /*
944 * Set the decrementer to the guest decrementer.
945 */
946 ld r8,VCPU_DEC_EXPIRES(r4)
947 /* r8 is a host timebase value here, convert to guest TB */
948 ld r5,HSTATE_KVM_VCORE(r13)
949 ld r6,VCORE_TB_OFFSET(r5)
950 add r8,r8,r6
951 mftb r7
952 subf r3,r7,r8
953 mtspr SPRN_DEC,r3
954
955 ld r5, VCPU_SPRG0(r4) 944 ld r5, VCPU_SPRG0(r4)
956 ld r6, VCPU_SPRG1(r4) 945 ld r6, VCPU_SPRG1(r4)
957 ld r7, VCPU_SPRG2(r4) 946 ld r7, VCPU_SPRG2(r4)
@@ -1005,6 +994,18 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1005 mtspr SPRN_LPCR,r8 994 mtspr SPRN_LPCR,r8
1006 isync 995 isync
1007 996
997 /*
998 * Set the decrementer to the guest decrementer.
999 */
1000 ld r8,VCPU_DEC_EXPIRES(r4)
1001 /* r8 is a host timebase value here, convert to guest TB */
1002 ld r5,HSTATE_KVM_VCORE(r13)
1003 ld r6,VCORE_TB_OFFSET_APPL(r5)
1004 add r8,r8,r6
1005 mftb r7
1006 subf r3,r7,r8
1007 mtspr SPRN_DEC,r3
1008
1008 /* Check if HDEC expires soon */ 1009 /* Check if HDEC expires soon */
1009 mfspr r3, SPRN_HDEC 1010 mfspr r3, SPRN_HDEC
1010 EXTEND_HDEC(r3) 1011 EXTEND_HDEC(r3)
@@ -1597,8 +1598,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1597 1598
1598guest_bypass: 1599guest_bypass:
1599 stw r12, STACK_SLOT_TRAP(r1) 1600 stw r12, STACK_SLOT_TRAP(r1)
1600 mr r3, r12 1601
1602 /* Save DEC */
1603 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1604 ld r3, HSTATE_KVM_VCORE(r13)
1605 mfspr r5,SPRN_DEC
1606 mftb r6
1607 /* On P9, if the guest has large decr enabled, don't sign extend */
1608BEGIN_FTR_SECTION
1609 ld r4, VCORE_LPCR(r3)
1610 andis. r4, r4, LPCR_LD@h
1611 bne 16f
1612END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1613 extsw r5,r5
161416: add r5,r5,r6
1615 /* r5 is a guest timebase value here, convert to host TB */
1616 ld r4,VCORE_TB_OFFSET_APPL(r3)
1617 subf r5,r4,r5
1618 std r5,VCPU_DEC_EXPIRES(r9)
1619
1601 /* Increment exit count, poke other threads to exit */ 1620 /* Increment exit count, poke other threads to exit */
1621 mr r3, r12
1602 bl kvmhv_commence_exit 1622 bl kvmhv_commence_exit
1603 nop 1623 nop
1604 ld r9, HSTATE_KVM_VCPU(r13) 1624 ld r9, HSTATE_KVM_VCPU(r13)
@@ -1639,23 +1659,6 @@ guest_bypass:
1639 mtspr SPRN_PURR,r3 1659 mtspr SPRN_PURR,r3
1640 mtspr SPRN_SPURR,r4 1660 mtspr SPRN_SPURR,r4
1641 1661
1642 /* Save DEC */
1643 ld r3, HSTATE_KVM_VCORE(r13)
1644 mfspr r5,SPRN_DEC
1645 mftb r6
1646 /* On P9, if the guest has large decr enabled, don't sign extend */
1647BEGIN_FTR_SECTION
1648 ld r4, VCORE_LPCR(r3)
1649 andis. r4, r4, LPCR_LD@h
1650 bne 16f
1651END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1652 extsw r5,r5
165316: add r5,r5,r6
1654 /* r5 is a guest timebase value here, convert to host TB */
1655 ld r4,VCORE_TB_OFFSET(r3)
1656 subf r5,r4,r5
1657 std r5,VCPU_DEC_EXPIRES(r9)
1658
1659BEGIN_FTR_SECTION 1662BEGIN_FTR_SECTION
1660 b 8f 1663 b 8f
1661END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1664END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
@@ -1905,6 +1908,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1905 cmpwi cr2, r0, 0 1908 cmpwi cr2, r0, 0
1906 beq cr2, 4f 1909 beq cr2, 4f
1907 1910
1911 /*
1912 * Radix: do eieio; tlbsync; ptesync sequence in case we
1913 * interrupted the guest between a tlbie and a ptesync.
1914 */
1915 eieio
1916 tlbsync
1917 ptesync
1918
1908 /* Radix: Handle the case where the guest used an illegal PID */ 1919 /* Radix: Handle the case where the guest used an illegal PID */
1909 LOAD_REG_ADDR(r4, mmu_base_pid) 1920 LOAD_REG_ADDR(r4, mmu_base_pid)
1910 lwz r3, VCPU_GUEST_PID(r9) 1921 lwz r3, VCPU_GUEST_PID(r9)
@@ -2017,9 +2028,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2017 2028
201827: 202927:
2019 /* Subtract timebase offset from timebase */ 2030 /* Subtract timebase offset from timebase */
2020 ld r8,VCORE_TB_OFFSET(r5) 2031 ld r8, VCORE_TB_OFFSET_APPL(r5)
2021 cmpdi r8,0 2032 cmpdi r8,0
2022 beq 17f 2033 beq 17f
2034 li r0, 0
2035 std r0, VCORE_TB_OFFSET_APPL(r5)
2023 mftb r6 /* current guest timebase */ 2036 mftb r6 /* current guest timebase */
2024 subf r8,r8,r6 2037 subf r8,r8,r6
2025 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 2038 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
@@ -2700,7 +2713,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2700 add r3, r3, r5 2713 add r3, r3, r5
2701 ld r4, HSTATE_KVM_VCPU(r13) 2714 ld r4, HSTATE_KVM_VCPU(r13)
2702 ld r5, HSTATE_KVM_VCORE(r13) 2715 ld r5, HSTATE_KVM_VCORE(r13)
2703 ld r6, VCORE_TB_OFFSET(r5) 2716 ld r6, VCORE_TB_OFFSET_APPL(r5)
2704 subf r3, r6, r3 /* convert to host TB value */ 2717 subf r3, r6, r3 /* convert to host TB value */
2705 std r3, VCPU_DEC_EXPIRES(r4) 2718 std r3, VCPU_DEC_EXPIRES(r4)
2706 2719
@@ -2799,7 +2812,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
2799 /* Restore guest decrementer */ 2812 /* Restore guest decrementer */
2800 ld r3, VCPU_DEC_EXPIRES(r4) 2813 ld r3, VCPU_DEC_EXPIRES(r4)
2801 ld r5, HSTATE_KVM_VCORE(r13) 2814 ld r5, HSTATE_KVM_VCORE(r13)
2802 ld r6, VCORE_TB_OFFSET(r5) 2815 ld r6, VCORE_TB_OFFSET_APPL(r5)
2803 add r3, r3, r6 /* convert host TB to guest TB value */ 2816 add r3, r3, r6 /* convert host TB to guest TB value */
2804 mftb r7 2817 mftb r7
2805 subf r3, r7, r3 2818 subf r3, r7, r3
@@ -3606,12 +3619,9 @@ kvmppc_fix_pmao:
3606 */ 3619 */
3607kvmhv_start_timing: 3620kvmhv_start_timing:
3608 ld r5, HSTATE_KVM_VCORE(r13) 3621 ld r5, HSTATE_KVM_VCORE(r13)
3609 lbz r6, VCORE_IN_GUEST(r5) 3622 ld r6, VCORE_TB_OFFSET_APPL(r5)
3610 cmpwi r6, 0 3623 mftb r5
3611 beq 5f /* if in guest, need to */ 3624 subf r5, r6, r5 /* subtract current timebase offset */
3612 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
36135: mftb r5
3614 subf r5, r6, r5
3615 std r3, VCPU_CUR_ACTIVITY(r4) 3625 std r3, VCPU_CUR_ACTIVITY(r4)
3616 std r5, VCPU_ACTIVITY_START(r4) 3626 std r5, VCPU_ACTIVITY_START(r4)
3617 blr 3627 blr
@@ -3622,15 +3632,12 @@ kvmhv_start_timing:
3622 */ 3632 */
3623kvmhv_accumulate_time: 3633kvmhv_accumulate_time:
3624 ld r5, HSTATE_KVM_VCORE(r13) 3634 ld r5, HSTATE_KVM_VCORE(r13)
3625 lbz r8, VCORE_IN_GUEST(r5) 3635 ld r8, VCORE_TB_OFFSET_APPL(r5)
3626 cmpwi r8, 0 3636 ld r5, VCPU_CUR_ACTIVITY(r4)
3627 beq 4f /* if in guest, need to */
3628 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
36294: ld r5, VCPU_CUR_ACTIVITY(r4)
3630 ld r6, VCPU_ACTIVITY_START(r4) 3637 ld r6, VCPU_ACTIVITY_START(r4)
3631 std r3, VCPU_CUR_ACTIVITY(r4) 3638 std r3, VCPU_CUR_ACTIVITY(r4)
3632 mftb r7 3639 mftb r7
3633 subf r7, r8, r7 3640 subf r7, r8, r7 /* subtract current timebase offset */
3634 std r7, VCPU_ACTIVITY_START(r4) 3641 std r7, VCPU_ACTIVITY_START(r4)
3635 cmpdi r5, 0 3642 cmpdi r5, 0
3636 beqlr 3643 beqlr
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index c7a5deadd1cc..99c3620b40d9 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -11,6 +11,9 @@
11#define XGLUE(a,b) a##b 11#define XGLUE(a,b) a##b
12#define GLUE(a,b) XGLUE(a,b) 12#define GLUE(a,b) XGLUE(a,b)
13 13
14/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
15#define XICS_DUMMY 1
16
14static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) 17static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
15{ 18{
16 u8 cppr; 19 u8 cppr;
@@ -205,6 +208,10 @@ skip_ipi:
205 goto skip_ipi; 208 goto skip_ipi;
206 } 209 }
207 210
211 /* If it's the dummy interrupt, continue searching */
212 if (hirq == XICS_DUMMY)
213 goto skip_ipi;
214
208 /* If fetching, update queue pointers */ 215 /* If fetching, update queue pointers */
209 if (scan_type == scan_fetch) { 216 if (scan_type == scan_fetch) {
210 q->idx = idx; 217 q->idx = idx;
@@ -385,9 +392,76 @@ static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
385 __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); 392 __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
386} 393}
387 394
395static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
396 struct kvmppc_xive_vcpu *xc)
397{
398 unsigned int prio;
399
400 /* For each priority that is now masked */
401 for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
402 struct xive_q *q = &xc->queues[prio];
403 struct kvmppc_xive_irq_state *state;
404 struct kvmppc_xive_src_block *sb;
405 u32 idx, toggle, entry, irq, hw_num;
406 struct xive_irq_data *xd;
407 __be32 *qpage;
408 u16 src;
409
410 idx = q->idx;
411 toggle = q->toggle;
412 qpage = READ_ONCE(q->qpage);
413 if (!qpage)
414 continue;
415
416 /* For each interrupt in the queue */
417 for (;;) {
418 entry = be32_to_cpup(qpage + idx);
419
420 /* No more ? */
421 if ((entry >> 31) == toggle)
422 break;
423 irq = entry & 0x7fffffff;
424
425 /* Skip dummies and IPIs */
426 if (irq == XICS_DUMMY || irq == XICS_IPI)
427 goto next;
428 sb = kvmppc_xive_find_source(xive, irq, &src);
429 if (!sb)
430 goto next;
431 state = &sb->irq_state[src];
432
433 /* Has it been rerouted ? */
434 if (xc->server_num == state->act_server)
435 goto next;
436
437 /*
438 * Allright, it *has* been re-routed, kill it from
439 * the queue.
440 */
441 qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
442
443 /* Find the HW interrupt */
444 kvmppc_xive_select_irq(state, &hw_num, &xd);
445
446 /* If it's not an LSI, set PQ to 11 the EOI will force a resend */
447 if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
448 GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);
449
450 /* EOI the source */
451 GLUE(X_PFX,source_eoi)(hw_num, xd);
452
453 next:
454 idx = (idx + 1) & q->msk;
455 if (idx == 0)
456 toggle ^= 1;
457 }
458 }
459}
460
388X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) 461X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
389{ 462{
390 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 463 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
464 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
391 u8 old_cppr; 465 u8 old_cppr;
392 466
393 pr_devel("H_CPPR(cppr=%ld)\n", cppr); 467 pr_devel("H_CPPR(cppr=%ld)\n", cppr);
@@ -407,14 +481,34 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
407 */ 481 */
408 smp_mb(); 482 smp_mb();
409 483
410 /* 484 if (cppr > old_cppr) {
411 * We are masking less, we need to look for pending things 485 /*
412 * to deliver and set VP pending bits accordingly to trigger 486 * We are masking less, we need to look for pending things
413 * a new interrupt otherwise we might miss MFRR changes for 487 * to deliver and set VP pending bits accordingly to trigger
414 * which we have optimized out sending an IPI signal. 488 * a new interrupt otherwise we might miss MFRR changes for
415 */ 489 * which we have optimized out sending an IPI signal.
416 if (cppr > old_cppr) 490 */
417 GLUE(X_PFX,push_pending_to_hw)(xc); 491 GLUE(X_PFX,push_pending_to_hw)(xc);
492 } else {
493 /*
494 * We are masking more, we need to check the queue for any
495 * interrupt that has been routed to another CPU, take
496 * it out (replace it with the dummy) and retrigger it.
497 *
498 * This is necessary since those interrupts may otherwise
499 * never be processed, at least not until this CPU restores
500 * its CPPR.
501 *
502 * This is in theory racy vs. HW adding new interrupts to
503 * the queue. In practice this works because the interesting
504 * cases are when the guest has done a set_xive() to move the
505 * interrupt away, which flushes the xive, followed by the
506 * target CPU doing a H_CPPR. So any new interrupt coming into
507 * the queue must still be routed to us and isn't a source
508 * of concern.
509 */
510 GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
511 }
418 512
419 /* Apply new CPPR */ 513 /* Apply new CPPR */
420 xc->hw_cppr = cppr; 514 xc->hw_cppr = cppr;
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 288fe4f0db4e..e1bcdc32a851 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -23,6 +23,7 @@
23#include <asm/page.h> 23#include <asm/page.h>
24#include <asm/sections.h> 24#include <asm/sections.h>
25#include <asm/setup.h> 25#include <asm/setup.h>
26#include <asm/security_features.h>
26#include <asm/firmware.h> 27#include <asm/firmware.h>
27 28
28struct fixup_entry { 29struct fixup_entry {
@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
117} 118}
118 119
119#ifdef CONFIG_PPC_BOOK3S_64 120#ifdef CONFIG_PPC_BOOK3S_64
121void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
122{
123 unsigned int instrs[3], *dest;
124 long *start, *end;
125 int i;
126
127 start = PTRRELOC(&__start___stf_entry_barrier_fixup),
128 end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
129
130 instrs[0] = 0x60000000; /* nop */
131 instrs[1] = 0x60000000; /* nop */
132 instrs[2] = 0x60000000; /* nop */
133
134 i = 0;
135 if (types & STF_BARRIER_FALLBACK) {
136 instrs[i++] = 0x7d4802a6; /* mflr r10 */
137 instrs[i++] = 0x60000000; /* branch patched below */
138 instrs[i++] = 0x7d4803a6; /* mtlr r10 */
139 } else if (types & STF_BARRIER_EIEIO) {
140 instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
141 } else if (types & STF_BARRIER_SYNC_ORI) {
142 instrs[i++] = 0x7c0004ac; /* hwsync */
143 instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
144 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
145 }
146
147 for (i = 0; start < end; start++, i++) {
148 dest = (void *)start + *start;
149
150 pr_devel("patching dest %lx\n", (unsigned long)dest);
151
152 patch_instruction(dest, instrs[0]);
153
154 if (types & STF_BARRIER_FALLBACK)
155 patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
156 BRANCH_SET_LINK);
157 else
158 patch_instruction(dest + 1, instrs[1]);
159
160 patch_instruction(dest + 2, instrs[2]);
161 }
162
163 printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
164 (types == STF_BARRIER_NONE) ? "no" :
165 (types == STF_BARRIER_FALLBACK) ? "fallback" :
166 (types == STF_BARRIER_EIEIO) ? "eieio" :
167 (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
168 : "unknown");
169}
170
171void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
172{
173 unsigned int instrs[6], *dest;
174 long *start, *end;
175 int i;
176
177 start = PTRRELOC(&__start___stf_exit_barrier_fixup),
178 end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
179
180 instrs[0] = 0x60000000; /* nop */
181 instrs[1] = 0x60000000; /* nop */
182 instrs[2] = 0x60000000; /* nop */
183 instrs[3] = 0x60000000; /* nop */
184 instrs[4] = 0x60000000; /* nop */
185 instrs[5] = 0x60000000; /* nop */
186
187 i = 0;
188 if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
189 if (cpu_has_feature(CPU_FTR_HVMODE)) {
190 instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
191 instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
192 } else {
193 instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
194 instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
195 }
196 instrs[i++] = 0x7c0004ac; /* hwsync */
197 instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
198 instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
199 if (cpu_has_feature(CPU_FTR_HVMODE)) {
200 instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
201 } else {
202 instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
203 }
204 } else if (types & STF_BARRIER_EIEIO) {
205 instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
206 }
207
208 for (i = 0; start < end; start++, i++) {
209 dest = (void *)start + *start;
210
211 pr_devel("patching dest %lx\n", (unsigned long)dest);
212
213 patch_instruction(dest, instrs[0]);
214 patch_instruction(dest + 1, instrs[1]);
215 patch_instruction(dest + 2, instrs[2]);
216 patch_instruction(dest + 3, instrs[3]);
217 patch_instruction(dest + 4, instrs[4]);
218 patch_instruction(dest + 5, instrs[5]);
219 }
220 printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
221 (types == STF_BARRIER_NONE) ? "no" :
222 (types == STF_BARRIER_FALLBACK) ? "fallback" :
223 (types == STF_BARRIER_EIEIO) ? "eieio" :
224 (types == (STF_BARRIER_SYNC_ORI)) ? "hwsync"
225 : "unknown");
226}
227
228
229void do_stf_barrier_fixups(enum stf_barrier_type types)
230{
231 do_stf_entry_barrier_fixups(types);
232 do_stf_exit_barrier_fixups(types);
233}
234
120void do_rfi_flush_fixups(enum l1d_flush_type types) 235void do_rfi_flush_fixups(enum l1d_flush_type types)
121{ 236{
122 unsigned int instrs[3], *dest; 237 unsigned int instrs[3], *dest;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index ef8c9ce53a61..a6648ec99ca7 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -131,6 +131,7 @@ static void __init pnv_setup_arch(void)
131 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 131 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
132 132
133 pnv_setup_rfi_flush(); 133 pnv_setup_rfi_flush();
134 setup_stf_barrier();
134 135
135 /* Initialize SMP */ 136 /* Initialize SMP */
136 pnv_smp_init(); 137 pnv_smp_init();
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index b55ad4286dc7..fdb32e056ef4 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -710,6 +710,7 @@ static void __init pSeries_setup_arch(void)
710 fwnmi_init(); 710 fwnmi_init();
711 711
712 pseries_setup_rfi_flush(); 712 pseries_setup_rfi_flush();
713 setup_stf_barrier();
713 714
714 /* By default, only probe PCI (can be overridden by rtas_pci) */ 715 /* By default, only probe PCI (can be overridden by rtas_pci) */
715 pci_add_flags(PCI_PROBE_ONLY); 716 pci_add_flags(PCI_PROBE_ONLY);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 8961e3970901..969882b54266 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
578 578
579 gpa = READ_ONCE(scb_o->itdba) & ~0xffUL; 579 gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
580 if (gpa && (scb_s->ecb & ECB_TE)) { 580 if (gpa && (scb_s->ecb & ECB_TE)) {
581 if (!(gpa & ~0x1fffU)) { 581 if (!(gpa & ~0x1fffUL)) {
582 rc = set_validity_icpt(scb_s, 0x0080U); 582 rc = set_validity_icpt(scb_s, 0x0080U);
583 goto unpin; 583 goto unpin;
584 } 584 }
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index e9525bc1b4a6..1ace023cbdce 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -21,7 +21,7 @@ LDFLAGS_purgatory.ro += -z nodefaultlib
21KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes 21KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
22KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare 22KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
23KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding 23KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
24KBUILD_CFLAGS += -c -MD -Os -m64 24KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float
25KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 25KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
26 26
27$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE 27$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 578793e97431..fb00a2fca990 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -198,7 +198,6 @@
198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ 198#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ 199#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
200#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ 200#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
201
202#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ 201#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
203#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ 202#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
204#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ 203#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
@@ -207,13 +206,19 @@
207#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ 206#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
208#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ 207#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
209#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ 208#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
210 209#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
210#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
211#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ 211#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
212#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */ 212#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
213#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */ 213#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
214
215#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ 214#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
216#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ 215#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
216#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
217#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
218#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
219#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
220#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
221#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
217 222
218/* Virtualization flags: Linux defined, word 8 */ 223/* Virtualization flags: Linux defined, word 8 */
219#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ 224#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -274,9 +279,10 @@
274#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ 279#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
275#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 280#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
276#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ 281#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
277#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ 282#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
278#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */ 283#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
279#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */ 284#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
285#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
280 286
281/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ 287/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
282#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ 288#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -334,6 +340,7 @@
334#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ 340#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
335#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ 341#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
336#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */ 342#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
343#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
337 344
338/* 345/*
339 * BUG word(s) 346 * BUG word(s)
@@ -363,5 +370,6 @@
363#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ 370#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
364#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ 371#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
365#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ 372#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
373#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
366 374
367#endif /* _ASM_X86_CPUFEATURES_H */ 375#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c25775fad4ed..f4b2588865e9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -924,7 +924,7 @@ struct kvm_x86_ops {
924 int (*hardware_setup)(void); /* __init */ 924 int (*hardware_setup)(void); /* __init */
925 void (*hardware_unsetup)(void); /* __exit */ 925 void (*hardware_unsetup)(void); /* __exit */
926 bool (*cpu_has_accelerated_tpr)(void); 926 bool (*cpu_has_accelerated_tpr)(void);
927 bool (*cpu_has_high_real_mode_segbase)(void); 927 bool (*has_emulated_msr)(int index);
928 void (*cpuid_update)(struct kvm_vcpu *vcpu); 928 void (*cpuid_update)(struct kvm_vcpu *vcpu);
929 929
930 struct kvm *(*vm_alloc)(void); 930 struct kvm *(*vm_alloc)(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 53d5b1b9255e..fda2114197b3 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -42,6 +42,8 @@
42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ 42#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ 43#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
44#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ 44#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
45#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
46#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
45 47
46#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ 48#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
47#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ 49#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
@@ -68,6 +70,11 @@
68#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a 70#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
69#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ 71#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
70#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ 72#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
73#define ARCH_CAP_SSB_NO (1 << 4) /*
74 * Not susceptible to Speculative Store Bypass
75 * attack, so no Speculative Store Bypass
76 * control required.
77 */
71 78
72#define MSR_IA32_BBL_CR_CTL 0x00000119 79#define MSR_IA32_BBL_CR_CTL 0x00000119
73#define MSR_IA32_BBL_CR_CTL3 0x0000011e 80#define MSR_IA32_BBL_CR_CTL3 0x0000011e
@@ -340,6 +347,8 @@
340#define MSR_AMD64_SEV_ENABLED_BIT 0 347#define MSR_AMD64_SEV_ENABLED_BIT 0
341#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) 348#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
342 349
350#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
351
343/* Fam 17h MSRs */ 352/* Fam 17h MSRs */
344#define MSR_F17H_IRPERF 0xc00000e9 353#define MSR_F17H_IRPERF 0xc00000e9
345 354
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index f928ad9b143f..8b38df98548e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -217,6 +217,14 @@ enum spectre_v2_mitigation {
217 SPECTRE_V2_IBRS, 217 SPECTRE_V2_IBRS,
218}; 218};
219 219
220/* The Speculative Store Bypass disable variants */
221enum ssb_mitigation {
222 SPEC_STORE_BYPASS_NONE,
223 SPEC_STORE_BYPASS_DISABLE,
224 SPEC_STORE_BYPASS_PRCTL,
225 SPEC_STORE_BYPASS_SECCOMP,
226};
227
220extern char __indirect_thunk_start[]; 228extern char __indirect_thunk_start[];
221extern char __indirect_thunk_end[]; 229extern char __indirect_thunk_end[];
222 230
@@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void)
241#endif 249#endif
242} 250}
243 251
244#define alternative_msr_write(_msr, _val, _feature) \ 252static __always_inline
245 asm volatile(ALTERNATIVE("", \ 253void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
246 "movl %[msr], %%ecx\n\t" \ 254{
247 "movl %[val], %%eax\n\t" \ 255 asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
248 "movl $0, %%edx\n\t" \ 256 : : "c" (msr),
249 "wrmsr", \ 257 "a" ((u32)val),
250 _feature) \ 258 "d" ((u32)(val >> 32)),
251 : : [msr] "i" (_msr), [val] "i" (_val) \ 259 [feature] "i" (feature)
252 : "eax", "ecx", "edx", "memory") 260 : "memory");
261}
253 262
254static inline void indirect_branch_prediction_barrier(void) 263static inline void indirect_branch_prediction_barrier(void)
255{ 264{
256 alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 265 u64 val = PRED_CMD_IBPB;
257 X86_FEATURE_USE_IBPB); 266
267 alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
258} 268}
259 269
270/* The Intel SPEC CTRL MSR base value cache */
271extern u64 x86_spec_ctrl_base;
272
260/* 273/*
261 * With retpoline, we must use IBRS to restrict branch prediction 274 * With retpoline, we must use IBRS to restrict branch prediction
262 * before calling into firmware. 275 * before calling into firmware.
@@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
265 */ 278 */
266#define firmware_restrict_branch_speculation_start() \ 279#define firmware_restrict_branch_speculation_start() \
267do { \ 280do { \
281 u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
282 \
268 preempt_disable(); \ 283 preempt_disable(); \
269 alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, \ 284 alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
270 X86_FEATURE_USE_IBRS_FW); \ 285 X86_FEATURE_USE_IBRS_FW); \
271} while (0) 286} while (0)
272 287
273#define firmware_restrict_branch_speculation_end() \ 288#define firmware_restrict_branch_speculation_end() \
274do { \ 289do { \
275 alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, \ 290 u64 val = x86_spec_ctrl_base; \
291 \
292 alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
276 X86_FEATURE_USE_IBRS_FW); \ 293 X86_FEATURE_USE_IBRS_FW); \
277 preempt_enable(); \ 294 preempt_enable(); \
278} while (0) 295} while (0)
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
new file mode 100644
index 000000000000..ae7c2c5cd7f0
--- /dev/null
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -0,0 +1,80 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_SPECCTRL_H_
3#define _ASM_X86_SPECCTRL_H_
4
5#include <linux/thread_info.h>
6#include <asm/nospec-branch.h>
7
8/*
9 * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
10 * the guest has, while on VMEXIT we restore the host view. This
11 * would be easier if SPEC_CTRL were architecturally maskable or
12 * shadowable for guests but this is not (currently) the case.
13 * Takes the guest view of SPEC_CTRL MSR as a parameter and also
14 * the guest's version of VIRT_SPEC_CTRL, if emulated.
15 */
16extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
17
18/**
19 * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
20 * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
21 * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
22 * (may get translated to MSR_AMD64_LS_CFG bits)
23 *
24 * Avoids writing to the MSR if the content/bits are the same
25 */
26static inline
27void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
28{
29 x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
30}
31
32/**
33 * x86_spec_ctrl_restore_host - Restore host speculation control registers
34 * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
35 * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
36 * (may get translated to MSR_AMD64_LS_CFG bits)
37 *
38 * Avoids writing to the MSR if the content/bits are the same
39 */
40static inline
41void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
42{
43 x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
44}
45
46/* AMD specific Speculative Store Bypass MSR data */
47extern u64 x86_amd_ls_cfg_base;
48extern u64 x86_amd_ls_cfg_ssbd_mask;
49
50static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
51{
52 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
53 return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
54}
55
56static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
57{
58 BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
59 return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
60}
61
62static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
63{
64 return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
65}
66
67#ifdef CONFIG_SMP
68extern void speculative_store_bypass_ht_init(void);
69#else
70static inline void speculative_store_bypass_ht_init(void) { }
71#endif
72
73extern void speculative_store_bypass_update(unsigned long tif);
74
75static inline void speculative_store_bypass_update_current(void)
76{
77 speculative_store_bypass_update(current_thread_info()->flags);
78}
79
80#endif
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index a5d9521bb2cb..2ff2a30a264f 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,6 +79,7 @@ struct thread_info {
79#define TIF_SIGPENDING 2 /* signal pending */ 79#define TIF_SIGPENDING 2 /* signal pending */
80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 80#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ 81#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
82#define TIF_SSBD 5 /* Reduced data speculation */
82#define TIF_SYSCALL_EMU 6 /* syscall emulation active */ 83#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
83#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 84#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
84#define TIF_SECCOMP 8 /* secure computing */ 85#define TIF_SECCOMP 8 /* secure computing */
@@ -105,6 +106,7 @@ struct thread_info {
105#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 106#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
106#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 107#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
107#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) 108#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
109#define _TIF_SSBD (1 << TIF_SSBD)
108#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) 110#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
109#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 111#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
110#define _TIF_SECCOMP (1 << TIF_SECCOMP) 112#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -144,7 +146,7 @@ struct thread_info {
144 146
145/* flags to check in __switch_to() */ 147/* flags to check in __switch_to() */
146#define _TIF_WORK_CTXSW \ 148#define _TIF_WORK_CTXSW \
147 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP) 149 (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
148 150
149#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) 151#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
150#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) 152#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 12bc0a1139da..1b18be3f35a8 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -10,6 +10,7 @@
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/apic.h> 11#include <asm/apic.h>
12#include <asm/cpu.h> 12#include <asm/cpu.h>
13#include <asm/spec-ctrl.h>
13#include <asm/smp.h> 14#include <asm/smp.h>
14#include <asm/pci-direct.h> 15#include <asm/pci-direct.h>
15#include <asm/delay.h> 16#include <asm/delay.h>
@@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
554 rdmsrl(MSR_FAM10H_NODE_ID, value); 555 rdmsrl(MSR_FAM10H_NODE_ID, value);
555 nodes_per_socket = ((value >> 3) & 7) + 1; 556 nodes_per_socket = ((value >> 3) & 7) + 1;
556 } 557 }
558
559 if (c->x86 >= 0x15 && c->x86 <= 0x17) {
560 unsigned int bit;
561
562 switch (c->x86) {
563 case 0x15: bit = 54; break;
564 case 0x16: bit = 33; break;
565 case 0x17: bit = 10; break;
566 default: return;
567 }
568 /*
569 * Try to cache the base value so further operations can
570 * avoid RMW. If that faults, do not enable SSBD.
571 */
572 if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
573 setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
574 setup_force_cpu_cap(X86_FEATURE_SSBD);
575 x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
576 }
577 }
557} 578}
558 579
559static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) 580static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
@@ -791,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
791 812
792static void init_amd_zn(struct cpuinfo_x86 *c) 813static void init_amd_zn(struct cpuinfo_x86 *c)
793{ 814{
815 set_cpu_cap(c, X86_FEATURE_ZEN);
794 /* 816 /*
795 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects 817 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
796 * all up to and including B1. 818 * all up to and including B1.
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bfca937bdcc3..7416fc206b4a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -12,8 +12,10 @@
12#include <linux/utsname.h> 12#include <linux/utsname.h>
13#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/nospec.h>
16#include <linux/prctl.h>
15 17
16#include <asm/nospec-branch.h> 18#include <asm/spec-ctrl.h>
17#include <asm/cmdline.h> 19#include <asm/cmdline.h>
18#include <asm/bugs.h> 20#include <asm/bugs.h>
19#include <asm/processor.h> 21#include <asm/processor.h>
@@ -27,6 +29,27 @@
27#include <asm/intel-family.h> 29#include <asm/intel-family.h>
28 30
29static void __init spectre_v2_select_mitigation(void); 31static void __init spectre_v2_select_mitigation(void);
32static void __init ssb_select_mitigation(void);
33
34/*
35 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
36 * writes to SPEC_CTRL contain whatever reserved bits have been set.
37 */
38u64 __ro_after_init x86_spec_ctrl_base;
39EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
40
41/*
42 * The vendor and possibly platform specific bits which can be modified in
43 * x86_spec_ctrl_base.
44 */
45static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
46
47/*
48 * AMD specific MSR info for Speculative Store Bypass control.
49 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
50 */
51u64 __ro_after_init x86_amd_ls_cfg_base;
52u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
30 53
31void __init check_bugs(void) 54void __init check_bugs(void)
32{ 55{
@@ -37,9 +60,27 @@ void __init check_bugs(void)
37 print_cpu_info(&boot_cpu_data); 60 print_cpu_info(&boot_cpu_data);
38 } 61 }
39 62
63 /*
64 * Read the SPEC_CTRL MSR to account for reserved bits which may
65 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
66 * init code as it is not enumerated and depends on the family.
67 */
68 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
69 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
70
71 /* Allow STIBP in MSR_SPEC_CTRL if supported */
72 if (boot_cpu_has(X86_FEATURE_STIBP))
73 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
74
40 /* Select the proper spectre mitigation before patching alternatives */ 75 /* Select the proper spectre mitigation before patching alternatives */
41 spectre_v2_select_mitigation(); 76 spectre_v2_select_mitigation();
42 77
78 /*
79 * Select proper mitigation for any exposure to the Speculative Store
80 * Bypass vulnerability.
81 */
82 ssb_select_mitigation();
83
43#ifdef CONFIG_X86_32 84#ifdef CONFIG_X86_32
44 /* 85 /*
45 * Check whether we are able to run this kernel safely on SMP. 86 * Check whether we are able to run this kernel safely on SMP.
@@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = {
93#undef pr_fmt 134#undef pr_fmt
94#define pr_fmt(fmt) "Spectre V2 : " fmt 135#define pr_fmt(fmt) "Spectre V2 : " fmt
95 136
96static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; 137static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
138 SPECTRE_V2_NONE;
139
140void
141x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
142{
143 u64 msrval, guestval, hostval = x86_spec_ctrl_base;
144 struct thread_info *ti = current_thread_info();
145
146 /* Is MSR_SPEC_CTRL implemented ? */
147 if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
148 /*
149 * Restrict guest_spec_ctrl to supported values. Clear the
150 * modifiable bits in the host base value and or the
151 * modifiable bits from the guest value.
152 */
153 guestval = hostval & ~x86_spec_ctrl_mask;
154 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
155
156 /* SSBD controlled in MSR_SPEC_CTRL */
157 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
158 hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
159
160 if (hostval != guestval) {
161 msrval = setguest ? guestval : hostval;
162 wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
163 }
164 }
165
166 /*
167 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
168 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
169 */
170 if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
171 !static_cpu_has(X86_FEATURE_VIRT_SSBD))
172 return;
173
174 /*
175 * If the host has SSBD mitigation enabled, force it in the host's
176 * virtual MSR value. If its not permanently enabled, evaluate
177 * current's TIF_SSBD thread flag.
178 */
179 if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
180 hostval = SPEC_CTRL_SSBD;
181 else
182 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
183
184 /* Sanitize the guest value */
185 guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
186
187 if (hostval != guestval) {
188 unsigned long tif;
189
190 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
191 ssbd_spec_ctrl_to_tif(hostval);
192
193 speculative_store_bypass_update(tif);
194 }
195}
196EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
197
198static void x86_amd_ssb_disable(void)
199{
200 u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
201
202 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
203 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
204 else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
205 wrmsrl(MSR_AMD64_LS_CFG, msrval);
206}
97 207
98#ifdef RETPOLINE 208#ifdef RETPOLINE
99static bool spectre_v2_bad_module; 209static bool spectre_v2_bad_module;
@@ -312,32 +422,289 @@ retpoline_auto:
312} 422}
313 423
314#undef pr_fmt 424#undef pr_fmt
425#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
426
427static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
428
429/* The kernel command line selection */
430enum ssb_mitigation_cmd {
431 SPEC_STORE_BYPASS_CMD_NONE,
432 SPEC_STORE_BYPASS_CMD_AUTO,
433 SPEC_STORE_BYPASS_CMD_ON,
434 SPEC_STORE_BYPASS_CMD_PRCTL,
435 SPEC_STORE_BYPASS_CMD_SECCOMP,
436};
437
438static const char *ssb_strings[] = {
439 [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
440 [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
441 [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
442 [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
443};
444
445static const struct {
446 const char *option;
447 enum ssb_mitigation_cmd cmd;
448} ssb_mitigation_options[] = {
449 { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
450 { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
451 { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
452 { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
453 { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
454};
455
456static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
457{
458 enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
459 char arg[20];
460 int ret, i;
461
462 if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
463 return SPEC_STORE_BYPASS_CMD_NONE;
464 } else {
465 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
466 arg, sizeof(arg));
467 if (ret < 0)
468 return SPEC_STORE_BYPASS_CMD_AUTO;
469
470 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
471 if (!match_option(arg, ret, ssb_mitigation_options[i].option))
472 continue;
473
474 cmd = ssb_mitigation_options[i].cmd;
475 break;
476 }
477
478 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
479 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
480 return SPEC_STORE_BYPASS_CMD_AUTO;
481 }
482 }
483
484 return cmd;
485}
486
487static enum ssb_mitigation __init __ssb_select_mitigation(void)
488{
489 enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
490 enum ssb_mitigation_cmd cmd;
491
492 if (!boot_cpu_has(X86_FEATURE_SSBD))
493 return mode;
494
495 cmd = ssb_parse_cmdline();
496 if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
497 (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
498 cmd == SPEC_STORE_BYPASS_CMD_AUTO))
499 return mode;
500
501 switch (cmd) {
502 case SPEC_STORE_BYPASS_CMD_AUTO:
503 case SPEC_STORE_BYPASS_CMD_SECCOMP:
504 /*
505 * Choose prctl+seccomp as the default mode if seccomp is
506 * enabled.
507 */
508 if (IS_ENABLED(CONFIG_SECCOMP))
509 mode = SPEC_STORE_BYPASS_SECCOMP;
510 else
511 mode = SPEC_STORE_BYPASS_PRCTL;
512 break;
513 case SPEC_STORE_BYPASS_CMD_ON:
514 mode = SPEC_STORE_BYPASS_DISABLE;
515 break;
516 case SPEC_STORE_BYPASS_CMD_PRCTL:
517 mode = SPEC_STORE_BYPASS_PRCTL;
518 break;
519 case SPEC_STORE_BYPASS_CMD_NONE:
520 break;
521 }
522
523 /*
524 * We have three CPU feature flags that are in play here:
525 * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
526 * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
527 * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
528 */
529 if (mode == SPEC_STORE_BYPASS_DISABLE) {
530 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
531 /*
532 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
533 * a completely different MSR and bit dependent on family.
534 */
535 switch (boot_cpu_data.x86_vendor) {
536 case X86_VENDOR_INTEL:
537 x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
538 x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
539 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
540 break;
541 case X86_VENDOR_AMD:
542 x86_amd_ssb_disable();
543 break;
544 }
545 }
546
547 return mode;
548}
549
550static void ssb_select_mitigation(void)
551{
552 ssb_mode = __ssb_select_mitigation();
553
554 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
555 pr_info("%s\n", ssb_strings[ssb_mode]);
556}
557
558#undef pr_fmt
559#define pr_fmt(fmt) "Speculation prctl: " fmt
560
561static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
562{
563 bool update;
564
565 if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
566 ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
567 return -ENXIO;
568
569 switch (ctrl) {
570 case PR_SPEC_ENABLE:
571 /* If speculation is force disabled, enable is not allowed */
572 if (task_spec_ssb_force_disable(task))
573 return -EPERM;
574 task_clear_spec_ssb_disable(task);
575 update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
576 break;
577 case PR_SPEC_DISABLE:
578 task_set_spec_ssb_disable(task);
579 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
580 break;
581 case PR_SPEC_FORCE_DISABLE:
582 task_set_spec_ssb_disable(task);
583 task_set_spec_ssb_force_disable(task);
584 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
585 break;
586 default:
587 return -ERANGE;
588 }
589
590 /*
591 * If being set on non-current task, delay setting the CPU
592 * mitigation until it is next scheduled.
593 */
594 if (task == current && update)
595 speculative_store_bypass_update_current();
596
597 return 0;
598}
599
600int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
601 unsigned long ctrl)
602{
603 switch (which) {
604 case PR_SPEC_STORE_BYPASS:
605 return ssb_prctl_set(task, ctrl);
606 default:
607 return -ENODEV;
608 }
609}
610
611#ifdef CONFIG_SECCOMP
612void arch_seccomp_spec_mitigate(struct task_struct *task)
613{
614 if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
615 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
616}
617#endif
618
619static int ssb_prctl_get(struct task_struct *task)
620{
621 switch (ssb_mode) {
622 case SPEC_STORE_BYPASS_DISABLE:
623 return PR_SPEC_DISABLE;
624 case SPEC_STORE_BYPASS_SECCOMP:
625 case SPEC_STORE_BYPASS_PRCTL:
626 if (task_spec_ssb_force_disable(task))
627 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
628 if (task_spec_ssb_disable(task))
629 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
630 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
631 default:
632 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
633 return PR_SPEC_ENABLE;
634 return PR_SPEC_NOT_AFFECTED;
635 }
636}
637
638int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
639{
640 switch (which) {
641 case PR_SPEC_STORE_BYPASS:
642 return ssb_prctl_get(task);
643 default:
644 return -ENODEV;
645 }
646}
647
648void x86_spec_ctrl_setup_ap(void)
649{
650 if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
651 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
652
653 if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
654 x86_amd_ssb_disable();
655}
315 656
316#ifdef CONFIG_SYSFS 657#ifdef CONFIG_SYSFS
317ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 658
659static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
660 char *buf, unsigned int bug)
318{ 661{
319 if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) 662 if (!boot_cpu_has_bug(bug))
320 return sprintf(buf, "Not affected\n"); 663 return sprintf(buf, "Not affected\n");
321 if (boot_cpu_has(X86_FEATURE_PTI)) 664
322 return sprintf(buf, "Mitigation: PTI\n"); 665 switch (bug) {
666 case X86_BUG_CPU_MELTDOWN:
667 if (boot_cpu_has(X86_FEATURE_PTI))
668 return sprintf(buf, "Mitigation: PTI\n");
669
670 break;
671
672 case X86_BUG_SPECTRE_V1:
673 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
674
675 case X86_BUG_SPECTRE_V2:
676 return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
677 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
678 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
679 spectre_v2_module_string());
680
681 case X86_BUG_SPEC_STORE_BYPASS:
682 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
683
684 default:
685 break;
686 }
687
323 return sprintf(buf, "Vulnerable\n"); 688 return sprintf(buf, "Vulnerable\n");
324} 689}
325 690
691ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
692{
693 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
694}
695
326ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 696ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
327{ 697{
328 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) 698 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
329 return sprintf(buf, "Not affected\n");
330 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
331} 699}
332 700
333ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 701ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
334{ 702{
335 if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) 703 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
336 return sprintf(buf, "Not affected\n"); 704}
337 705
338 return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], 706ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
339 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", 707{
340 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", 708 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
341 spectre_v2_module_string());
342} 709}
343#endif 710#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ce243f7d2d4e..38276f58d3bf 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -757,17 +757,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
757 * and they also have a different bit for STIBP support. Also, 757 * and they also have a different bit for STIBP support. Also,
758 * a hypervisor might have set the individual AMD bits even on 758 * a hypervisor might have set the individual AMD bits even on
759 * Intel CPUs, for finer-grained selection of what's available. 759 * Intel CPUs, for finer-grained selection of what's available.
760 *
761 * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
762 * features, which are visible in /proc/cpuinfo and used by the
763 * kernel. So set those accordingly from the Intel bits.
764 */ 760 */
765 if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { 761 if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
766 set_cpu_cap(c, X86_FEATURE_IBRS); 762 set_cpu_cap(c, X86_FEATURE_IBRS);
767 set_cpu_cap(c, X86_FEATURE_IBPB); 763 set_cpu_cap(c, X86_FEATURE_IBPB);
764 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
768 } 765 }
766
769 if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) 767 if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
770 set_cpu_cap(c, X86_FEATURE_STIBP); 768 set_cpu_cap(c, X86_FEATURE_STIBP);
769
770 if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
771 cpu_has(c, X86_FEATURE_VIRT_SSBD))
772 set_cpu_cap(c, X86_FEATURE_SSBD);
773
774 if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
775 set_cpu_cap(c, X86_FEATURE_IBRS);
776 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
777 }
778
779 if (cpu_has(c, X86_FEATURE_AMD_IBPB))
780 set_cpu_cap(c, X86_FEATURE_IBPB);
781
782 if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
783 set_cpu_cap(c, X86_FEATURE_STIBP);
784 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
785 }
771} 786}
772 787
773void get_cpu_cap(struct cpuinfo_x86 *c) 788void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -927,21 +942,47 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
927 {} 942 {}
928}; 943};
929 944
930static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c) 945/* Only list CPUs which speculate but are non susceptible to SSB */
946static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
947 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
948 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
949 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
950 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
951 { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
952 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
953 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
954 { X86_VENDOR_AMD, 0x12, },
955 { X86_VENDOR_AMD, 0x11, },
956 { X86_VENDOR_AMD, 0x10, },
957 { X86_VENDOR_AMD, 0xf, },
958 {}
959};
960
961static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
931{ 962{
932 u64 ia32_cap = 0; 963 u64 ia32_cap = 0;
933 964
934 if (x86_match_cpu(cpu_no_meltdown)) 965 if (x86_match_cpu(cpu_no_speculation))
935 return false; 966 return;
967
968 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
969 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
936 970
937 if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) 971 if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
938 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); 972 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
939 973
974 if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
975 !(ia32_cap & ARCH_CAP_SSB_NO))
976 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
977
978 if (x86_match_cpu(cpu_no_meltdown))
979 return;
980
940 /* Rogue Data Cache Load? No! */ 981 /* Rogue Data Cache Load? No! */
941 if (ia32_cap & ARCH_CAP_RDCL_NO) 982 if (ia32_cap & ARCH_CAP_RDCL_NO)
942 return false; 983 return;
943 984
944 return true; 985 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
945} 986}
946 987
947/* 988/*
@@ -992,12 +1033,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
992 1033
993 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 1034 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
994 1035
995 if (!x86_match_cpu(cpu_no_speculation)) { 1036 cpu_set_bug_bits(c);
996 if (cpu_vulnerable_to_meltdown(c))
997 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
998 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
999 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1000 }
1001 1037
1002 fpu__init_system(c); 1038 fpu__init_system(c);
1003 1039
@@ -1359,6 +1395,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
1359#endif 1395#endif
1360 mtrr_ap_init(); 1396 mtrr_ap_init();
1361 validate_apic_and_package_id(c); 1397 validate_apic_and_package_id(c);
1398 x86_spec_ctrl_setup_ap();
1362} 1399}
1363 1400
1364static __init int setup_noclflush(char *arg) 1401static __init int setup_noclflush(char *arg)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index e806b11a99af..37672d299e35 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
50 50
51unsigned int aperfmperf_get_khz(int cpu); 51unsigned int aperfmperf_get_khz(int cpu);
52 52
53extern void x86_spec_ctrl_setup_ap(void);
54
53#endif /* ARCH_X86_CPU_H */ 55#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 60d1897041da..577e7f7ae273 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
188 setup_clear_cpu_cap(X86_FEATURE_IBPB); 188 setup_clear_cpu_cap(X86_FEATURE_IBPB);
189 setup_clear_cpu_cap(X86_FEATURE_STIBP); 189 setup_clear_cpu_cap(X86_FEATURE_STIBP);
190 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); 190 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
191 setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
191 setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); 192 setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
193 setup_clear_cpu_cap(X86_FEATURE_SSBD);
194 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
192 } 195 }
193 196
194 /* 197 /*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 03408b942adb..30ca2d1a9231 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,6 +38,7 @@
38#include <asm/switch_to.h> 38#include <asm/switch_to.h>
39#include <asm/desc.h> 39#include <asm/desc.h>
40#include <asm/prctl.h> 40#include <asm/prctl.h>
41#include <asm/spec-ctrl.h>
41 42
42/* 43/*
43 * per-CPU TSS segments. Threads are completely 'soft' on Linux, 44 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -278,6 +279,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
278 } 279 }
279} 280}
280 281
282#ifdef CONFIG_SMP
283
284struct ssb_state {
285 struct ssb_state *shared_state;
286 raw_spinlock_t lock;
287 unsigned int disable_state;
288 unsigned long local_state;
289};
290
291#define LSTATE_SSB 0
292
293static DEFINE_PER_CPU(struct ssb_state, ssb_state);
294
295void speculative_store_bypass_ht_init(void)
296{
297 struct ssb_state *st = this_cpu_ptr(&ssb_state);
298 unsigned int this_cpu = smp_processor_id();
299 unsigned int cpu;
300
301 st->local_state = 0;
302
303 /*
304 * Shared state setup happens once on the first bringup
305 * of the CPU. It's not destroyed on CPU hotunplug.
306 */
307 if (st->shared_state)
308 return;
309
310 raw_spin_lock_init(&st->lock);
311
312 /*
313 * Go over HT siblings and check whether one of them has set up the
314 * shared state pointer already.
315 */
316 for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
317 if (cpu == this_cpu)
318 continue;
319
320 if (!per_cpu(ssb_state, cpu).shared_state)
321 continue;
322
323 /* Link it to the state of the sibling: */
324 st->shared_state = per_cpu(ssb_state, cpu).shared_state;
325 return;
326 }
327
328 /*
329 * First HT sibling to come up on the core. Link shared state of
330 * the first HT sibling to itself. The siblings on the same core
331 * which come up later will see the shared state pointer and link
332 * themself to the state of this CPU.
333 */
334 st->shared_state = st;
335}
336
337/*
338 * Logic is: First HT sibling enables SSBD for both siblings in the core
339 * and last sibling to disable it, disables it for the whole core. This how
340 * MSR_SPEC_CTRL works in "hardware":
341 *
342 * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
343 */
344static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
345{
346 struct ssb_state *st = this_cpu_ptr(&ssb_state);
347 u64 msr = x86_amd_ls_cfg_base;
348
349 if (!static_cpu_has(X86_FEATURE_ZEN)) {
350 msr |= ssbd_tif_to_amd_ls_cfg(tifn);
351 wrmsrl(MSR_AMD64_LS_CFG, msr);
352 return;
353 }
354
355 if (tifn & _TIF_SSBD) {
356 /*
357 * Since this can race with prctl(), block reentry on the
358 * same CPU.
359 */
360 if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
361 return;
362
363 msr |= x86_amd_ls_cfg_ssbd_mask;
364
365 raw_spin_lock(&st->shared_state->lock);
366 /* First sibling enables SSBD: */
367 if (!st->shared_state->disable_state)
368 wrmsrl(MSR_AMD64_LS_CFG, msr);
369 st->shared_state->disable_state++;
370 raw_spin_unlock(&st->shared_state->lock);
371 } else {
372 if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
373 return;
374
375 raw_spin_lock(&st->shared_state->lock);
376 st->shared_state->disable_state--;
377 if (!st->shared_state->disable_state)
378 wrmsrl(MSR_AMD64_LS_CFG, msr);
379 raw_spin_unlock(&st->shared_state->lock);
380 }
381}
382#else
383static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
384{
385 u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
386
387 wrmsrl(MSR_AMD64_LS_CFG, msr);
388}
389#endif
390
391static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
392{
393 /*
394 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
395 * so ssbd_tif_to_spec_ctrl() just works.
396 */
397 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
398}
399
400static __always_inline void intel_set_ssb_state(unsigned long tifn)
401{
402 u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
403
404 wrmsrl(MSR_IA32_SPEC_CTRL, msr);
405}
406
407static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
408{
409 if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
410 amd_set_ssb_virt_state(tifn);
411 else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
412 amd_set_core_ssb_state(tifn);
413 else
414 intel_set_ssb_state(tifn);
415}
416
417void speculative_store_bypass_update(unsigned long tif)
418{
419 preempt_disable();
420 __speculative_store_bypass_update(tif);
421 preempt_enable();
422}
423
281void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, 424void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
282 struct tss_struct *tss) 425 struct tss_struct *tss)
283{ 426{
@@ -309,6 +452,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
309 452
310 if ((tifp ^ tifn) & _TIF_NOCPUID) 453 if ((tifp ^ tifn) & _TIF_NOCPUID)
311 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); 454 set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
455
456 if ((tifp ^ tifn) & _TIF_SSBD)
457 __speculative_store_bypass_update(tifn);
312} 458}
313 459
314/* 460/*
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 0f1cbb042f49..9dd324ae4832 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -79,6 +79,7 @@
79#include <asm/qspinlock.h> 79#include <asm/qspinlock.h>
80#include <asm/intel-family.h> 80#include <asm/intel-family.h>
81#include <asm/cpu_device_id.h> 81#include <asm/cpu_device_id.h>
82#include <asm/spec-ctrl.h>
82 83
83/* Number of siblings per CPU package */ 84/* Number of siblings per CPU package */
84int smp_num_siblings = 1; 85int smp_num_siblings = 1;
@@ -244,6 +245,8 @@ static void notrace start_secondary(void *unused)
244 */ 245 */
245 check_tsc_sync_target(); 246 check_tsc_sync_target();
246 247
248 speculative_store_bypass_ht_init();
249
247 /* 250 /*
248 * Lock vector_lock, set CPU online and bring the vector 251 * Lock vector_lock, set CPU online and bring the vector
249 * allocator online. Online must be set with vector_lock held 252 * allocator online. Online must be set with vector_lock held
@@ -1292,6 +1295,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1292 set_mtrr_aps_delayed_init(); 1295 set_mtrr_aps_delayed_init();
1293 1296
1294 smp_quirk_init_udelay(); 1297 smp_quirk_init_udelay();
1298
1299 speculative_store_bypass_ht_init();
1295} 1300}
1296 1301
1297void arch_enable_nonboot_cpus_begin(void) 1302void arch_enable_nonboot_cpus_begin(void)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 82055b90a8b3..92bf2f2e7cdd 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -379,7 +379,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
379 379
380 /* cpuid 0x80000008.ebx */ 380 /* cpuid 0x80000008.ebx */
381 const u32 kvm_cpuid_8000_0008_ebx_x86_features = 381 const u32 kvm_cpuid_8000_0008_ebx_x86_features =
382 F(IBPB) | F(IBRS); 382 F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
383 383
384 /* cpuid 0xC0000001.edx */ 384 /* cpuid 0xC0000001.edx */
385 const u32 kvm_cpuid_C000_0001_edx_x86_features = 385 const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -408,7 +408,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
408 /* cpuid 7.0.edx*/ 408 /* cpuid 7.0.edx*/
409 const u32 kvm_cpuid_7_0_edx_x86_features = 409 const u32 kvm_cpuid_7_0_edx_x86_features =
410 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | 410 F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
411 F(ARCH_CAPABILITIES); 411 F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
412 412
413 /* all calls to cpuid_count() should be made on the same cpu */ 413 /* all calls to cpuid_count() should be made on the same cpu */
414 get_cpu(); 414 get_cpu();
@@ -495,6 +495,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
495 entry->ecx &= ~F(PKU); 495 entry->ecx &= ~F(PKU);
496 entry->edx &= kvm_cpuid_7_0_edx_x86_features; 496 entry->edx &= kvm_cpuid_7_0_edx_x86_features;
497 cpuid_mask(&entry->edx, CPUID_7_EDX); 497 cpuid_mask(&entry->edx, CPUID_7_EDX);
498 /*
499 * We emulate ARCH_CAPABILITIES in software even
500 * if the host doesn't support it.
501 */
502 entry->edx |= F(ARCH_CAPABILITIES);
498 } else { 503 } else {
499 entry->ebx = 0; 504 entry->ebx = 0;
500 entry->ecx = 0; 505 entry->ecx = 0;
@@ -647,13 +652,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
647 g_phys_as = phys_as; 652 g_phys_as = phys_as;
648 entry->eax = g_phys_as | (virt_as << 8); 653 entry->eax = g_phys_as | (virt_as << 8);
649 entry->edx = 0; 654 entry->edx = 0;
650 /* IBRS and IBPB aren't necessarily present in hardware cpuid */ 655 /*
651 if (boot_cpu_has(X86_FEATURE_IBPB)) 656 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
652 entry->ebx |= F(IBPB); 657 * hardware cpuid
653 if (boot_cpu_has(X86_FEATURE_IBRS)) 658 */
654 entry->ebx |= F(IBRS); 659 if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
660 entry->ebx |= F(AMD_IBPB);
661 if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
662 entry->ebx |= F(AMD_IBRS);
663 if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
664 entry->ebx |= F(VIRT_SSBD);
655 entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features; 665 entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
656 cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX); 666 cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
667 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
668 entry->ebx |= F(VIRT_SSBD);
657 break; 669 break;
658 } 670 }
659 case 0x80000019: 671 case 0x80000019:
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5708e951a5c6..46ff64da44ca 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1260,14 +1260,18 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
1260 } 1260 }
1261} 1261}
1262 1262
1263static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) 1263static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
1264{ 1264{
1265 struct kvm_run *run = vcpu->run; 1265 kvm_hv_hypercall_set_result(vcpu, result);
1266 1266 ++vcpu->stat.hypercalls;
1267 kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
1268 return kvm_skip_emulated_instruction(vcpu); 1267 return kvm_skip_emulated_instruction(vcpu);
1269} 1268}
1270 1269
1270static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
1271{
1272 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
1273}
1274
1271static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param) 1275static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
1272{ 1276{
1273 struct eventfd_ctx *eventfd; 1277 struct eventfd_ctx *eventfd;
@@ -1350,7 +1354,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1350 /* Hypercall continuation is not supported yet */ 1354 /* Hypercall continuation is not supported yet */
1351 if (rep_cnt || rep_idx) { 1355 if (rep_cnt || rep_idx) {
1352 ret = HV_STATUS_INVALID_HYPERCALL_CODE; 1356 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
1353 goto set_result; 1357 goto out;
1354 } 1358 }
1355 1359
1356 switch (code) { 1360 switch (code) {
@@ -1381,9 +1385,8 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1381 break; 1385 break;
1382 } 1386 }
1383 1387
1384set_result: 1388out:
1385 kvm_hv_hypercall_set_result(vcpu, ret); 1389 return kvm_hv_hypercall_complete(vcpu, ret);
1386 return 1;
1387} 1390}
1388 1391
1389void kvm_hv_init_vm(struct kvm *kvm) 1392void kvm_hv_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index b74c9c1405b9..3773c4625114 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1522,11 +1522,23 @@ static bool set_target_expiration(struct kvm_lapic *apic)
1522 1522
1523static void advance_periodic_target_expiration(struct kvm_lapic *apic) 1523static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1524{ 1524{
1525 apic->lapic_timer.tscdeadline += 1525 ktime_t now = ktime_get();
1526 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period); 1526 u64 tscl = rdtsc();
1527 ktime_t delta;
1528
1529 /*
1530 * Synchronize both deadlines to the same time source or
1531 * differences in the periods (caused by differences in the
1532 * underlying clocks or numerical approximation errors) will
1533 * cause the two to drift apart over time as the errors
1534 * accumulate.
1535 */
1527 apic->lapic_timer.target_expiration = 1536 apic->lapic_timer.target_expiration =
1528 ktime_add_ns(apic->lapic_timer.target_expiration, 1537 ktime_add_ns(apic->lapic_timer.target_expiration,
1529 apic->lapic_timer.period); 1538 apic->lapic_timer.period);
1539 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1540 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1541 nsec_to_cycles(apic->vcpu, delta);
1530} 1542}
1531 1543
1532static void start_sw_period(struct kvm_lapic *apic) 1544static void start_sw_period(struct kvm_lapic *apic)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1fc05e428aba..26110c202b19 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,7 +49,7 @@
49#include <asm/debugreg.h> 49#include <asm/debugreg.h>
50#include <asm/kvm_para.h> 50#include <asm/kvm_para.h>
51#include <asm/irq_remapping.h> 51#include <asm/irq_remapping.h>
52#include <asm/nospec-branch.h> 52#include <asm/spec-ctrl.h>
53 53
54#include <asm/virtext.h> 54#include <asm/virtext.h>
55#include "trace.h" 55#include "trace.h"
@@ -213,6 +213,12 @@ struct vcpu_svm {
213 } host; 213 } host;
214 214
215 u64 spec_ctrl; 215 u64 spec_ctrl;
216 /*
217 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
218 * translated into the appropriate L2_CFG bits on the host to
219 * perform speculative control.
220 */
221 u64 virt_spec_ctrl;
216 222
217 u32 *msrpm; 223 u32 *msrpm;
218 224
@@ -2060,6 +2066,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
2060 2066
2061 vcpu->arch.microcode_version = 0x01000065; 2067 vcpu->arch.microcode_version = 0x01000065;
2062 svm->spec_ctrl = 0; 2068 svm->spec_ctrl = 0;
2069 svm->virt_spec_ctrl = 0;
2063 2070
2064 if (!init_event) { 2071 if (!init_event) {
2065 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | 2072 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
@@ -4108,11 +4115,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4108 break; 4115 break;
4109 case MSR_IA32_SPEC_CTRL: 4116 case MSR_IA32_SPEC_CTRL:
4110 if (!msr_info->host_initiated && 4117 if (!msr_info->host_initiated &&
4111 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) 4118 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
4112 return 1; 4119 return 1;
4113 4120
4114 msr_info->data = svm->spec_ctrl; 4121 msr_info->data = svm->spec_ctrl;
4115 break; 4122 break;
4123 case MSR_AMD64_VIRT_SPEC_CTRL:
4124 if (!msr_info->host_initiated &&
4125 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4126 return 1;
4127
4128 msr_info->data = svm->virt_spec_ctrl;
4129 break;
4116 case MSR_F15H_IC_CFG: { 4130 case MSR_F15H_IC_CFG: {
4117 4131
4118 int family, model; 4132 int family, model;
@@ -4203,7 +4217,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4203 break; 4217 break;
4204 case MSR_IA32_SPEC_CTRL: 4218 case MSR_IA32_SPEC_CTRL:
4205 if (!msr->host_initiated && 4219 if (!msr->host_initiated &&
4206 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) 4220 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
4207 return 1; 4221 return 1;
4208 4222
4209 /* The STIBP bit doesn't fault even if it's not advertised */ 4223 /* The STIBP bit doesn't fault even if it's not advertised */
@@ -4230,7 +4244,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4230 break; 4244 break;
4231 case MSR_IA32_PRED_CMD: 4245 case MSR_IA32_PRED_CMD:
4232 if (!msr->host_initiated && 4246 if (!msr->host_initiated &&
4233 !guest_cpuid_has(vcpu, X86_FEATURE_IBPB)) 4247 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
4234 return 1; 4248 return 1;
4235 4249
4236 if (data & ~PRED_CMD_IBPB) 4250 if (data & ~PRED_CMD_IBPB)
@@ -4244,6 +4258,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4244 break; 4258 break;
4245 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); 4259 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
4246 break; 4260 break;
4261 case MSR_AMD64_VIRT_SPEC_CTRL:
4262 if (!msr->host_initiated &&
4263 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4264 return 1;
4265
4266 if (data & ~SPEC_CTRL_SSBD)
4267 return 1;
4268
4269 svm->virt_spec_ctrl = data;
4270 break;
4247 case MSR_STAR: 4271 case MSR_STAR:
4248 svm->vmcb->save.star = data; 4272 svm->vmcb->save.star = data;
4249 break; 4273 break;
@@ -5557,8 +5581,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5557 * is no need to worry about the conditional branch over the wrmsr 5581 * is no need to worry about the conditional branch over the wrmsr
5558 * being speculatively taken. 5582 * being speculatively taken.
5559 */ 5583 */
5560 if (svm->spec_ctrl) 5584 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
5561 native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
5562 5585
5563 asm volatile ( 5586 asm volatile (
5564 "push %%" _ASM_BP "; \n\t" 5587 "push %%" _ASM_BP "; \n\t"
@@ -5652,6 +5675,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5652#endif 5675#endif
5653 ); 5676 );
5654 5677
5678 /* Eliminate branch target predictions from guest mode */
5679 vmexit_fill_RSB();
5680
5681#ifdef CONFIG_X86_64
5682 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5683#else
5684 loadsegment(fs, svm->host.fs);
5685#ifndef CONFIG_X86_32_LAZY_GS
5686 loadsegment(gs, svm->host.gs);
5687#endif
5688#endif
5689
5655 /* 5690 /*
5656 * We do not use IBRS in the kernel. If this vCPU has used the 5691 * We do not use IBRS in the kernel. If this vCPU has used the
5657 * SPEC_CTRL MSR it may have left it on; save the value and 5692 * SPEC_CTRL MSR it may have left it on; save the value and
@@ -5670,20 +5705,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5670 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) 5705 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
5671 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 5706 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
5672 5707
5673 if (svm->spec_ctrl) 5708 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
5674 native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
5675
5676 /* Eliminate branch target predictions from guest mode */
5677 vmexit_fill_RSB();
5678
5679#ifdef CONFIG_X86_64
5680 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5681#else
5682 loadsegment(fs, svm->host.fs);
5683#ifndef CONFIG_X86_32_LAZY_GS
5684 loadsegment(gs, svm->host.gs);
5685#endif
5686#endif
5687 5709
5688 reload_tss(vcpu); 5710 reload_tss(vcpu);
5689 5711
@@ -5786,7 +5808,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
5786 return false; 5808 return false;
5787} 5809}
5788 5810
5789static bool svm_has_high_real_mode_segbase(void) 5811static bool svm_has_emulated_msr(int index)
5790{ 5812{
5791 return true; 5813 return true;
5792} 5814}
@@ -7012,7 +7034,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7012 .hardware_enable = svm_hardware_enable, 7034 .hardware_enable = svm_hardware_enable,
7013 .hardware_disable = svm_hardware_disable, 7035 .hardware_disable = svm_hardware_disable,
7014 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, 7036 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
7015 .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase, 7037 .has_emulated_msr = svm_has_emulated_msr,
7016 7038
7017 .vcpu_create = svm_create_vcpu, 7039 .vcpu_create = svm_create_vcpu,
7018 .vcpu_free = svm_free_vcpu, 7040 .vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3f1696570b41..40aa29204baf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -51,7 +51,7 @@
51#include <asm/apic.h> 51#include <asm/apic.h>
52#include <asm/irq_remapping.h> 52#include <asm/irq_remapping.h>
53#include <asm/mmu_context.h> 53#include <asm/mmu_context.h>
54#include <asm/nospec-branch.h> 54#include <asm/spec-ctrl.h>
55#include <asm/mshyperv.h> 55#include <asm/mshyperv.h>
56 56
57#include "trace.h" 57#include "trace.h"
@@ -3529,7 +3529,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3529 return kvm_get_msr_common(vcpu, msr_info); 3529 return kvm_get_msr_common(vcpu, msr_info);
3530 case MSR_IA32_SPEC_CTRL: 3530 case MSR_IA32_SPEC_CTRL:
3531 if (!msr_info->host_initiated && 3531 if (!msr_info->host_initiated &&
3532 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3533 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 3532 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3534 return 1; 3533 return 1;
3535 3534
@@ -3648,12 +3647,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3648 break; 3647 break;
3649 case MSR_IA32_SPEC_CTRL: 3648 case MSR_IA32_SPEC_CTRL:
3650 if (!msr_info->host_initiated && 3649 if (!msr_info->host_initiated &&
3651 !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3652 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 3650 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3653 return 1; 3651 return 1;
3654 3652
3655 /* The STIBP bit doesn't fault even if it's not advertised */ 3653 /* The STIBP bit doesn't fault even if it's not advertised */
3656 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP)) 3654 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
3657 return 1; 3655 return 1;
3658 3656
3659 vmx->spec_ctrl = data; 3657 vmx->spec_ctrl = data;
@@ -3679,7 +3677,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3679 break; 3677 break;
3680 case MSR_IA32_PRED_CMD: 3678 case MSR_IA32_PRED_CMD:
3681 if (!msr_info->host_initiated && 3679 if (!msr_info->host_initiated &&
3682 !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
3683 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)) 3680 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3684 return 1; 3681 return 1;
3685 3682
@@ -9488,9 +9485,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
9488} 9485}
9489STACK_FRAME_NON_STANDARD(vmx_handle_external_intr); 9486STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
9490 9487
9491static bool vmx_has_high_real_mode_segbase(void) 9488static bool vmx_has_emulated_msr(int index)
9492{ 9489{
9493 return enable_unrestricted_guest || emulate_invalid_guest_state; 9490 switch (index) {
9491 case MSR_IA32_SMBASE:
9492 /*
9493 * We cannot do SMM unless we can run the guest in big
9494 * real mode.
9495 */
9496 return enable_unrestricted_guest || emulate_invalid_guest_state;
9497 case MSR_AMD64_VIRT_SPEC_CTRL:
9498 /* This is AMD only. */
9499 return false;
9500 default:
9501 return true;
9502 }
9494} 9503}
9495 9504
9496static bool vmx_mpx_supported(void) 9505static bool vmx_mpx_supported(void)
@@ -9722,8 +9731,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9722 * is no need to worry about the conditional branch over the wrmsr 9731 * is no need to worry about the conditional branch over the wrmsr
9723 * being speculatively taken. 9732 * being speculatively taken.
9724 */ 9733 */
9725 if (vmx->spec_ctrl) 9734 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
9726 native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
9727 9735
9728 vmx->__launched = vmx->loaded_vmcs->launched; 9736 vmx->__launched = vmx->loaded_vmcs->launched;
9729 9737
@@ -9871,8 +9879,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
9871 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) 9879 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
9872 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); 9880 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
9873 9881
9874 if (vmx->spec_ctrl) 9882 x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
9875 native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
9876 9883
9877 /* Eliminate branch target predictions from guest mode */ 9884 /* Eliminate branch target predictions from guest mode */
9878 vmexit_fill_RSB(); 9885 vmexit_fill_RSB();
@@ -12632,7 +12639,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
12632 .hardware_enable = hardware_enable, 12639 .hardware_enable = hardware_enable,
12633 .hardware_disable = hardware_disable, 12640 .hardware_disable = hardware_disable,
12634 .cpu_has_accelerated_tpr = report_flexpriority, 12641 .cpu_has_accelerated_tpr = report_flexpriority,
12635 .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase, 12642 .has_emulated_msr = vmx_has_emulated_msr,
12636 12643
12637 .vm_init = vmx_vm_init, 12644 .vm_init = vmx_vm_init,
12638 .vm_alloc = vmx_vm_alloc, 12645 .vm_alloc = vmx_vm_alloc,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59371de5d722..71e7cda6d014 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1061,6 +1061,7 @@ static u32 emulated_msrs[] = {
1061 MSR_SMI_COUNT, 1061 MSR_SMI_COUNT,
1062 MSR_PLATFORM_INFO, 1062 MSR_PLATFORM_INFO,
1063 MSR_MISC_FEATURES_ENABLES, 1063 MSR_MISC_FEATURES_ENABLES,
1064 MSR_AMD64_VIRT_SPEC_CTRL,
1064}; 1065};
1065 1066
1066static unsigned num_emulated_msrs; 1067static unsigned num_emulated_msrs;
@@ -2906,7 +2907,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2906 * fringe case that is not enabled except via specific settings 2907 * fringe case that is not enabled except via specific settings
2907 * of the module parameters. 2908 * of the module parameters.
2908 */ 2909 */
2909 r = kvm_x86_ops->cpu_has_high_real_mode_segbase(); 2910 r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
2910 break; 2911 break;
2911 case KVM_CAP_VAPIC: 2912 case KVM_CAP_VAPIC:
2912 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); 2913 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
@@ -4606,14 +4607,8 @@ static void kvm_init_msr_list(void)
4606 num_msrs_to_save = j; 4607 num_msrs_to_save = j;
4607 4608
4608 for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) { 4609 for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
4609 switch (emulated_msrs[i]) { 4610 if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
4610 case MSR_IA32_SMBASE: 4611 continue;
4611 if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
4612 continue;
4613 break;
4614 default:
4615 break;
4616 }
4617 4612
4618 if (j < i) 4613 if (j < i)
4619 emulated_msrs[j] = emulated_msrs[i]; 4614 emulated_msrs[j] = emulated_msrs[i];
@@ -6676,11 +6671,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6676 unsigned long nr, a0, a1, a2, a3, ret; 6671 unsigned long nr, a0, a1, a2, a3, ret;
6677 int op_64_bit; 6672 int op_64_bit;
6678 6673
6679 if (kvm_hv_hypercall_enabled(vcpu->kvm)) { 6674 if (kvm_hv_hypercall_enabled(vcpu->kvm))
6680 if (!kvm_hv_hypercall(vcpu)) 6675 return kvm_hv_hypercall(vcpu);
6681 return 0;
6682 goto out;
6683 }
6684 6676
6685 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); 6677 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
6686 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); 6678 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@@ -6701,7 +6693,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6701 6693
6702 if (kvm_x86_ops->get_cpl(vcpu) != 0) { 6694 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
6703 ret = -KVM_EPERM; 6695 ret = -KVM_EPERM;
6704 goto out_error; 6696 goto out;
6705 } 6697 }
6706 6698
6707 switch (nr) { 6699 switch (nr) {
@@ -6721,12 +6713,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
6721 ret = -KVM_ENOSYS; 6713 ret = -KVM_ENOSYS;
6722 break; 6714 break;
6723 } 6715 }
6724out_error: 6716out:
6725 if (!op_64_bit) 6717 if (!op_64_bit)
6726 ret = (u32)ret; 6718 ret = (u32)ret;
6727 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 6719 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
6728 6720
6729out:
6730 ++vcpu->stat.hypercalls; 6721 ++vcpu->stat.hypercalls;
6731 return kvm_skip_emulated_instruction(vcpu); 6722 return kvm_skip_emulated_instruction(vcpu);
6732} 6723}
@@ -7985,6 +7976,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
7985{ 7976{
7986 struct msr_data apic_base_msr; 7977 struct msr_data apic_base_msr;
7987 int mmu_reset_needed = 0; 7978 int mmu_reset_needed = 0;
7979 int cpuid_update_needed = 0;
7988 int pending_vec, max_bits, idx; 7980 int pending_vec, max_bits, idx;
7989 struct desc_ptr dt; 7981 struct desc_ptr dt;
7990 int ret = -EINVAL; 7982 int ret = -EINVAL;
@@ -8023,8 +8015,10 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
8023 vcpu->arch.cr0 = sregs->cr0; 8015 vcpu->arch.cr0 = sregs->cr0;
8024 8016
8025 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; 8017 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
8018 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
8019 (X86_CR4_OSXSAVE | X86_CR4_PKE));
8026 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); 8020 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
8027 if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE)) 8021 if (cpuid_update_needed)
8028 kvm_update_cpuid(vcpu); 8022 kvm_update_cpuid(vcpu);
8029 8023
8030 idx = srcu_read_lock(&vcpu->kvm->srcu); 8024 idx = srcu_read_lock(&vcpu->kvm->srcu);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6389c88b3500..738fb22978dd 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
334 { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */ 334 { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
335 { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */ 335 { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
336 { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */ 336 { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
337 { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
337 { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */ 338 { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
338 { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */ 339 { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
339 { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */ 340 { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 68596bd4cf06..346b163f6e89 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4493 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4493 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4494 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4494 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
4495 4495
4496 /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
4497 SD7SN6S256G and SD8SN8U256G */
4498 { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
4499
4496 /* devices which puke on READ_NATIVE_MAX */ 4500 /* devices which puke on READ_NATIVE_MAX */
4497 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4501 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4498 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4502 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
@@ -4549,13 +4553,16 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4549 ATA_HORKAGE_ZERO_AFTER_TRIM | 4553 ATA_HORKAGE_ZERO_AFTER_TRIM |
4550 ATA_HORKAGE_NOLPM, }, 4554 ATA_HORKAGE_NOLPM, },
4551 4555
4552 /* This specific Samsung model/firmware-rev does not handle LPM well */ 4556 /* These specific Samsung models/firmware-revs do not handle LPM well */
4553 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, 4557 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4558 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4554 4559
4555 /* Sandisk devices which are known to not handle LPM well */ 4560 /* Sandisk devices which are known to not handle LPM well */
4556 { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, }, 4561 { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
4557 4562
4558 /* devices that don't properly handle queued TRIM commands */ 4563 /* devices that don't properly handle queued TRIM commands */
4564 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4565 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4559 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4566 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4560 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4567 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4561 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4568 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 9c9a22958717..a8d2eb0ceb8d 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
1151} 1151}
1152 1152
1153 1153
1154static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd, 1154static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
1155 int offset, int swap) 1155 int swap)
1156{ 1156{
1157 unsigned char buf[ZEPROM_SIZE]; 1157 unsigned char buf[ZEPROM_SIZE];
1158 struct zatm_dev *zatm_dev; 1158 struct zatm_dev *zatm_dev;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2da998baa75c..30cc9c877ebb 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -534,14 +534,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
534 return sprintf(buf, "Not affected\n"); 534 return sprintf(buf, "Not affected\n");
535} 535}
536 536
537ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
538 struct device_attribute *attr, char *buf)
539{
540 return sprintf(buf, "Not affected\n");
541}
542
537static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); 543static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
538static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); 544static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
539static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); 545static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
546static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
540 547
541static struct attribute *cpu_root_vulnerabilities_attrs[] = { 548static struct attribute *cpu_root_vulnerabilities_attrs[] = {
542 &dev_attr_meltdown.attr, 549 &dev_attr_meltdown.attr,
543 &dev_attr_spectre_v1.attr, 550 &dev_attr_spectre_v1.attr,
544 &dev_attr_spectre_v2.attr, 551 &dev_attr_spectre_v2.attr,
552 &dev_attr_spec_store_bypass.attr,
545 NULL 553 NULL
546}; 554};
547 555
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 7a3a580821e0..a5e821d09656 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -490,7 +490,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
490 return 0; 490 return 0;
491} 491}
492 492
493int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) 493int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages,
494 bool check_nid)
494{ 495{
495 unsigned long end_pfn = start_pfn + nr_pages; 496 unsigned long end_pfn = start_pfn + nr_pages;
496 unsigned long pfn; 497 unsigned long pfn;
@@ -514,7 +515,7 @@ int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
514 515
515 mem_blk = find_memory_block_hinted(mem_sect, mem_blk); 516 mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
516 517
517 ret = register_mem_sect_under_node(mem_blk, nid, true); 518 ret = register_mem_sect_under_node(mem_blk, nid, check_nid);
518 if (!err) 519 if (!err)
519 err = ret; 520 err = ret;
520 521
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 02a497e7c785..e5e067091572 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
1923 1923
1924 dev->power.wakeup_path = false; 1924 dev->power.wakeup_path = false;
1925 1925
1926 if (dev->power.no_pm_callbacks) { 1926 if (dev->power.no_pm_callbacks)
1927 ret = 1; /* Let device go direct_complete */
1928 goto unlock; 1927 goto unlock;
1929 }
1930 1928
1931 if (dev->pm_domain) 1929 if (dev->pm_domain)
1932 callback = dev->pm_domain->ops.prepare; 1930 callback = dev->pm_domain->ops.prepare;
@@ -1960,7 +1958,8 @@ unlock:
1960 */ 1958 */
1961 spin_lock_irq(&dev->power.lock); 1959 spin_lock_irq(&dev->power.lock);
1962 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && 1960 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1963 pm_runtime_suspended(dev) && ret > 0 && 1961 ((pm_runtime_suspended(dev) && ret > 0) ||
1962 dev->power.no_pm_callbacks) &&
1964 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP); 1963 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1965 spin_unlock_irq(&dev->power.lock); 1964 spin_unlock_irq(&dev->power.lock);
1966 return 0; 1965 return 0;
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index f040aba48d50..27e9686b6d3a 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
184{ 184{
185 int i; 185 int i;
186 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; 186 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
187 char interrupts[20]; 187 char interrupts[25];
188 char *ints = interrupts; 188 char *ints = interrupts;
189 189
190 for (i = 0; i < ARRAY_SIZE(irq_name); i++) 190 for (i = 0; i < ARRAY_SIZE(irq_name); i++)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5d4e31655d96..55cf554bc914 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1068,6 +1068,7 @@ static int loop_clr_fd(struct loop_device *lo)
1068 if (bdev) { 1068 if (bdev) {
1069 bdput(bdev); 1069 bdput(bdev);
1070 invalidate_bdev(bdev); 1070 invalidate_bdev(bdev);
1071 bdev->bd_inode->i_mapping->wb_err = 0;
1071 } 1072 }
1072 set_capacity(lo->lo_disk, 0); 1073 set_capacity(lo->lo_disk, 0);
1073 loop_sysfs_exit(lo); 1074 loop_sysfs_exit(lo);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index d4a81be0d7d2..b6be62025325 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -152,8 +152,8 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
152 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS; 152 EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
153 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL); 153 writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
154 154
155 memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0, 155 memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
156 EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32)); 156 EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
157 157
158 eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL, 158 eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
159 EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN); 159 EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index dfbd894d5bb7..4e24e591ae74 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -147,7 +147,7 @@ static u32 smc(u32 cmd_addr)
147 "smc #0 @ switch to secure world\n" 147 "smc #0 @ switch to secure world\n"
148 : "=r" (r0) 148 : "=r" (r0)
149 : "r" (r0), "r" (r1), "r" (r2) 149 : "r" (r0), "r" (r1), "r" (r2)
150 : "r3"); 150 : "r3", "r12");
151 } while (r0 == QCOM_SCM_INTERRUPTED); 151 } while (r0 == QCOM_SCM_INTERRUPTED);
152 152
153 return r0; 153 return r0;
@@ -263,7 +263,7 @@ static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
263 "smc #0 @ switch to secure world\n" 263 "smc #0 @ switch to secure world\n"
264 : "=r" (r0) 264 : "=r" (r0)
265 : "r" (r0), "r" (r1), "r" (r2) 265 : "r" (r0), "r" (r1), "r" (r2)
266 : "r3"); 266 : "r3", "r12");
267 return r0; 267 return r0;
268} 268}
269 269
@@ -298,7 +298,7 @@ static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
298 "smc #0 @ switch to secure world\n" 298 "smc #0 @ switch to secure world\n"
299 : "=r" (r0) 299 : "=r" (r0)
300 : "r" (r0), "r" (r1), "r" (r2), "r" (r3) 300 : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
301 ); 301 : "r12");
302 return r0; 302 return r0;
303} 303}
304 304
@@ -328,7 +328,7 @@ u32 qcom_scm_get_version(void)
328 "smc #0 @ switch to secure world\n" 328 "smc #0 @ switch to secure world\n"
329 : "=r" (r0), "=r" (r1) 329 : "=r" (r0), "=r" (r1)
330 : "r" (r0), "r" (r1) 330 : "r" (r0), "r" (r1)
331 : "r2", "r3"); 331 : "r2", "r3", "r12");
332 } while (r0 == QCOM_SCM_INTERRUPTED); 332 } while (r0 == QCOM_SCM_INTERRUPTED);
333 333
334 version = r1; 334 version = r1;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1dd1142246c2..27579443cdc5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4555,8 +4555,8 @@ static int dm_update_crtcs_state(struct dc *dc,
4555 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 4555 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4556 struct amdgpu_crtc *acrtc = NULL; 4556 struct amdgpu_crtc *acrtc = NULL;
4557 struct amdgpu_dm_connector *aconnector = NULL; 4557 struct amdgpu_dm_connector *aconnector = NULL;
4558 struct drm_connector_state *new_con_state = NULL; 4558 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
4559 struct dm_connector_state *dm_conn_state = NULL; 4559 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
4560 struct drm_plane_state *new_plane_state = NULL; 4560 struct drm_plane_state *new_plane_state = NULL;
4561 4561
4562 new_stream = NULL; 4562 new_stream = NULL;
@@ -4577,19 +4577,23 @@ static int dm_update_crtcs_state(struct dc *dc,
4577 /* TODO This hack should go away */ 4577 /* TODO This hack should go away */
4578 if (aconnector && enable) { 4578 if (aconnector && enable) {
4579 // Make sure fake sink is created in plug-in scenario 4579 // Make sure fake sink is created in plug-in scenario
4580 new_con_state = drm_atomic_get_connector_state(state, 4580 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
4581 &aconnector->base); 4581 &aconnector->base);
4582 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
4583 &aconnector->base);
4582 4584
4583 if (IS_ERR(new_con_state)) { 4585
4584 ret = PTR_ERR_OR_ZERO(new_con_state); 4586 if (IS_ERR(drm_new_conn_state)) {
4587 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
4585 break; 4588 break;
4586 } 4589 }
4587 4590
4588 dm_conn_state = to_dm_connector_state(new_con_state); 4591 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
4592 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
4589 4593
4590 new_stream = create_stream_for_sink(aconnector, 4594 new_stream = create_stream_for_sink(aconnector,
4591 &new_crtc_state->mode, 4595 &new_crtc_state->mode,
4592 dm_conn_state); 4596 dm_new_conn_state);
4593 4597
4594 /* 4598 /*
4595 * we can have no stream on ACTION_SET if a display 4599 * we can have no stream on ACTION_SET if a display
@@ -4695,20 +4699,30 @@ next_crtc:
4695 * We want to do dc stream updates that do not require a 4699 * We want to do dc stream updates that do not require a
4696 * full modeset below. 4700 * full modeset below.
4697 */ 4701 */
4698 if (!enable || !aconnector || modereset_required(new_crtc_state)) 4702 if (!(enable && aconnector && new_crtc_state->enable &&
4703 new_crtc_state->active))
4699 continue; 4704 continue;
4700 /* 4705 /*
4701 * Given above conditions, the dc state cannot be NULL because: 4706 * Given above conditions, the dc state cannot be NULL because:
4702 * 1. We're attempting to enable a CRTC. Which has a... 4707 * 1. We're in the process of enabling CRTCs (just been added
4703 * 2. Valid connector attached, and 4708 * to the dc context, or already is on the context)
4704 * 3. User does not want to reset it (disable or mark inactive, 4709 * 2. Has a valid connector attached, and
4705 * which can happen on a CRTC that's already disabled). 4710 * 3. Is currently active and enabled.
4706 * => It currently exists. 4711 * => The dc stream state currently exists.
4707 */ 4712 */
4708 BUG_ON(dm_new_crtc_state->stream == NULL); 4713 BUG_ON(dm_new_crtc_state->stream == NULL);
4709 4714
4710 /* Color managment settings */ 4715 /* Scaling or underscan settings */
4711 if (dm_new_crtc_state->base.color_mgmt_changed) { 4716 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
4717 update_stream_scaling_settings(
4718 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
4719
4720 /*
4721 * Color management settings. We also update color properties
4722 * when a modeset is needed, to ensure it gets reprogrammed.
4723 */
4724 if (dm_new_crtc_state->base.color_mgmt_changed ||
4725 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4712 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state); 4726 ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state);
4713 if (ret) 4727 if (ret)
4714 goto fail; 4728 goto fail;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index ec8d0006ef7c..3c136f2b954f 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2077,7 +2077,7 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id)
2077 return ret; 2077 return ret;
2078} 2078}
2079 2079
2080void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) 2080void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
2081{ 2081{
2082 mutex_lock(&hdmi->mutex); 2082 mutex_lock(&hdmi->mutex);
2083 2083
@@ -2103,13 +2103,6 @@ void __dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense)
2103 } 2103 }
2104 mutex_unlock(&hdmi->mutex); 2104 mutex_unlock(&hdmi->mutex);
2105} 2105}
2106
2107void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense)
2108{
2109 struct dw_hdmi *hdmi = dev_get_drvdata(dev);
2110
2111 __dw_hdmi_setup_rx_sense(hdmi, hpd, rx_sense);
2112}
2113EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense); 2106EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense);
2114 2107
2115static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) 2108static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
@@ -2145,9 +2138,9 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id)
2145 */ 2138 */
2146 if (intr_stat & 2139 if (intr_stat &
2147 (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) { 2140 (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) {
2148 __dw_hdmi_setup_rx_sense(hdmi, 2141 dw_hdmi_setup_rx_sense(hdmi,
2149 phy_stat & HDMI_PHY_HPD, 2142 phy_stat & HDMI_PHY_HPD,
2150 phy_stat & HDMI_PHY_RX_SENSE); 2143 phy_stat & HDMI_PHY_RX_SENSE);
2151 2144
2152 if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0) 2145 if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0)
2153 cec_notifier_set_phys_addr(hdmi->cec_notifier, 2146 cec_notifier_set_phys_addr(hdmi->cec_notifier,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ffe14ec3e7f2..70ae1f232331 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1145,6 +1145,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
1145 static const u16 psr_setup_time_us[] = { 1145 static const u16 psr_setup_time_us[] = {
1146 PSR_SETUP_TIME(330), 1146 PSR_SETUP_TIME(330),
1147 PSR_SETUP_TIME(275), 1147 PSR_SETUP_TIME(275),
1148 PSR_SETUP_TIME(220),
1148 PSR_SETUP_TIME(165), 1149 PSR_SETUP_TIME(165),
1149 PSR_SETUP_TIME(110), 1150 PSR_SETUP_TIME(110),
1150 PSR_SETUP_TIME(55), 1151 PSR_SETUP_TIME(55),
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3ace929dd90f..3f502eef2431 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -4,6 +4,8 @@
4 * Copyright © 2018 Intel Corporation 4 * Copyright © 2018 Intel Corporation
5 */ 5 */
6 6
7#include <linux/nospec.h>
8
7#include "i915_drv.h" 9#include "i915_drv.h"
8#include "i915_query.h" 10#include "i915_query.h"
9#include <uapi/drm/i915_drm.h> 11#include <uapi/drm/i915_drm.h>
@@ -100,7 +102,7 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
100 102
101 for (i = 0; i < args->num_items; i++, user_item_ptr++) { 103 for (i = 0; i < args->num_items; i++, user_item_ptr++) {
102 struct drm_i915_query_item item; 104 struct drm_i915_query_item item;
103 u64 func_idx; 105 unsigned long func_idx;
104 int ret; 106 int ret;
105 107
106 if (copy_from_user(&item, user_item_ptr, sizeof(item))) 108 if (copy_from_user(&item, user_item_ptr, sizeof(item)))
@@ -109,12 +111,17 @@ int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
109 if (item.query_id == 0) 111 if (item.query_id == 0)
110 return -EINVAL; 112 return -EINVAL;
111 113
114 if (overflows_type(item.query_id - 1, unsigned long))
115 return -EINVAL;
116
112 func_idx = item.query_id - 1; 117 func_idx = item.query_id - 1;
113 118
114 if (func_idx < ARRAY_SIZE(i915_query_funcs)) 119 ret = -EINVAL;
120 if (func_idx < ARRAY_SIZE(i915_query_funcs)) {
121 func_idx = array_index_nospec(func_idx,
122 ARRAY_SIZE(i915_query_funcs));
115 ret = i915_query_funcs[func_idx](dev_priv, &item); 123 ret = i915_query_funcs[func_idx](dev_priv, &item);
116 else 124 }
117 ret = -EINVAL;
118 125
119 /* Only write the length back to userspace if they differ. */ 126 /* Only write the length back to userspace if they differ. */
120 if (ret != item.length && put_user(ret, &user_item_ptr->length)) 127 if (ret != item.length && put_user(ret, &user_item_ptr->length))
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8691c86f579c..e125d16a1aa7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -574,6 +574,36 @@ exit:
574 return NOTIFY_OK; 574 return NOTIFY_OK;
575} 575}
576 576
577static int
578intel_lvds_connector_register(struct drm_connector *connector)
579{
580 struct intel_lvds_connector *lvds = to_lvds_connector(connector);
581 int ret;
582
583 ret = intel_connector_register(connector);
584 if (ret)
585 return ret;
586
587 lvds->lid_notifier.notifier_call = intel_lid_notify;
588 if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
589 DRM_DEBUG_KMS("lid notifier registration failed\n");
590 lvds->lid_notifier.notifier_call = NULL;
591 }
592
593 return 0;
594}
595
596static void
597intel_lvds_connector_unregister(struct drm_connector *connector)
598{
599 struct intel_lvds_connector *lvds = to_lvds_connector(connector);
600
601 if (lvds->lid_notifier.notifier_call)
602 acpi_lid_notifier_unregister(&lvds->lid_notifier);
603
604 intel_connector_unregister(connector);
605}
606
577/** 607/**
578 * intel_lvds_destroy - unregister and free LVDS structures 608 * intel_lvds_destroy - unregister and free LVDS structures
579 * @connector: connector to free 609 * @connector: connector to free
@@ -586,9 +616,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
586 struct intel_lvds_connector *lvds_connector = 616 struct intel_lvds_connector *lvds_connector =
587 to_lvds_connector(connector); 617 to_lvds_connector(connector);
588 618
589 if (lvds_connector->lid_notifier.notifier_call)
590 acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
591
592 if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) 619 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
593 kfree(lvds_connector->base.edid); 620 kfree(lvds_connector->base.edid);
594 621
@@ -609,8 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
609 .fill_modes = drm_helper_probe_single_connector_modes, 636 .fill_modes = drm_helper_probe_single_connector_modes,
610 .atomic_get_property = intel_digital_connector_atomic_get_property, 637 .atomic_get_property = intel_digital_connector_atomic_get_property,
611 .atomic_set_property = intel_digital_connector_atomic_set_property, 638 .atomic_set_property = intel_digital_connector_atomic_set_property,
612 .late_register = intel_connector_register, 639 .late_register = intel_lvds_connector_register,
613 .early_unregister = intel_connector_unregister, 640 .early_unregister = intel_lvds_connector_unregister,
614 .destroy = intel_lvds_destroy, 641 .destroy = intel_lvds_destroy,
615 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 642 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
616 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 643 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -827,6 +854,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
827 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"), 854 DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
828 }, 855 },
829 }, 856 },
857 {
858 .callback = intel_no_lvds_dmi_callback,
859 .ident = "Radiant P845",
860 .matches = {
861 DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
862 DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
863 },
864 },
830 865
831 { } /* terminating entry */ 866 { } /* terminating entry */
832}; 867};
@@ -1150,12 +1185,6 @@ out:
1150 1185
1151 lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; 1186 lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
1152 1187
1153 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1154 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
1155 DRM_DEBUG_KMS("lid notifier registration failed\n");
1156 lvds_connector->lid_notifier.notifier_call = NULL;
1157 }
1158
1159 return; 1188 return;
1160 1189
1161failed: 1190failed:
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index a393095aac1a..c9ad45686e7a 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -529,7 +529,7 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
529 if (stat & HDMITX_TOP_INTR_HPD_RISE) 529 if (stat & HDMITX_TOP_INTR_HPD_RISE)
530 hpd_connected = true; 530 hpd_connected = true;
531 531
532 dw_hdmi_setup_rx_sense(dw_hdmi->dev, hpd_connected, 532 dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
533 hpd_connected); 533 hpd_connected);
534 534
535 drm_helper_hpd_irq_event(dw_hdmi->encoder.dev); 535 drm_helper_hpd_irq_event(dw_hdmi->encoder.dev);
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 68a40ae26f5b..1e2c931f6acf 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -82,7 +82,7 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
82 struct dispc_clock_info *dispc_cinfo) 82 struct dispc_clock_info *dispc_cinfo)
83{ 83{
84 int i; 84 int i;
85 struct sdi_clk_calc_ctx ctx = { .sdi = sdi }; 85 struct sdi_clk_calc_ctx ctx;
86 86
87 /* 87 /*
88 * DSS fclk gives us very few possibilities, so finding a good pixel 88 * DSS fclk gives us very few possibilities, so finding a good pixel
@@ -95,6 +95,9 @@ static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
95 bool ok; 95 bool ok;
96 96
97 memset(&ctx, 0, sizeof(ctx)); 97 memset(&ctx, 0, sizeof(ctx));
98
99 ctx.sdi = sdi;
100
98 if (pclk > 1000 * i * i * i) 101 if (pclk > 1000 * i * i * i)
99 ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu); 102 ctx.pck_min = max(pclk - 1000 * i * i * i, 0lu);
100 else 103 else
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 3d2d3bbd1342..155ad840f3c5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -88,6 +88,9 @@ static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
88 const struct drm_display_mode *panel_mode; 88 const struct drm_display_mode *panel_mode;
89 struct drm_crtc_state *crtc_state; 89 struct drm_crtc_state *crtc_state;
90 90
91 if (!state->crtc)
92 return 0;
93
91 if (list_empty(&connector->modes)) { 94 if (list_empty(&connector->modes)) {
92 dev_dbg(lvds->dev, "connector: empty modes list\n"); 95 dev_dbg(lvds->dev, "connector: empty modes list\n");
93 return -EINVAL; 96 return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 70e1a8820a7c..8b770a8e02cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1278,8 +1278,6 @@ static void vmw_master_drop(struct drm_device *dev,
1278 dev_priv->active_master = &dev_priv->fbdev_master; 1278 dev_priv->active_master = &dev_priv->fbdev_master;
1279 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 1279 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
1280 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 1280 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
1281
1282 vmw_fb_refresh(dev_priv);
1283} 1281}
1284 1282
1285/** 1283/**
@@ -1483,7 +1481,6 @@ static int vmw_pm_freeze(struct device *kdev)
1483 vmw_kms_resume(dev); 1481 vmw_kms_resume(dev);
1484 if (dev_priv->enable_fb) 1482 if (dev_priv->enable_fb)
1485 vmw_fb_on(dev_priv); 1483 vmw_fb_on(dev_priv);
1486 vmw_fb_refresh(dev_priv);
1487 return -EBUSY; 1484 return -EBUSY;
1488 } 1485 }
1489 1486
@@ -1523,8 +1520,6 @@ static int vmw_pm_restore(struct device *kdev)
1523 if (dev_priv->enable_fb) 1520 if (dev_priv->enable_fb)
1524 vmw_fb_on(dev_priv); 1521 vmw_fb_on(dev_priv);
1525 1522
1526 vmw_fb_refresh(dev_priv);
1527
1528 return 0; 1523 return 0;
1529} 1524}
1530 1525
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index f34f368c1a2e..5fcbe1620d50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -910,7 +910,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
910int vmw_fb_close(struct vmw_private *dev_priv); 910int vmw_fb_close(struct vmw_private *dev_priv);
911int vmw_fb_off(struct vmw_private *vmw_priv); 911int vmw_fb_off(struct vmw_private *vmw_priv);
912int vmw_fb_on(struct vmw_private *vmw_priv); 912int vmw_fb_on(struct vmw_private *vmw_priv);
913void vmw_fb_refresh(struct vmw_private *vmw_priv);
914 913
915/** 914/**
916 * Kernel modesetting - vmwgfx_kms.c 915 * Kernel modesetting - vmwgfx_kms.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index ba0cdb743c3e..54e300365a5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -866,21 +866,13 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
866 spin_lock_irqsave(&par->dirty.lock, flags); 866 spin_lock_irqsave(&par->dirty.lock, flags);
867 par->dirty.active = true; 867 par->dirty.active = true;
868 spin_unlock_irqrestore(&par->dirty.lock, flags); 868 spin_unlock_irqrestore(&par->dirty.lock, flags);
869
870 return 0;
871}
872 869
873/** 870 /*
874 * vmw_fb_refresh - Refresh fb display 871 * Need to reschedule a dirty update, because otherwise that's
875 * 872 * only done in dirty_mark() if the previous coalesced
876 * @vmw_priv: Pointer to device private 873 * dirty region was empty.
877 * 874 */
878 * Call into kms to show the fbdev display(s). 875 schedule_delayed_work(&par->local_work, 0);
879 */
880void vmw_fb_refresh(struct vmw_private *vmw_priv)
881{
882 if (!vmw_priv->fb_info)
883 return;
884 876
885 vmw_fb_set_par(vmw_priv->fb_info); 877 return 0;
886} 878}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index cdff99211602..21d746bdc922 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -329,8 +329,6 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
329 struct rpc_channel channel; 329 struct rpc_channel channel;
330 char *msg, *reply = NULL; 330 char *msg, *reply = NULL;
331 size_t reply_len = 0; 331 size_t reply_len = 0;
332 int ret = 0;
333
334 332
335 if (!vmw_msg_enabled) 333 if (!vmw_msg_enabled)
336 return -ENODEV; 334 return -ENODEV;
@@ -344,15 +342,14 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
344 return -ENOMEM; 342 return -ENOMEM;
345 } 343 }
346 344
347 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || 345 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
348 vmw_send_msg(&channel, msg) || 346 goto out_open;
349 vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
350 vmw_close_channel(&channel)) {
351 DRM_ERROR("Failed to get %s", guest_info_param);
352 347
353 ret = -EINVAL; 348 if (vmw_send_msg(&channel, msg) ||
354 } 349 vmw_recv_msg(&channel, (void *) &reply, &reply_len))
350 goto out_msg;
355 351
352 vmw_close_channel(&channel);
356 if (buffer && reply && reply_len > 0) { 353 if (buffer && reply && reply_len > 0) {
357 /* Remove reply code, which are the first 2 characters of 354 /* Remove reply code, which are the first 2 characters of
358 * the reply 355 * the reply
@@ -369,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
369 kfree(reply); 366 kfree(reply);
370 kfree(msg); 367 kfree(msg);
371 368
372 return ret; 369 return 0;
370
371out_msg:
372 vmw_close_channel(&channel);
373 kfree(reply);
374out_open:
375 *length = 0;
376 kfree(msg);
377 DRM_ERROR("Failed to get %s", guest_info_param);
378
379 return -EINVAL;
373} 380}
374 381
375 382
@@ -400,15 +407,22 @@ int vmw_host_log(const char *log)
400 return -ENOMEM; 407 return -ENOMEM;
401 } 408 }
402 409
403 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) || 410 if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
404 vmw_send_msg(&channel, msg) || 411 goto out_open;
405 vmw_close_channel(&channel)) {
406 DRM_ERROR("Failed to send log\n");
407 412
408 ret = -EINVAL; 413 if (vmw_send_msg(&channel, msg))
409 } 414 goto out_msg;
410 415
416 vmw_close_channel(&channel);
411 kfree(msg); 417 kfree(msg);
412 418
413 return ret; 419 return 0;
420
421out_msg:
422 vmw_close_channel(&channel);
423out_open:
424 kfree(msg);
425 DRM_ERROR("Failed to send log\n");
426
427 return -EINVAL;
414} 428}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033fb610..8545488aa0cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
135 135
136#else 136#else
137 137
138/* In the 32-bit version of this macro, we use "m" because there is no 138/*
139 * more register left for bp 139 * In the 32-bit version of this macro, we store bp in a memory location
140 * because we've ran out of registers.
141 * Now we can't reference that memory location while we've modified
142 * %esp or %ebp, so we first push it on the stack, just before we push
143 * %ebp, and then when we need it we read it from the stack where we
144 * just pushed it.
140 */ 145 */
141#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \ 146#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
142 port_num, magic, bp, \ 147 port_num, magic, bp, \
143 eax, ebx, ecx, edx, si, di) \ 148 eax, ebx, ecx, edx, si, di) \
144({ \ 149({ \
145 asm volatile ("push %%ebp;" \ 150 asm volatile ("push %12;" \
146 "mov %12, %%ebp;" \ 151 "push %%ebp;" \
152 "mov 0x04(%%esp), %%ebp;" \
147 "rep outsb;" \ 153 "rep outsb;" \
148 "pop %%ebp;" : \ 154 "pop %%ebp;" \
155 "add $0x04, %%esp;" : \
149 "=a"(eax), \ 156 "=a"(eax), \
150 "=b"(ebx), \ 157 "=b"(ebx), \
151 "=c"(ecx), \ 158 "=c"(ecx), \
@@ -167,10 +174,12 @@
167 port_num, magic, bp, \ 174 port_num, magic, bp, \
168 eax, ebx, ecx, edx, si, di) \ 175 eax, ebx, ecx, edx, si, di) \
169({ \ 176({ \
170 asm volatile ("push %%ebp;" \ 177 asm volatile ("push %12;" \
171 "mov %12, %%ebp;" \ 178 "push %%ebp;" \
179 "mov 0x04(%%esp), %%ebp;" \
172 "rep insb;" \ 180 "rep insb;" \
173 "pop %%ebp" : \ 181 "pop %%ebp;" \
182 "add $0x04, %%esp;" : \
174 "=a"(eax), \ 183 "=a"(eax), \
175 "=b"(ebx), \ 184 "=b"(ebx), \
176 "=c"(ecx), \ 185 "=c"(ecx), \
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index ede388309376..634f58042c77 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -733,8 +733,8 @@ err_nomem:
733 /* Reset the page to write-back before releasing */ 733 /* Reset the page to write-back before releasing */
734 set_memory_wb((unsigned long)win->block[i].bdesc, 1); 734 set_memory_wb((unsigned long)win->block[i].bdesc, 1);
735#endif 735#endif
736 dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc, 736 dma_free_coherent(msc_dev(msc)->parent->parent, size,
737 win->block[i].addr); 737 win->block[i].bdesc, win->block[i].addr);
738 } 738 }
739 kfree(win); 739 kfree(win);
740 740
@@ -769,7 +769,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
769 /* Reset the page to write-back before releasing */ 769 /* Reset the page to write-back before releasing */
770 set_memory_wb((unsigned long)win->block[i].bdesc, 1); 770 set_memory_wb((unsigned long)win->block[i].bdesc, 1);
771#endif 771#endif
772 dma_free_coherent(msc_dev(win->msc), PAGE_SIZE, 772 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
773 win->block[i].bdesc, win->block[i].addr); 773 win->block[i].bdesc, win->block[i].addr);
774 } 774 }
775 775
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 05386b76465e..10bcb5d73f90 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -19,6 +19,7 @@
19#include <linux/stm.h> 19#include <linux/stm.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/vmalloc.h>
22#include "stm.h" 23#include "stm.h"
23 24
24#include <uapi/linux/stm.h> 25#include <uapi/linux/stm.h>
@@ -674,7 +675,7 @@ static void stm_device_release(struct device *dev)
674{ 675{
675 struct stm_device *stm = to_stm_device(dev); 676 struct stm_device *stm = to_stm_device(dev);
676 677
677 kfree(stm); 678 vfree(stm);
678} 679}
679 680
680int stm_register_device(struct device *parent, struct stm_data *stm_data, 681int stm_register_device(struct device *parent, struct stm_data *stm_data,
@@ -691,7 +692,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
691 return -EINVAL; 692 return -EINVAL;
692 693
693 nmasters = stm_data->sw_end - stm_data->sw_start + 1; 694 nmasters = stm_data->sw_end - stm_data->sw_start + 1;
694 stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL); 695 stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
695 if (!stm) 696 if (!stm)
696 return -ENOMEM; 697 return -ENOMEM;
697 698
@@ -744,7 +745,7 @@ err_device:
744 /* matches device_initialize() above */ 745 /* matches device_initialize() above */
745 put_device(&stm->dev); 746 put_device(&stm->dev);
746err_free: 747err_free:
747 kfree(stm); 748 vfree(stm);
748 749
749 return err; 750 return err;
750} 751}
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 8c42ca7107b2..45ae3c025bf6 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * i2c-ocores.c: I2C bus driver for OpenCores I2C controller 2 * i2c-ocores.c: I2C bus driver for OpenCores I2C controller
3 * (http://www.opencores.org/projects.cgi/web/i2c/overview). 3 * (https://opencores.org/project/i2c/overview)
4 * 4 *
5 * Peter Korsgaard <jacmet@sunsite.dk> 5 * Peter Korsgaard <jacmet@sunsite.dk>
6 * 6 *
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 15606f237480..9da79070357c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -158,6 +158,7 @@ config AT91_SAMA5D2_ADC
158 depends on ARCH_AT91 || COMPILE_TEST 158 depends on ARCH_AT91 || COMPILE_TEST
159 depends on HAS_IOMEM 159 depends on HAS_IOMEM
160 depends on HAS_DMA 160 depends on HAS_DMA
161 select IIO_BUFFER
161 select IIO_TRIGGERED_BUFFER 162 select IIO_TRIGGERED_BUFFER
162 help 163 help
163 Say yes here to build support for Atmel SAMA5D2 ADC which is 164 Say yes here to build support for Atmel SAMA5D2 ADC which is
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 801afb61310b..d4bbe5b53318 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
348static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0, 348static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
349 33, 0, 17, 16, 12, 10, 8, 6, 4}; 349 33, 0, 17, 16, 12, 10, 8, 6, 4};
350 350
351static ssize_t ad7793_read_frequency(struct device *dev,
352 struct device_attribute *attr,
353 char *buf)
354{
355 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
356 struct ad7793_state *st = iio_priv(indio_dev);
357
358 return sprintf(buf, "%d\n",
359 st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
360}
361
362static ssize_t ad7793_write_frequency(struct device *dev,
363 struct device_attribute *attr,
364 const char *buf,
365 size_t len)
366{
367 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
368 struct ad7793_state *st = iio_priv(indio_dev);
369 long lval;
370 int i, ret;
371
372 ret = kstrtol(buf, 10, &lval);
373 if (ret)
374 return ret;
375
376 if (lval == 0)
377 return -EINVAL;
378
379 for (i = 0; i < 16; i++)
380 if (lval == st->chip_info->sample_freq_avail[i])
381 break;
382 if (i == 16)
383 return -EINVAL;
384
385 ret = iio_device_claim_direct_mode(indio_dev);
386 if (ret)
387 return ret;
388 st->mode &= ~AD7793_MODE_RATE(-1);
389 st->mode |= AD7793_MODE_RATE(i);
390 ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
391 iio_device_release_direct_mode(indio_dev);
392
393 return len;
394}
395
396static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
397 ad7793_read_frequency,
398 ad7793_write_frequency);
399
400static IIO_CONST_ATTR_SAMP_FREQ_AVAIL( 351static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
401 "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4"); 352 "470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
402 353
@@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
424 ad7793_show_scale_available, NULL, 0); 375 ad7793_show_scale_available, NULL, 0);
425 376
426static struct attribute *ad7793_attributes[] = { 377static struct attribute *ad7793_attributes[] = {
427 &iio_dev_attr_sampling_frequency.dev_attr.attr,
428 &iio_const_attr_sampling_frequency_available.dev_attr.attr, 378 &iio_const_attr_sampling_frequency_available.dev_attr.attr,
429 &iio_dev_attr_in_m_in_scale_available.dev_attr.attr, 379 &iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
430 NULL 380 NULL
@@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
435}; 385};
436 386
437static struct attribute *ad7797_attributes[] = { 387static struct attribute *ad7797_attributes[] = {
438 &iio_dev_attr_sampling_frequency.dev_attr.attr,
439 &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr, 388 &iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
440 NULL 389 NULL
441}; 390};
@@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
505 *val -= offset; 454 *val -= offset;
506 } 455 }
507 return IIO_VAL_INT; 456 return IIO_VAL_INT;
457 case IIO_CHAN_INFO_SAMP_FREQ:
458 *val = st->chip_info
459 ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
460 return IIO_VAL_INT;
508 } 461 }
509 return -EINVAL; 462 return -EINVAL;
510} 463}
@@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
542 break; 495 break;
543 } 496 }
544 break; 497 break;
498 case IIO_CHAN_INFO_SAMP_FREQ:
499 if (!val) {
500 ret = -EINVAL;
501 break;
502 }
503
504 for (i = 0; i < 16; i++)
505 if (val == st->chip_info->sample_freq_avail[i])
506 break;
507
508 if (i == 16) {
509 ret = -EINVAL;
510 break;
511 }
512
513 st->mode &= ~AD7793_MODE_RATE(-1);
514 st->mode |= AD7793_MODE_RATE(i);
515 ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
516 st->mode);
517 break;
545 default: 518 default:
546 ret = -EINVAL; 519 ret = -EINVAL;
547 } 520 }
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 4eff8351ce29..8729d6524b4d 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -333,6 +333,27 @@ static const struct iio_chan_spec at91_adc_channels[] = {
333 + AT91_SAMA5D2_DIFF_CHAN_CNT + 1), 333 + AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
334}; 334};
335 335
336static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
337{
338 int i;
339
340 for (i = 0; i < indio_dev->num_channels; i++) {
341 if (indio_dev->channels[i].scan_index == chan)
342 return i;
343 }
344 return -EINVAL;
345}
346
347static inline struct iio_chan_spec const *
348at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
349{
350 int index = at91_adc_chan_xlate(indio_dev, chan);
351
352 if (index < 0)
353 return NULL;
354 return indio_dev->channels + index;
355}
356
336static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) 357static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
337{ 358{
338 struct iio_dev *indio = iio_trigger_get_drvdata(trig); 359 struct iio_dev *indio = iio_trigger_get_drvdata(trig);
@@ -350,8 +371,10 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
350 at91_adc_writel(st, AT91_SAMA5D2_TRGR, status); 371 at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
351 372
352 for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) { 373 for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
353 struct iio_chan_spec const *chan = indio->channels + bit; 374 struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
354 375
376 if (!chan)
377 continue;
355 if (state) { 378 if (state) {
356 at91_adc_writel(st, AT91_SAMA5D2_CHER, 379 at91_adc_writel(st, AT91_SAMA5D2_CHER,
357 BIT(chan->channel)); 380 BIT(chan->channel));
@@ -448,7 +471,11 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
448 471
449 for_each_set_bit(bit, indio_dev->active_scan_mask, 472 for_each_set_bit(bit, indio_dev->active_scan_mask,
450 indio_dev->num_channels) { 473 indio_dev->num_channels) {
451 struct iio_chan_spec const *chan = indio_dev->channels + bit; 474 struct iio_chan_spec const *chan =
475 at91_adc_chan_get(indio_dev, bit);
476
477 if (!chan)
478 continue;
452 479
453 st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8; 480 st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
454 } 481 }
@@ -526,8 +553,11 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
526 */ 553 */
527 for_each_set_bit(bit, indio_dev->active_scan_mask, 554 for_each_set_bit(bit, indio_dev->active_scan_mask,
528 indio_dev->num_channels) { 555 indio_dev->num_channels) {
529 struct iio_chan_spec const *chan = indio_dev->channels + bit; 556 struct iio_chan_spec const *chan =
557 at91_adc_chan_get(indio_dev, bit);
530 558
559 if (!chan)
560 continue;
531 if (st->dma_st.dma_chan) 561 if (st->dma_st.dma_chan)
532 at91_adc_readl(st, chan->address); 562 at91_adc_readl(st, chan->address);
533 } 563 }
@@ -587,8 +617,11 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
587 617
588 for_each_set_bit(bit, indio_dev->active_scan_mask, 618 for_each_set_bit(bit, indio_dev->active_scan_mask,
589 indio_dev->num_channels) { 619 indio_dev->num_channels) {
590 struct iio_chan_spec const *chan = indio_dev->channels + bit; 620 struct iio_chan_spec const *chan =
621 at91_adc_chan_get(indio_dev, bit);
591 622
623 if (!chan)
624 continue;
592 st->buffer[i] = at91_adc_readl(st, chan->address); 625 st->buffer[i] = at91_adc_readl(st, chan->address);
593 i++; 626 i++;
594 } 627 }
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 01422d11753c..b28a716a23b2 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -144,6 +144,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
144 * Leave as soon as the exact resolution is reached. 144 * Leave as soon as the exact resolution is reached.
145 * Otherwise the higher resolution below 32 bits is kept. 145 * Otherwise the higher resolution below 32 bits is kept.
146 */ 146 */
147 fl->res = 0;
147 for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) { 148 for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
148 for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) { 149 for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
149 if (fast) 150 if (fast)
@@ -193,7 +194,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
193 } 194 }
194 } 195 }
195 196
196 if (!fl->fosr) 197 if (!fl->res)
197 return -EINVAL; 198 return -EINVAL;
198 199
199 return 0; 200 return 0;
@@ -770,7 +771,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
770 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 771 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
771 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; 772 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
772 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel]; 773 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
773 unsigned int spi_freq = adc->spi_freq; 774 unsigned int spi_freq;
774 int ret = -EINVAL; 775 int ret = -EINVAL;
775 776
776 switch (mask) { 777 switch (mask) {
@@ -784,8 +785,18 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
784 case IIO_CHAN_INFO_SAMP_FREQ: 785 case IIO_CHAN_INFO_SAMP_FREQ:
785 if (!val) 786 if (!val)
786 return -EINVAL; 787 return -EINVAL;
787 if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) 788
789 switch (ch->src) {
790 case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
788 spi_freq = adc->dfsdm->spi_master_freq; 791 spi_freq = adc->dfsdm->spi_master_freq;
792 break;
793 case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
794 case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
795 spi_freq = adc->dfsdm->spi_master_freq / 2;
796 break;
797 default:
798 spi_freq = adc->spi_freq;
799 }
789 800
790 if (spi_freq % val) 801 if (spi_freq % val)
791 dev_warn(&indio_dev->dev, 802 dev_warn(&indio_dev->dev,
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 05e0c353e089..b32bf57910ca 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
587 * Should be used as the set_length callback for iio_buffer_access_ops 587 * Should be used as the set_length callback for iio_buffer_access_ops
588 * struct for DMA buffers. 588 * struct for DMA buffers.
589 */ 589 */
590int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length) 590int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
591{ 591{
592 /* Avoid an invalid state */ 592 /* Avoid an invalid state */
593 if (length < 2) 593 if (length < 2)
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index 047fe757ab97..70c302a93d7f 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -22,11 +22,18 @@ struct iio_kfifo {
22#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer) 22#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
23 23
24static inline int __iio_allocate_kfifo(struct iio_kfifo *buf, 24static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
25 int bytes_per_datum, int length) 25 size_t bytes_per_datum, unsigned int length)
26{ 26{
27 if ((length == 0) || (bytes_per_datum == 0)) 27 if ((length == 0) || (bytes_per_datum == 0))
28 return -EINVAL; 28 return -EINVAL;
29 29
30 /*
31 * Make sure we don't overflow an unsigned int after kfifo rounds up to
32 * the next power of 2.
33 */
34 if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
35 return -EINVAL;
36
30 return __kfifo_alloc((struct __kfifo *)&buf->kf, length, 37 return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
31 bytes_per_datum, GFP_KERNEL); 38 bytes_per_datum, GFP_KERNEL);
32} 39}
@@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
67 return 0; 74 return 0;
68} 75}
69 76
70static int iio_set_length_kfifo(struct iio_buffer *r, int length) 77static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
71{ 78{
72 /* Avoid an invalid state */ 79 /* Avoid an invalid state */
73 if (length < 2) 80 if (length < 2)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index cfb6588565ba..4905a997a7ec 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -178,14 +178,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
178#ifdef CONFIG_PM 178#ifdef CONFIG_PM
179 int ret; 179 int ret;
180 180
181 atomic_set(&st->user_requested_state, state);
182
183 if (atomic_add_unless(&st->runtime_pm_enable, 1, 1)) 181 if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
184 pm_runtime_enable(&st->pdev->dev); 182 pm_runtime_enable(&st->pdev->dev);
185 183
186 if (state) 184 if (state) {
185 atomic_inc(&st->user_requested_state);
187 ret = pm_runtime_get_sync(&st->pdev->dev); 186 ret = pm_runtime_get_sync(&st->pdev->dev);
188 else { 187 } else {
188 atomic_dec(&st->user_requested_state);
189 pm_runtime_mark_last_busy(&st->pdev->dev); 189 pm_runtime_mark_last_busy(&st->pdev->dev);
190 pm_runtime_use_autosuspend(&st->pdev->dev); 190 pm_runtime_use_autosuspend(&st->pdev->dev);
191 ret = pm_runtime_put_autosuspend(&st->pdev->dev); 191 ret = pm_runtime_put_autosuspend(&st->pdev->dev);
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index fb2d347f760f..ecc55e98ddd3 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -502,7 +502,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
502 return -EINVAL; 502 return -EINVAL;
503 503
504 if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) 504 if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
505 return -EAGAIN; 505 return -EINVAL;
506 506
507 memcpy(gid, &table->data_vec[index].gid, sizeof(*gid)); 507 memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
508 if (attr) { 508 if (attr) {
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 9a4e899d94b3..2b6c9b516070 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
119 umem->length = size; 119 umem->length = size;
120 umem->address = addr; 120 umem->address = addr;
121 umem->page_shift = PAGE_SHIFT; 121 umem->page_shift = PAGE_SHIFT;
122 umem->pid = get_task_pid(current, PIDTYPE_PID);
123 /* 122 /*
124 * We ask for writable memory if any of the following 123 * We ask for writable memory if any of the following
125 * access flags are set. "Local write" and "remote write" 124 * access flags are set. "Local write" and "remote write"
@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
132 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); 131 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
133 132
134 if (access & IB_ACCESS_ON_DEMAND) { 133 if (access & IB_ACCESS_ON_DEMAND) {
135 put_pid(umem->pid);
136 ret = ib_umem_odp_get(context, umem, access); 134 ret = ib_umem_odp_get(context, umem, access);
137 if (ret) { 135 if (ret) {
138 kfree(umem); 136 kfree(umem);
@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
148 146
149 page_list = (struct page **) __get_free_page(GFP_KERNEL); 147 page_list = (struct page **) __get_free_page(GFP_KERNEL);
150 if (!page_list) { 148 if (!page_list) {
151 put_pid(umem->pid);
152 kfree(umem); 149 kfree(umem);
153 return ERR_PTR(-ENOMEM); 150 return ERR_PTR(-ENOMEM);
154 } 151 }
@@ -231,7 +228,6 @@ out:
231 if (ret < 0) { 228 if (ret < 0) {
232 if (need_release) 229 if (need_release)
233 __ib_umem_release(context->device, umem, 0); 230 __ib_umem_release(context->device, umem, 0);
234 put_pid(umem->pid);
235 kfree(umem); 231 kfree(umem);
236 } else 232 } else
237 current->mm->pinned_vm = locked; 233 current->mm->pinned_vm = locked;
@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
274 270
275 __ib_umem_release(umem->context->device, umem, 1); 271 __ib_umem_release(umem->context->device, umem, 1);
276 272
277 task = get_pid_task(umem->pid, PIDTYPE_PID); 273 task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
278 put_pid(umem->pid);
279 if (!task) 274 if (!task)
280 goto out; 275 goto out;
281 mm = get_task_mm(task); 276 mm = get_task_mm(task);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f6c739ec8b62..20b9f31052bf 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
185 bnxt_re_ib_unreg(rdev, false); 185 bnxt_re_ib_unreg(rdev, false);
186} 186}
187 187
188static void bnxt_re_stop_irq(void *handle)
189{
190 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
191 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
192 struct bnxt_qplib_nq *nq;
193 int indx;
194
195 for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
196 nq = &rdev->nq[indx - 1];
197 bnxt_qplib_nq_stop_irq(nq, false);
198 }
199
200 bnxt_qplib_rcfw_stop_irq(rcfw, false);
201}
202
203static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
204{
205 struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
206 struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
207 struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
208 struct bnxt_qplib_nq *nq;
209 int indx, rc;
210
211 if (!ent) {
212 /* Not setting the f/w timeout bit in rcfw.
213 * During the driver unload the first command
214 * to f/w will timeout and that will set the
215 * timeout bit.
216 */
217 dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
218 return;
219 }
220
221 /* Vectors may change after restart, so update with new vectors
222 * in device structure.
223 */
224 for (indx = 0; indx < rdev->num_msix; indx++)
225 rdev->msix_entries[indx].vector = ent[indx].vector;
226
227 bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
228 false);
229 for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
230 nq = &rdev->nq[indx - 1];
231 rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
232 msix_ent[indx].vector, false);
233 if (rc)
234 dev_warn(rdev_to_dev(rdev),
235 "Failed to reinit NQ index %d\n", indx - 1);
236 }
237}
238
188static struct bnxt_ulp_ops bnxt_re_ulp_ops = { 239static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
189 .ulp_async_notifier = NULL, 240 .ulp_async_notifier = NULL,
190 .ulp_stop = bnxt_re_stop, 241 .ulp_stop = bnxt_re_stop,
191 .ulp_start = bnxt_re_start, 242 .ulp_start = bnxt_re_start,
192 .ulp_sriov_config = bnxt_re_sriov_config, 243 .ulp_sriov_config = bnxt_re_sriov_config,
193 .ulp_shutdown = bnxt_re_shutdown 244 .ulp_shutdown = bnxt_re_shutdown,
245 .ulp_irq_stop = bnxt_re_stop_irq,
246 .ulp_irq_restart = bnxt_re_start_irq
194}; 247};
195 248
196/* RoCE -> Net driver */ 249/* RoCE -> Net driver */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 3a78faba8d91..50d8f1fc98d5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -336,22 +336,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
336 return IRQ_HANDLED; 336 return IRQ_HANDLED;
337} 337}
338 338
339void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
340{
341 tasklet_disable(&nq->worker);
342 /* Mask h/w interrupt */
343 NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
344 /* Sync with last running IRQ handler */
345 synchronize_irq(nq->vector);
346 if (kill)
347 tasklet_kill(&nq->worker);
348 if (nq->requested) {
349 irq_set_affinity_hint(nq->vector, NULL);
350 free_irq(nq->vector, nq);
351 nq->requested = false;
352 }
353}
354
339void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq) 355void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
340{ 356{
341 if (nq->cqn_wq) { 357 if (nq->cqn_wq) {
342 destroy_workqueue(nq->cqn_wq); 358 destroy_workqueue(nq->cqn_wq);
343 nq->cqn_wq = NULL; 359 nq->cqn_wq = NULL;
344 } 360 }
361
345 /* Make sure the HW is stopped! */ 362 /* Make sure the HW is stopped! */
346 synchronize_irq(nq->vector); 363 bnxt_qplib_nq_stop_irq(nq, true);
347 tasklet_disable(&nq->worker);
348 tasklet_kill(&nq->worker);
349 364
350 if (nq->requested) {
351 irq_set_affinity_hint(nq->vector, NULL);
352 free_irq(nq->vector, nq);
353 nq->requested = false;
354 }
355 if (nq->bar_reg_iomem) 365 if (nq->bar_reg_iomem)
356 iounmap(nq->bar_reg_iomem); 366 iounmap(nq->bar_reg_iomem);
357 nq->bar_reg_iomem = NULL; 367 nq->bar_reg_iomem = NULL;
@@ -361,6 +371,40 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
361 nq->vector = 0; 371 nq->vector = 0;
362} 372}
363 373
374int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
375 int msix_vector, bool need_init)
376{
377 int rc;
378
379 if (nq->requested)
380 return -EFAULT;
381
382 nq->vector = msix_vector;
383 if (need_init)
384 tasklet_init(&nq->worker, bnxt_qplib_service_nq,
385 (unsigned long)nq);
386 else
387 tasklet_enable(&nq->worker);
388
389 snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
390 rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
391 if (rc)
392 return rc;
393
394 cpumask_clear(&nq->mask);
395 cpumask_set_cpu(nq_indx, &nq->mask);
396 rc = irq_set_affinity_hint(nq->vector, &nq->mask);
397 if (rc) {
398 dev_warn(&nq->pdev->dev,
399 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
400 nq->vector, nq_indx);
401 }
402 nq->requested = true;
403 NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
404
405 return rc;
406}
407
364int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, 408int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
365 int nq_idx, int msix_vector, int bar_reg_offset, 409 int nq_idx, int msix_vector, int bar_reg_offset,
366 int (*cqn_handler)(struct bnxt_qplib_nq *nq, 410 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
@@ -372,41 +416,17 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
372 resource_size_t nq_base; 416 resource_size_t nq_base;
373 int rc = -1; 417 int rc = -1;
374 418
375 nq->pdev = pdev;
376 nq->vector = msix_vector;
377 if (cqn_handler) 419 if (cqn_handler)
378 nq->cqn_handler = cqn_handler; 420 nq->cqn_handler = cqn_handler;
379 421
380 if (srqn_handler) 422 if (srqn_handler)
381 nq->srqn_handler = srqn_handler; 423 nq->srqn_handler = srqn_handler;
382 424
383 tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
384
385 /* Have a task to schedule CQ notifiers in post send case */ 425 /* Have a task to schedule CQ notifiers in post send case */
386 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq"); 426 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
387 if (!nq->cqn_wq) 427 if (!nq->cqn_wq)
388 goto fail; 428 return -ENOMEM;
389
390 nq->requested = false;
391 memset(nq->name, 0, 32);
392 sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
393 rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
394 if (rc) {
395 dev_err(&nq->pdev->dev,
396 "Failed to request IRQ for NQ: %#x", rc);
397 goto fail;
398 }
399
400 cpumask_clear(&nq->mask);
401 cpumask_set_cpu(nq_idx, &nq->mask);
402 rc = irq_set_affinity_hint(nq->vector, &nq->mask);
403 if (rc) {
404 dev_warn(&nq->pdev->dev,
405 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
406 nq->vector, nq_idx);
407 }
408 429
409 nq->requested = true;
410 nq->bar_reg = NQ_CONS_PCI_BAR_REGION; 430 nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
411 nq->bar_reg_off = bar_reg_offset; 431 nq->bar_reg_off = bar_reg_offset;
412 nq_base = pci_resource_start(pdev, nq->bar_reg); 432 nq_base = pci_resource_start(pdev, nq->bar_reg);
@@ -419,7 +439,13 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
419 rc = -ENOMEM; 439 rc = -ENOMEM;
420 goto fail; 440 goto fail;
421 } 441 }
422 NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements); 442
443 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
444 if (rc) {
445 dev_err(&nq->pdev->dev,
446 "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
447 goto fail;
448 }
423 449
424 return 0; 450 return 0;
425fail: 451fail:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ade9f13c0fd1..72352ca80ace 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
467 struct bnxt_qplib_cq *cq; 467 struct bnxt_qplib_cq *cq;
468}; 468};
469 469
470void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
470void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq); 471void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
472int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
473 int msix_vector, bool need_init);
471int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq, 474int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
472 int nq_idx, int msix_vector, int bar_reg_offset, 475 int nq_idx, int msix_vector, int bar_reg_offset,
473 int (*cqn_handler)(struct bnxt_qplib_nq *nq, 476 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 80027a494730..2852d350ada1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -582,19 +582,29 @@ fail:
582 return -ENOMEM; 582 return -ENOMEM;
583} 583}
584 584
585void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 585void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
586{ 586{
587 unsigned long indx;
588
589 /* Make sure the HW channel is stopped! */
590 synchronize_irq(rcfw->vector);
591 tasklet_disable(&rcfw->worker); 587 tasklet_disable(&rcfw->worker);
592 tasklet_kill(&rcfw->worker); 588 /* Mask h/w interrupts */
589 CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
590 rcfw->creq.max_elements);
591 /* Sync with last running IRQ-handler */
592 synchronize_irq(rcfw->vector);
593 if (kill)
594 tasklet_kill(&rcfw->worker);
593 595
594 if (rcfw->requested) { 596 if (rcfw->requested) {
595 free_irq(rcfw->vector, rcfw); 597 free_irq(rcfw->vector, rcfw);
596 rcfw->requested = false; 598 rcfw->requested = false;
597 } 599 }
600}
601
602void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
603{
604 unsigned long indx;
605
606 bnxt_qplib_rcfw_stop_irq(rcfw, true);
607
598 if (rcfw->cmdq_bar_reg_iomem) 608 if (rcfw->cmdq_bar_reg_iomem)
599 iounmap(rcfw->cmdq_bar_reg_iomem); 609 iounmap(rcfw->cmdq_bar_reg_iomem);
600 rcfw->cmdq_bar_reg_iomem = NULL; 610 rcfw->cmdq_bar_reg_iomem = NULL;
@@ -614,6 +624,31 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
614 rcfw->vector = 0; 624 rcfw->vector = 0;
615} 625}
616 626
627int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
628 bool need_init)
629{
630 int rc;
631
632 if (rcfw->requested)
633 return -EFAULT;
634
635 rcfw->vector = msix_vector;
636 if (need_init)
637 tasklet_init(&rcfw->worker,
638 bnxt_qplib_service_creq, (unsigned long)rcfw);
639 else
640 tasklet_enable(&rcfw->worker);
641 rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
642 "bnxt_qplib_creq", rcfw);
643 if (rc)
644 return rc;
645 rcfw->requested = true;
646 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
647 rcfw->creq.max_elements);
648
649 return 0;
650}
651
617int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, 652int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
618 struct bnxt_qplib_rcfw *rcfw, 653 struct bnxt_qplib_rcfw *rcfw,
619 int msix_vector, 654 int msix_vector,
@@ -675,27 +710,17 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
675 rcfw->creq_qp_event_processed = 0; 710 rcfw->creq_qp_event_processed = 0;
676 rcfw->creq_func_event_processed = 0; 711 rcfw->creq_func_event_processed = 0;
677 712
678 rcfw->vector = msix_vector;
679 if (aeq_handler) 713 if (aeq_handler)
680 rcfw->aeq_handler = aeq_handler; 714 rcfw->aeq_handler = aeq_handler;
715 init_waitqueue_head(&rcfw->waitq);
681 716
682 tasklet_init(&rcfw->worker, bnxt_qplib_service_creq, 717 rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
683 (unsigned long)rcfw);
684
685 rcfw->requested = false;
686 rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
687 "bnxt_qplib_creq", rcfw);
688 if (rc) { 718 if (rc) {
689 dev_err(&rcfw->pdev->dev, 719 dev_err(&rcfw->pdev->dev,
690 "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc); 720 "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
691 bnxt_qplib_disable_rcfw_channel(rcfw); 721 bnxt_qplib_disable_rcfw_channel(rcfw);
692 return rc; 722 return rc;
693 } 723 }
694 rcfw->requested = true;
695
696 init_waitqueue_head(&rcfw->waitq);
697
698 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
699 724
700 init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]); 725 init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
701 init.cmdq_size_cmdq_lvl = cpu_to_le16( 726 init.cmdq_size_cmdq_lvl = cpu_to_le16(
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index c7cce2e4185e..46416dfe8830 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
195void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); 195void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
196int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, 196int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
197 struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz); 197 struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
198void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
198void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); 199void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
200int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
201 bool need_init);
199int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, 202int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
200 struct bnxt_qplib_rcfw *rcfw, 203 struct bnxt_qplib_rcfw *rcfw,
201 int msix_vector, 204 int msix_vector,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e90f2fd8dc16..1445918e3239 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -489,10 +489,10 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
489err_dereg_mem: 489err_dereg_mem:
490 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, 490 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
491 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp); 491 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
492err_free_wr_wait:
493 c4iw_put_wr_wait(mhp->wr_waitp);
494err_free_skb: 492err_free_skb:
495 kfree_skb(mhp->dereg_skb); 493 kfree_skb(mhp->dereg_skb);
494err_free_wr_wait:
495 c4iw_put_wr_wait(mhp->wr_waitp);
496err_free_mhp: 496err_free_mhp:
497 kfree(mhp); 497 kfree(mhp);
498 return ERR_PTR(ret); 498 return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e6a60fa59f2b..e6bdd0c1e80a 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5944 u64 status; 5944 u64 status;
5945 u32 sw_index; 5945 u32 sw_index;
5946 int i = 0; 5946 int i = 0;
5947 unsigned long irq_flags;
5947 5948
5948 sw_index = dd->hw_to_sw[hw_context]; 5949 sw_index = dd->hw_to_sw[hw_context];
5949 if (sw_index >= dd->num_send_contexts) { 5950 if (sw_index >= dd->num_send_contexts) {
@@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5953 return; 5954 return;
5954 } 5955 }
5955 sci = &dd->send_contexts[sw_index]; 5956 sci = &dd->send_contexts[sw_index];
5957 spin_lock_irqsave(&dd->sc_lock, irq_flags);
5956 sc = sci->sc; 5958 sc = sci->sc;
5957 if (!sc) { 5959 if (!sc) {
5958 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__, 5960 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5959 sw_index, hw_context); 5961 sw_index, hw_context);
5962 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5960 return; 5963 return;
5961 } 5964 }
5962 5965
@@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5978 */ 5981 */
5979 if (sc->type != SC_USER) 5982 if (sc->type != SC_USER)
5980 queue_work(dd->pport->hfi1_wq, &sc->halt_work); 5983 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5984 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5981 5985
5982 /* 5986 /*
5983 * Update the counters for the corresponding status bits. 5987 * Update the counters for the corresponding status bits.
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 14734d0d0b76..3a485f50fede 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -377,6 +377,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
377 377
378 hr_cq->set_ci_db = hr_cq->db.db_record; 378 hr_cq->set_ci_db = hr_cq->db.db_record;
379 *hr_cq->set_ci_db = 0; 379 *hr_cq->set_ci_db = 0;
380 hr_cq->db_en = 1;
380 } 381 }
381 382
382 /* Init mmt table and write buff address to mtt table */ 383 /* Init mmt table and write buff address to mtt table */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 47e1b6ac1e1a..8013d69c5ac4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -722,6 +722,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
722 free_mr->mr_free_pd = to_hr_pd(pd); 722 free_mr->mr_free_pd = to_hr_pd(pd);
723 free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; 723 free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
724 free_mr->mr_free_pd->ibpd.uobject = NULL; 724 free_mr->mr_free_pd->ibpd.uobject = NULL;
725 free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
725 atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0); 726 atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
726 727
727 attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE; 728 attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
@@ -1036,7 +1037,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
1036 1037
1037 do { 1038 do {
1038 ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); 1039 ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
1039 if (ret < 0) { 1040 if (ret < 0 && hr_qp) {
1040 dev_err(dev, 1041 dev_err(dev,
1041 "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n", 1042 "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
1042 hr_qp->qpn, ret, hr_mr->key, ne); 1043 hr_qp->qpn, ret, hr_mr->key, ne);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 25916e8522ed..1f0965bb64ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -142,8 +142,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
142 unsigned long flags; 142 unsigned long flags;
143 unsigned int ind; 143 unsigned int ind;
144 void *wqe = NULL; 144 void *wqe = NULL;
145 u32 tmp_len = 0;
146 bool loopback; 145 bool loopback;
146 u32 tmp_len;
147 int ret = 0; 147 int ret = 0;
148 u8 *smac; 148 u8 *smac;
149 int nreq; 149 int nreq;
@@ -189,6 +189,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
189 189
190 owner_bit = 190 owner_bit =
191 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); 191 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
192 tmp_len = 0;
192 193
193 /* Corresponding to the QP type, wqe process separately */ 194 /* Corresponding to the QP type, wqe process separately */
194 if (ibqp->qp_type == IB_QPT_GSI) { 195 if (ibqp->qp_type == IB_QPT_GSI) {
@@ -547,16 +548,20 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
547 } 548 }
548 549
549 if (i < hr_qp->rq.max_gs) { 550 if (i < hr_qp->rq.max_gs) {
550 dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); 551 dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
551 dseg[i].addr = 0; 552 dseg->addr = 0;
552 } 553 }
553 554
554 /* rq support inline data */ 555 /* rq support inline data */
555 sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list; 556 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
556 hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge; 557 sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
557 for (i = 0; i < wr->num_sge; i++) { 558 hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
558 sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr; 559 (u32)wr->num_sge;
559 sge_list[i].len = wr->sg_list[i].length; 560 for (i = 0; i < wr->num_sge; i++) {
561 sge_list[i].addr =
562 (void *)(u64)wr->sg_list[i].addr;
563 sge_list[i].len = wr->sg_list[i].length;
564 }
560 } 565 }
561 566
562 hr_qp->rq.wrid[ind] = wr->wr_id; 567 hr_qp->rq.wrid[ind] = wr->wr_id;
@@ -613,6 +618,8 @@ static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
613 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr, 618 dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
614 ring->desc_num * sizeof(struct hns_roce_cmq_desc), 619 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
615 DMA_BIDIRECTIONAL); 620 DMA_BIDIRECTIONAL);
621
622 ring->desc_dma_addr = 0;
616 kfree(ring->desc); 623 kfree(ring->desc);
617} 624}
618 625
@@ -1081,6 +1088,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1081 if (ret) { 1088 if (ret) {
1082 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n", 1089 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1083 ret); 1090 ret);
1091 return ret;
1084 } 1092 }
1085 1093
1086 /* Get pf resource owned by every pf */ 1094 /* Get pf resource owned by every pf */
@@ -1372,6 +1380,8 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1372 1380
1373 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 1381 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
1374 mr->type == MR_TYPE_MR ? 0 : 1); 1382 mr->type == MR_TYPE_MR ? 0 : 1);
1383 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1384 1);
1375 mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa); 1385 mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
1376 1386
1377 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); 1387 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
@@ -2169,6 +2179,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
2169 struct hns_roce_v2_qp_context *context, 2179 struct hns_roce_v2_qp_context *context,
2170 struct hns_roce_v2_qp_context *qpc_mask) 2180 struct hns_roce_v2_qp_context *qpc_mask)
2171{ 2181{
2182 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2172 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); 2183 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2173 2184
2174 /* 2185 /*
@@ -2281,7 +2292,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
2281 context->rq_db_record_addr = hr_qp->rdb.dma >> 32; 2292 context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
2282 qpc_mask->rq_db_record_addr = 0; 2293 qpc_mask->rq_db_record_addr = 0;
2283 2294
2284 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1); 2295 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
2296 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
2285 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0); 2297 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
2286 2298
2287 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M, 2299 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -4703,6 +4715,8 @@ static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
4703 {0, } 4715 {0, }
4704}; 4716};
4705 4717
4718MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
4719
4706static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, 4720static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
4707 struct hnae3_handle *handle) 4721 struct hnae3_handle *handle)
4708{ 4722{
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 9d48bc07a9e6..96fb6a9ed93c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -199,7 +199,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
199 199
200 memset(props, 0, sizeof(*props)); 200 memset(props, 0, sizeof(*props));
201 201
202 props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid); 202 props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
203 props->max_mr_size = (u64)(~(0ULL)); 203 props->max_mr_size = (u64)(~(0ULL));
204 props->page_size_cap = hr_dev->caps.page_size_cap; 204 props->page_size_cap = hr_dev->caps.page_size_cap;
205 props->vendor_id = hr_dev->vendor_id; 205 props->vendor_id = hr_dev->vendor_id;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d4aad34c21e2..baaf906f7c2e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -660,6 +660,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
660 goto err_rq_sge_list; 660 goto err_rq_sge_list;
661 } 661 }
662 *hr_qp->rdb.db_record = 0; 662 *hr_qp->rdb.db_record = 0;
663 hr_qp->rdb_en = 1;
663 } 664 }
664 665
665 /* Allocate QP buf */ 666 /* Allocate QP buf */
@@ -955,7 +956,14 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
955 } 956 }
956 957
957 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 958 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
958 ret = 0; 959 if (hr_dev->caps.min_wqes) {
960 ret = -EPERM;
961 dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
962 new_state);
963 } else {
964 ret = 0;
965 }
966
959 goto out; 967 goto out;
960 } 968 }
961 969
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index d5d8c1be345a..2f2b4426ded7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -207,6 +207,7 @@ struct i40iw_msix_vector {
207 u32 irq; 207 u32 irq;
208 u32 cpu_affinity; 208 u32 cpu_affinity;
209 u32 ceq_id; 209 u32 ceq_id;
210 cpumask_t mask;
210}; 211};
211 212
212struct l2params_work { 213struct l2params_work {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 4cfa8f4647e2..f7c6fd9ff6e2 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -2093,7 +2093,7 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
2093 if (netif_is_bond_slave(netdev)) 2093 if (netif_is_bond_slave(netdev))
2094 netdev = netdev_master_upper_dev_get(netdev); 2094 netdev = netdev_master_upper_dev_get(netdev);
2095 2095
2096 neigh = dst_neigh_lookup(dst, &dst_addr); 2096 neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
2097 2097
2098 rcu_read_lock(); 2098 rcu_read_lock();
2099 if (neigh) { 2099 if (neigh) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 6139836fb533..c9f62ca7643c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -331,7 +331,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
331 switch (info->ae_id) { 331 switch (info->ae_id) {
332 case I40IW_AE_LLP_FIN_RECEIVED: 332 case I40IW_AE_LLP_FIN_RECEIVED:
333 if (qp->term_flags) 333 if (qp->term_flags)
334 continue; 334 break;
335 if (atomic_inc_return(&iwqp->close_timer_started) == 1) { 335 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
336 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT; 336 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
337 if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) && 337 if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
@@ -360,7 +360,7 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
360 break; 360 break;
361 case I40IW_AE_LLP_CONNECTION_RESET: 361 case I40IW_AE_LLP_CONNECTION_RESET:
362 if (atomic_read(&iwqp->close_timer_started)) 362 if (atomic_read(&iwqp->close_timer_started))
363 continue; 363 break;
364 i40iw_cm_disconn(iwqp); 364 i40iw_cm_disconn(iwqp);
365 break; 365 break;
366 case I40IW_AE_QP_SUSPEND_COMPLETE: 366 case I40IW_AE_QP_SUSPEND_COMPLETE:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 9cd0d3ef9057..05001e6da1f8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -687,7 +687,6 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
687 struct i40iw_msix_vector *msix_vec) 687 struct i40iw_msix_vector *msix_vec)
688{ 688{
689 enum i40iw_status_code status; 689 enum i40iw_status_code status;
690 cpumask_t mask;
691 690
692 if (iwdev->msix_shared && !ceq_id) { 691 if (iwdev->msix_shared && !ceq_id) {
693 tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev); 692 tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
@@ -697,9 +696,9 @@ static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iw
697 status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq); 696 status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
698 } 697 }
699 698
700 cpumask_clear(&mask); 699 cpumask_clear(&msix_vec->mask);
701 cpumask_set_cpu(msix_vec->cpu_affinity, &mask); 700 cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
702 irq_set_affinity_hint(msix_vec->irq, &mask); 701 irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
703 702
704 if (status) { 703 if (status) {
705 i40iw_pr_err("ceq irq config fail\n"); 704 i40iw_pr_err("ceq irq config fail\n");
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 40e4f5ab2b46..68679ad4c6da 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -394,6 +394,7 @@ static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
394 394
395 list_for_each_entry(iwpbl, pbl_list, list) { 395 list_for_each_entry(iwpbl, pbl_list, list) {
396 if (iwpbl->user_base == va) { 396 if (iwpbl->user_base == va) {
397 iwpbl->on_list = false;
397 list_del(&iwpbl->list); 398 list_del(&iwpbl->list);
398 return iwpbl; 399 return iwpbl;
399 } 400 }
@@ -614,6 +615,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
614 return ERR_PTR(-ENOMEM); 615 return ERR_PTR(-ENOMEM);
615 616
616 iwqp = (struct i40iw_qp *)mem; 617 iwqp = (struct i40iw_qp *)mem;
618 iwqp->allocated_buffer = mem;
617 qp = &iwqp->sc_qp; 619 qp = &iwqp->sc_qp;
618 qp->back_qp = (void *)iwqp; 620 qp->back_qp = (void *)iwqp;
619 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; 621 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
@@ -642,7 +644,6 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
642 goto error; 644 goto error;
643 } 645 }
644 646
645 iwqp->allocated_buffer = mem;
646 iwqp->iwdev = iwdev; 647 iwqp->iwdev = iwdev;
647 iwqp->iwpd = iwpd; 648 iwqp->iwpd = iwpd;
648 iwqp->ibqp.qp_num = qp_num; 649 iwqp->ibqp.qp_num = qp_num;
@@ -1898,6 +1899,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1898 goto error; 1899 goto error;
1899 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 1900 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1900 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); 1901 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1902 iwpbl->on_list = true;
1901 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 1903 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1902 break; 1904 break;
1903 case IW_MEMREG_TYPE_CQ: 1905 case IW_MEMREG_TYPE_CQ:
@@ -1908,6 +1910,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1908 1910
1909 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 1911 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1910 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); 1912 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1913 iwpbl->on_list = true;
1911 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 1914 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1912 break; 1915 break;
1913 case IW_MEMREG_TYPE_MEM: 1916 case IW_MEMREG_TYPE_MEM:
@@ -2045,14 +2048,18 @@ static void i40iw_del_memlist(struct i40iw_mr *iwmr,
2045 switch (iwmr->type) { 2048 switch (iwmr->type) {
2046 case IW_MEMREG_TYPE_CQ: 2049 case IW_MEMREG_TYPE_CQ:
2047 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); 2050 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2048 if (!list_empty(&ucontext->cq_reg_mem_list)) 2051 if (iwpbl->on_list) {
2052 iwpbl->on_list = false;
2049 list_del(&iwpbl->list); 2053 list_del(&iwpbl->list);
2054 }
2050 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); 2055 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2051 break; 2056 break;
2052 case IW_MEMREG_TYPE_QP: 2057 case IW_MEMREG_TYPE_QP:
2053 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); 2058 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2054 if (!list_empty(&ucontext->qp_reg_mem_list)) 2059 if (iwpbl->on_list) {
2060 iwpbl->on_list = false;
2055 list_del(&iwpbl->list); 2061 list_del(&iwpbl->list);
2062 }
2056 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); 2063 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2057 break; 2064 break;
2058 default: 2065 default:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 9067443cd311..76cf173377ab 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -78,6 +78,7 @@ struct i40iw_pbl {
78 }; 78 };
79 79
80 bool pbl_allocated; 80 bool pbl_allocated;
81 bool on_list;
81 u64 user_base; 82 u64 user_base;
82 struct i40iw_pble_alloc pble_alloc; 83 struct i40iw_pble_alloc pble_alloc;
83 struct i40iw_mr *iwmr; 84 struct i40iw_mr *iwmr;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b4d8ff8ab807..69716a7ea993 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2416,7 +2416,7 @@ static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2416 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 2416 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2417} 2417}
2418 2418
2419static void set_flow_label(void *misc_c, void *misc_v, u8 mask, u8 val, 2419static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2420 bool inner) 2420 bool inner)
2421{ 2421{
2422 if (inner) { 2422 if (inner) {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 87b7c1be2a11..2193dc1765fb 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -484,11 +484,6 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
484 return 1; 484 return 1;
485} 485}
486 486
487static int first_med_bfreg(void)
488{
489 return 1;
490}
491
492enum { 487enum {
493 /* this is the first blue flame register in the array of bfregs assigned 488 /* this is the first blue flame register in the array of bfregs assigned
494 * to a processes. Since we do not use it for blue flame but rather 489 * to a processes. Since we do not use it for blue flame but rather
@@ -514,6 +509,12 @@ static int num_med_bfreg(struct mlx5_ib_dev *dev,
514 return n >= 0 ? n : 0; 509 return n >= 0 ? n : 0;
515} 510}
516 511
512static int first_med_bfreg(struct mlx5_ib_dev *dev,
513 struct mlx5_bfreg_info *bfregi)
514{
515 return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
516}
517
517static int first_hi_bfreg(struct mlx5_ib_dev *dev, 518static int first_hi_bfreg(struct mlx5_ib_dev *dev,
518 struct mlx5_bfreg_info *bfregi) 519 struct mlx5_bfreg_info *bfregi)
519{ 520{
@@ -541,10 +542,13 @@ static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
541static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, 542static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
542 struct mlx5_bfreg_info *bfregi) 543 struct mlx5_bfreg_info *bfregi)
543{ 544{
544 int minidx = first_med_bfreg(); 545 int minidx = first_med_bfreg(dev, bfregi);
545 int i; 546 int i;
546 547
547 for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) { 548 if (minidx < 0)
549 return minidx;
550
551 for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
548 if (bfregi->count[i] < bfregi->count[minidx]) 552 if (bfregi->count[i] < bfregi->count[minidx])
549 minidx = i; 553 minidx = i;
550 if (!bfregi->count[minidx]) 554 if (!bfregi->count[minidx])
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7d3763b2e01c..3f9afc02d166 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -401,49 +401,47 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
401{ 401{
402 struct qedr_ucontext *ucontext = get_qedr_ucontext(context); 402 struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
403 struct qedr_dev *dev = get_qedr_dev(context->device); 403 struct qedr_dev *dev = get_qedr_dev(context->device);
404 unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; 404 unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT;
405 u64 unmapped_db = dev->db_phys_addr;
406 unsigned long len = (vma->vm_end - vma->vm_start); 405 unsigned long len = (vma->vm_end - vma->vm_start);
407 int rc = 0; 406 unsigned long dpi_start;
408 bool found; 407
408 dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size);
409 409
410 DP_DEBUG(dev, QEDR_MSG_INIT, 410 DP_DEBUG(dev, QEDR_MSG_INIT,
411 "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n", 411 "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n",
412 vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len); 412 (void *)vma->vm_start, (void *)vma->vm_end,
413 if (vma->vm_start & (PAGE_SIZE - 1)) { 413 (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size);
414 DP_ERR(dev, "Vma_start not page aligned = %ld\n", 414
415 vma->vm_start); 415 if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) {
416 DP_ERR(dev,
417 "failed mmap, adrresses must be page aligned: start=0x%pK, end=0x%pK\n",
418 (void *)vma->vm_start, (void *)vma->vm_end);
416 return -EINVAL; 419 return -EINVAL;
417 } 420 }
418 421
419 found = qedr_search_mmap(ucontext, vm_page, len); 422 if (!qedr_search_mmap(ucontext, phys_addr, len)) {
420 if (!found) { 423 DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n",
421 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
422 vma->vm_pgoff); 424 vma->vm_pgoff);
423 return -EINVAL; 425 return -EINVAL;
424 } 426 }
425 427
426 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); 428 if (phys_addr < dpi_start ||
427 429 ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) {
428 if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db + 430 DP_ERR(dev,
429 dev->db_size))) { 431 "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n",
430 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n"); 432 (void *)phys_addr, (void *)dpi_start,
431 if (vma->vm_flags & VM_READ) { 433 ucontext->dpi_size);
432 DP_ERR(dev, "Trying to map doorbell bar for read\n"); 434 return -EINVAL;
433 return -EPERM; 435 }
434 }
435
436 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
437 436
438 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, 437 if (vma->vm_flags & VM_READ) {
439 PAGE_SIZE, vma->vm_page_prot); 438 DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n");
440 } else { 439 return -EINVAL;
441 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
442 rc = remap_pfn_range(vma, vma->vm_start,
443 vma->vm_pgoff, len, vma->vm_page_prot);
444 } 440 }
445 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc); 441
446 return rc; 442 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
443 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len,
444 vma->vm_page_prot);
447} 445}
448 446
449struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, 447struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 2cb52fd48cf1..73a00a1c06f6 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -761,7 +761,6 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
761 unsigned int mask; 761 unsigned int mask;
762 unsigned int length = 0; 762 unsigned int length = 0;
763 int i; 763 int i;
764 int must_sched;
765 764
766 while (wr) { 765 while (wr) {
767 mask = wr_opcode_mask(wr->opcode, qp); 766 mask = wr_opcode_mask(wr->opcode, qp);
@@ -791,14 +790,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
791 wr = wr->next; 790 wr = wr->next;
792 } 791 }
793 792
794 /* 793 rxe_run_task(&qp->req.task, 1);
795 * Must sched in case of GSI QP because ib_send_mad() hold irq lock,
796 * and the requester call ip_local_out_sk() that takes spin_lock_bh.
797 */
798 must_sched = (qp_type(qp) == IB_QPT_GSI) ||
799 (queue_count(qp->sq.queue) > 1);
800
801 rxe_run_task(&qp->req.task, must_sched);
802 if (unlikely(qp->req.state == QP_STATE_ERROR)) 794 if (unlikely(qp->req.state == QP_STATE_ERROR))
803 rxe_run_task(&qp->comp.task, 1); 795 rxe_run_task(&qp->comp.task, 1);
804 796
diff --git a/drivers/input/evbug.c b/drivers/input/evbug.c
index cd4e6679d61a..5419c1c1f621 100644
--- a/drivers/input/evbug.c
+++ b/drivers/input/evbug.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index c81c79d01d93..370206f987f9 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -481,7 +481,7 @@ static int evdev_release(struct inode *inode, struct file *file)
481 evdev_detach_client(evdev, client); 481 evdev_detach_client(evdev, client);
482 482
483 for (i = 0; i < EV_CNT; ++i) 483 for (i = 0; i < EV_CNT; ++i)
484 kfree(client->evmasks[i]); 484 bitmap_free(client->evmasks[i]);
485 485
486 kvfree(client); 486 kvfree(client);
487 487
@@ -925,17 +925,15 @@ static int evdev_handle_get_val(struct evdev_client *client,
925{ 925{
926 int ret; 926 int ret;
927 unsigned long *mem; 927 unsigned long *mem;
928 size_t len;
929 928
930 len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long); 929 mem = bitmap_alloc(maxbit, GFP_KERNEL);
931 mem = kmalloc(len, GFP_KERNEL);
932 if (!mem) 930 if (!mem)
933 return -ENOMEM; 931 return -ENOMEM;
934 932
935 spin_lock_irq(&dev->event_lock); 933 spin_lock_irq(&dev->event_lock);
936 spin_lock(&client->buffer_lock); 934 spin_lock(&client->buffer_lock);
937 935
938 memcpy(mem, bits, len); 936 bitmap_copy(mem, bits, maxbit);
939 937
940 spin_unlock(&dev->event_lock); 938 spin_unlock(&dev->event_lock);
941 939
@@ -947,7 +945,7 @@ static int evdev_handle_get_val(struct evdev_client *client,
947 if (ret < 0) 945 if (ret < 0)
948 evdev_queue_syn_dropped(client); 946 evdev_queue_syn_dropped(client);
949 947
950 kfree(mem); 948 bitmap_free(mem);
951 949
952 return ret; 950 return ret;
953} 951}
@@ -1003,13 +1001,13 @@ static int evdev_set_mask(struct evdev_client *client,
1003 if (!cnt) 1001 if (!cnt)
1004 return 0; 1002 return 0;
1005 1003
1006 mask = kcalloc(sizeof(unsigned long), BITS_TO_LONGS(cnt), GFP_KERNEL); 1004 mask = bitmap_zalloc(cnt, GFP_KERNEL);
1007 if (!mask) 1005 if (!mask)
1008 return -ENOMEM; 1006 return -ENOMEM;
1009 1007
1010 error = bits_from_user(mask, cnt - 1, codes_size, codes, compat); 1008 error = bits_from_user(mask, cnt - 1, codes_size, codes, compat);
1011 if (error < 0) { 1009 if (error < 0) {
1012 kfree(mask); 1010 bitmap_free(mask);
1013 return error; 1011 return error;
1014 } 1012 }
1015 1013
@@ -1018,7 +1016,7 @@ static int evdev_set_mask(struct evdev_client *client,
1018 client->evmasks[type] = mask; 1016 client->evmasks[type] = mask;
1019 spin_unlock_irqrestore(&client->buffer_lock, flags); 1017 spin_unlock_irqrestore(&client->buffer_lock, flags);
1020 1018
1021 kfree(oldmask); 1019 bitmap_free(oldmask);
1022 1020
1023 return 0; 1021 return 0;
1024} 1022}
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index 2909e9561cf3..afdc20ca0e24 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <asm/io.h> 25#include <asm/io.h>
diff --git a/drivers/input/gameport/lightning.c b/drivers/input/gameport/lightning.c
index 85d6ee09f11f..c6e74c7945cb 100644
--- a/drivers/input/gameport/lightning.c
+++ b/drivers/input/gameport/lightning.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <asm/io.h> 25#include <asm/io.h>
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 7c217848613e..6437645858f9 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -21,10 +21,6 @@
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 * Should you need to contact me, the author, you can do so either by
26 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
27 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
28 */ 24 */
29 25
30#include <asm/io.h> 26#include <asm/io.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 6365c1958264..3304aaaffe87 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event);
480 */ 480 */
481void input_alloc_absinfo(struct input_dev *dev) 481void input_alloc_absinfo(struct input_dev *dev)
482{ 482{
483 if (!dev->absinfo) 483 if (dev->absinfo)
484 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), 484 return;
485 GFP_KERNEL);
486 485
487 WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__); 486 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
487 if (!dev->absinfo) {
488 dev_err(dev->dev.parent ?: &dev->dev,
489 "%s: unable to allocate memory\n", __func__);
490 /*
491 * We will handle this allocation failure in
492 * input_register_device() when we refuse to register input
493 * device with ABS bits but without absinfo.
494 */
495 }
488} 496}
489EXPORT_SYMBOL(input_alloc_absinfo); 497EXPORT_SYMBOL(input_alloc_absinfo);
490 498
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index 55efdfc7eb62..98307039a534 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 15a71acb6997..f466c0d34247 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/delay.h> 25#include <linux/delay.h>
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index c65b5fa69f1e..2b82a838c511 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/types.h> 25#include <linux/types.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index c79dbcb4d146..2b445c8d3fcd 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/delay.h> 25#include <linux/delay.h>
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index ae3ee24a2368..14cb956beac4 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index de0dd4756c84..804b1b80a8be 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -263,6 +259,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char
263 db9_saturn_write_sub(port, type, 3, powered, 0); 259 db9_saturn_write_sub(port, type, 3, powered, 0);
264 return data[0] = 0xe3; 260 return data[0] = 0xe3;
265 } 261 }
262 /* else: fall through */
266 default: 263 default:
267 return data[0]; 264 return data[0];
268 } 265 }
@@ -282,11 +279,14 @@ static int db9_saturn_report(unsigned char id, unsigned char data[60], struct in
282 switch (data[j]) { 279 switch (data[j]) {
283 case 0x16: /* multi controller (analog 4 axis) */ 280 case 0x16: /* multi controller (analog 4 axis) */
284 input_report_abs(dev, db9_abs[5], data[j + 6]); 281 input_report_abs(dev, db9_abs[5], data[j + 6]);
282 /* fall through */
285 case 0x15: /* mission stick (analog 3 axis) */ 283 case 0x15: /* mission stick (analog 3 axis) */
286 input_report_abs(dev, db9_abs[3], data[j + 4]); 284 input_report_abs(dev, db9_abs[3], data[j + 4]);
287 input_report_abs(dev, db9_abs[4], data[j + 5]); 285 input_report_abs(dev, db9_abs[4], data[j + 5]);
286 /* fall through */
288 case 0x13: /* racing controller (analog 1 axis) */ 287 case 0x13: /* racing controller (analog 1 axis) */
289 input_report_abs(dev, db9_abs[2], data[j + 3]); 288 input_report_abs(dev, db9_abs[2], data[j + 3]);
289 /* fall through */
290 case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */ 290 case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
291 case 0x02: /* digital pad (digital 2 axis + buttons) */ 291 case 0x02: /* digital pad (digital 2 axis + buttons) */
292 input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64)); 292 input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
@@ -380,6 +380,7 @@ static void db9_timer(struct timer_list *t)
380 input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1)); 380 input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
381 input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1)); 381 input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
382 input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1); 382 input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
383 /* fall through */
383 384
384 case DB9_MULTI_0802: 385 case DB9_MULTI_0802:
385 386
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 4e10ffdf8a36..d62e73dd9f7f 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -24,10 +24,6 @@
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 * Should you need to contact me, the author, you can do so either by
29 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
30 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
31 */ 27 */
32 28
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 0f519db64748..50a60065ab14 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/delay.h> 25#include <linux/delay.h>
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index eac9c5b8d73e..e10395ba62bc 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index a9ac2f9cfce0..43ff817d80ac 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/drivers/input/joystick/iforce/iforce-ff.c b/drivers/input/joystick/iforce/iforce-ff.c
index 0de9a0943a9e..3536d5f5ad18 100644
--- a/drivers/input/joystick/iforce/iforce-ff.c
+++ b/drivers/input/joystick/iforce/iforce-ff.c
@@ -19,10 +19,6 @@
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Should you need to contact me, the author, you can do so either by
24 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
25 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
26 */ 22 */
27 23
28#include "iforce.h" 24#include "iforce.h"
@@ -56,7 +52,7 @@ static int make_magnitude_modifier(struct iforce* iforce,
56 52
57 iforce_send_packet(iforce, FF_CMD_MAGNITUDE, data); 53 iforce_send_packet(iforce, FF_CMD_MAGNITUDE, data);
58 54
59 iforce_dump_packet("magnitude: ", FF_CMD_MAGNITUDE, data); 55 iforce_dump_packet(iforce, "magnitude", FF_CMD_MAGNITUDE, data);
60 return 0; 56 return 0;
61} 57}
62 58
@@ -178,7 +174,7 @@ static int make_condition_modifier(struct iforce* iforce,
178 data[9] = (100 * lsat) >> 16; 174 data[9] = (100 * lsat) >> 16;
179 175
180 iforce_send_packet(iforce, FF_CMD_CONDITION, data); 176 iforce_send_packet(iforce, FF_CMD_CONDITION, data);
181 iforce_dump_packet("condition", FF_CMD_CONDITION, data); 177 iforce_dump_packet(iforce, "condition", FF_CMD_CONDITION, data);
182 178
183 return 0; 179 return 0;
184} 180}
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index daeeb4c7e3b0..58d5cfe46526 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -19,10 +19,6 @@
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Should you need to contact me, the author, you can do so either by
24 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
25 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
26 */ 22 */
27 23
28#include "iforce.h" 24#include "iforce.h"
@@ -33,21 +29,14 @@ MODULE_LICENSE("GPL");
33 29
34static signed short btn_joystick[] = 30static signed short btn_joystick[] =
35{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE, 31{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
36 BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A, BTN_B, BTN_C, -1 }; 32 BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A,
37 33 BTN_B, BTN_C, BTN_DEAD, -1 };
38static signed short btn_avb_pegasus[] =
39{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
40 BTN_BASE2, BTN_BASE3, BTN_BASE4, -1 };
41 34
42static signed short btn_wheel[] = 35static signed short btn_joystick_avb[] =
43{ BTN_TRIGGER, BTN_TOP, BTN_THUMB, BTN_TOP2, BTN_BASE,
44 BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_BASE5, BTN_A, BTN_B, BTN_C, -1 };
45
46static signed short btn_avb_tw[] =
47{ BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE, 36{ BTN_TRIGGER, BTN_THUMB, BTN_TOP, BTN_TOP2, BTN_BASE,
48 BTN_BASE2, BTN_BASE3, BTN_BASE4, -1 }; 37 BTN_BASE2, BTN_BASE3, BTN_BASE4, BTN_DEAD, -1 };
49 38
50static signed short btn_avb_wheel[] = 39static signed short btn_wheel[] =
51{ BTN_GEAR_DOWN, BTN_GEAR_UP, BTN_BASE, BTN_BASE2, BTN_BASE3, 40{ BTN_GEAR_DOWN, BTN_GEAR_UP, BTN_BASE, BTN_BASE2, BTN_BASE3,
52 BTN_BASE4, BTN_BASE5, BTN_BASE6, -1 }; 41 BTN_BASE4, BTN_BASE5, BTN_BASE6, -1 };
53 42
@@ -73,9 +62,9 @@ static struct iforce_device iforce_device[] = {
73 { 0x044f, 0xa01c, "Thrustmaster Motor Sport GT", btn_wheel, abs_wheel, ff_iforce }, 62 { 0x044f, 0xa01c, "Thrustmaster Motor Sport GT", btn_wheel, abs_wheel, ff_iforce },
74 { 0x046d, 0xc281, "Logitech WingMan Force", btn_joystick, abs_joystick, ff_iforce }, 63 { 0x046d, 0xc281, "Logitech WingMan Force", btn_joystick, abs_joystick, ff_iforce },
75 { 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce }, 64 { 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
76 { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_avb_pegasus, abs_avb_pegasus, ff_iforce }, 65 { 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
77 { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce }, 66 { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
78 { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //? 67 { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
79 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? 68 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
80 { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, 69 { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
81 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? 70 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
@@ -360,7 +349,7 @@ int iforce_init_device(struct iforce *iforce)
360 349
361 for (i = 0; c[i]; i++) 350 for (i = 0; c[i]; i++)
362 if (!iforce_get_id_packet(iforce, c + i)) 351 if (!iforce_get_id_packet(iforce, c + i))
363 iforce_dump_packet("info", iforce->ecmd, iforce->edata); 352 iforce_dump_packet(iforce, "info", iforce->ecmd, iforce->edata);
364 353
365/* 354/*
366 * Disable spring, enable force feedback. 355 * Disable spring, enable force feedback.
@@ -388,7 +377,6 @@ int iforce_init_device(struct iforce *iforce)
388 377
389 for (i = 0; iforce->type->btn[i] >= 0; i++) 378 for (i = 0; iforce->type->btn[i] >= 0; i++)
390 set_bit(iforce->type->btn[i], input_dev->keybit); 379 set_bit(iforce->type->btn[i], input_dev->keybit);
391 set_bit(BTN_DEAD, input_dev->keybit);
392 380
393 for (i = 0; iforce->type->abs[i] >= 0; i++) { 381 for (i = 0; iforce->type->abs[i] >= 0; i++) {
394 382
diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c
index 08f98f2eaf88..c10169f4554e 100644
--- a/drivers/input/joystick/iforce/iforce-packets.c
+++ b/drivers/input/joystick/iforce/iforce-packets.c
@@ -19,10 +19,6 @@
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Should you need to contact me, the author, you can do so either by
24 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
25 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
26 */ 22 */
27 23
28#include "iforce.h" 24#include "iforce.h"
@@ -33,14 +29,10 @@ static struct {
33} iforce_hat_to_axis[16] = {{ 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}}; 29} iforce_hat_to_axis[16] = {{ 0,-1}, { 1,-1}, { 1, 0}, { 1, 1}, { 0, 1}, {-1, 1}, {-1, 0}, {-1,-1}};
34 30
35 31
36void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data) 32void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data)
37{ 33{
38 int i; 34 dev_dbg(iforce->dev->dev.parent, "%s %s cmd = %04x, data = %*ph\n",
39 35 __func__, msg, cmd, LO(cmd), data);
40 printk(KERN_DEBUG __FILE__ ": %s cmd = %04x, data = ", msg, cmd);
41 for (i = 0; i < LO(cmd); i++)
42 printk("%02x ", data[i]);
43 printk("\n");
44} 36}
45 37
46/* 38/*
@@ -255,7 +247,7 @@ int iforce_get_id_packet(struct iforce *iforce, char *packet)
255 iforce->cr.bRequest = packet[0]; 247 iforce->cr.bRequest = packet[0];
256 iforce->ctrl->dev = iforce->usbdev; 248 iforce->ctrl->dev = iforce->usbdev;
257 249
258 status = usb_submit_urb(iforce->ctrl, GFP_ATOMIC); 250 status = usb_submit_urb(iforce->ctrl, GFP_KERNEL);
259 if (status) { 251 if (status) {
260 dev_err(&iforce->intf->dev, 252 dev_err(&iforce->intf->dev,
261 "usb_submit_urb failed %d\n", status); 253 "usb_submit_urb failed %d\n", status);
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index 154e827b559b..f4ba4a751fe0 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -19,10 +19,6 @@
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Should you need to contact me, the author, you can do so either by
24 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
25 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
26 */ 22 */
27 23
28#include "iforce.h" 24#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index e8724f1a4a25..78073259c9a1 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -19,10 +19,6 @@
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Should you need to contact me, the author, you can do so either by
24 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
25 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
26 */ 22 */
27 23
28#include "iforce.h" 24#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 96ae4f5bd0eb..0e9d01f8bcb6 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -19,10 +19,6 @@
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software 20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * Should you need to contact me, the author, you can do so either by
24 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
25 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
26 */ 22 */
27 23
28#include <linux/kernel.h> 24#include <linux/kernel.h>
@@ -158,7 +154,7 @@ int iforce_init_device(struct iforce *iforce);
158int iforce_control_playback(struct iforce*, u16 id, unsigned int); 154int iforce_control_playback(struct iforce*, u16 id, unsigned int);
159void iforce_process_packet(struct iforce *iforce, u16 cmd, unsigned char *data); 155void iforce_process_packet(struct iforce *iforce, u16 cmd, unsigned char *data);
160int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data); 156int iforce_send_packet(struct iforce *iforce, u16 cmd, unsigned char* data);
161void iforce_dump_packet(char *msg, u16 cmd, unsigned char *data) ; 157void iforce_dump_packet(struct iforce *iforce, char *msg, u16 cmd, unsigned char *data);
162int iforce_get_id_packet(struct iforce *iforce, char *packet); 158int iforce_get_id_packet(struct iforce *iforce, char *packet);
163 159
164/* iforce-ff.c */ 160/* iforce-ff.c */
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 17c2c800743c..598788b3da62 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/input/joystick/joydump.c b/drivers/input/joystick/joydump.c
index d1c6e4846a4a..2ea05ade4d4e 100644
--- a/drivers/input/joystick/joydump.c
+++ b/drivers/input/joystick/joydump.c
@@ -21,10 +21,6 @@
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 * Should you need to contact me, the author, you can do so either by
26 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
27 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
28 */ 24 */
29 25
30#include <linux/module.h> 26#include <linux/module.h>
diff --git a/drivers/input/joystick/magellan.c b/drivers/input/joystick/magellan.c
index a9d0e3edca94..95a34ab34fc3 100644
--- a/drivers/input/joystick/magellan.c
+++ b/drivers/input/joystick/magellan.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/drivers/input/joystick/pxrc.c b/drivers/input/joystick/pxrc.c
index 07a0dbd3ced2..ea2bf5951d67 100644
--- a/drivers/input/joystick/pxrc.c
+++ b/drivers/input/joystick/pxrc.c
@@ -3,7 +3,6 @@
3 * Driver for Phoenix RC Flight Controller Adapter 3 * Driver for Phoenix RC Flight Controller Adapter
4 * 4 *
5 * Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com> 5 * Copyright (C) 2018 Marcus Folkesson <marcus.folkesson@gmail.com>
6 *
7 */ 6 */
8 7
9#include <linux/kernel.h> 8#include <linux/kernel.h>
@@ -16,31 +15,22 @@
16#include <linux/mutex.h> 15#include <linux/mutex.h>
17#include <linux/input.h> 16#include <linux/input.h>
18 17
19#define PXRC_VENDOR_ID (0x1781) 18#define PXRC_VENDOR_ID 0x1781
20#define PXRC_PRODUCT_ID (0x0898) 19#define PXRC_PRODUCT_ID 0x0898
21
22static const struct usb_device_id pxrc_table[] = {
23 { USB_DEVICE(PXRC_VENDOR_ID, PXRC_PRODUCT_ID) },
24 { }
25};
26MODULE_DEVICE_TABLE(usb, pxrc_table);
27 20
28struct pxrc { 21struct pxrc {
29 struct input_dev *input; 22 struct input_dev *input;
30 struct usb_device *udev;
31 struct usb_interface *intf; 23 struct usb_interface *intf;
32 struct urb *urb; 24 struct urb *urb;
33 struct mutex pm_mutex; 25 struct mutex pm_mutex;
34 bool is_open; 26 bool is_open;
35 __u8 epaddr;
36 char phys[64]; 27 char phys[64];
37 unsigned char *data;
38 size_t bsize;
39}; 28};
40 29
41static void pxrc_usb_irq(struct urb *urb) 30static void pxrc_usb_irq(struct urb *urb)
42{ 31{
43 struct pxrc *pxrc = urb->context; 32 struct pxrc *pxrc = urb->context;
33 u8 *data = urb->transfer_buffer;
44 int error; 34 int error;
45 35
46 switch (urb->status) { 36 switch (urb->status) {
@@ -68,15 +58,15 @@ static void pxrc_usb_irq(struct urb *urb)
68 } 58 }
69 59
70 if (urb->actual_length == 8) { 60 if (urb->actual_length == 8) {
71 input_report_abs(pxrc->input, ABS_X, pxrc->data[0]); 61 input_report_abs(pxrc->input, ABS_X, data[0]);
72 input_report_abs(pxrc->input, ABS_Y, pxrc->data[2]); 62 input_report_abs(pxrc->input, ABS_Y, data[2]);
73 input_report_abs(pxrc->input, ABS_RX, pxrc->data[3]); 63 input_report_abs(pxrc->input, ABS_RX, data[3]);
74 input_report_abs(pxrc->input, ABS_RY, pxrc->data[4]); 64 input_report_abs(pxrc->input, ABS_RY, data[4]);
75 input_report_abs(pxrc->input, ABS_RUDDER, pxrc->data[5]); 65 input_report_abs(pxrc->input, ABS_RUDDER, data[5]);
76 input_report_abs(pxrc->input, ABS_THROTTLE, pxrc->data[6]); 66 input_report_abs(pxrc->input, ABS_THROTTLE, data[6]);
77 input_report_abs(pxrc->input, ABS_MISC, pxrc->data[7]); 67 input_report_abs(pxrc->input, ABS_MISC, data[7]);
78 68
79 input_report_key(pxrc->input, BTN_A, pxrc->data[1]); 69 input_report_key(pxrc->input, BTN_A, data[1]);
80 } 70 }
81 71
82exit: 72exit:
@@ -120,61 +110,73 @@ static void pxrc_close(struct input_dev *input)
120 mutex_unlock(&pxrc->pm_mutex); 110 mutex_unlock(&pxrc->pm_mutex);
121} 111}
122 112
123static int pxrc_usb_init(struct pxrc *pxrc) 113static void pxrc_free_urb(void *_pxrc)
124{ 114{
115 struct pxrc *pxrc = _pxrc;
116
117 usb_free_urb(pxrc->urb);
118}
119
120static int pxrc_probe(struct usb_interface *intf,
121 const struct usb_device_id *id)
122{
123 struct usb_device *udev = interface_to_usbdev(intf);
124 struct pxrc *pxrc;
125 struct usb_endpoint_descriptor *epirq; 125 struct usb_endpoint_descriptor *epirq;
126 unsigned int pipe; 126 size_t xfer_size;
127 int retval; 127 void *xfer_buf;
128 int error;
128 129
129 /* Set up the endpoint information */ 130 /*
130 /* This device only has an interrupt endpoint */ 131 * Locate the endpoint information. This device only has an
131 retval = usb_find_common_endpoints(pxrc->intf->cur_altsetting, 132 * interrupt endpoint.
132 NULL, NULL, &epirq, NULL); 133 */
133 if (retval) { 134 error = usb_find_common_endpoints(intf->cur_altsetting,
134 dev_err(&pxrc->intf->dev, 135 NULL, NULL, &epirq, NULL);
135 "Could not find endpoint\n"); 136 if (error) {
136 goto error; 137 dev_err(&intf->dev, "Could not find endpoint\n");
138 return error;
137 } 139 }
138 140
139 pxrc->bsize = usb_endpoint_maxp(epirq); 141 pxrc = devm_kzalloc(&intf->dev, sizeof(*pxrc), GFP_KERNEL);
140 pxrc->epaddr = epirq->bEndpointAddress; 142 if (!pxrc)
141 pxrc->data = devm_kmalloc(&pxrc->intf->dev, pxrc->bsize, GFP_KERNEL); 143 return -ENOMEM;
142 if (!pxrc->data) {
143 retval = -ENOMEM;
144 goto error;
145 }
146 144
147 usb_set_intfdata(pxrc->intf, pxrc); 145 mutex_init(&pxrc->pm_mutex);
148 usb_make_path(pxrc->udev, pxrc->phys, sizeof(pxrc->phys)); 146 pxrc->intf = intf;
149 strlcat(pxrc->phys, "/input0", sizeof(pxrc->phys));
150 147
151 pxrc->urb = usb_alloc_urb(0, GFP_KERNEL); 148 usb_set_intfdata(pxrc->intf, pxrc);
152 if (!pxrc->urb) {
153 retval = -ENOMEM;
154 goto error;
155 }
156 149
157 pipe = usb_rcvintpipe(pxrc->udev, pxrc->epaddr), 150 xfer_size = usb_endpoint_maxp(epirq);
158 usb_fill_int_urb(pxrc->urb, pxrc->udev, pipe, pxrc->data, pxrc->bsize, 151 xfer_buf = devm_kmalloc(&intf->dev, xfer_size, GFP_KERNEL);
159 pxrc_usb_irq, pxrc, 1); 152 if (!xfer_buf)
153 return -ENOMEM;
160 154
161error: 155 pxrc->urb = usb_alloc_urb(0, GFP_KERNEL);
162 return retval; 156 if (!pxrc->urb)
157 return -ENOMEM;
163 158
159 error = devm_add_action_or_reset(&intf->dev, pxrc_free_urb, pxrc);
160 if (error)
161 return error;
164 162
165} 163 usb_fill_int_urb(pxrc->urb, udev,
164 usb_rcvintpipe(udev, epirq->bEndpointAddress),
165 xfer_buf, xfer_size, pxrc_usb_irq, pxrc, 1);
166 166
167static int pxrc_input_init(struct pxrc *pxrc) 167 pxrc->input = devm_input_allocate_device(&intf->dev);
168{ 168 if (!pxrc->input) {
169 pxrc->input = devm_input_allocate_device(&pxrc->intf->dev); 169 dev_err(&intf->dev, "couldn't allocate input device\n");
170 if (pxrc->input == NULL) {
171 dev_err(&pxrc->intf->dev, "couldn't allocate input device\n");
172 return -ENOMEM; 170 return -ENOMEM;
173 } 171 }
174 172
175 pxrc->input->name = "PXRC Flight Controller Adapter"; 173 pxrc->input->name = "PXRC Flight Controller Adapter";
174
175 usb_make_path(udev, pxrc->phys, sizeof(pxrc->phys));
176 strlcat(pxrc->phys, "/input0", sizeof(pxrc->phys));
176 pxrc->input->phys = pxrc->phys; 177 pxrc->input->phys = pxrc->phys;
177 usb_to_input_id(pxrc->udev, &pxrc->input->id); 178
179 usb_to_input_id(udev, &pxrc->input->id);
178 180
179 pxrc->input->open = pxrc_open; 181 pxrc->input->open = pxrc_open;
180 pxrc->input->close = pxrc_close; 182 pxrc->input->close = pxrc_close;
@@ -190,46 +192,16 @@ static int pxrc_input_init(struct pxrc *pxrc)
190 192
191 input_set_drvdata(pxrc->input, pxrc); 193 input_set_drvdata(pxrc->input, pxrc);
192 194
193 return input_register_device(pxrc->input); 195 error = input_register_device(pxrc->input);
194} 196 if (error)
195 197 return error;
196static int pxrc_probe(struct usb_interface *intf,
197 const struct usb_device_id *id)
198{
199 struct pxrc *pxrc;
200 int retval;
201
202 pxrc = devm_kzalloc(&intf->dev, sizeof(*pxrc), GFP_KERNEL);
203 if (!pxrc)
204 return -ENOMEM;
205
206 mutex_init(&pxrc->pm_mutex);
207 pxrc->udev = usb_get_dev(interface_to_usbdev(intf));
208 pxrc->intf = intf;
209
210 retval = pxrc_usb_init(pxrc);
211 if (retval)
212 goto error;
213
214 retval = pxrc_input_init(pxrc);
215 if (retval)
216 goto err_free_urb;
217 198
218 return 0; 199 return 0;
219
220err_free_urb:
221 usb_free_urb(pxrc->urb);
222
223error:
224 return retval;
225} 200}
226 201
227static void pxrc_disconnect(struct usb_interface *intf) 202static void pxrc_disconnect(struct usb_interface *intf)
228{ 203{
229 struct pxrc *pxrc = usb_get_intfdata(intf); 204 /* All driver resources are devm-managed. */
230
231 usb_free_urb(pxrc->urb);
232 usb_set_intfdata(intf, NULL);
233} 205}
234 206
235static int pxrc_suspend(struct usb_interface *intf, pm_message_t message) 207static int pxrc_suspend(struct usb_interface *intf, pm_message_t message)
@@ -284,6 +256,12 @@ static int pxrc_reset_resume(struct usb_interface *intf)
284 return pxrc_resume(intf); 256 return pxrc_resume(intf);
285} 257}
286 258
259static const struct usb_device_id pxrc_table[] = {
260 { USB_DEVICE(PXRC_VENDOR_ID, PXRC_PRODUCT_ID) },
261 { }
262};
263MODULE_DEVICE_TABLE(usb, pxrc_table);
264
287static struct usb_driver pxrc_driver = { 265static struct usb_driver pxrc_driver = {
288 .name = "pxrc", 266 .name = "pxrc",
289 .probe = pxrc_probe, 267 .probe = pxrc_probe,
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 5e602a6852b7..f46bf4d41972 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/delay.h> 25#include <linux/delay.h>
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index bb3faeff8cac..ffb9c1f495b6 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -24,10 +24,6 @@
24 * You should have received a copy of the GNU General Public License 24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software 25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 *
28 * Should you need to contact me, the author, you can do so either by
29 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
30 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
31 */ 27 */
32 28
33#include <linux/kernel.h> 29#include <linux/kernel.h>
diff --git a/drivers/input/joystick/spaceorb.c b/drivers/input/joystick/spaceorb.c
index 05da0ed514e2..20540ee71d7f 100644
--- a/drivers/input/joystick/spaceorb.c
+++ b/drivers/input/joystick/spaceorb.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/input/joystick/stinger.c b/drivers/input/joystick/stinger.c
index cb10e7b097ae..ba8579435d6c 100644
--- a/drivers/input/joystick/stinger.c
+++ b/drivers/input/joystick/stinger.c
@@ -21,10 +21,6 @@
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 * Should you need to contact me, the author, you can do so either by
26 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
27 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
28 */ 24 */
29 25
30#include <linux/kernel.h> 26#include <linux/kernel.h>
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 7e17cde464f0..6f4a01cfe79f 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/delay.h> 28#include <linux/delay.h>
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index e2685753e460..bf2f9925e416 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/input/joystick/warrior.c b/drivers/input/joystick/warrior.c
index ef5391ba4470..b60cab168e2a 100644
--- a/drivers/input/joystick/warrior.c
+++ b/drivers/input/joystick/warrior.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/kernel.h> 25#include <linux/kernel.h>
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 32d94c63dc33..2835fba71c33 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -885,6 +885,7 @@ static int adp5589_probe(struct i2c_client *client,
885 switch (id->driver_data) { 885 switch (id->driver_data) {
886 case ADP5585_02: 886 case ADP5585_02:
887 kpad->support_row5 = true; 887 kpad->support_row5 = true;
888 /* fall through */
888 case ADP5585_01: 889 case ADP5585_01:
889 kpad->is_adp5585 = true; 890 kpad->is_adp5585 = true;
890 kpad->var = &const_adp5585; 891 kpad->var = &const_adp5585;
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index e04a3b4e55d6..420e33c49e58 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/module.h> 28#include <linux/module.h>
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index f1235831283d..6f62da2909ec 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -34,10 +34,6 @@
34 * You should have received a copy of the GNU General Public License 34 * You should have received a copy of the GNU General Public License
35 * along with this program; if not, write to the Free Software 35 * along with this program; if not, write to the Free Software
36 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 36 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
37 *
38 * Should you need to contact me, the author, you can do so either by
39 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
40 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
41 */ 37 */
42 38
43#include <linux/module.h> 39#include <linux/module.h>
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 489ddd37bd4e..81be6f781f0b 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -1,25 +1,15 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * ChromeOS EC keyboard driver 2// ChromeOS EC keyboard driver
3 * 3//
4 * Copyright (C) 2012 Google, Inc 4// Copyright (C) 2012 Google, Inc.
5 * 5//
6 * This software is licensed under the terms of the GNU General Public 6// This driver uses the ChromeOS EC byte-level message-based protocol for
7 * License version 2, as published by the Free Software Foundation, and 7// communicating the keyboard state (which keys are pressed) from a keyboard EC
8 * may be copied, distributed, and modified under those terms. 8// to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
9 * 9// but everything else (including deghosting) is done here. The main
10 * This program is distributed in the hope that it will be useful, 10// motivation for this is to keep the EC firmware as simple as possible, since
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11// it cannot be easily upgraded and EC flash/IRAM space is relatively
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12// expensive.
13 * GNU General Public License for more details.
14 *
15 * This driver uses the Chrome OS EC byte-level message-based protocol for
16 * communicating the keyboard state (which keys are pressed) from a keyboard EC
17 * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing,
18 * but everything else (including deghosting) is done here. The main
19 * motivation for this is to keep the EC firmware as simple as possible, since
20 * it cannot be easily upgraded and EC flash/IRAM space is relatively
21 * expensive.
22 */
23 13
24#include <linux/module.h> 14#include <linux/module.h>
25#include <linux/bitops.h> 15#include <linux/bitops.h>
@@ -170,9 +160,6 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
170 int col, row; 160 int col, row;
171 int new_state; 161 int new_state;
172 int old_state; 162 int old_state;
173 int num_cols;
174
175 num_cols = len;
176 163
177 if (ckdev->ghost_filter && cros_ec_keyb_has_ghosting(ckdev, kb_state)) { 164 if (ckdev->ghost_filter && cros_ec_keyb_has_ghosting(ckdev, kb_state)) {
178 /* 165 /*
@@ -242,19 +229,17 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
242 u32 val; 229 u32 val;
243 unsigned int ev_type; 230 unsigned int ev_type;
244 231
232 /*
233 * If not wake enabled, discard key state changes during
234 * suspend. Switches will be re-checked in
235 * cros_ec_keyb_resume() to be sure nothing is lost.
236 */
237 if (queued_during_suspend && !device_may_wakeup(ckdev->dev))
238 return NOTIFY_OK;
239
245 switch (ckdev->ec->event_data.event_type) { 240 switch (ckdev->ec->event_data.event_type) {
246 case EC_MKBP_EVENT_KEY_MATRIX: 241 case EC_MKBP_EVENT_KEY_MATRIX:
247 if (device_may_wakeup(ckdev->dev)) { 242 pm_wakeup_event(ckdev->dev, 0);
248 pm_wakeup_event(ckdev->dev, 0);
249 } else {
250 /*
251 * If keyboard is not wake enabled, discard key state
252 * changes during suspend. Switches will be re-checked
253 * in cros_ec_keyb_resume() to be sure nothing is lost.
254 */
255 if (queued_during_suspend)
256 return NOTIFY_OK;
257 }
258 243
259 if (ckdev->ec->event_size != ckdev->cols) { 244 if (ckdev->ec->event_size != ckdev->cols) {
260 dev_err(ckdev->dev, 245 dev_err(ckdev->dev,
@@ -268,10 +253,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
268 break; 253 break;
269 254
270 case EC_MKBP_EVENT_SYSRQ: 255 case EC_MKBP_EVENT_SYSRQ:
271 if (device_may_wakeup(ckdev->dev)) 256 pm_wakeup_event(ckdev->dev, 0);
272 pm_wakeup_event(ckdev->dev, 0);
273 else if (queued_during_suspend)
274 return NOTIFY_OK;
275 257
276 val = get_unaligned_le32(&ckdev->ec->event_data.data.sysrq); 258 val = get_unaligned_le32(&ckdev->ec->event_data.data.sysrq);
277 dev_dbg(ckdev->dev, "sysrq code from EC: %#x\n", val); 259 dev_dbg(ckdev->dev, "sysrq code from EC: %#x\n", val);
@@ -280,10 +262,7 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
280 262
281 case EC_MKBP_EVENT_BUTTON: 263 case EC_MKBP_EVENT_BUTTON:
282 case EC_MKBP_EVENT_SWITCH: 264 case EC_MKBP_EVENT_SWITCH:
283 if (device_may_wakeup(ckdev->dev)) 265 pm_wakeup_event(ckdev->dev, 0);
284 pm_wakeup_event(ckdev->dev, 0);
285 else if (queued_during_suspend)
286 return NOTIFY_OK;
287 266
288 if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) { 267 if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) {
289 val = get_unaligned_le32( 268 val = get_unaligned_le32(
@@ -683,6 +662,6 @@ static struct platform_driver cros_ec_keyb_driver = {
683 662
684module_platform_driver(cros_ec_keyb_driver); 663module_platform_driver(cros_ec_keyb_driver);
685 664
686MODULE_LICENSE("GPL"); 665MODULE_LICENSE("GPL v2");
687MODULE_DESCRIPTION("ChromeOS EC keyboard driver"); 666MODULE_DESCRIPTION("ChromeOS EC keyboard driver");
688MODULE_ALIAS("platform:cros-ec-keyb"); 667MODULE_ALIAS("platform:cros-ec-keyb");
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 052e37675086..492a971b95b5 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -196,7 +196,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
196 ssize_t ret; 196 ssize_t ret;
197 int i; 197 int i;
198 198
199 bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL); 199 bits = bitmap_zalloc(n_events, GFP_KERNEL);
200 if (!bits) 200 if (!bits)
201 return -ENOMEM; 201 return -ENOMEM;
202 202
@@ -216,7 +216,7 @@ static ssize_t gpio_keys_attr_show_helper(struct gpio_keys_drvdata *ddata,
216 buf[ret++] = '\n'; 216 buf[ret++] = '\n';
217 buf[ret] = '\0'; 217 buf[ret] = '\0';
218 218
219 kfree(bits); 219 bitmap_free(bits);
220 220
221 return ret; 221 return ret;
222} 222}
@@ -240,7 +240,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
240 ssize_t error; 240 ssize_t error;
241 int i; 241 int i;
242 242
243 bits = kcalloc(BITS_TO_LONGS(n_events), sizeof(*bits), GFP_KERNEL); 243 bits = bitmap_zalloc(n_events, GFP_KERNEL);
244 if (!bits) 244 if (!bits)
245 return -ENOMEM; 245 return -ENOMEM;
246 246
@@ -284,7 +284,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
284 mutex_unlock(&ddata->disable_lock); 284 mutex_unlock(&ddata->disable_lock);
285 285
286out: 286out:
287 kfree(bits); 287 bitmap_free(bits);
288 return error; 288 return error;
289} 289}
290 290
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 25d61d8d4fc4..539cb670de41 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -1,11 +1,7 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Driver for the IMX keypad port. 2//
3 * Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> 3// Driver for the IMX keypad port.
4 * 4// Copyright (C) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9 5
10#include <linux/clk.h> 6#include <linux/clk.h>
11#include <linux/delay.h> 7#include <linux/delay.h>
diff --git a/drivers/input/keyboard/newtonkbd.c b/drivers/input/keyboard/newtonkbd.c
index fb9b8e23ab93..de26e2df0ad5 100644
--- a/drivers/input/keyboard/newtonkbd.c
+++ b/drivers/input/keyboard/newtonkbd.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <j.cormack@doc.ic.ac.uk>, or by paper mail:
26 * Justin Cormack, 68 Dartmouth Park Road, London NW5 1SN, UK.
27 */ 23 */
28 24
29#include <linux/slab.h> 25#include <linux/slab.h>
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
index 53c768b95939..effb63205d3d 100644
--- a/drivers/input/keyboard/snvs_pwrkey.c
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -1,14 +1,7 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * Driver for the IMX SNVS ON/OFF Power Key 2//
3 * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved. 3// Driver for the IMX SNVS ON/OFF Power Key
4 * 4// Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12 5
13#include <linux/device.h> 6#include <linux/device.h>
14#include <linux/err.h> 7#include <linux/err.h>
diff --git a/drivers/input/keyboard/stowaway.c b/drivers/input/keyboard/stowaway.c
index 8b6de9a692dc..15a5e74dbe91 100644
--- a/drivers/input/keyboard/stowaway.c
+++ b/drivers/input/keyboard/stowaway.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <marek.vasut@gmail.com>, or by paper mail:
29 * Marek Vasut, Liskovecka 559, Frydek-Mistek, 738 01 Czech Republic
30 */ 26 */
31 27
32#include <linux/slab.h> 28#include <linux/slab.h>
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
index c95707ea2656..ad5d7f94f95a 100644
--- a/drivers/input/keyboard/sunkbd.c
+++ b/drivers/input/keyboard/sunkbd.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/delay.h> 25#include <linux/delay.h>
diff --git a/drivers/input/keyboard/xtkbd.c b/drivers/input/keyboard/xtkbd.c
index 8f64b9ded8d0..f7598114b962 100644
--- a/drivers/input/keyboard/xtkbd.c
+++ b/drivers/input/keyboard/xtkbd.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/slab.h> 25#include <linux/slab.h>
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 67482b248b2d..a8937ceac66a 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -466,7 +466,7 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
466 remote->in_endpoint = endpoint; 466 remote->in_endpoint = endpoint;
467 remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */ 467 remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */
468 468
469 remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma); 469 remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_KERNEL, &remote->in_dma);
470 if (!remote->in_buffer) { 470 if (!remote->in_buffer) {
471 error = -ENOMEM; 471 error = -ENOMEM;
472 goto fail1; 472 goto fail1;
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 18ad956454f1..48153e0ca19a 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -20,6 +20,7 @@
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/of_device.h>
23#include <linux/platform_device.h> 24#include <linux/platform_device.h>
24#include <linux/reboot.h> 25#include <linux/reboot.h>
25#include <linux/regmap.h> 26#include <linux/regmap.h>
@@ -28,6 +29,7 @@
28 29
29#define PON_RT_STS 0x10 30#define PON_RT_STS 0x10
30#define PON_KPDPWR_N_SET BIT(0) 31#define PON_KPDPWR_N_SET BIT(0)
32#define PON_RESIN_N_SET BIT(1)
31 33
32#define PON_PS_HOLD_RST_CTL 0x5a 34#define PON_PS_HOLD_RST_CTL 0x5a
33#define PON_PS_HOLD_RST_CTL2 0x5b 35#define PON_PS_HOLD_RST_CTL2 0x5b
@@ -38,10 +40,15 @@
38 40
39#define PON_PULL_CTL 0x70 41#define PON_PULL_CTL 0x70
40#define PON_KPDPWR_PULL_UP BIT(1) 42#define PON_KPDPWR_PULL_UP BIT(1)
43#define PON_RESIN_PULL_UP BIT(0)
41 44
42#define PON_DBC_CTL 0x71 45#define PON_DBC_CTL 0x71
43#define PON_DBC_DELAY_MASK 0x7 46#define PON_DBC_DELAY_MASK 0x7
44 47
48struct pm8941_data {
49 unsigned int pull_up_bit;
50 unsigned int status_bit;
51};
45 52
46struct pm8941_pwrkey { 53struct pm8941_pwrkey {
47 struct device *dev; 54 struct device *dev;
@@ -52,6 +59,9 @@ struct pm8941_pwrkey {
52 59
53 unsigned int revision; 60 unsigned int revision;
54 struct notifier_block reboot_notifier; 61 struct notifier_block reboot_notifier;
62
63 u32 code;
64 const struct pm8941_data *data;
55}; 65};
56 66
57static int pm8941_reboot_notify(struct notifier_block *nb, 67static int pm8941_reboot_notify(struct notifier_block *nb,
@@ -124,7 +134,8 @@ static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data)
124 if (error) 134 if (error)
125 return IRQ_HANDLED; 135 return IRQ_HANDLED;
126 136
127 input_report_key(pwrkey->input, KEY_POWER, !!(sts & PON_KPDPWR_N_SET)); 137 input_report_key(pwrkey->input, pwrkey->code,
138 sts & pwrkey->data->status_bit);
128 input_sync(pwrkey->input); 139 input_sync(pwrkey->input);
129 140
130 return IRQ_HANDLED; 141 return IRQ_HANDLED;
@@ -157,6 +168,7 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
157{ 168{
158 struct pm8941_pwrkey *pwrkey; 169 struct pm8941_pwrkey *pwrkey;
159 bool pull_up; 170 bool pull_up;
171 struct device *parent;
160 u32 req_delay; 172 u32 req_delay;
161 int error; 173 int error;
162 174
@@ -175,12 +187,30 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
175 return -ENOMEM; 187 return -ENOMEM;
176 188
177 pwrkey->dev = &pdev->dev; 189 pwrkey->dev = &pdev->dev;
190 pwrkey->data = of_device_get_match_data(&pdev->dev);
178 191
179 pwrkey->regmap = dev_get_regmap(pdev->dev.parent, NULL); 192 parent = pdev->dev.parent;
193 pwrkey->regmap = dev_get_regmap(parent, NULL);
180 if (!pwrkey->regmap) { 194 if (!pwrkey->regmap) {
181 dev_err(&pdev->dev, "failed to locate regmap\n"); 195 /*
182 return -ENODEV; 196 * We failed to get regmap for parent. Let's see if we are
197 * a child of pon node and read regmap and reg from its
198 * parent.
199 */
200 pwrkey->regmap = dev_get_regmap(parent->parent, NULL);
201 if (!pwrkey->regmap) {
202 dev_err(&pdev->dev, "failed to locate regmap\n");
203 return -ENODEV;
204 }
205
206 error = of_property_read_u32(parent->of_node,
207 "reg", &pwrkey->baseaddr);
208 } else {
209 error = of_property_read_u32(pdev->dev.of_node, "reg",
210 &pwrkey->baseaddr);
183 } 211 }
212 if (error)
213 return error;
184 214
185 pwrkey->irq = platform_get_irq(pdev, 0); 215 pwrkey->irq = platform_get_irq(pdev, 0);
186 if (pwrkey->irq < 0) { 216 if (pwrkey->irq < 0) {
@@ -188,11 +218,6 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
188 return pwrkey->irq; 218 return pwrkey->irq;
189 } 219 }
190 220
191 error = of_property_read_u32(pdev->dev.of_node, "reg",
192 &pwrkey->baseaddr);
193 if (error)
194 return error;
195
196 error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2, 221 error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2,
197 &pwrkey->revision); 222 &pwrkey->revision);
198 if (error) { 223 if (error) {
@@ -200,13 +225,21 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
200 return error; 225 return error;
201 } 226 }
202 227
228 error = of_property_read_u32(pdev->dev.of_node, "linux,code",
229 &pwrkey->code);
230 if (error) {
231 dev_dbg(&pdev->dev,
232 "no linux,code assuming power (%d)\n", error);
233 pwrkey->code = KEY_POWER;
234 }
235
203 pwrkey->input = devm_input_allocate_device(&pdev->dev); 236 pwrkey->input = devm_input_allocate_device(&pdev->dev);
204 if (!pwrkey->input) { 237 if (!pwrkey->input) {
205 dev_dbg(&pdev->dev, "unable to allocate input device\n"); 238 dev_dbg(&pdev->dev, "unable to allocate input device\n");
206 return -ENOMEM; 239 return -ENOMEM;
207 } 240 }
208 241
209 input_set_capability(pwrkey->input, EV_KEY, KEY_POWER); 242 input_set_capability(pwrkey->input, EV_KEY, pwrkey->code);
210 243
211 pwrkey->input->name = "pm8941_pwrkey"; 244 pwrkey->input->name = "pm8941_pwrkey";
212 pwrkey->input->phys = "pm8941_pwrkey/input0"; 245 pwrkey->input->phys = "pm8941_pwrkey/input0";
@@ -225,8 +258,8 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
225 258
226 error = regmap_update_bits(pwrkey->regmap, 259 error = regmap_update_bits(pwrkey->regmap,
227 pwrkey->baseaddr + PON_PULL_CTL, 260 pwrkey->baseaddr + PON_PULL_CTL,
228 PON_KPDPWR_PULL_UP, 261 pwrkey->data->pull_up_bit,
229 pull_up ? PON_KPDPWR_PULL_UP : 0); 262 pull_up ? pwrkey->data->pull_up_bit : 0);
230 if (error) { 263 if (error) {
231 dev_err(&pdev->dev, "failed to set pull: %d\n", error); 264 dev_err(&pdev->dev, "failed to set pull: %d\n", error);
232 return error; 265 return error;
@@ -271,8 +304,19 @@ static int pm8941_pwrkey_remove(struct platform_device *pdev)
271 return 0; 304 return 0;
272} 305}
273 306
307static const struct pm8941_data pwrkey_data = {
308 .pull_up_bit = PON_KPDPWR_PULL_UP,
309 .status_bit = PON_KPDPWR_N_SET,
310};
311
312static const struct pm8941_data resin_data = {
313 .pull_up_bit = PON_RESIN_PULL_UP,
314 .status_bit = PON_RESIN_N_SET,
315};
316
274static const struct of_device_id pm8941_pwr_key_id_table[] = { 317static const struct of_device_id pm8941_pwr_key_id_table[] = {
275 { .compatible = "qcom,pm8941-pwrkey" }, 318 { .compatible = "qcom,pm8941-pwrkey", .data = &pwrkey_data },
319 { .compatible = "qcom,pm8941-resin", .data = &resin_data },
276 { } 320 { }
277}; 321};
278MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table); 322MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index 5c8c79623c87..e8de3aaf9f63 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -277,7 +277,7 @@ static int powermate_input_event(struct input_dev *dev, unsigned int type, unsig
277static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm) 277static int powermate_alloc_buffers(struct usb_device *udev, struct powermate_device *pm)
278{ 278{
279 pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX, 279 pm->data = usb_alloc_coherent(udev, POWERMATE_PAYLOAD_SIZE_MAX,
280 GFP_ATOMIC, &pm->data_dma); 280 GFP_KERNEL, &pm->data_dma);
281 if (!pm->data) 281 if (!pm->data)
282 return -1; 282 return -1;
283 283
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index d91f3b1c5375..594f72e39639 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -63,6 +63,9 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *);
63static void xenkbd_handle_motion_event(struct xenkbd_info *info, 63static void xenkbd_handle_motion_event(struct xenkbd_info *info,
64 struct xenkbd_motion *motion) 64 struct xenkbd_motion *motion)
65{ 65{
66 if (unlikely(!info->ptr))
67 return;
68
66 input_report_rel(info->ptr, REL_X, motion->rel_x); 69 input_report_rel(info->ptr, REL_X, motion->rel_x);
67 input_report_rel(info->ptr, REL_Y, motion->rel_y); 70 input_report_rel(info->ptr, REL_Y, motion->rel_y);
68 if (motion->rel_z) 71 if (motion->rel_z)
@@ -73,6 +76,9 @@ static void xenkbd_handle_motion_event(struct xenkbd_info *info,
73static void xenkbd_handle_position_event(struct xenkbd_info *info, 76static void xenkbd_handle_position_event(struct xenkbd_info *info,
74 struct xenkbd_position *pos) 77 struct xenkbd_position *pos)
75{ 78{
79 if (unlikely(!info->ptr))
80 return;
81
76 input_report_abs(info->ptr, ABS_X, pos->abs_x); 82 input_report_abs(info->ptr, ABS_X, pos->abs_x);
77 input_report_abs(info->ptr, ABS_Y, pos->abs_y); 83 input_report_abs(info->ptr, ABS_Y, pos->abs_y);
78 if (pos->rel_z) 84 if (pos->rel_z)
@@ -97,6 +103,9 @@ static void xenkbd_handle_key_event(struct xenkbd_info *info,
97 return; 103 return;
98 } 104 }
99 105
106 if (unlikely(!dev))
107 return;
108
100 input_event(dev, EV_KEY, key->keycode, value); 109 input_event(dev, EV_KEY, key->keycode, value);
101 input_sync(dev); 110 input_sync(dev);
102} 111}
@@ -192,7 +201,7 @@ static int xenkbd_probe(struct xenbus_device *dev,
192 const struct xenbus_device_id *id) 201 const struct xenbus_device_id *id)
193{ 202{
194 int ret, i; 203 int ret, i;
195 unsigned int abs, touch; 204 bool with_mtouch, with_kbd, with_ptr;
196 struct xenkbd_info *info; 205 struct xenkbd_info *info;
197 struct input_dev *kbd, *ptr, *mtouch; 206 struct input_dev *kbd, *ptr, *mtouch;
198 207
@@ -211,106 +220,127 @@ static int xenkbd_probe(struct xenbus_device *dev,
211 if (!info->page) 220 if (!info->page)
212 goto error_nomem; 221 goto error_nomem;
213 222
214 /* Set input abs params to match backend screen res */ 223 /*
215 abs = xenbus_read_unsigned(dev->otherend, 224 * The below are reverse logic, e.g. if the feature is set, then
216 XENKBD_FIELD_FEAT_ABS_POINTER, 0); 225 * do not expose the corresponding virtual device.
217 ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend, 226 */
218 XENKBD_FIELD_WIDTH, 227 with_kbd = !xenbus_read_unsigned(dev->otherend,
219 ptr_size[KPARAM_X]); 228 XENKBD_FIELD_FEAT_DSBL_KEYBRD, 0);
220 ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend, 229
221 XENKBD_FIELD_HEIGHT, 230 with_ptr = !xenbus_read_unsigned(dev->otherend,
222 ptr_size[KPARAM_Y]); 231 XENKBD_FIELD_FEAT_DSBL_POINTER, 0);
223 if (abs) {
224 ret = xenbus_write(XBT_NIL, dev->nodename,
225 XENKBD_FIELD_REQ_ABS_POINTER, "1");
226 if (ret) {
227 pr_warn("xenkbd: can't request abs-pointer\n");
228 abs = 0;
229 }
230 }
231 232
232 touch = xenbus_read_unsigned(dev->nodename, 233 /* Direct logic: if set, then create multi-touch device. */
233 XENKBD_FIELD_FEAT_MTOUCH, 0); 234 with_mtouch = xenbus_read_unsigned(dev->otherend,
234 if (touch) { 235 XENKBD_FIELD_FEAT_MTOUCH, 0);
236 if (with_mtouch) {
235 ret = xenbus_write(XBT_NIL, dev->nodename, 237 ret = xenbus_write(XBT_NIL, dev->nodename,
236 XENKBD_FIELD_REQ_MTOUCH, "1"); 238 XENKBD_FIELD_REQ_MTOUCH, "1");
237 if (ret) { 239 if (ret) {
238 pr_warn("xenkbd: can't request multi-touch"); 240 pr_warn("xenkbd: can't request multi-touch");
239 touch = 0; 241 with_mtouch = 0;
240 } 242 }
241 } 243 }
242 244
243 /* keyboard */ 245 /* keyboard */
244 kbd = input_allocate_device(); 246 if (with_kbd) {
245 if (!kbd) 247 kbd = input_allocate_device();
246 goto error_nomem; 248 if (!kbd)
247 kbd->name = "Xen Virtual Keyboard"; 249 goto error_nomem;
248 kbd->phys = info->phys; 250 kbd->name = "Xen Virtual Keyboard";
249 kbd->id.bustype = BUS_PCI; 251 kbd->phys = info->phys;
250 kbd->id.vendor = 0x5853; 252 kbd->id.bustype = BUS_PCI;
251 kbd->id.product = 0xffff; 253 kbd->id.vendor = 0x5853;
252 254 kbd->id.product = 0xffff;
253 __set_bit(EV_KEY, kbd->evbit); 255
254 for (i = KEY_ESC; i < KEY_UNKNOWN; i++) 256 __set_bit(EV_KEY, kbd->evbit);
255 __set_bit(i, kbd->keybit); 257 for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
256 for (i = KEY_OK; i < KEY_MAX; i++) 258 __set_bit(i, kbd->keybit);
257 __set_bit(i, kbd->keybit); 259 for (i = KEY_OK; i < KEY_MAX; i++)
258 260 __set_bit(i, kbd->keybit);
259 ret = input_register_device(kbd); 261
260 if (ret) { 262 ret = input_register_device(kbd);
261 input_free_device(kbd); 263 if (ret) {
262 xenbus_dev_fatal(dev, ret, "input_register_device(kbd)"); 264 input_free_device(kbd);
263 goto error; 265 xenbus_dev_fatal(dev, ret,
266 "input_register_device(kbd)");
267 goto error;
268 }
269 info->kbd = kbd;
264 } 270 }
265 info->kbd = kbd;
266 271
267 /* pointing device */ 272 /* pointing device */
268 ptr = input_allocate_device(); 273 if (with_ptr) {
269 if (!ptr) 274 unsigned int abs;
270 goto error_nomem; 275
271 ptr->name = "Xen Virtual Pointer"; 276 /* Set input abs params to match backend screen res */
272 ptr->phys = info->phys; 277 abs = xenbus_read_unsigned(dev->otherend,
273 ptr->id.bustype = BUS_PCI; 278 XENKBD_FIELD_FEAT_ABS_POINTER, 0);
274 ptr->id.vendor = 0x5853; 279 ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend,
275 ptr->id.product = 0xfffe; 280 XENKBD_FIELD_WIDTH,
276 281 ptr_size[KPARAM_X]);
277 if (abs) { 282 ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend,
278 __set_bit(EV_ABS, ptr->evbit); 283 XENKBD_FIELD_HEIGHT,
279 input_set_abs_params(ptr, ABS_X, 0, ptr_size[KPARAM_X], 0, 0); 284 ptr_size[KPARAM_Y]);
280 input_set_abs_params(ptr, ABS_Y, 0, ptr_size[KPARAM_Y], 0, 0); 285 if (abs) {
281 } else { 286 ret = xenbus_write(XBT_NIL, dev->nodename,
282 input_set_capability(ptr, EV_REL, REL_X); 287 XENKBD_FIELD_REQ_ABS_POINTER, "1");
283 input_set_capability(ptr, EV_REL, REL_Y); 288 if (ret) {
284 } 289 pr_warn("xenkbd: can't request abs-pointer\n");
285 input_set_capability(ptr, EV_REL, REL_WHEEL); 290 abs = 0;
291 }
292 }
286 293
287 __set_bit(EV_KEY, ptr->evbit); 294 ptr = input_allocate_device();
288 for (i = BTN_LEFT; i <= BTN_TASK; i++) 295 if (!ptr)
289 __set_bit(i, ptr->keybit); 296 goto error_nomem;
297 ptr->name = "Xen Virtual Pointer";
298 ptr->phys = info->phys;
299 ptr->id.bustype = BUS_PCI;
300 ptr->id.vendor = 0x5853;
301 ptr->id.product = 0xfffe;
302
303 if (abs) {
304 __set_bit(EV_ABS, ptr->evbit);
305 input_set_abs_params(ptr, ABS_X, 0,
306 ptr_size[KPARAM_X], 0, 0);
307 input_set_abs_params(ptr, ABS_Y, 0,
308 ptr_size[KPARAM_Y], 0, 0);
309 } else {
310 input_set_capability(ptr, EV_REL, REL_X);
311 input_set_capability(ptr, EV_REL, REL_Y);
312 }
313 input_set_capability(ptr, EV_REL, REL_WHEEL);
290 314
291 ret = input_register_device(ptr); 315 __set_bit(EV_KEY, ptr->evbit);
292 if (ret) { 316 for (i = BTN_LEFT; i <= BTN_TASK; i++)
293 input_free_device(ptr); 317 __set_bit(i, ptr->keybit);
294 xenbus_dev_fatal(dev, ret, "input_register_device(ptr)"); 318
295 goto error; 319 ret = input_register_device(ptr);
320 if (ret) {
321 input_free_device(ptr);
322 xenbus_dev_fatal(dev, ret,
323 "input_register_device(ptr)");
324 goto error;
325 }
326 info->ptr = ptr;
296 } 327 }
297 info->ptr = ptr;
298 328
299 /* multi-touch device */ 329 /* multi-touch device */
300 if (touch) { 330 if (with_mtouch) {
301 int num_cont, width, height; 331 int num_cont, width, height;
302 332
303 mtouch = input_allocate_device(); 333 mtouch = input_allocate_device();
304 if (!mtouch) 334 if (!mtouch)
305 goto error_nomem; 335 goto error_nomem;
306 336
307 num_cont = xenbus_read_unsigned(info->xbdev->nodename, 337 num_cont = xenbus_read_unsigned(info->xbdev->otherend,
308 XENKBD_FIELD_MT_NUM_CONTACTS, 338 XENKBD_FIELD_MT_NUM_CONTACTS,
309 1); 339 1);
310 width = xenbus_read_unsigned(info->xbdev->nodename, 340 width = xenbus_read_unsigned(info->xbdev->otherend,
311 XENKBD_FIELD_MT_WIDTH, 341 XENKBD_FIELD_MT_WIDTH,
312 XENFB_WIDTH); 342 XENFB_WIDTH);
313 height = xenbus_read_unsigned(info->xbdev->nodename, 343 height = xenbus_read_unsigned(info->xbdev->otherend,
314 XENKBD_FIELD_MT_HEIGHT, 344 XENKBD_FIELD_MT_HEIGHT,
315 XENFB_HEIGHT); 345 XENFB_HEIGHT);
316 346
@@ -346,6 +376,11 @@ static int xenkbd_probe(struct xenbus_device *dev,
346 info->mtouch = mtouch; 376 info->mtouch = mtouch;
347 } 377 }
348 378
379 if (!(with_kbd || with_ptr || with_mtouch)) {
380 ret = -ENXIO;
381 goto error;
382 }
383
349 ret = xenkbd_connect_backend(dev, info); 384 ret = xenkbd_connect_backend(dev, info);
350 if (ret < 0) 385 if (ret < 0)
351 goto error; 386 goto error;
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index f0c9bf87b4e3..1365cd94ed9b 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -894,12 +894,12 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
894 894
895 /* allocate usb buffers */ 895 /* allocate usb buffers */
896 yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN, 896 yld->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN,
897 GFP_ATOMIC, &yld->irq_dma); 897 GFP_KERNEL, &yld->irq_dma);
898 if (yld->irq_data == NULL) 898 if (yld->irq_data == NULL)
899 return usb_cleanup(yld, -ENOMEM); 899 return usb_cleanup(yld, -ENOMEM);
900 900
901 yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN, 901 yld->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN,
902 GFP_ATOMIC, &yld->ctl_dma); 902 GFP_KERNEL, &yld->ctl_dma);
903 if (!yld->ctl_data) 903 if (!yld->ctl_data)
904 return usb_cleanup(yld, -ENOMEM); 904 return usb_cleanup(yld, -ENOMEM);
905 905
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index 032d27983b6c..f1e66e257cff 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -472,6 +472,7 @@ static int atp_status_check(struct urb *urb)
472 dev->info->datalen, dev->urb->actual_length); 472 dev->info->datalen, dev->urb->actual_length);
473 dev->overflow_warned = true; 473 dev->overflow_warned = true;
474 } 474 }
475 /* fall through */
475 case -ECONNRESET: 476 case -ECONNRESET:
476 case -ENOENT: 477 case -ENOENT:
477 case -ESHUTDOWN: 478 case -ESHUTDOWN:
@@ -810,7 +811,7 @@ static int atp_open(struct input_dev *input)
810{ 811{
811 struct atp *dev = input_get_drvdata(input); 812 struct atp *dev = input_get_drvdata(input);
812 813
813 if (usb_submit_urb(dev->urb, GFP_ATOMIC)) 814 if (usb_submit_urb(dev->urb, GFP_KERNEL))
814 return -EIO; 815 return -EIO;
815 816
816 dev->open = true; 817 dev->open = true;
@@ -976,7 +977,7 @@ static int atp_recover(struct atp *dev)
976 if (error) 977 if (error)
977 return error; 978 return error;
978 979
979 if (dev->open && usb_submit_urb(dev->urb, GFP_ATOMIC)) 980 if (dev->open && usb_submit_urb(dev->urb, GFP_KERNEL))
980 return -EIO; 981 return -EIO;
981 982
982 return 0; 983 return 0;
@@ -994,7 +995,7 @@ static int atp_resume(struct usb_interface *iface)
994{ 995{
995 struct atp *dev = usb_get_intfdata(iface); 996 struct atp *dev = usb_get_intfdata(iface);
996 997
997 if (dev->open && usb_submit_urb(dev->urb, GFP_ATOMIC)) 998 if (dev->open && usb_submit_urb(dev->urb, GFP_KERNEL))
998 return -EIO; 999 return -EIO;
999 1000
1000 return 0; 1001 return 0;
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 5775d40b3d53..14239fbd72cf 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -2554,6 +2554,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa)
2554 } 2554 }
2555 2555
2556 cyapa->state = CYAPA_STATE_GEN5_APP; 2556 cyapa->state = CYAPA_STATE_GEN5_APP;
2557 /* fall through */
2557 2558
2558 case CYAPA_STATE_GEN5_APP: 2559 case CYAPA_STATE_GEN5_APP:
2559 /* 2560 /*
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 016397850b1b..c1b524ab4623 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -680,6 +680,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa)
680 } 680 }
681 681
682 cyapa->state = CYAPA_STATE_GEN6_APP; 682 cyapa->state = CYAPA_STATE_GEN6_APP;
683 /* fall through */
683 684
684 case CYAPA_STATE_GEN6_APP: 685 case CYAPA_STATE_GEN6_APP:
685 /* 686 /*
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index dd85b16dc6f8..44f57cf6675b 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -340,7 +340,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse)
340 */ 340 */
341 if (packet[3] & 0x80) 341 if (packet[3] & 0x80)
342 fingers = 4; 342 fingers = 4;
343 /* pass through... */ 343 /* fall through */
344 case 1: 344 case 1:
345 /* 345 /*
346 * byte 1: . . . . x11 x10 x9 x8 346 * byte 1: . . . . x11 x10 x9 x8
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index 9ce71dfa0de1..b9e68606c44a 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -26,10 +26,6 @@
26 * You should have received a copy of the GNU General Public License 26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software 27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 *
30 * Should you need to contact me, the author, you can do so either by
31 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
32 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
33 */ 29 */
34 30
35#include <linux/module.h> 31#include <linux/module.h>
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index 6f165e053f4d..2fd6c84cd5b7 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -27,10 +27,6 @@
27 * You should have received a copy of the GNU General Public License 27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software 28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 *
31 * Should you need to contact me, the author, you can do so either by
32 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
33 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
34 */ 30 */
35 31
36#include <linux/module.h> 32#include <linux/module.h>
diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c
index 7b02b652e267..b8965e6bc890 100644
--- a/drivers/input/mouse/pc110pad.c
+++ b/drivers/input/mouse/pc110pad.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/module.h> 28#include <linux/module.h>
diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c
index 8df526620ebf..3e8fb8136452 100644
--- a/drivers/input/mouse/sermouse.c
+++ b/drivers/input/mouse/sermouse.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/delay.h> 25#include <linux/delay.h>
@@ -143,7 +139,8 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
143 switch (sermouse->type) { 139 switch (sermouse->type) {
144 140
145 case SERIO_MS: 141 case SERIO_MS:
146 sermouse->type = SERIO_MP; 142 sermouse->type = SERIO_MP;
143 /* fall through */
147 144
148 case SERIO_MP: 145 case SERIO_MP:
149 if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */ 146 if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */
@@ -154,6 +151,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data)
154 case SERIO_MZP: 151 case SERIO_MZP:
155 case SERIO_MZPP: 152 case SERIO_MZPP:
156 input_report_key(dev, BTN_SIDE, (data >> 5) & 1); 153 input_report_key(dev, BTN_SIDE, (data >> 5) & 1);
154 /* fall through */
157 155
158 case SERIO_MZ: 156 case SERIO_MZ:
159 input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1); 157 input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1);
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 9c54c43c9749..2d1e2993b5a8 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/delay.h> 25#include <linux/delay.h>
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 824f4c1c1f31..b8bc71569349 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -573,6 +573,9 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
573 port = &i8042_ports[port_no]; 573 port = &i8042_ports[port_no];
574 serio = port->exists ? port->serio : NULL; 574 serio = port->exists ? port->serio : NULL;
575 575
576 if (irq && serio)
577 pm_wakeup_event(&serio->dev, 0);
578
576 filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n", 579 filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
577 port_no, irq, 580 port_no, irq,
578 dfl & SERIO_PARITY ? ", bad parity" : "", 581 dfl & SERIO_PARITY ? ", bad parity" : "",
diff --git a/drivers/input/serio/q40kbd.c b/drivers/input/serio/q40kbd.c
index d0fccc8ec259..fbb6b33845fa 100644
--- a/drivers/input/serio/q40kbd.c
+++ b/drivers/input/serio/q40kbd.c
@@ -23,10 +23,6 @@
23 * You should have received a copy of the GNU General Public License 23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software 24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 *
27 * Should you need to contact me, the author, you can do so either by
28 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
29 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
30 */ 26 */
31 27
32#include <linux/module.h> 28#include <linux/module.h>
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 8cf964736902..a308d7811427 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -21,10 +21,6 @@
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 *
25 * Should you need to contact me, the author, you can do so either by
26 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
27 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
28 */ 24 */
29 25
30#include <linux/module.h> 26#include <linux/module.h>
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 24a90c8db5b3..2e1fb0649260 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 545fa6e89035..c82cd5079d0e 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1712,7 +1712,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
1712 } 1712 }
1713 1713
1714 aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH, 1714 aiptek->data = usb_alloc_coherent(usbdev, AIPTEK_PACKET_LENGTH,
1715 GFP_ATOMIC, &aiptek->data_dma); 1715 GFP_KERNEL, &aiptek->data_dma);
1716 if (!aiptek->data) { 1716 if (!aiptek->data) {
1717 dev_warn(&intf->dev, "cannot allocate usb buffer\n"); 1717 dev_warn(&intf->dev, "cannot allocate usb buffer\n");
1718 goto fail1; 1718 goto fail1;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 32267c1afebc..5374bd573e66 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -151,6 +151,18 @@ config TOUCHSCREEN_BU21013
151 To compile this driver as a module, choose M here: the 151 To compile this driver as a module, choose M here: the
152 module will be called bu21013_ts. 152 module will be called bu21013_ts.
153 153
154config TOUCHSCREEN_BU21029
155 tristate "Rohm BU21029 based touch panel controllers"
156 depends on I2C
157 help
158 Say Y here if you have a Rohm BU21029 touchscreen controller
159 connected to your system.
160
161 If unsure, say N.
162
163 To compile this driver as a module, choose M here: the
164 module will be called bu21029_ts.
165
154config TOUCHSCREEN_CHIPONE_ICN8318 166config TOUCHSCREEN_CHIPONE_ICN8318
155 tristate "chipone icn8318 touchscreen controller" 167 tristate "chipone icn8318 touchscreen controller"
156 depends on GPIOLIB || COMPILE_TEST 168 depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fd4fd32fb73f..c2175163152d 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_TOUCHSCREEN_AR1021_I2C) += ar1021_i2c.o
18obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o 18obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
19obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o 19obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
20obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o 20obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
21obj-$(CONFIG_TOUCHSCREEN_BU21029) += bu21029_ts.o
21obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o 22obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8318) += chipone_icn8318.o
22obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o 23obj-$(CONFIG_TOUCHSCREEN_CHIPONE_ICN8505) += chipone_icn8505.o
23obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o 24obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 54fe190fd4bc..3232af5dcf89 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -75,6 +75,7 @@
75#define MXT_SPT_DIGITIZER_T43 43 75#define MXT_SPT_DIGITIZER_T43 43
76#define MXT_SPT_MESSAGECOUNT_T44 44 76#define MXT_SPT_MESSAGECOUNT_T44 44
77#define MXT_SPT_CTECONFIG_T46 46 77#define MXT_SPT_CTECONFIG_T46 46
78#define MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71 71
78#define MXT_TOUCH_MULTITOUCHSCREEN_T100 100 79#define MXT_TOUCH_MULTITOUCHSCREEN_T100 100
79 80
80/* MXT_GEN_MESSAGE_T5 object */ 81/* MXT_GEN_MESSAGE_T5 object */
@@ -88,12 +89,12 @@
88#define MXT_COMMAND_DIAGNOSTIC 5 89#define MXT_COMMAND_DIAGNOSTIC 5
89 90
90/* Define for T6 status byte */ 91/* Define for T6 status byte */
91#define MXT_T6_STATUS_RESET (1 << 7) 92#define MXT_T6_STATUS_RESET BIT(7)
92#define MXT_T6_STATUS_OFL (1 << 6) 93#define MXT_T6_STATUS_OFL BIT(6)
93#define MXT_T6_STATUS_SIGERR (1 << 5) 94#define MXT_T6_STATUS_SIGERR BIT(5)
94#define MXT_T6_STATUS_CAL (1 << 4) 95#define MXT_T6_STATUS_CAL BIT(4)
95#define MXT_T6_STATUS_CFGERR (1 << 3) 96#define MXT_T6_STATUS_CFGERR BIT(3)
96#define MXT_T6_STATUS_COMSERR (1 << 2) 97#define MXT_T6_STATUS_COMSERR BIT(2)
97 98
98/* MXT_GEN_POWER_T7 field */ 99/* MXT_GEN_POWER_T7 field */
99struct t7_config { 100struct t7_config {
@@ -112,14 +113,14 @@ struct t7_config {
112#define MXT_T9_RANGE 18 113#define MXT_T9_RANGE 18
113 114
114/* MXT_TOUCH_MULTI_T9 status */ 115/* MXT_TOUCH_MULTI_T9 status */
115#define MXT_T9_UNGRIP (1 << 0) 116#define MXT_T9_UNGRIP BIT(0)
116#define MXT_T9_SUPPRESS (1 << 1) 117#define MXT_T9_SUPPRESS BIT(1)
117#define MXT_T9_AMP (1 << 2) 118#define MXT_T9_AMP BIT(2)
118#define MXT_T9_VECTOR (1 << 3) 119#define MXT_T9_VECTOR BIT(3)
119#define MXT_T9_MOVE (1 << 4) 120#define MXT_T9_MOVE BIT(4)
120#define MXT_T9_RELEASE (1 << 5) 121#define MXT_T9_RELEASE BIT(5)
121#define MXT_T9_PRESS (1 << 6) 122#define MXT_T9_PRESS BIT(6)
122#define MXT_T9_DETECT (1 << 7) 123#define MXT_T9_DETECT BIT(7)
123 124
124struct t9_range { 125struct t9_range {
125 __le16 x; 126 __le16 x;
@@ -127,9 +128,9 @@ struct t9_range {
127} __packed; 128} __packed;
128 129
129/* MXT_TOUCH_MULTI_T9 orient */ 130/* MXT_TOUCH_MULTI_T9 orient */
130#define MXT_T9_ORIENT_SWITCH (1 << 0) 131#define MXT_T9_ORIENT_SWITCH BIT(0)
131#define MXT_T9_ORIENT_INVERTX (1 << 1) 132#define MXT_T9_ORIENT_INVERTX BIT(1)
132#define MXT_T9_ORIENT_INVERTY (1 << 2) 133#define MXT_T9_ORIENT_INVERTY BIT(2)
133 134
134/* MXT_SPT_COMMSCONFIG_T18 */ 135/* MXT_SPT_COMMSCONFIG_T18 */
135#define MXT_COMMS_CTRL 0 136#define MXT_COMMS_CTRL 0
@@ -214,7 +215,7 @@ enum t100_type {
214#define MXT_FRAME_CRC_PASS 0x04 215#define MXT_FRAME_CRC_PASS 0x04
215#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */ 216#define MXT_APP_CRC_FAIL 0x40 /* valid 7 8 bit only */
216#define MXT_BOOT_STATUS_MASK 0x3f 217#define MXT_BOOT_STATUS_MASK 0x3f
217#define MXT_BOOT_EXTENDED_ID (1 << 5) 218#define MXT_BOOT_EXTENDED_ID BIT(5)
218#define MXT_BOOT_ID_MASK 0x1f 219#define MXT_BOOT_ID_MASK 0x1f
219 220
220/* Touchscreen absolute values */ 221/* Touchscreen absolute values */
@@ -276,6 +277,19 @@ enum mxt_suspend_mode {
276 MXT_SUSPEND_T9_CTRL = 1, 277 MXT_SUSPEND_T9_CTRL = 1,
277}; 278};
278 279
280/* Config update context */
281struct mxt_cfg {
282 u8 *raw;
283 size_t raw_size;
284 off_t raw_pos;
285
286 u8 *mem;
287 size_t mem_size;
288 int start_ofs;
289
290 struct mxt_info info;
291};
292
279/* Each client has this additional data */ 293/* Each client has this additional data */
280struct mxt_data { 294struct mxt_data {
281 struct i2c_client *client; 295 struct i2c_client *client;
@@ -317,6 +331,7 @@ struct mxt_data {
317 u8 T6_reportid; 331 u8 T6_reportid;
318 u16 T6_address; 332 u16 T6_address;
319 u16 T7_address; 333 u16 T7_address;
334 u16 T71_address;
320 u8 T9_reportid_min; 335 u8 T9_reportid_min;
321 u8 T9_reportid_max; 336 u8 T9_reportid_max;
322 u8 T19_reportid; 337 u8 T19_reportid;
@@ -382,6 +397,7 @@ static bool mxt_object_readable(unsigned int type)
382 case MXT_SPT_USERDATA_T38: 397 case MXT_SPT_USERDATA_T38:
383 case MXT_SPT_DIGITIZER_T43: 398 case MXT_SPT_DIGITIZER_T43:
384 case MXT_SPT_CTECONFIG_T46: 399 case MXT_SPT_CTECONFIG_T46:
400 case MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71:
385 return true; 401 return true;
386 default: 402 default:
387 return false; 403 return false;
@@ -712,13 +728,13 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
712 u8 status = msg[1]; 728 u8 status = msg[1];
713 u32 crc = msg[2] | (msg[3] << 8) | (msg[4] << 16); 729 u32 crc = msg[2] | (msg[3] << 8) | (msg[4] << 16);
714 730
715 complete(&data->crc_completion);
716
717 if (crc != data->config_crc) { 731 if (crc != data->config_crc) {
718 data->config_crc = crc; 732 data->config_crc = crc;
719 dev_dbg(dev, "T6 Config Checksum: 0x%06X\n", crc); 733 dev_dbg(dev, "T6 Config Checksum: 0x%06X\n", crc);
720 } 734 }
721 735
736 complete(&data->crc_completion);
737
722 /* Detect reset */ 738 /* Detect reset */
723 if (status & MXT_T6_STATUS_RESET) 739 if (status & MXT_T6_STATUS_RESET)
724 complete(&data->reset_completion); 740 complete(&data->reset_completion);
@@ -827,6 +843,10 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
827 mxt_input_sync(data); 843 mxt_input_sync(data);
828 } 844 }
829 845
846 /* if active, pressure must be non-zero */
847 if (!amplitude)
848 amplitude = MXT_PRESSURE_DEFAULT;
849
830 /* Touch active */ 850 /* Touch active */
831 input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 1); 851 input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 1);
832 input_report_abs(input_dev, ABS_MT_POSITION_X, x); 852 input_report_abs(input_dev, ABS_MT_POSITION_X, x);
@@ -1279,12 +1299,7 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
1279 return crc; 1299 return crc;
1280} 1300}
1281 1301
1282static int mxt_prepare_cfg_mem(struct mxt_data *data, 1302static int mxt_prepare_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
1283 const struct firmware *cfg,
1284 unsigned int data_pos,
1285 unsigned int cfg_start_ofs,
1286 u8 *config_mem,
1287 size_t config_mem_size)
1288{ 1303{
1289 struct device *dev = &data->client->dev; 1304 struct device *dev = &data->client->dev;
1290 struct mxt_object *object; 1305 struct mxt_object *object;
@@ -1295,9 +1310,9 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
1295 u16 reg; 1310 u16 reg;
1296 u8 val; 1311 u8 val;
1297 1312
1298 while (data_pos < cfg->size) { 1313 while (cfg->raw_pos < cfg->raw_size) {
1299 /* Read type, instance, length */ 1314 /* Read type, instance, length */
1300 ret = sscanf(cfg->data + data_pos, "%x %x %x%n", 1315 ret = sscanf(cfg->raw + cfg->raw_pos, "%x %x %x%n",
1301 &type, &instance, &size, &offset); 1316 &type, &instance, &size, &offset);
1302 if (ret == 0) { 1317 if (ret == 0) {
1303 /* EOF */ 1318 /* EOF */
@@ -1306,20 +1321,20 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
1306 dev_err(dev, "Bad format: failed to parse object\n"); 1321 dev_err(dev, "Bad format: failed to parse object\n");
1307 return -EINVAL; 1322 return -EINVAL;
1308 } 1323 }
1309 data_pos += offset; 1324 cfg->raw_pos += offset;
1310 1325
1311 object = mxt_get_object(data, type); 1326 object = mxt_get_object(data, type);
1312 if (!object) { 1327 if (!object) {
1313 /* Skip object */ 1328 /* Skip object */
1314 for (i = 0; i < size; i++) { 1329 for (i = 0; i < size; i++) {
1315 ret = sscanf(cfg->data + data_pos, "%hhx%n", 1330 ret = sscanf(cfg->raw + cfg->raw_pos, "%hhx%n",
1316 &val, &offset); 1331 &val, &offset);
1317 if (ret != 1) { 1332 if (ret != 1) {
1318 dev_err(dev, "Bad format in T%d at %d\n", 1333 dev_err(dev, "Bad format in T%d at %d\n",
1319 type, i); 1334 type, i);
1320 return -EINVAL; 1335 return -EINVAL;
1321 } 1336 }
1322 data_pos += offset; 1337 cfg->raw_pos += offset;
1323 } 1338 }
1324 continue; 1339 continue;
1325 } 1340 }
@@ -1354,7 +1369,7 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
1354 reg = object->start_address + mxt_obj_size(object) * instance; 1369 reg = object->start_address + mxt_obj_size(object) * instance;
1355 1370
1356 for (i = 0; i < size; i++) { 1371 for (i = 0; i < size; i++) {
1357 ret = sscanf(cfg->data + data_pos, "%hhx%n", 1372 ret = sscanf(cfg->raw + cfg->raw_pos, "%hhx%n",
1358 &val, 1373 &val,
1359 &offset); 1374 &offset);
1360 if (ret != 1) { 1375 if (ret != 1) {
@@ -1362,15 +1377,15 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
1362 type, i); 1377 type, i);
1363 return -EINVAL; 1378 return -EINVAL;
1364 } 1379 }
1365 data_pos += offset; 1380 cfg->raw_pos += offset;
1366 1381
1367 if (i > mxt_obj_size(object)) 1382 if (i > mxt_obj_size(object))
1368 continue; 1383 continue;
1369 1384
1370 byte_offset = reg + i - cfg_start_ofs; 1385 byte_offset = reg + i - cfg->start_ofs;
1371 1386
1372 if (byte_offset >= 0 && byte_offset < config_mem_size) { 1387 if (byte_offset >= 0 && byte_offset < cfg->mem_size) {
1373 *(config_mem + byte_offset) = val; 1388 *(cfg->mem + byte_offset) = val;
1374 } else { 1389 } else {
1375 dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n", 1390 dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n",
1376 reg, object->type, byte_offset); 1391 reg, object->type, byte_offset);
@@ -1382,22 +1397,21 @@ static int mxt_prepare_cfg_mem(struct mxt_data *data,
1382 return 0; 1397 return 0;
1383} 1398}
1384 1399
1385static int mxt_upload_cfg_mem(struct mxt_data *data, unsigned int cfg_start, 1400static int mxt_upload_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg)
1386 u8 *config_mem, size_t config_mem_size)
1387{ 1401{
1388 unsigned int byte_offset = 0; 1402 unsigned int byte_offset = 0;
1389 int error; 1403 int error;
1390 1404
1391 /* Write configuration as blocks */ 1405 /* Write configuration as blocks */
1392 while (byte_offset < config_mem_size) { 1406 while (byte_offset < cfg->mem_size) {
1393 unsigned int size = config_mem_size - byte_offset; 1407 unsigned int size = cfg->mem_size - byte_offset;
1394 1408
1395 if (size > MXT_MAX_BLOCK_WRITE) 1409 if (size > MXT_MAX_BLOCK_WRITE)
1396 size = MXT_MAX_BLOCK_WRITE; 1410 size = MXT_MAX_BLOCK_WRITE;
1397 1411
1398 error = __mxt_write_reg(data->client, 1412 error = __mxt_write_reg(data->client,
1399 cfg_start + byte_offset, 1413 cfg->start_ofs + byte_offset,
1400 size, config_mem + byte_offset); 1414 size, cfg->mem + byte_offset);
1401 if (error) { 1415 if (error) {
1402 dev_err(&data->client->dev, 1416 dev_err(&data->client->dev,
1403 "Config write error, ret=%d\n", error); 1417 "Config write error, ret=%d\n", error);
@@ -1431,65 +1445,75 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data);
1431 * <SIZE> - 2-byte object size as hex 1445 * <SIZE> - 2-byte object size as hex
1432 * <CONTENTS> - array of <SIZE> 1-byte hex values 1446 * <CONTENTS> - array of <SIZE> 1-byte hex values
1433 */ 1447 */
1434static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg) 1448static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
1435{ 1449{
1436 struct device *dev = &data->client->dev; 1450 struct device *dev = &data->client->dev;
1437 struct mxt_info cfg_info; 1451 struct mxt_cfg cfg;
1438 int ret; 1452 int ret;
1439 int offset; 1453 int offset;
1440 int data_pos;
1441 int i; 1454 int i;
1442 int cfg_start_ofs;
1443 u32 info_crc, config_crc, calculated_crc; 1455 u32 info_crc, config_crc, calculated_crc;
1444 u8 *config_mem; 1456 u16 crc_start = 0;
1445 size_t config_mem_size; 1457
1458 /* Make zero terminated copy of the OBP_RAW file */
1459 cfg.raw = kmemdup_nul(fw->data, fw->size, GFP_KERNEL);
1460 if (!cfg.raw)
1461 return -ENOMEM;
1462
1463 cfg.raw_size = fw->size;
1446 1464
1447 mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1); 1465 mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
1448 1466
1449 if (strncmp(cfg->data, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) { 1467 if (strncmp(cfg.raw, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
1450 dev_err(dev, "Unrecognised config file\n"); 1468 dev_err(dev, "Unrecognised config file\n");
1451 return -EINVAL; 1469 ret = -EINVAL;
1470 goto release_raw;
1452 } 1471 }
1453 1472
1454 data_pos = strlen(MXT_CFG_MAGIC); 1473 cfg.raw_pos = strlen(MXT_CFG_MAGIC);
1455 1474
1456 /* Load information block and check */ 1475 /* Load information block and check */
1457 for (i = 0; i < sizeof(struct mxt_info); i++) { 1476 for (i = 0; i < sizeof(struct mxt_info); i++) {
1458 ret = sscanf(cfg->data + data_pos, "%hhx%n", 1477 ret = sscanf(cfg.raw + cfg.raw_pos, "%hhx%n",
1459 (unsigned char *)&cfg_info + i, 1478 (unsigned char *)&cfg.info + i,
1460 &offset); 1479 &offset);
1461 if (ret != 1) { 1480 if (ret != 1) {
1462 dev_err(dev, "Bad format\n"); 1481 dev_err(dev, "Bad format\n");
1463 return -EINVAL; 1482 ret = -EINVAL;
1483 goto release_raw;
1464 } 1484 }
1465 1485
1466 data_pos += offset; 1486 cfg.raw_pos += offset;
1467 } 1487 }
1468 1488
1469 if (cfg_info.family_id != data->info->family_id) { 1489 if (cfg.info.family_id != data->info->family_id) {
1470 dev_err(dev, "Family ID mismatch!\n"); 1490 dev_err(dev, "Family ID mismatch!\n");
1471 return -EINVAL; 1491 ret = -EINVAL;
1492 goto release_raw;
1472 } 1493 }
1473 1494
1474 if (cfg_info.variant_id != data->info->variant_id) { 1495 if (cfg.info.variant_id != data->info->variant_id) {
1475 dev_err(dev, "Variant ID mismatch!\n"); 1496 dev_err(dev, "Variant ID mismatch!\n");
1476 return -EINVAL; 1497 ret = -EINVAL;
1498 goto release_raw;
1477 } 1499 }
1478 1500
1479 /* Read CRCs */ 1501 /* Read CRCs */
1480 ret = sscanf(cfg->data + data_pos, "%x%n", &info_crc, &offset); 1502 ret = sscanf(cfg.raw + cfg.raw_pos, "%x%n", &info_crc, &offset);
1481 if (ret != 1) { 1503 if (ret != 1) {
1482 dev_err(dev, "Bad format: failed to parse Info CRC\n"); 1504 dev_err(dev, "Bad format: failed to parse Info CRC\n");
1483 return -EINVAL; 1505 ret = -EINVAL;
1506 goto release_raw;
1484 } 1507 }
1485 data_pos += offset; 1508 cfg.raw_pos += offset;
1486 1509
1487 ret = sscanf(cfg->data + data_pos, "%x%n", &config_crc, &offset); 1510 ret = sscanf(cfg.raw + cfg.raw_pos, "%x%n", &config_crc, &offset);
1488 if (ret != 1) { 1511 if (ret != 1) {
1489 dev_err(dev, "Bad format: failed to parse Config CRC\n"); 1512 dev_err(dev, "Bad format: failed to parse Config CRC\n");
1490 return -EINVAL; 1513 ret = -EINVAL;
1514 goto release_raw;
1491 } 1515 }
1492 data_pos += offset; 1516 cfg.raw_pos += offset;
1493 1517
1494 /* 1518 /*
1495 * The Info Block CRC is calculated over mxt_info and the object 1519 * The Info Block CRC is calculated over mxt_info and the object
@@ -1515,39 +1539,39 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
1515 } 1539 }
1516 1540
1517 /* Malloc memory to store configuration */ 1541 /* Malloc memory to store configuration */
1518 cfg_start_ofs = MXT_OBJECT_START + 1542 cfg.start_ofs = MXT_OBJECT_START +
1519 data->info->object_num * sizeof(struct mxt_object) + 1543 data->info->object_num * sizeof(struct mxt_object) +
1520 MXT_INFO_CHECKSUM_SIZE; 1544 MXT_INFO_CHECKSUM_SIZE;
1521 config_mem_size = data->mem_size - cfg_start_ofs; 1545 cfg.mem_size = data->mem_size - cfg.start_ofs;
1522 config_mem = kzalloc(config_mem_size, GFP_KERNEL); 1546 cfg.mem = kzalloc(cfg.mem_size, GFP_KERNEL);
1523 if (!config_mem) { 1547 if (!cfg.mem) {
1524 dev_err(dev, "Failed to allocate memory\n"); 1548 ret = -ENOMEM;
1525 return -ENOMEM; 1549 goto release_raw;
1526 } 1550 }
1527 1551
1528 ret = mxt_prepare_cfg_mem(data, cfg, data_pos, cfg_start_ofs, 1552 ret = mxt_prepare_cfg_mem(data, &cfg);
1529 config_mem, config_mem_size);
1530 if (ret) 1553 if (ret)
1531 goto release_mem; 1554 goto release_mem;
1532 1555
1533 /* Calculate crc of the received configs (not the raw config file) */ 1556 /* Calculate crc of the received configs (not the raw config file) */
1534 if (data->T7_address < cfg_start_ofs) { 1557 if (data->T71_address)
1535 dev_err(dev, "Bad T7 address, T7addr = %x, config offset %x\n", 1558 crc_start = data->T71_address;
1536 data->T7_address, cfg_start_ofs); 1559 else if (data->T7_address)
1537 ret = 0; 1560 crc_start = data->T7_address;
1538 goto release_mem; 1561 else
1539 } 1562 dev_warn(dev, "Could not find CRC start\n");
1540 1563
1541 calculated_crc = mxt_calculate_crc(config_mem, 1564 if (crc_start > cfg.start_ofs) {
1542 data->T7_address - cfg_start_ofs, 1565 calculated_crc = mxt_calculate_crc(cfg.mem,
1543 config_mem_size); 1566 crc_start - cfg.start_ofs,
1567 cfg.mem_size);
1544 1568
1545 if (config_crc > 0 && config_crc != calculated_crc) 1569 if (config_crc > 0 && config_crc != calculated_crc)
1546 dev_warn(dev, "Config CRC error, calculated=%06X, file=%06X\n", 1570 dev_warn(dev, "Config CRC in file inconsistent, calculated=%06X, file=%06X\n",
1547 calculated_crc, config_crc); 1571 calculated_crc, config_crc);
1572 }
1548 1573
1549 ret = mxt_upload_cfg_mem(data, cfg_start_ofs, 1574 ret = mxt_upload_cfg_mem(data, &cfg);
1550 config_mem, config_mem_size);
1551 if (ret) 1575 if (ret)
1552 goto release_mem; 1576 goto release_mem;
1553 1577
@@ -1562,8 +1586,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
1562 /* T7 config may have changed */ 1586 /* T7 config may have changed */
1563 mxt_init_t7_power_cfg(data); 1587 mxt_init_t7_power_cfg(data);
1564 1588
1589release_raw:
1590 kfree(cfg.raw);
1565release_mem: 1591release_mem:
1566 kfree(config_mem); 1592 kfree(cfg.mem);
1567 return ret; 1593 return ret;
1568} 1594}
1569 1595
@@ -1591,6 +1617,7 @@ static void mxt_free_object_table(struct mxt_data *data)
1591 data->T5_msg_size = 0; 1617 data->T5_msg_size = 0;
1592 data->T6_reportid = 0; 1618 data->T6_reportid = 0;
1593 data->T7_address = 0; 1619 data->T7_address = 0;
1620 data->T71_address = 0;
1594 data->T9_reportid_min = 0; 1621 data->T9_reportid_min = 0;
1595 data->T9_reportid_max = 0; 1622 data->T9_reportid_max = 0;
1596 data->T19_reportid = 0; 1623 data->T19_reportid = 0;
@@ -1656,12 +1683,16 @@ static int mxt_parse_object_table(struct mxt_data *data,
1656 case MXT_GEN_POWER_T7: 1683 case MXT_GEN_POWER_T7:
1657 data->T7_address = object->start_address; 1684 data->T7_address = object->start_address;
1658 break; 1685 break;
1686 case MXT_SPT_DYNAMICCONFIGURATIONCONTAINER_T71:
1687 data->T71_address = object->start_address;
1688 break;
1659 case MXT_TOUCH_MULTI_T9: 1689 case MXT_TOUCH_MULTI_T9:
1660 data->multitouch = MXT_TOUCH_MULTI_T9; 1690 data->multitouch = MXT_TOUCH_MULTI_T9;
1691 /* Only handle messages from first T9 instance */
1661 data->T9_reportid_min = min_id; 1692 data->T9_reportid_min = min_id;
1662 data->T9_reportid_max = max_id; 1693 data->T9_reportid_max = min_id +
1663 data->num_touchids = object->num_report_ids 1694 object->num_report_ids - 1;
1664 * mxt_obj_instances(object); 1695 data->num_touchids = object->num_report_ids;
1665 break; 1696 break;
1666 case MXT_SPT_MESSAGECOUNT_T44: 1697 case MXT_SPT_MESSAGECOUNT_T44:
1667 data->T44_address = object->start_address; 1698 data->T44_address = object->start_address;
@@ -1981,10 +2012,8 @@ static int mxt_initialize_input_device(struct mxt_data *data)
1981 2012
1982 /* Register input device */ 2013 /* Register input device */
1983 input_dev = input_allocate_device(); 2014 input_dev = input_allocate_device();
1984 if (!input_dev) { 2015 if (!input_dev)
1985 dev_err(dev, "Failed to allocate memory\n");
1986 return -ENOMEM; 2016 return -ENOMEM;
1987 }
1988 2017
1989 input_dev->name = "Atmel maXTouch Touchscreen"; 2018 input_dev->name = "Atmel maXTouch Touchscreen";
1990 input_dev->phys = data->phys; 2019 input_dev->phys = data->phys;
@@ -2055,12 +2084,6 @@ static int mxt_initialize_input_device(struct mxt_data *data)
2055 } 2084 }
2056 2085
2057 if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 && 2086 if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
2058 data->t100_aux_ampl) {
2059 input_set_abs_params(input_dev, ABS_MT_PRESSURE,
2060 0, 255, 0, 0);
2061 }
2062
2063 if (data->multitouch == MXT_TOUCH_MULTITOUCHSCREEN_T100 &&
2064 data->t100_aux_vect) { 2087 data->t100_aux_vect) {
2065 input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 2088 input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
2066 0, 255, 0, 0); 2089 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/bu21029_ts.c b/drivers/input/touchscreen/bu21029_ts.c
new file mode 100644
index 000000000000..49a8d4bbca3a
--- /dev/null
+++ b/drivers/input/touchscreen/bu21029_ts.c
@@ -0,0 +1,484 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Rohm BU21029 touchscreen controller driver
4 *
5 * Copyright (C) 2015-2018 Bosch Sicherheitssysteme GmbH
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/delay.h>
13#include <linux/gpio/consumer.h>
14#include <linux/i2c.h>
15#include <linux/input.h>
16#include <linux/input/touchscreen.h>
17#include <linux/interrupt.h>
18#include <linux/irq.h>
19#include <linux/module.h>
20#include <linux/regulator/consumer.h>
21#include <linux/timer.h>
22
23/*
24 * HW_ID1 Register (PAGE=0, ADDR=0x0E, Reset value=0x02, Read only)
25 * +--------+--------+--------+--------+--------+--------+--------+--------+
26 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
27 * +--------+--------+--------+--------+--------+--------+--------+--------+
28 * | HW_IDH |
29 * +--------+--------+--------+--------+--------+--------+--------+--------+
30 * HW_ID2 Register (PAGE=0, ADDR=0x0F, Reset value=0x29, Read only)
31 * +--------+--------+--------+--------+--------+--------+--------+--------+
32 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
33 * +--------+--------+--------+--------+--------+--------+--------+--------+
34 * | HW_IDL |
35 * +--------+--------+--------+--------+--------+--------+--------+--------+
36 * HW_IDH: high 8bits of IC's ID
37 * HW_IDL: low 8bits of IC's ID
38 */
39#define BU21029_HWID_REG (0x0E << 3)
40#define SUPPORTED_HWID 0x0229
41
42/*
43 * CFR0 Register (PAGE=0, ADDR=0x00, Reset value=0x20)
44 * +--------+--------+--------+--------+--------+--------+--------+--------+
45 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
46 * +--------+--------+--------+--------+--------+--------+--------+--------+
47 * | 0 | 0 | CALIB | INTRM | 0 | 0 | 0 | 0 |
48 * +--------+--------+--------+--------+--------+--------+--------+--------+
49 * CALIB: 0 = not to use calibration result (*)
50 * 1 = use calibration result
51 * INTRM: 0 = INT output depend on "pen down" (*)
52 * 1 = INT output always "0"
53 */
54#define BU21029_CFR0_REG (0x00 << 3)
55#define CFR0_VALUE 0x00
56
57/*
58 * CFR1 Register (PAGE=0, ADDR=0x01, Reset value=0xA6)
59 * +--------+--------+--------+--------+--------+--------+--------+--------+
60 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
61 * +--------+--------+--------+--------+--------+--------+--------+--------+
62 * | MAV | AVE[2:0] | 0 | SMPL[2:0] |
63 * +--------+--------+--------+--------+--------+--------+--------+--------+
64 * MAV: 0 = median average filter off
65 * 1 = median average filter on (*)
66 * AVE: AVE+1 = number of average samples for MAV,
67 * if AVE>SMPL, then AVE=SMPL (=3)
68 * SMPL: SMPL+1 = number of conversion samples for MAV (=7)
69 */
70#define BU21029_CFR1_REG (0x01 << 3)
71#define CFR1_VALUE 0xA6
72
73/*
74 * CFR2 Register (PAGE=0, ADDR=0x02, Reset value=0x04)
75 * +--------+--------+--------+--------+--------+--------+--------+--------+
76 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
77 * +--------+--------+--------+--------+--------+--------+--------+--------+
78 * | INTVL_TIME[3:0] | TIME_ST_ADC[3:0] |
79 * +--------+--------+--------+--------+--------+--------+--------+--------+
80 * INTVL_TIME: waiting time between completion of conversion
81 * and start of next conversion, only usable in
82 * autoscan mode (=20.480ms)
83 * TIME_ST_ADC: waiting time between application of voltage
84 * to panel and start of A/D conversion (=100us)
85 */
86#define BU21029_CFR2_REG (0x02 << 3)
87#define CFR2_VALUE 0xC9
88
89/*
90 * CFR3 Register (PAGE=0, ADDR=0x0B, Reset value=0x72)
91 * +--------+--------+--------+--------+--------+--------+--------+--------+
92 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
93 * +--------+--------+--------+--------+--------+--------+--------+--------+
94 * | RM8 | STRETCH| PU90K | DUAL | PIDAC_OFS[3:0] |
95 * +--------+--------+--------+--------+--------+--------+--------+--------+
96 * RM8: 0 = coordinate resolution is 12bit (*)
97 * 1 = coordinate resolution is 8bit
98 * STRETCH: 0 = SCL_STRETCH function off
99 * 1 = SCL_STRETCH function on (*)
100 * PU90K: 0 = internal pull-up resistance for touch detection is ~50kohms (*)
101 * 1 = internal pull-up resistance for touch detection is ~90kohms
102 * DUAL: 0 = dual touch detection off (*)
103 * 1 = dual touch detection on
104 * PIDAC_OFS: dual touch detection circuit adjustment, it is not necessary
105 * to change this from initial value
106 */
107#define BU21029_CFR3_REG (0x0B << 3)
108#define CFR3_VALUE 0x42
109
110/*
111 * LDO Register (PAGE=0, ADDR=0x0C, Reset value=0x00)
112 * +--------+--------+--------+--------+--------+--------+--------+--------+
113 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
114 * +--------+--------+--------+--------+--------+--------+--------+--------+
115 * | 0 | PVDD[2:0] | 0 | AVDD[2:0] |
116 * +--------+--------+--------+--------+--------+--------+--------+--------+
117 * PVDD: output voltage of panel output regulator (=2.000V)
118 * AVDD: output voltage of analog circuit regulator (=2.000V)
119 */
120#define BU21029_LDO_REG (0x0C << 3)
121#define LDO_VALUE 0x77
122
123/*
124 * Serial Interface Command Byte 1 (CID=1)
125 * +--------+--------+--------+--------+--------+--------+--------+--------+
126 * | D7 | D6 | D5 | D4 | D3 | D2 | D1 | D0 |
127 * +--------+--------+--------+--------+--------+--------+--------+--------+
128 * | 1 | CF | CMSK | PDM | STP |
129 * +--------+--------+--------+--------+--------+--------+--------+--------+
130 * CF: conversion function, see table 3 in datasheet p6 (=0000, automatic scan)
131 * CMSK: 0 = executes convert function (*)
132 * 1 = reads the convert result
133 * PDM: 0 = power down after convert function stops (*)
134 * 1 = keep power on after convert function stops
135 * STP: 1 = abort current conversion and power down, set to "0" automatically
136 */
137#define BU21029_AUTOSCAN 0x80
138
139/*
140 * The timeout value needs to be larger than INTVL_TIME + tConv4 (sample and
141 * conversion time), where tConv4 is calculated by formula:
142 * tPON + tDLY1 + (tTIME_ST_ADC + (tADC * tSMPL) * 2 + tDLY2) * 3
143 * see figure 8 in datasheet p15 for details of each field.
144 */
145#define PEN_UP_TIMEOUT_MS 50
146
147#define STOP_DELAY_MIN_US 50
148#define STOP_DELAY_MAX_US 1000
149#define START_DELAY_MS 2
150#define BUF_LEN 8
151#define SCALE_12BIT (1 << 12)
152#define MAX_12BIT ((1 << 12) - 1)
153#define DRIVER_NAME "bu21029"
154
155struct bu21029_ts_data {
156 struct i2c_client *client;
157 struct input_dev *in_dev;
158 struct timer_list timer;
159 struct regulator *vdd;
160 struct gpio_desc *reset_gpios;
161 u32 x_plate_ohms;
162 struct touchscreen_properties prop;
163};
164
165static void bu21029_touch_report(struct bu21029_ts_data *bu21029, const u8 *buf)
166{
167 u16 x, y, z1, z2;
168 u32 rz;
169 s32 max_pressure = input_abs_get_max(bu21029->in_dev, ABS_PRESSURE);
170
171 /*
172 * compose upper 8 and lower 4 bits into a 12bit value:
173 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
174 * | ByteH | ByteL |
175 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
176 * |b07|b06|b05|b04|b03|b02|b01|b00|b07|b06|b05|b04|b03|b02|b01|b00|
177 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
178 * |v11|v10|v09|v08|v07|v06|v05|v04|v03|v02|v01|v00| 0 | 0 | 0 | 0 |
179 * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
180 */
181 x = (buf[0] << 4) | (buf[1] >> 4);
182 y = (buf[2] << 4) | (buf[3] >> 4);
183 z1 = (buf[4] << 4) | (buf[5] >> 4);
184 z2 = (buf[6] << 4) | (buf[7] >> 4);
185
186 if (z1 && z2) {
187 /*
188 * calculate Rz (pressure resistance value) by equation:
189 * Rz = Rx * (x/Q) * ((z2/z1) - 1), where
190 * Rx is x-plate resistance,
191 * Q is the touch screen resolution (8bit = 256, 12bit = 4096)
192 * x, z1, z2 are the measured positions.
193 */
194 rz = z2 - z1;
195 rz *= x;
196 rz *= bu21029->x_plate_ohms;
197 rz /= z1;
198 rz = DIV_ROUND_CLOSEST(rz, SCALE_12BIT);
199 if (rz <= max_pressure) {
200 touchscreen_report_pos(bu21029->in_dev, &bu21029->prop,
201 x, y, false);
202 input_report_abs(bu21029->in_dev, ABS_PRESSURE,
203 max_pressure - rz);
204 input_report_key(bu21029->in_dev, BTN_TOUCH, 1);
205 input_sync(bu21029->in_dev);
206 }
207 }
208}
209
210static void bu21029_touch_release(struct timer_list *t)
211{
212 struct bu21029_ts_data *bu21029 = from_timer(bu21029, t, timer);
213
214 input_report_abs(bu21029->in_dev, ABS_PRESSURE, 0);
215 input_report_key(bu21029->in_dev, BTN_TOUCH, 0);
216 input_sync(bu21029->in_dev);
217}
218
219static irqreturn_t bu21029_touch_soft_irq(int irq, void *data)
220{
221 struct bu21029_ts_data *bu21029 = data;
222 u8 buf[BUF_LEN];
223 int error;
224
225 /*
226 * Read touch data and deassert interrupt (will assert again after
227 * INTVL_TIME + tConv4 for continuous touch)
228 */
229 error = i2c_smbus_read_i2c_block_data(bu21029->client, BU21029_AUTOSCAN,
230 sizeof(buf), buf);
231 if (error < 0)
232 goto out;
233
234 bu21029_touch_report(bu21029, buf);
235
236 /* reset timer for pen up detection */
237 mod_timer(&bu21029->timer,
238 jiffies + msecs_to_jiffies(PEN_UP_TIMEOUT_MS));
239
240out:
241 return IRQ_HANDLED;
242}
243
244static void bu21029_put_chip_in_reset(struct bu21029_ts_data *bu21029)
245{
246 if (bu21029->reset_gpios) {
247 gpiod_set_value_cansleep(bu21029->reset_gpios, 1);
248 usleep_range(STOP_DELAY_MIN_US, STOP_DELAY_MAX_US);
249 }
250}
251
252static int bu21029_start_chip(struct input_dev *dev)
253{
254 struct bu21029_ts_data *bu21029 = input_get_drvdata(dev);
255 struct i2c_client *i2c = bu21029->client;
256 struct {
257 u8 reg;
258 u8 value;
259 } init_table[] = {
260 {BU21029_CFR0_REG, CFR0_VALUE},
261 {BU21029_CFR1_REG, CFR1_VALUE},
262 {BU21029_CFR2_REG, CFR2_VALUE},
263 {BU21029_CFR3_REG, CFR3_VALUE},
264 {BU21029_LDO_REG, LDO_VALUE}
265 };
266 int error, i;
267 __be16 hwid;
268
269 error = regulator_enable(bu21029->vdd);
270 if (error) {
271 dev_err(&i2c->dev, "failed to power up chip: %d", error);
272 return error;
273 }
274
275 /* take chip out of reset */
276 if (bu21029->reset_gpios) {
277 gpiod_set_value_cansleep(bu21029->reset_gpios, 0);
278 msleep(START_DELAY_MS);
279 }
280
281 error = i2c_smbus_read_i2c_block_data(i2c, BU21029_HWID_REG,
282 sizeof(hwid), (u8 *)&hwid);
283 if (error < 0) {
284 dev_err(&i2c->dev, "failed to read HW ID\n");
285 goto err_out;
286 }
287
288 if (be16_to_cpu(hwid) != SUPPORTED_HWID) {
289 dev_err(&i2c->dev,
290 "unsupported HW ID 0x%x\n", be16_to_cpu(hwid));
291 error = -ENODEV;
292 goto err_out;
293 }
294
295 for (i = 0; i < ARRAY_SIZE(init_table); ++i) {
296 error = i2c_smbus_write_byte_data(i2c,
297 init_table[i].reg,
298 init_table[i].value);
299 if (error < 0) {
300 dev_err(&i2c->dev,
301 "failed to write %#02x to register %#02x: %d\n",
302 init_table[i].value, init_table[i].reg,
303 error);
304 goto err_out;
305 }
306 }
307
308 error = i2c_smbus_write_byte(i2c, BU21029_AUTOSCAN);
309 if (error < 0) {
310 dev_err(&i2c->dev, "failed to start autoscan\n");
311 goto err_out;
312 }
313
314 enable_irq(bu21029->client->irq);
315 return 0;
316
317err_out:
318 bu21029_put_chip_in_reset(bu21029);
319 regulator_disable(bu21029->vdd);
320 return error;
321}
322
323static void bu21029_stop_chip(struct input_dev *dev)
324{
325 struct bu21029_ts_data *bu21029 = input_get_drvdata(dev);
326
327 disable_irq(bu21029->client->irq);
328 del_timer_sync(&bu21029->timer);
329
330 bu21029_put_chip_in_reset(bu21029);
331 regulator_disable(bu21029->vdd);
332}
333
334static int bu21029_probe(struct i2c_client *client,
335 const struct i2c_device_id *id)
336{
337 struct bu21029_ts_data *bu21029;
338 struct input_dev *in_dev;
339 int error;
340
341 if (!i2c_check_functionality(client->adapter,
342 I2C_FUNC_SMBUS_WRITE_BYTE |
343 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
344 I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
345 dev_err(&client->dev,
346 "i2c functionality support is not sufficient\n");
347 return -EIO;
348 }
349
350 bu21029 = devm_kzalloc(&client->dev, sizeof(*bu21029), GFP_KERNEL);
351 if (!bu21029)
352 return -ENOMEM;
353
354 error = device_property_read_u32(&client->dev, "rohm,x-plate-ohms",
355 &bu21029->x_plate_ohms);
356 if (error) {
357 dev_err(&client->dev,
358 "invalid 'x-plate-ohms' supplied: %d\n", error);
359 return error;
360 }
361
362 bu21029->vdd = devm_regulator_get(&client->dev, "vdd");
363 if (IS_ERR(bu21029->vdd)) {
364 error = PTR_ERR(bu21029->vdd);
365 if (error != -EPROBE_DEFER)
366 dev_err(&client->dev,
367 "failed to acquire 'vdd' supply: %d\n", error);
368 return error;
369 }
370
371 bu21029->reset_gpios = devm_gpiod_get_optional(&client->dev,
372 "reset", GPIOD_OUT_HIGH);
373 if (IS_ERR(bu21029->reset_gpios)) {
374 error = PTR_ERR(bu21029->reset_gpios);
375 if (error != -EPROBE_DEFER)
376 dev_err(&client->dev,
377 "failed to acquire 'reset' gpio: %d\n", error);
378 return error;
379 }
380
381 in_dev = devm_input_allocate_device(&client->dev);
382 if (!in_dev) {
383 dev_err(&client->dev, "unable to allocate input device\n");
384 return -ENOMEM;
385 }
386
387 bu21029->client = client;
388 bu21029->in_dev = in_dev;
389 timer_setup(&bu21029->timer, bu21029_touch_release, 0);
390
391 in_dev->name = DRIVER_NAME;
392 in_dev->id.bustype = BUS_I2C;
393 in_dev->open = bu21029_start_chip;
394 in_dev->close = bu21029_stop_chip;
395
396 input_set_capability(in_dev, EV_KEY, BTN_TOUCH);
397 input_set_abs_params(in_dev, ABS_X, 0, MAX_12BIT, 0, 0);
398 input_set_abs_params(in_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
399 input_set_abs_params(in_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
400 touchscreen_parse_properties(in_dev, false, &bu21029->prop);
401
402 input_set_drvdata(in_dev, bu21029);
403
404 irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
405 error = devm_request_threaded_irq(&client->dev, client->irq,
406 NULL, bu21029_touch_soft_irq,
407 IRQF_ONESHOT, DRIVER_NAME, bu21029);
408 if (error) {
409 dev_err(&client->dev,
410 "unable to request touch irq: %d\n", error);
411 return error;
412 }
413
414 error = input_register_device(in_dev);
415 if (error) {
416 dev_err(&client->dev,
417 "unable to register input device: %d\n", error);
418 return error;
419 }
420
421 i2c_set_clientdata(client, bu21029);
422
423 return 0;
424}
425
426static int __maybe_unused bu21029_suspend(struct device *dev)
427{
428 struct i2c_client *i2c = to_i2c_client(dev);
429 struct bu21029_ts_data *bu21029 = i2c_get_clientdata(i2c);
430
431 if (!device_may_wakeup(dev)) {
432 mutex_lock(&bu21029->in_dev->mutex);
433 if (bu21029->in_dev->users)
434 bu21029_stop_chip(bu21029->in_dev);
435 mutex_unlock(&bu21029->in_dev->mutex);
436 }
437
438 return 0;
439}
440
441static int __maybe_unused bu21029_resume(struct device *dev)
442{
443 struct i2c_client *i2c = to_i2c_client(dev);
444 struct bu21029_ts_data *bu21029 = i2c_get_clientdata(i2c);
445
446 if (!device_may_wakeup(dev)) {
447 mutex_lock(&bu21029->in_dev->mutex);
448 if (bu21029->in_dev->users)
449 bu21029_start_chip(bu21029->in_dev);
450 mutex_unlock(&bu21029->in_dev->mutex);
451 }
452
453 return 0;
454}
455static SIMPLE_DEV_PM_OPS(bu21029_pm_ops, bu21029_suspend, bu21029_resume);
456
457static const struct i2c_device_id bu21029_ids[] = {
458 { DRIVER_NAME, 0 },
459 { /* sentinel */ }
460};
461MODULE_DEVICE_TABLE(i2c, bu21029_ids);
462
#ifdef CONFIG_OF
/* Device-tree match table (referenced via of_match_ptr() below). */
static const struct of_device_id bu21029_of_ids[] = {
	{ .compatible = "rohm,bu21029" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bu21029_of_ids);
#endif
470
471static struct i2c_driver bu21029_driver = {
472 .driver = {
473 .name = DRIVER_NAME,
474 .of_match_table = of_match_ptr(bu21029_of_ids),
475 .pm = &bu21029_pm_ops,
476 },
477 .id_table = bu21029_ids,
478 .probe = bu21029_probe,
479};
480module_i2c_driver(bu21029_driver);
481
482MODULE_AUTHOR("Zhu Yi <yi.zhu5@cn.bosch.com>");
483MODULE_DESCRIPTION("Rohm BU21029 touchscreen controller driver");
484MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 2facad75eb6d..7fe41965c5d1 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * Touch Screen driver for EETI's I2C connected touch screen panels 2 * Touch Screen driver for EETI's I2C connected touch screen panels
3 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> 3 * Copyright (c) 2009,2018 Daniel Mack <daniel@zonque.org>
4 * 4 *
5 * See EETI's software guide for the protocol specification: 5 * See EETI's software guide for the protocol specification:
6 * http://home.eeti.com.tw/web20/eg/guide.htm 6 * http://home.eeti.com.tw/documentation.html
7 * 7 *
8 * Based on migor_ts.c 8 * Based on migor_ts.c
9 * Copyright (c) 2008 Magnus Damm 9 * Copyright (c) 2008 Magnus Damm
@@ -25,28 +25,22 @@
25 */ 25 */
26 26
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/moduleparam.h>
29#include <linux/kernel.h> 28#include <linux/kernel.h>
30#include <linux/input.h> 29#include <linux/input.h>
30#include <linux/input/touchscreen.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/gpio/consumer.h> 34#include <linux/gpio/consumer.h>
35#include <linux/of.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <asm/unaligned.h> 37#include <asm/unaligned.h>
37 38
38static bool flip_x;
39module_param(flip_x, bool, 0644);
40MODULE_PARM_DESC(flip_x, "flip x coordinate");
41
42static bool flip_y;
43module_param(flip_y, bool, 0644);
44MODULE_PARM_DESC(flip_y, "flip y coordinate");
45
46struct eeti_ts { 39struct eeti_ts {
47 struct i2c_client *client; 40 struct i2c_client *client;
48 struct input_dev *input; 41 struct input_dev *input;
49 struct gpio_desc *attn_gpio; 42 struct gpio_desc *attn_gpio;
43 struct touchscreen_properties props;
50 bool running; 44 bool running;
51}; 45};
52 46
@@ -73,17 +67,10 @@ static void eeti_ts_report_event(struct eeti_ts *eeti, u8 *buf)
73 x >>= res - EETI_TS_BITDEPTH; 67 x >>= res - EETI_TS_BITDEPTH;
74 y >>= res - EETI_TS_BITDEPTH; 68 y >>= res - EETI_TS_BITDEPTH;
75 69
76 if (flip_x)
77 x = EETI_MAXVAL - x;
78
79 if (flip_y)
80 y = EETI_MAXVAL - y;
81
82 if (buf[0] & REPORT_BIT_HAS_PRESSURE) 70 if (buf[0] & REPORT_BIT_HAS_PRESSURE)
83 input_report_abs(eeti->input, ABS_PRESSURE, buf[5]); 71 input_report_abs(eeti->input, ABS_PRESSURE, buf[5]);
84 72
85 input_report_abs(eeti->input, ABS_X, x); 73 touchscreen_report_pos(eeti->input, &eeti->props, x, y, false);
86 input_report_abs(eeti->input, ABS_Y, y);
87 input_report_key(eeti->input, BTN_TOUCH, buf[0] & REPORT_BIT_PRESSED); 74 input_report_key(eeti->input, BTN_TOUCH, buf[0] & REPORT_BIT_PRESSED);
88 input_sync(eeti->input); 75 input_sync(eeti->input);
89} 76}
@@ -178,6 +165,8 @@ static int eeti_ts_probe(struct i2c_client *client,
178 input_set_abs_params(input, ABS_Y, 0, EETI_MAXVAL, 0, 0); 165 input_set_abs_params(input, ABS_Y, 0, EETI_MAXVAL, 0, 0);
179 input_set_abs_params(input, ABS_PRESSURE, 0, 0xff, 0, 0); 166 input_set_abs_params(input, ABS_PRESSURE, 0, 0xff, 0, 0);
180 167
168 touchscreen_parse_properties(input, false, &eeti->props);
169
181 input->name = client->name; 170 input->name = client->name;
182 input->id.bustype = BUS_I2C; 171 input->id.bustype = BUS_I2C;
183 input->open = eeti_ts_open; 172 input->open = eeti_ts_open;
@@ -262,10 +251,18 @@ static const struct i2c_device_id eeti_ts_id[] = {
262}; 251};
263MODULE_DEVICE_TABLE(i2c, eeti_ts_id); 252MODULE_DEVICE_TABLE(i2c, eeti_ts_id);
264 253
254#ifdef CONFIG_OF
255static const struct of_device_id of_eeti_ts_match[] = {
256 { .compatible = "eeti,exc3000-i2c", },
257 { }
258};
259#endif
260
265static struct i2c_driver eeti_ts_driver = { 261static struct i2c_driver eeti_ts_driver = {
266 .driver = { 262 .driver = {
267 .name = "eeti_ts", 263 .name = "eeti_ts",
268 .pm = &eeti_ts_pm, 264 .pm = &eeti_ts_pm,
265 .of_match_table = of_match_ptr(of_eeti_ts_match),
269 }, 266 },
270 .probe = eeti_ts_probe, 267 .probe = eeti_ts_probe,
271 .id_table = eeti_ts_id, 268 .id_table = eeti_ts_id,
@@ -274,5 +271,5 @@ static struct i2c_driver eeti_ts_driver = {
274module_i2c_driver(eeti_ts_driver); 271module_i2c_driver(eeti_ts_driver);
275 272
276MODULE_DESCRIPTION("EETI Touchscreen driver"); 273MODULE_DESCRIPTION("EETI Touchscreen driver");
277MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); 274MODULE_AUTHOR("Daniel Mack <daniel@zonque.org>");
278MODULE_LICENSE("GPL"); 275MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 752ae9cf4514..80e69bb8283e 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -1,13 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for EETI eGalax Multiple Touch Controller 3 * Driver for EETI eGalax Multiple Touch Controller
3 * 4 *
4 * Copyright (C) 2011 Freescale Semiconductor, Inc. 5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
5 * 6 *
6 * based on max11801_ts.c 7 * based on max11801_ts.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13/* EETI eGalax serial touch screen controller is a I2C based multiple 10/* EETI eGalax serial touch screen controller is a I2C based multiple
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 83433e8efff7..7f2942f3cec6 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -352,6 +352,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
352 352
353 case 1: /* 6-byte protocol */ 353 case 1: /* 6-byte protocol */
354 input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0); 354 input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
355 /* fall through */
355 356
356 case 2: /* 4-byte protocol */ 357 case 2: /* 4-byte protocol */
357 input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0); 358 input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index 47fe1f184bbc..1d6c8f490b40 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -1,16 +1,11 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de> 2//
3 * 3// Copyright (C) 2014-2015 Pengutronix, Markus Pargmann <mpa@pengutronix.de>
4 * This program is free software; you can redistribute it and/or modify it under 4// Based on driver from 2011:
5 * the terms of the GNU General Public License version 2 as published by the 5// Juergen Beisert, Pengutronix <kernel@pengutronix.de>
6 * Free Software Foundation. 6//
7 * 7// This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
8 * Based on driver from 2011: 8// connected to the imx25 ADC.
9 * Juergen Beisert, Pengutronix <kernel@pengutronix.de>
10 *
11 * This is the driver for the imx25 TCQ (Touchscreen Conversion Queue)
12 * connected to the imx25 ADC.
13 */
14 9
15#include <linux/clk.h> 10#include <linux/clk.h>
16#include <linux/device.h> 11#include <linux/device.h>
diff --git a/drivers/input/touchscreen/gunze.c b/drivers/input/touchscreen/gunze.c
index 481586909d28..054c2537b392 100644
--- a/drivers/input/touchscreen/gunze.c
+++ b/drivers/input/touchscreen/gunze.c
@@ -20,10 +20,6 @@
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * Should you need to contact me, the author, you can do so either by
25 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
26 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
27 */ 23 */
28 24
29#include <linux/errno.h> 25#include <linux/errno.h>
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ee82a975bfd2..c10fc594f94d 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -1,12 +1,8 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Freescale i.MX6UL touchscreen controller driver 2//
3 * 3// Freescale i.MX6UL touchscreen controller driver
4 * Copyright (C) 2015 Freescale Semiconductor, Inc. 4//
5 * 5// Copyright (C) 2015 Freescale Semiconductor, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 6
11#include <linux/errno.h> 7#include <linux/errno.h>
12#include <linux/kernel.h> 8#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index d1c09e6a2cb6..c89853a36f9e 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -466,7 +466,7 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
466 } 466 }
467 } 467 }
468 468
469 return 0; 469 return false;
470} 470}
471 471
472static bool raydium_i2c_fw_trigger(struct i2c_client *client) 472static bool raydium_i2c_fw_trigger(struct i2c_client *client)
@@ -492,7 +492,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
492 } 492 }
493 } 493 }
494 494
495 return 0; 495 return false;
496} 496}
497 497
498static int raydium_i2c_check_path(struct i2c_client *client) 498static int raydium_i2c_check_path(struct i2c_client *client)
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index 20f7f3902757..166edeb77776 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -1142,7 +1142,7 @@ static int __maybe_unused wdt87xx_resume(struct device *dev)
1142 * The chip may have been reset while system is resuming, 1142 * The chip may have been reset while system is resuming,
1143 * give it some time to settle. 1143 * give it some time to settle.
1144 */ 1144 */
1145 mdelay(100); 1145 msleep(100);
1146 1146
1147 error = wdt87xx_send_command(client, VND_CMD_START, 0); 1147 error = wdt87xx_send_command(client, VND_CMD_START, 0);
1148 if (error) 1148 if (error)
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index 944a7f338099..1b25d8bc153a 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
388** Receive and process command from user mode utility 388** Receive and process command from user mode utility
389*/ 389*/
390void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 390void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
391 int length, 391 int length, void *mptr,
392 divas_xdi_copy_from_user_fn_t cp_fn) 392 divas_xdi_copy_from_user_fn_t cp_fn)
393{ 393{
394 diva_xdi_um_cfg_cmd_t msg; 394 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
395 diva_os_xdi_adapter_t *a = NULL; 395 diva_os_xdi_adapter_t *a = NULL;
396 diva_os_spin_lock_magic_t old_irql; 396 diva_os_spin_lock_magic_t old_irql;
397 struct list_head *tmp; 397 struct list_head *tmp;
@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
401 length, sizeof(diva_xdi_um_cfg_cmd_t))) 401 length, sizeof(diva_xdi_um_cfg_cmd_t)))
402 return NULL; 402 return NULL;
403 } 403 }
404 if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) { 404 if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
405 DBG_ERR(("A: A(?) open, write error")) 405 DBG_ERR(("A: A(?) open, write error"))
406 return NULL; 406 return NULL;
407 } 407 }
408 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 408 diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
409 list_for_each(tmp, &adapter_queue) { 409 list_for_each(tmp, &adapter_queue) {
410 a = list_entry(tmp, diva_os_xdi_adapter_t, link); 410 a = list_entry(tmp, diva_os_xdi_adapter_t, link);
411 if (a->controller == (int)msg.adapter) 411 if (a->controller == (int)msg->adapter)
412 break; 412 break;
413 a = NULL; 413 a = NULL;
414 } 414 }
415 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter"); 415 diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
416 416
417 if (!a) { 417 if (!a) {
418 DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter)) 418 DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
419 } 419 }
420 420
421 return (a); 421 return (a);
@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
437 437
438int 438int
439diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 439diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
440 int length, divas_xdi_copy_from_user_fn_t cp_fn) 440 int length, void *mptr,
441 divas_xdi_copy_from_user_fn_t cp_fn)
441{ 442{
443 diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
442 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter; 444 diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
443 void *data; 445 void *data;
444 446
@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
459 return (-2); 461 return (-2);
460 } 462 }
461 463
462 length = (*cp_fn) (os_handle, data, src, length); 464 if (msg) {
465 *(diva_xdi_um_cfg_cmd_t *)data = *msg;
466 length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
467 src + sizeof(*msg), length - sizeof(*msg));
468 } else {
469 length = (*cp_fn) (os_handle, data, src, length);
470 }
463 if (length > 0) { 471 if (length > 0) {
464 if ((*(a->interface.cmd_proc)) 472 if ((*(a->interface.cmd_proc))
465 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) { 473 (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
index b067032093a8..1ad76650fbf9 100644
--- a/drivers/isdn/hardware/eicon/diva.h
+++ b/drivers/isdn/hardware/eicon/diva.h
@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
20 int max_length, divas_xdi_copy_to_user_fn_t cp_fn); 20 int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
21 21
22int diva_xdi_write(void *adapter, void *os_handle, const void __user *src, 22int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
23 int length, divas_xdi_copy_from_user_fn_t cp_fn); 23 int length, void *msg,
24 divas_xdi_copy_from_user_fn_t cp_fn);
24 25
25void *diva_xdi_open_adapter(void *os_handle, const void __user *src, 26void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
26 int length, 27 int length, void *msg,
27 divas_xdi_copy_from_user_fn_t cp_fn); 28 divas_xdi_copy_from_user_fn_t cp_fn);
28 29
29void diva_xdi_close_adapter(void *adapter, void *os_handle); 30void diva_xdi_close_adapter(void *adapter, void *os_handle);
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b9980e84f9db..b6a3950b2564 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
591static ssize_t divas_write(struct file *file, const char __user *buf, 591static ssize_t divas_write(struct file *file, const char __user *buf,
592 size_t count, loff_t *ppos) 592 size_t count, loff_t *ppos)
593{ 593{
594 diva_xdi_um_cfg_cmd_t msg;
594 int ret = -EINVAL; 595 int ret = -EINVAL;
595 596
596 if (!file->private_data) { 597 if (!file->private_data) {
597 file->private_data = diva_xdi_open_adapter(file, buf, 598 file->private_data = diva_xdi_open_adapter(file, buf,
598 count, 599 count, &msg,
599 xdi_copy_from_user); 600 xdi_copy_from_user);
600 } 601 if (!file->private_data)
601 if (!file->private_data) { 602 return (-ENODEV);
602 return (-ENODEV); 603 ret = diva_xdi_write(file->private_data, file,
604 buf, count, &msg, xdi_copy_from_user);
605 } else {
606 ret = diva_xdi_write(file->private_data, file,
607 buf, count, NULL, xdi_copy_from_user);
603 } 608 }
604 609
605 ret = diva_xdi_write(file->private_data, file,
606 buf, count, xdi_copy_from_user);
607 switch (ret) { 610 switch (ret) {
608 case -1: /* Message should be removed from rx mailbox first */ 611 case -1: /* Message should be removed from rx mailbox first */
609 ret = -EBUSY; 612 ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
622static ssize_t divas_read(struct file *file, char __user *buf, 625static ssize_t divas_read(struct file *file, char __user *buf,
623 size_t count, loff_t *ppos) 626 size_t count, loff_t *ppos)
624{ 627{
628 diva_xdi_um_cfg_cmd_t msg;
625 int ret = -EINVAL; 629 int ret = -EINVAL;
626 630
627 if (!file->private_data) { 631 if (!file->private_data) {
628 file->private_data = diva_xdi_open_adapter(file, buf, 632 file->private_data = diva_xdi_open_adapter(file, buf,
629 count, 633 count, &msg,
630 xdi_copy_from_user); 634 xdi_copy_from_user);
631 } 635 }
632 if (!file->private_data) { 636 if (!file->private_data) {
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6f823f44b4aa..e0acbcefb2ba 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3859,7 +3859,7 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
3859 /* Try loading the bitmap unless "raid0", which does not have one */ 3859 /* Try loading the bitmap unless "raid0", which does not have one */
3860 if (!rs_is_raid0(rs) && 3860 if (!rs_is_raid0(rs) &&
3861 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) { 3861 !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
3862 r = bitmap_load(&rs->md); 3862 r = md_bitmap_load(&rs->md);
3863 if (r) 3863 if (r)
3864 DMERR("Failed to load bitmap"); 3864 DMERR("Failed to load bitmap");
3865 } 3865 }
@@ -3987,8 +3987,8 @@ static int raid_preresume(struct dm_target *ti)
3987 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */ 3987 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
3988 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && 3988 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
3989 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { 3989 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
3990 r = bitmap_resize(mddev->bitmap, mddev->dev_sectors, 3990 r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
3991 to_bytes(rs->requested_bitmap_chunk_sectors), 0); 3991 to_bytes(rs->requested_bitmap_chunk_sectors), 0);
3992 if (r) 3992 if (r)
3993 DMERR("Failed to resize bitmap"); 3993 DMERR("Failed to resize bitmap");
3994 } 3994 }
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 239c7bb3929b..712a20c4608c 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -46,8 +46,8 @@ static inline char *bmname(struct bitmap *bitmap)
46 * if we find our page, we increment the page's refcount so that it stays 46 * if we find our page, we increment the page's refcount so that it stays
47 * allocated while we're using it 47 * allocated while we're using it
48 */ 48 */
49static int bitmap_checkpage(struct bitmap_counts *bitmap, 49static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
50 unsigned long page, int create, int no_hijack) 50 unsigned long page, int create, int no_hijack)
51__releases(bitmap->lock) 51__releases(bitmap->lock)
52__acquires(bitmap->lock) 52__acquires(bitmap->lock)
53{ 53{
@@ -115,7 +115,7 @@ __acquires(bitmap->lock)
115/* if page is completely empty, put it back on the free list, or dealloc it */ 115/* if page is completely empty, put it back on the free list, or dealloc it */
116/* if page was hijacked, unmark the flag so it might get alloced next time */ 116/* if page was hijacked, unmark the flag so it might get alloced next time */
117/* Note: lock should be held when calling this */ 117/* Note: lock should be held when calling this */
118static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page) 118static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
119{ 119{
120 char *ptr; 120 char *ptr;
121 121
@@ -280,7 +280,7 @@ restart:
280 return -EINVAL; 280 return -EINVAL;
281} 281}
282 282
283static void bitmap_file_kick(struct bitmap *bitmap); 283static void md_bitmap_file_kick(struct bitmap *bitmap);
284/* 284/*
285 * write out a page to a file 285 * write out a page to a file
286 */ 286 */
@@ -310,7 +310,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
310 atomic_read(&bitmap->pending_writes)==0); 310 atomic_read(&bitmap->pending_writes)==0);
311 } 311 }
312 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) 312 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
313 bitmap_file_kick(bitmap); 313 md_bitmap_file_kick(bitmap);
314} 314}
315 315
316static void end_bitmap_write(struct buffer_head *bh, int uptodate) 316static void end_bitmap_write(struct buffer_head *bh, int uptodate)
@@ -421,11 +421,11 @@ out:
421 */ 421 */
422 422
423/* 423/*
424 * bitmap_wait_writes() should be called before writing any bitmap 424 * md_bitmap_wait_writes() should be called before writing any bitmap
425 * blocks, to ensure previous writes, particularly from 425 * blocks, to ensure previous writes, particularly from
426 * bitmap_daemon_work(), have completed. 426 * md_bitmap_daemon_work(), have completed.
427 */ 427 */
428static void bitmap_wait_writes(struct bitmap *bitmap) 428static void md_bitmap_wait_writes(struct bitmap *bitmap)
429{ 429{
430 if (bitmap->storage.file) 430 if (bitmap->storage.file)
431 wait_event(bitmap->write_wait, 431 wait_event(bitmap->write_wait,
@@ -443,7 +443,7 @@ static void bitmap_wait_writes(struct bitmap *bitmap)
443 443
444 444
445/* update the event counter and sync the superblock to disk */ 445/* update the event counter and sync the superblock to disk */
446void bitmap_update_sb(struct bitmap *bitmap) 446void md_bitmap_update_sb(struct bitmap *bitmap)
447{ 447{
448 bitmap_super_t *sb; 448 bitmap_super_t *sb;
449 449
@@ -476,10 +476,10 @@ void bitmap_update_sb(struct bitmap *bitmap)
476 kunmap_atomic(sb); 476 kunmap_atomic(sb);
477 write_page(bitmap, bitmap->storage.sb_page, 1); 477 write_page(bitmap, bitmap->storage.sb_page, 1);
478} 478}
479EXPORT_SYMBOL(bitmap_update_sb); 479EXPORT_SYMBOL(md_bitmap_update_sb);
480 480
481/* print out the bitmap file superblock */ 481/* print out the bitmap file superblock */
482void bitmap_print_sb(struct bitmap *bitmap) 482void md_bitmap_print_sb(struct bitmap *bitmap)
483{ 483{
484 bitmap_super_t *sb; 484 bitmap_super_t *sb;
485 485
@@ -518,7 +518,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
518 * 518 *
519 * Returns: 0 on success, -Exxx on error 519 * Returns: 0 on success, -Exxx on error
520 */ 520 */
521static int bitmap_new_disk_sb(struct bitmap *bitmap) 521static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
522{ 522{
523 bitmap_super_t *sb; 523 bitmap_super_t *sb;
524 unsigned long chunksize, daemon_sleep, write_behind; 524 unsigned long chunksize, daemon_sleep, write_behind;
@@ -577,7 +577,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
577} 577}
578 578
579/* read the superblock from the bitmap file and initialize some bitmap fields */ 579/* read the superblock from the bitmap file and initialize some bitmap fields */
580static int bitmap_read_sb(struct bitmap *bitmap) 580static int md_bitmap_read_sb(struct bitmap *bitmap)
581{ 581{
582 char *reason = NULL; 582 char *reason = NULL;
583 bitmap_super_t *sb; 583 bitmap_super_t *sb;
@@ -727,7 +727,7 @@ out_no_sb:
727 bitmap->mddev->bitmap_info.space > sectors_reserved) 727 bitmap->mddev->bitmap_info.space > sectors_reserved)
728 bitmap->mddev->bitmap_info.space = sectors_reserved; 728 bitmap->mddev->bitmap_info.space = sectors_reserved;
729 if (err) { 729 if (err) {
730 bitmap_print_sb(bitmap); 730 md_bitmap_print_sb(bitmap);
731 if (bitmap->cluster_slot < 0) 731 if (bitmap->cluster_slot < 0)
732 md_cluster_stop(bitmap->mddev); 732 md_cluster_stop(bitmap->mddev);
733 } 733 }
@@ -774,9 +774,9 @@ static inline struct page *filemap_get_page(struct bitmap_storage *store,
774 return store->filemap[file_page_index(store, chunk)]; 774 return store->filemap[file_page_index(store, chunk)];
775} 775}
776 776
777static int bitmap_storage_alloc(struct bitmap_storage *store, 777static int md_bitmap_storage_alloc(struct bitmap_storage *store,
778 unsigned long chunks, int with_super, 778 unsigned long chunks, int with_super,
779 int slot_number) 779 int slot_number)
780{ 780{
781 int pnum, offset = 0; 781 int pnum, offset = 0;
782 unsigned long num_pages; 782 unsigned long num_pages;
@@ -830,7 +830,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
830 return 0; 830 return 0;
831} 831}
832 832
833static void bitmap_file_unmap(struct bitmap_storage *store) 833static void md_bitmap_file_unmap(struct bitmap_storage *store)
834{ 834{
835 struct page **map, *sb_page; 835 struct page **map, *sb_page;
836 int pages; 836 int pages;
@@ -862,12 +862,12 @@ static void bitmap_file_unmap(struct bitmap_storage *store)
862 * then it is no longer reliable, so we stop using it and we mark the file 862 * then it is no longer reliable, so we stop using it and we mark the file
863 * as failed in the superblock 863 * as failed in the superblock
864 */ 864 */
865static void bitmap_file_kick(struct bitmap *bitmap) 865static void md_bitmap_file_kick(struct bitmap *bitmap)
866{ 866{
867 char *path, *ptr = NULL; 867 char *path, *ptr = NULL;
868 868
869 if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) { 869 if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
870 bitmap_update_sb(bitmap); 870 md_bitmap_update_sb(bitmap);
871 871
872 if (bitmap->storage.file) { 872 if (bitmap->storage.file) {
873 path = kmalloc(PAGE_SIZE, GFP_KERNEL); 873 path = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -923,7 +923,7 @@ static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
923 * we set the bit immediately, then we record the page number so that 923 * we set the bit immediately, then we record the page number so that
924 * when an unplug occurs, we can flush the dirty pages out to disk 924 * when an unplug occurs, we can flush the dirty pages out to disk
925 */ 925 */
926static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) 926static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
927{ 927{
928 unsigned long bit; 928 unsigned long bit;
929 struct page *page; 929 struct page *page;
@@ -952,7 +952,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
952 set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY); 952 set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
953} 953}
954 954
955static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) 955static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
956{ 956{
957 unsigned long bit; 957 unsigned long bit;
958 struct page *page; 958 struct page *page;
@@ -980,7 +980,7 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
980 } 980 }
981} 981}
982 982
983static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block) 983static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
984{ 984{
985 unsigned long bit; 985 unsigned long bit;
986 struct page *page; 986 struct page *page;
@@ -1005,7 +1005,7 @@ static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
1005/* this gets called when the md device is ready to unplug its underlying 1005/* this gets called when the md device is ready to unplug its underlying
1006 * (slave) device queues -- before we let any writes go down, we need to 1006 * (slave) device queues -- before we let any writes go down, we need to
1007 * sync the dirty pages of the bitmap file to disk */ 1007 * sync the dirty pages of the bitmap file to disk */
1008void bitmap_unplug(struct bitmap *bitmap) 1008void md_bitmap_unplug(struct bitmap *bitmap)
1009{ 1009{
1010 unsigned long i; 1010 unsigned long i;
1011 int dirty, need_write; 1011 int dirty, need_write;
@@ -1025,7 +1025,7 @@ void bitmap_unplug(struct bitmap *bitmap)
1025 BITMAP_PAGE_NEEDWRITE); 1025 BITMAP_PAGE_NEEDWRITE);
1026 if (dirty || need_write) { 1026 if (dirty || need_write) {
1027 if (!writing) { 1027 if (!writing) {
1028 bitmap_wait_writes(bitmap); 1028 md_bitmap_wait_writes(bitmap);
1029 if (bitmap->mddev->queue) 1029 if (bitmap->mddev->queue)
1030 blk_add_trace_msg(bitmap->mddev->queue, 1030 blk_add_trace_msg(bitmap->mddev->queue,
1031 "md bitmap_unplug"); 1031 "md bitmap_unplug");
@@ -1036,14 +1036,14 @@ void bitmap_unplug(struct bitmap *bitmap)
1036 } 1036 }
1037 } 1037 }
1038 if (writing) 1038 if (writing)
1039 bitmap_wait_writes(bitmap); 1039 md_bitmap_wait_writes(bitmap);
1040 1040
1041 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) 1041 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1042 bitmap_file_kick(bitmap); 1042 md_bitmap_file_kick(bitmap);
1043} 1043}
1044EXPORT_SYMBOL(bitmap_unplug); 1044EXPORT_SYMBOL(md_bitmap_unplug);
1045 1045
1046static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); 1046static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
1047/* * bitmap_init_from_disk -- called at bitmap_create time to initialize 1047/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
1048 * the in-memory bitmap from the on-disk bitmap -- also, sets up the 1048 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
1049 * memory mapping of the bitmap file 1049 * memory mapping of the bitmap file
@@ -1055,7 +1055,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
1055 * We ignore all bits for sectors that end earlier than 'start'. 1055 * We ignore all bits for sectors that end earlier than 'start'.
1056 * This is used when reading an out-of-date bitmap... 1056 * This is used when reading an out-of-date bitmap...
1057 */ 1057 */
1058static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) 1058static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1059{ 1059{
1060 unsigned long i, chunks, index, oldindex, bit, node_offset = 0; 1060 unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
1061 struct page *page = NULL; 1061 struct page *page = NULL;
@@ -1078,9 +1078,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1078 /* if the disk bit is set, set the memory bit */ 1078 /* if the disk bit is set, set the memory bit */
1079 int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) 1079 int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
1080 >= start); 1080 >= start);
1081 bitmap_set_memory_bits(bitmap, 1081 md_bitmap_set_memory_bits(bitmap,
1082 (sector_t)i << bitmap->counts.chunkshift, 1082 (sector_t)i << bitmap->counts.chunkshift,
1083 needed); 1083 needed);
1084 } 1084 }
1085 return 0; 1085 return 0;
1086 } 1086 }
@@ -1159,9 +1159,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1159 /* if the disk bit is set, set the memory bit */ 1159 /* if the disk bit is set, set the memory bit */
1160 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift 1160 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
1161 >= start); 1161 >= start);
1162 bitmap_set_memory_bits(bitmap, 1162 md_bitmap_set_memory_bits(bitmap,
1163 (sector_t)i << bitmap->counts.chunkshift, 1163 (sector_t)i << bitmap->counts.chunkshift,
1164 needed); 1164 needed);
1165 bit_cnt++; 1165 bit_cnt++;
1166 } 1166 }
1167 offset = 0; 1167 offset = 0;
@@ -1179,7 +1179,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1179 return ret; 1179 return ret;
1180} 1180}
1181 1181
1182void bitmap_write_all(struct bitmap *bitmap) 1182void md_bitmap_write_all(struct bitmap *bitmap)
1183{ 1183{
1184 /* We don't actually write all bitmap blocks here, 1184 /* We don't actually write all bitmap blocks here,
1185 * just flag them as needing to be written 1185 * just flag them as needing to be written
@@ -1198,16 +1198,16 @@ void bitmap_write_all(struct bitmap *bitmap)
1198 bitmap->allclean = 0; 1198 bitmap->allclean = 0;
1199} 1199}
1200 1200
1201static void bitmap_count_page(struct bitmap_counts *bitmap, 1201static void md_bitmap_count_page(struct bitmap_counts *bitmap,
1202 sector_t offset, int inc) 1202 sector_t offset, int inc)
1203{ 1203{
1204 sector_t chunk = offset >> bitmap->chunkshift; 1204 sector_t chunk = offset >> bitmap->chunkshift;
1205 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; 1205 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1206 bitmap->bp[page].count += inc; 1206 bitmap->bp[page].count += inc;
1207 bitmap_checkfree(bitmap, page); 1207 md_bitmap_checkfree(bitmap, page);
1208} 1208}
1209 1209
1210static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset) 1210static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1211{ 1211{
1212 sector_t chunk = offset >> bitmap->chunkshift; 1212 sector_t chunk = offset >> bitmap->chunkshift;
1213 unsigned long page = chunk >> PAGE_COUNTER_SHIFT; 1213 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
@@ -1217,16 +1217,16 @@ static void bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
1217 bp->pending = 1; 1217 bp->pending = 1;
1218} 1218}
1219 1219
1220static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap, 1220static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1221 sector_t offset, sector_t *blocks, 1221 sector_t offset, sector_t *blocks,
1222 int create); 1222 int create);
1223 1223
1224/* 1224/*
1225 * bitmap daemon -- periodically wakes up to clean bits and flush pages 1225 * bitmap daemon -- periodically wakes up to clean bits and flush pages
1226 * out to disk 1226 * out to disk
1227 */ 1227 */
1228 1228
1229void bitmap_daemon_work(struct mddev *mddev) 1229void md_bitmap_daemon_work(struct mddev *mddev)
1230{ 1230{
1231 struct bitmap *bitmap; 1231 struct bitmap *bitmap;
1232 unsigned long j; 1232 unsigned long j;
@@ -1301,10 +1301,8 @@ void bitmap_daemon_work(struct mddev *mddev)
1301 } 1301 }
1302 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0; 1302 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
1303 } 1303 }
1304 bmc = bitmap_get_counter(counts,
1305 block,
1306 &blocks, 0);
1307 1304
1305 bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
1308 if (!bmc) { 1306 if (!bmc) {
1309 j |= PAGE_COUNTER_MASK; 1307 j |= PAGE_COUNTER_MASK;
1310 continue; 1308 continue;
@@ -1312,17 +1310,17 @@ void bitmap_daemon_work(struct mddev *mddev)
1312 if (*bmc == 1 && !bitmap->need_sync) { 1310 if (*bmc == 1 && !bitmap->need_sync) {
1313 /* We can clear the bit */ 1311 /* We can clear the bit */
1314 *bmc = 0; 1312 *bmc = 0;
1315 bitmap_count_page(counts, block, -1); 1313 md_bitmap_count_page(counts, block, -1);
1316 bitmap_file_clear_bit(bitmap, block); 1314 md_bitmap_file_clear_bit(bitmap, block);
1317 } else if (*bmc && *bmc <= 2) { 1315 } else if (*bmc && *bmc <= 2) {
1318 *bmc = 1; 1316 *bmc = 1;
1319 bitmap_set_pending(counts, block); 1317 md_bitmap_set_pending(counts, block);
1320 bitmap->allclean = 0; 1318 bitmap->allclean = 0;
1321 } 1319 }
1322 } 1320 }
1323 spin_unlock_irq(&counts->lock); 1321 spin_unlock_irq(&counts->lock);
1324 1322
1325 bitmap_wait_writes(bitmap); 1323 md_bitmap_wait_writes(bitmap);
1326 /* Now start writeout on any page in NEEDWRITE that isn't DIRTY. 1324 /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1327 * DIRTY pages need to be written by bitmap_unplug so it can wait 1325 * DIRTY pages need to be written by bitmap_unplug so it can wait
1328 * for them. 1326 * for them.
@@ -1352,9 +1350,9 @@ void bitmap_daemon_work(struct mddev *mddev)
1352 mutex_unlock(&mddev->bitmap_info.mutex); 1350 mutex_unlock(&mddev->bitmap_info.mutex);
1353} 1351}
1354 1352
1355static bitmap_counter_t *bitmap_get_counter(struct bitmap_counts *bitmap, 1353static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1356 sector_t offset, sector_t *blocks, 1354 sector_t offset, sector_t *blocks,
1357 int create) 1355 int create)
1358__releases(bitmap->lock) 1356__releases(bitmap->lock)
1359__acquires(bitmap->lock) 1357__acquires(bitmap->lock)
1360{ 1358{
@@ -1368,7 +1366,7 @@ __acquires(bitmap->lock)
1368 sector_t csize; 1366 sector_t csize;
1369 int err; 1367 int err;
1370 1368
1371 err = bitmap_checkpage(bitmap, page, create, 0); 1369 err = md_bitmap_checkpage(bitmap, page, create, 0);
1372 1370
1373 if (bitmap->bp[page].hijacked || 1371 if (bitmap->bp[page].hijacked ||
1374 bitmap->bp[page].map == NULL) 1372 bitmap->bp[page].map == NULL)
@@ -1394,7 +1392,7 @@ __acquires(bitmap->lock)
1394 &(bitmap->bp[page].map[pageoff]); 1392 &(bitmap->bp[page].map[pageoff]);
1395} 1393}
1396 1394
1397int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind) 1395int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
1398{ 1396{
1399 if (!bitmap) 1397 if (!bitmap)
1400 return 0; 1398 return 0;
@@ -1415,7 +1413,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1415 bitmap_counter_t *bmc; 1413 bitmap_counter_t *bmc;
1416 1414
1417 spin_lock_irq(&bitmap->counts.lock); 1415 spin_lock_irq(&bitmap->counts.lock);
1418 bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 1); 1416 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
1419 if (!bmc) { 1417 if (!bmc) {
1420 spin_unlock_irq(&bitmap->counts.lock); 1418 spin_unlock_irq(&bitmap->counts.lock);
1421 return 0; 1419 return 0;
@@ -1437,8 +1435,8 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1437 1435
1438 switch (*bmc) { 1436 switch (*bmc) {
1439 case 0: 1437 case 0:
1440 bitmap_file_set_bit(bitmap, offset); 1438 md_bitmap_file_set_bit(bitmap, offset);
1441 bitmap_count_page(&bitmap->counts, offset, 1); 1439 md_bitmap_count_page(&bitmap->counts, offset, 1);
1442 /* fall through */ 1440 /* fall through */
1443 case 1: 1441 case 1:
1444 *bmc = 2; 1442 *bmc = 2;
@@ -1456,10 +1454,10 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1456 } 1454 }
1457 return 0; 1455 return 0;
1458} 1456}
1459EXPORT_SYMBOL(bitmap_startwrite); 1457EXPORT_SYMBOL(md_bitmap_startwrite);
1460 1458
1461void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, 1459void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
1462 int success, int behind) 1460 unsigned long sectors, int success, int behind)
1463{ 1461{
1464 if (!bitmap) 1462 if (!bitmap)
1465 return; 1463 return;
@@ -1477,7 +1475,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1477 bitmap_counter_t *bmc; 1475 bitmap_counter_t *bmc;
1478 1476
1479 spin_lock_irqsave(&bitmap->counts.lock, flags); 1477 spin_lock_irqsave(&bitmap->counts.lock, flags);
1480 bmc = bitmap_get_counter(&bitmap->counts, offset, &blocks, 0); 1478 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
1481 if (!bmc) { 1479 if (!bmc) {
1482 spin_unlock_irqrestore(&bitmap->counts.lock, flags); 1480 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1483 return; 1481 return;
@@ -1498,7 +1496,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1498 1496
1499 (*bmc)--; 1497 (*bmc)--;
1500 if (*bmc <= 2) { 1498 if (*bmc <= 2) {
1501 bitmap_set_pending(&bitmap->counts, offset); 1499 md_bitmap_set_pending(&bitmap->counts, offset);
1502 bitmap->allclean = 0; 1500 bitmap->allclean = 0;
1503 } 1501 }
1504 spin_unlock_irqrestore(&bitmap->counts.lock, flags); 1502 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
@@ -1509,7 +1507,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1509 sectors = 0; 1507 sectors = 0;
1510 } 1508 }
1511} 1509}
1512EXPORT_SYMBOL(bitmap_endwrite); 1510EXPORT_SYMBOL(md_bitmap_endwrite);
1513 1511
1514static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, 1512static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1515 int degraded) 1513 int degraded)
@@ -1521,7 +1519,7 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
1521 return 1; /* always resync if no bitmap */ 1519 return 1; /* always resync if no bitmap */
1522 } 1520 }
1523 spin_lock_irq(&bitmap->counts.lock); 1521 spin_lock_irq(&bitmap->counts.lock);
1524 bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); 1522 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1525 rv = 0; 1523 rv = 0;
1526 if (bmc) { 1524 if (bmc) {
1527 /* locked */ 1525 /* locked */
@@ -1539,8 +1537,8 @@ static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t
1539 return rv; 1537 return rv;
1540} 1538}
1541 1539
1542int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, 1540int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1543 int degraded) 1541 int degraded)
1544{ 1542{
1545 /* bitmap_start_sync must always report on multiples of whole 1543 /* bitmap_start_sync must always report on multiples of whole
1546 * pages, otherwise resync (which is very PAGE_SIZE based) will 1544 * pages, otherwise resync (which is very PAGE_SIZE based) will
@@ -1561,9 +1559,9 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1561 } 1559 }
1562 return rv; 1560 return rv;
1563} 1561}
1564EXPORT_SYMBOL(bitmap_start_sync); 1562EXPORT_SYMBOL(md_bitmap_start_sync);
1565 1563
1566void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted) 1564void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
1567{ 1565{
1568 bitmap_counter_t *bmc; 1566 bitmap_counter_t *bmc;
1569 unsigned long flags; 1567 unsigned long flags;
@@ -1573,7 +1571,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
1573 return; 1571 return;
1574 } 1572 }
1575 spin_lock_irqsave(&bitmap->counts.lock, flags); 1573 spin_lock_irqsave(&bitmap->counts.lock, flags);
1576 bmc = bitmap_get_counter(&bitmap->counts, offset, blocks, 0); 1574 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
1577 if (bmc == NULL) 1575 if (bmc == NULL)
1578 goto unlock; 1576 goto unlock;
1579 /* locked */ 1577 /* locked */
@@ -1584,7 +1582,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
1584 *bmc |= NEEDED_MASK; 1582 *bmc |= NEEDED_MASK;
1585 else { 1583 else {
1586 if (*bmc <= 2) { 1584 if (*bmc <= 2) {
1587 bitmap_set_pending(&bitmap->counts, offset); 1585 md_bitmap_set_pending(&bitmap->counts, offset);
1588 bitmap->allclean = 0; 1586 bitmap->allclean = 0;
1589 } 1587 }
1590 } 1588 }
@@ -1592,9 +1590,9 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, i
1592 unlock: 1590 unlock:
1593 spin_unlock_irqrestore(&bitmap->counts.lock, flags); 1591 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
1594} 1592}
1595EXPORT_SYMBOL(bitmap_end_sync); 1593EXPORT_SYMBOL(md_bitmap_end_sync);
1596 1594
1597void bitmap_close_sync(struct bitmap *bitmap) 1595void md_bitmap_close_sync(struct bitmap *bitmap)
1598{ 1596{
1599 /* Sync has finished, and any bitmap chunks that weren't synced 1597 /* Sync has finished, and any bitmap chunks that weren't synced
1600 * properly have been aborted. It remains to us to clear the 1598 * properly have been aborted. It remains to us to clear the
@@ -1605,13 +1603,13 @@ void bitmap_close_sync(struct bitmap *bitmap)
1605 if (!bitmap) 1603 if (!bitmap)
1606 return; 1604 return;
1607 while (sector < bitmap->mddev->resync_max_sectors) { 1605 while (sector < bitmap->mddev->resync_max_sectors) {
1608 bitmap_end_sync(bitmap, sector, &blocks, 0); 1606 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1609 sector += blocks; 1607 sector += blocks;
1610 } 1608 }
1611} 1609}
1612EXPORT_SYMBOL(bitmap_close_sync); 1610EXPORT_SYMBOL(md_bitmap_close_sync);
1613 1611
1614void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) 1612void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
1615{ 1613{
1616 sector_t s = 0; 1614 sector_t s = 0;
1617 sector_t blocks; 1615 sector_t blocks;
@@ -1633,15 +1631,15 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
1633 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); 1631 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
1634 s = 0; 1632 s = 0;
1635 while (s < sector && s < bitmap->mddev->resync_max_sectors) { 1633 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
1636 bitmap_end_sync(bitmap, s, &blocks, 0); 1634 md_bitmap_end_sync(bitmap, s, &blocks, 0);
1637 s += blocks; 1635 s += blocks;
1638 } 1636 }
1639 bitmap->last_end_sync = jiffies; 1637 bitmap->last_end_sync = jiffies;
1640 sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed"); 1638 sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
1641} 1639}
1642EXPORT_SYMBOL(bitmap_cond_end_sync); 1640EXPORT_SYMBOL(md_bitmap_cond_end_sync);
1643 1641
1644void bitmap_sync_with_cluster(struct mddev *mddev, 1642void md_bitmap_sync_with_cluster(struct mddev *mddev,
1645 sector_t old_lo, sector_t old_hi, 1643 sector_t old_lo, sector_t old_hi,
1646 sector_t new_lo, sector_t new_hi) 1644 sector_t new_lo, sector_t new_hi)
1647{ 1645{
@@ -1649,20 +1647,20 @@ void bitmap_sync_with_cluster(struct mddev *mddev,
1649 sector_t sector, blocks = 0; 1647 sector_t sector, blocks = 0;
1650 1648
1651 for (sector = old_lo; sector < new_lo; ) { 1649 for (sector = old_lo; sector < new_lo; ) {
1652 bitmap_end_sync(bitmap, sector, &blocks, 0); 1650 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1653 sector += blocks; 1651 sector += blocks;
1654 } 1652 }
1655 WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n"); 1653 WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1656 1654
1657 for (sector = old_hi; sector < new_hi; ) { 1655 for (sector = old_hi; sector < new_hi; ) {
1658 bitmap_start_sync(bitmap, sector, &blocks, 0); 1656 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1659 sector += blocks; 1657 sector += blocks;
1660 } 1658 }
1661 WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n"); 1659 WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1662} 1660}
1663EXPORT_SYMBOL(bitmap_sync_with_cluster); 1661EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
1664 1662
1665static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) 1663static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
1666{ 1664{
1667 /* For each chunk covered by any of these sectors, set the 1665 /* For each chunk covered by any of these sectors, set the
1668 * counter to 2 and possibly set resync_needed. They should all 1666 * counter to 2 and possibly set resync_needed. They should all
@@ -1672,15 +1670,15 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
1672 sector_t secs; 1670 sector_t secs;
1673 bitmap_counter_t *bmc; 1671 bitmap_counter_t *bmc;
1674 spin_lock_irq(&bitmap->counts.lock); 1672 spin_lock_irq(&bitmap->counts.lock);
1675 bmc = bitmap_get_counter(&bitmap->counts, offset, &secs, 1); 1673 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
1676 if (!bmc) { 1674 if (!bmc) {
1677 spin_unlock_irq(&bitmap->counts.lock); 1675 spin_unlock_irq(&bitmap->counts.lock);
1678 return; 1676 return;
1679 } 1677 }
1680 if (!*bmc) { 1678 if (!*bmc) {
1681 *bmc = 2; 1679 *bmc = 2;
1682 bitmap_count_page(&bitmap->counts, offset, 1); 1680 md_bitmap_count_page(&bitmap->counts, offset, 1);
1683 bitmap_set_pending(&bitmap->counts, offset); 1681 md_bitmap_set_pending(&bitmap->counts, offset);
1684 bitmap->allclean = 0; 1682 bitmap->allclean = 0;
1685 } 1683 }
1686 if (needed) 1684 if (needed)
@@ -1689,14 +1687,14 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
1689} 1687}
1690 1688
1691/* dirty the memory and file bits for bitmap chunks "s" to "e" */ 1689/* dirty the memory and file bits for bitmap chunks "s" to "e" */
1692void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e) 1690void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1693{ 1691{
1694 unsigned long chunk; 1692 unsigned long chunk;
1695 1693
1696 for (chunk = s; chunk <= e; chunk++) { 1694 for (chunk = s; chunk <= e; chunk++) {
1697 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift; 1695 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
1698 bitmap_set_memory_bits(bitmap, sec, 1); 1696 md_bitmap_set_memory_bits(bitmap, sec, 1);
1699 bitmap_file_set_bit(bitmap, sec); 1697 md_bitmap_file_set_bit(bitmap, sec);
1700 if (sec < bitmap->mddev->recovery_cp) 1698 if (sec < bitmap->mddev->recovery_cp)
1701 /* We are asserting that the array is dirty, 1699 /* We are asserting that the array is dirty,
1702 * so move the recovery_cp address back so 1700 * so move the recovery_cp address back so
@@ -1709,7 +1707,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
1709/* 1707/*
1710 * flush out any pending updates 1708 * flush out any pending updates
1711 */ 1709 */
1712void bitmap_flush(struct mddev *mddev) 1710void md_bitmap_flush(struct mddev *mddev)
1713{ 1711{
1714 struct bitmap *bitmap = mddev->bitmap; 1712 struct bitmap *bitmap = mddev->bitmap;
1715 long sleep; 1713 long sleep;
@@ -1722,18 +1720,18 @@ void bitmap_flush(struct mddev *mddev)
1722 */ 1720 */
1723 sleep = mddev->bitmap_info.daemon_sleep * 2; 1721 sleep = mddev->bitmap_info.daemon_sleep * 2;
1724 bitmap->daemon_lastrun -= sleep; 1722 bitmap->daemon_lastrun -= sleep;
1725 bitmap_daemon_work(mddev); 1723 md_bitmap_daemon_work(mddev);
1726 bitmap->daemon_lastrun -= sleep; 1724 bitmap->daemon_lastrun -= sleep;
1727 bitmap_daemon_work(mddev); 1725 md_bitmap_daemon_work(mddev);
1728 bitmap->daemon_lastrun -= sleep; 1726 bitmap->daemon_lastrun -= sleep;
1729 bitmap_daemon_work(mddev); 1727 md_bitmap_daemon_work(mddev);
1730 bitmap_update_sb(bitmap); 1728 md_bitmap_update_sb(bitmap);
1731} 1729}
1732 1730
1733/* 1731/*
1734 * free memory that was allocated 1732 * free memory that was allocated
1735 */ 1733 */
1736void bitmap_free(struct bitmap *bitmap) 1734void md_bitmap_free(struct bitmap *bitmap)
1737{ 1735{
1738 unsigned long k, pages; 1736 unsigned long k, pages;
1739 struct bitmap_page *bp; 1737 struct bitmap_page *bp;
@@ -1753,7 +1751,7 @@ void bitmap_free(struct bitmap *bitmap)
1753 atomic_read(&bitmap->pending_writes) == 0); 1751 atomic_read(&bitmap->pending_writes) == 0);
1754 1752
1755 /* release the bitmap file */ 1753 /* release the bitmap file */
1756 bitmap_file_unmap(&bitmap->storage); 1754 md_bitmap_file_unmap(&bitmap->storage);
1757 1755
1758 bp = bitmap->counts.bp; 1756 bp = bitmap->counts.bp;
1759 pages = bitmap->counts.pages; 1757 pages = bitmap->counts.pages;
@@ -1767,9 +1765,9 @@ void bitmap_free(struct bitmap *bitmap)
1767 kfree(bp); 1765 kfree(bp);
1768 kfree(bitmap); 1766 kfree(bitmap);
1769} 1767}
1770EXPORT_SYMBOL(bitmap_free); 1768EXPORT_SYMBOL(md_bitmap_free);
1771 1769
1772void bitmap_wait_behind_writes(struct mddev *mddev) 1770void md_bitmap_wait_behind_writes(struct mddev *mddev)
1773{ 1771{
1774 struct bitmap *bitmap = mddev->bitmap; 1772 struct bitmap *bitmap = mddev->bitmap;
1775 1773
@@ -1783,14 +1781,14 @@ void bitmap_wait_behind_writes(struct mddev *mddev)
1783 } 1781 }
1784} 1782}
1785 1783
1786void bitmap_destroy(struct mddev *mddev) 1784void md_bitmap_destroy(struct mddev *mddev)
1787{ 1785{
1788 struct bitmap *bitmap = mddev->bitmap; 1786 struct bitmap *bitmap = mddev->bitmap;
1789 1787
1790 if (!bitmap) /* there was no bitmap */ 1788 if (!bitmap) /* there was no bitmap */
1791 return; 1789 return;
1792 1790
1793 bitmap_wait_behind_writes(mddev); 1791 md_bitmap_wait_behind_writes(mddev);
1794 1792
1795 mutex_lock(&mddev->bitmap_info.mutex); 1793 mutex_lock(&mddev->bitmap_info.mutex);
1796 spin_lock(&mddev->lock); 1794 spin_lock(&mddev->lock);
@@ -1800,7 +1798,7 @@ void bitmap_destroy(struct mddev *mddev)
1800 if (mddev->thread) 1798 if (mddev->thread)
1801 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; 1799 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1802 1800
1803 bitmap_free(bitmap); 1801 md_bitmap_free(bitmap);
1804} 1802}
1805 1803
1806/* 1804/*
@@ -1808,7 +1806,7 @@ void bitmap_destroy(struct mddev *mddev)
1808 * if this returns an error, bitmap_destroy must be called to do clean up 1806 * if this returns an error, bitmap_destroy must be called to do clean up
1809 * once mddev->bitmap is set 1807 * once mddev->bitmap is set
1810 */ 1808 */
1811struct bitmap *bitmap_create(struct mddev *mddev, int slot) 1809struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
1812{ 1810{
1813 struct bitmap *bitmap; 1811 struct bitmap *bitmap;
1814 sector_t blocks = mddev->resync_max_sectors; 1812 sector_t blocks = mddev->resync_max_sectors;
@@ -1863,9 +1861,9 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
1863 * instructing us to create a new on-disk bitmap instance. 1861 * instructing us to create a new on-disk bitmap instance.
1864 */ 1862 */
1865 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags)) 1863 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1866 err = bitmap_new_disk_sb(bitmap); 1864 err = md_bitmap_new_disk_sb(bitmap);
1867 else 1865 else
1868 err = bitmap_read_sb(bitmap); 1866 err = md_bitmap_read_sb(bitmap);
1869 } else { 1867 } else {
1870 err = 0; 1868 err = 0;
1871 if (mddev->bitmap_info.chunksize == 0 || 1869 if (mddev->bitmap_info.chunksize == 0 ||
@@ -1878,7 +1876,7 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
1878 goto error; 1876 goto error;
1879 1877
1880 bitmap->daemon_lastrun = jiffies; 1878 bitmap->daemon_lastrun = jiffies;
1881 err = bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1); 1879 err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
1882 if (err) 1880 if (err)
1883 goto error; 1881 goto error;
1884 1882
@@ -1891,11 +1889,11 @@ struct bitmap *bitmap_create(struct mddev *mddev, int slot)
1891 1889
1892 return bitmap; 1890 return bitmap;
1893 error: 1891 error:
1894 bitmap_free(bitmap); 1892 md_bitmap_free(bitmap);
1895 return ERR_PTR(err); 1893 return ERR_PTR(err);
1896} 1894}
1897 1895
1898int bitmap_load(struct mddev *mddev) 1896int md_bitmap_load(struct mddev *mddev)
1899{ 1897{
1900 int err = 0; 1898 int err = 0;
1901 sector_t start = 0; 1899 sector_t start = 0;
@@ -1915,10 +1913,10 @@ int bitmap_load(struct mddev *mddev)
1915 */ 1913 */
1916 while (sector < mddev->resync_max_sectors) { 1914 while (sector < mddev->resync_max_sectors) {
1917 sector_t blocks; 1915 sector_t blocks;
1918 bitmap_start_sync(bitmap, sector, &blocks, 0); 1916 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
1919 sector += blocks; 1917 sector += blocks;
1920 } 1918 }
1921 bitmap_close_sync(bitmap); 1919 md_bitmap_close_sync(bitmap);
1922 1920
1923 if (mddev->degraded == 0 1921 if (mddev->degraded == 0
1924 || bitmap->events_cleared == mddev->events) 1922 || bitmap->events_cleared == mddev->events)
@@ -1927,7 +1925,7 @@ int bitmap_load(struct mddev *mddev)
1927 start = mddev->recovery_cp; 1925 start = mddev->recovery_cp;
1928 1926
1929 mutex_lock(&mddev->bitmap_info.mutex); 1927 mutex_lock(&mddev->bitmap_info.mutex);
1930 err = bitmap_init_from_disk(bitmap, start); 1928 err = md_bitmap_init_from_disk(bitmap, start);
1931 mutex_unlock(&mddev->bitmap_info.mutex); 1929 mutex_unlock(&mddev->bitmap_info.mutex);
1932 1930
1933 if (err) 1931 if (err)
@@ -1940,29 +1938,29 @@ int bitmap_load(struct mddev *mddev)
1940 mddev->thread->timeout = mddev->bitmap_info.daemon_sleep; 1938 mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
1941 md_wakeup_thread(mddev->thread); 1939 md_wakeup_thread(mddev->thread);
1942 1940
1943 bitmap_update_sb(bitmap); 1941 md_bitmap_update_sb(bitmap);
1944 1942
1945 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) 1943 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
1946 err = -EIO; 1944 err = -EIO;
1947out: 1945out:
1948 return err; 1946 return err;
1949} 1947}
1950EXPORT_SYMBOL_GPL(bitmap_load); 1948EXPORT_SYMBOL_GPL(md_bitmap_load);
1951 1949
1952struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot) 1950struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
1953{ 1951{
1954 int rv = 0; 1952 int rv = 0;
1955 struct bitmap *bitmap; 1953 struct bitmap *bitmap;
1956 1954
1957 bitmap = bitmap_create(mddev, slot); 1955 bitmap = md_bitmap_create(mddev, slot);
1958 if (IS_ERR(bitmap)) { 1956 if (IS_ERR(bitmap)) {
1959 rv = PTR_ERR(bitmap); 1957 rv = PTR_ERR(bitmap);
1960 return ERR_PTR(rv); 1958 return ERR_PTR(rv);
1961 } 1959 }
1962 1960
1963 rv = bitmap_init_from_disk(bitmap, 0); 1961 rv = md_bitmap_init_from_disk(bitmap, 0);
1964 if (rv) { 1962 if (rv) {
1965 bitmap_free(bitmap); 1963 md_bitmap_free(bitmap);
1966 return ERR_PTR(rv); 1964 return ERR_PTR(rv);
1967 } 1965 }
1968 1966
@@ -1973,7 +1971,7 @@ EXPORT_SYMBOL(get_bitmap_from_slot);
1973/* Loads the bitmap associated with slot and copies the resync information 1971/* Loads the bitmap associated with slot and copies the resync information
1974 * to our bitmap 1972 * to our bitmap
1975 */ 1973 */
1976int bitmap_copy_from_slot(struct mddev *mddev, int slot, 1974int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
1977 sector_t *low, sector_t *high, bool clear_bits) 1975 sector_t *low, sector_t *high, bool clear_bits)
1978{ 1976{
1979 int rv = 0, i, j; 1977 int rv = 0, i, j;
@@ -1990,35 +1988,35 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1990 counts = &bitmap->counts; 1988 counts = &bitmap->counts;
1991 for (j = 0; j < counts->chunks; j++) { 1989 for (j = 0; j < counts->chunks; j++) {
1992 block = (sector_t)j << counts->chunkshift; 1990 block = (sector_t)j << counts->chunkshift;
1993 if (bitmap_file_test_bit(bitmap, block)) { 1991 if (md_bitmap_file_test_bit(bitmap, block)) {
1994 if (!lo) 1992 if (!lo)
1995 lo = block; 1993 lo = block;
1996 hi = block; 1994 hi = block;
1997 bitmap_file_clear_bit(bitmap, block); 1995 md_bitmap_file_clear_bit(bitmap, block);
1998 bitmap_set_memory_bits(mddev->bitmap, block, 1); 1996 md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
1999 bitmap_file_set_bit(mddev->bitmap, block); 1997 md_bitmap_file_set_bit(mddev->bitmap, block);
2000 } 1998 }
2001 } 1999 }
2002 2000
2003 if (clear_bits) { 2001 if (clear_bits) {
2004 bitmap_update_sb(bitmap); 2002 md_bitmap_update_sb(bitmap);
2005 /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs 2003 /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2006 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */ 2004 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
2007 for (i = 0; i < bitmap->storage.file_pages; i++) 2005 for (i = 0; i < bitmap->storage.file_pages; i++)
2008 if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING)) 2006 if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2009 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); 2007 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
2010 bitmap_unplug(bitmap); 2008 md_bitmap_unplug(bitmap);
2011 } 2009 }
2012 bitmap_unplug(mddev->bitmap); 2010 md_bitmap_unplug(mddev->bitmap);
2013 *low = lo; 2011 *low = lo;
2014 *high = hi; 2012 *high = hi;
2015 2013
2016 return rv; 2014 return rv;
2017} 2015}
2018EXPORT_SYMBOL_GPL(bitmap_copy_from_slot); 2016EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
2019 2017
2020 2018
2021void bitmap_status(struct seq_file *seq, struct bitmap *bitmap) 2019void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
2022{ 2020{
2023 unsigned long chunk_kb; 2021 unsigned long chunk_kb;
2024 struct bitmap_counts *counts; 2022 struct bitmap_counts *counts;
@@ -2045,7 +2043,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
2045 seq_printf(seq, "\n"); 2043 seq_printf(seq, "\n");
2046} 2044}
2047 2045
2048int bitmap_resize(struct bitmap *bitmap, sector_t blocks, 2046int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2049 int chunksize, int init) 2047 int chunksize, int init)
2050{ 2048{
2051 /* If chunk_size is 0, choose an appropriate chunk size. 2049 /* If chunk_size is 0, choose an appropriate chunk size.
@@ -2106,12 +2104,12 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2106 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); 2104 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2107 memset(&store, 0, sizeof(store)); 2105 memset(&store, 0, sizeof(store));
2108 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) 2106 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
2109 ret = bitmap_storage_alloc(&store, chunks, 2107 ret = md_bitmap_storage_alloc(&store, chunks,
2110 !bitmap->mddev->bitmap_info.external, 2108 !bitmap->mddev->bitmap_info.external,
2111 mddev_is_clustered(bitmap->mddev) 2109 mddev_is_clustered(bitmap->mddev)
2112 ? bitmap->cluster_slot : 0); 2110 ? bitmap->cluster_slot : 0);
2113 if (ret) { 2111 if (ret) {
2114 bitmap_file_unmap(&store); 2112 md_bitmap_file_unmap(&store);
2115 goto err; 2113 goto err;
2116 } 2114 }
2117 2115
@@ -2120,7 +2118,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2120 new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL); 2118 new_bp = kzalloc(pages * sizeof(*new_bp), GFP_KERNEL);
2121 ret = -ENOMEM; 2119 ret = -ENOMEM;
2122 if (!new_bp) { 2120 if (!new_bp) {
2123 bitmap_file_unmap(&store); 2121 md_bitmap_file_unmap(&store);
2124 goto err; 2122 goto err;
2125 } 2123 }
2126 2124
@@ -2134,7 +2132,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2134 memcpy(page_address(store.sb_page), 2132 memcpy(page_address(store.sb_page),
2135 page_address(bitmap->storage.sb_page), 2133 page_address(bitmap->storage.sb_page),
2136 sizeof(bitmap_super_t)); 2134 sizeof(bitmap_super_t));
2137 bitmap_file_unmap(&bitmap->storage); 2135 md_bitmap_file_unmap(&bitmap->storage);
2138 bitmap->storage = store; 2136 bitmap->storage = store;
2139 2137
2140 old_counts = bitmap->counts; 2138 old_counts = bitmap->counts;
@@ -2154,7 +2152,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2154 if (mddev_is_clustered(bitmap->mddev)) { 2152 if (mddev_is_clustered(bitmap->mddev)) {
2155 unsigned long page; 2153 unsigned long page;
2156 for (page = 0; page < pages; page++) { 2154 for (page = 0; page < pages; page++) {
2157 ret = bitmap_checkpage(&bitmap->counts, page, 1, 1); 2155 ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
2158 if (ret) { 2156 if (ret) {
2159 unsigned long k; 2157 unsigned long k;
2160 2158
@@ -2184,27 +2182,23 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2184 bitmap_counter_t *bmc_old, *bmc_new; 2182 bitmap_counter_t *bmc_old, *bmc_new;
2185 int set; 2183 int set;
2186 2184
2187 bmc_old = bitmap_get_counter(&old_counts, block, 2185 bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
2188 &old_blocks, 0);
2189 set = bmc_old && NEEDED(*bmc_old); 2186 set = bmc_old && NEEDED(*bmc_old);
2190 2187
2191 if (set) { 2188 if (set) {
2192 bmc_new = bitmap_get_counter(&bitmap->counts, block, 2189 bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2193 &new_blocks, 1);
2194 if (*bmc_new == 0) { 2190 if (*bmc_new == 0) {
2195 /* need to set on-disk bits too. */ 2191 /* need to set on-disk bits too. */
2196 sector_t end = block + new_blocks; 2192 sector_t end = block + new_blocks;
2197 sector_t start = block >> chunkshift; 2193 sector_t start = block >> chunkshift;
2198 start <<= chunkshift; 2194 start <<= chunkshift;
2199 while (start < end) { 2195 while (start < end) {
2200 bitmap_file_set_bit(bitmap, block); 2196 md_bitmap_file_set_bit(bitmap, block);
2201 start += 1 << chunkshift; 2197 start += 1 << chunkshift;
2202 } 2198 }
2203 *bmc_new = 2; 2199 *bmc_new = 2;
2204 bitmap_count_page(&bitmap->counts, 2200 md_bitmap_count_page(&bitmap->counts, block, 1);
2205 block, 1); 2201 md_bitmap_set_pending(&bitmap->counts, block);
2206 bitmap_set_pending(&bitmap->counts,
2207 block);
2208 } 2202 }
2209 *bmc_new |= NEEDED_MASK; 2203 *bmc_new |= NEEDED_MASK;
2210 if (new_blocks < old_blocks) 2204 if (new_blocks < old_blocks)
@@ -2225,18 +2219,15 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2225 int i; 2219 int i;
2226 while (block < (chunks << chunkshift)) { 2220 while (block < (chunks << chunkshift)) {
2227 bitmap_counter_t *bmc; 2221 bitmap_counter_t *bmc;
2228 bmc = bitmap_get_counter(&bitmap->counts, block, 2222 bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
2229 &new_blocks, 1);
2230 if (bmc) { 2223 if (bmc) {
2231 /* new space. It needs to be resynced, so 2224 /* new space. It needs to be resynced, so
2232 * we set NEEDED_MASK. 2225 * we set NEEDED_MASK.
2233 */ 2226 */
2234 if (*bmc == 0) { 2227 if (*bmc == 0) {
2235 *bmc = NEEDED_MASK | 2; 2228 *bmc = NEEDED_MASK | 2;
2236 bitmap_count_page(&bitmap->counts, 2229 md_bitmap_count_page(&bitmap->counts, block, 1);
2237 block, 1); 2230 md_bitmap_set_pending(&bitmap->counts, block);
2238 bitmap_set_pending(&bitmap->counts,
2239 block);
2240 } 2231 }
2241 } 2232 }
2242 block += new_blocks; 2233 block += new_blocks;
@@ -2247,14 +2238,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
2247 spin_unlock_irq(&bitmap->counts.lock); 2238 spin_unlock_irq(&bitmap->counts.lock);
2248 2239
2249 if (!init) { 2240 if (!init) {
2250 bitmap_unplug(bitmap); 2241 md_bitmap_unplug(bitmap);
2251 bitmap->mddev->pers->quiesce(bitmap->mddev, 0); 2242 bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
2252 } 2243 }
2253 ret = 0; 2244 ret = 0;
2254err: 2245err:
2255 return ret; 2246 return ret;
2256} 2247}
2257EXPORT_SYMBOL_GPL(bitmap_resize); 2248EXPORT_SYMBOL_GPL(md_bitmap_resize);
2258 2249
2259static ssize_t 2250static ssize_t
2260location_show(struct mddev *mddev, char *page) 2251location_show(struct mddev *mddev, char *page)
@@ -2298,7 +2289,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
2298 } 2289 }
2299 if (mddev->pers) { 2290 if (mddev->pers) {
2300 mddev->pers->quiesce(mddev, 1); 2291 mddev->pers->quiesce(mddev, 1);
2301 bitmap_destroy(mddev); 2292 md_bitmap_destroy(mddev);
2302 mddev->pers->quiesce(mddev, 0); 2293 mddev->pers->quiesce(mddev, 0);
2303 } 2294 }
2304 mddev->bitmap_info.offset = 0; 2295 mddev->bitmap_info.offset = 0;
@@ -2337,18 +2328,18 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
2337 if (mddev->pers) { 2328 if (mddev->pers) {
2338 struct bitmap *bitmap; 2329 struct bitmap *bitmap;
2339 mddev->pers->quiesce(mddev, 1); 2330 mddev->pers->quiesce(mddev, 1);
2340 bitmap = bitmap_create(mddev, -1); 2331 bitmap = md_bitmap_create(mddev, -1);
2341 if (IS_ERR(bitmap)) 2332 if (IS_ERR(bitmap))
2342 rv = PTR_ERR(bitmap); 2333 rv = PTR_ERR(bitmap);
2343 else { 2334 else {
2344 mddev->bitmap = bitmap; 2335 mddev->bitmap = bitmap;
2345 rv = bitmap_load(mddev); 2336 rv = md_bitmap_load(mddev);
2346 if (rv) 2337 if (rv)
2347 mddev->bitmap_info.offset = 0; 2338 mddev->bitmap_info.offset = 0;
2348 } 2339 }
2349 mddev->pers->quiesce(mddev, 0); 2340 mddev->pers->quiesce(mddev, 0);
2350 if (rv) { 2341 if (rv) {
2351 bitmap_destroy(mddev); 2342 md_bitmap_destroy(mddev);
2352 goto out; 2343 goto out;
2353 } 2344 }
2354 } 2345 }
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 5df35ca90f58..cfd7395de8fd 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -236,43 +236,43 @@ struct bitmap {
236/* the bitmap API */ 236/* the bitmap API */
237 237
238/* these are used only by md/bitmap */ 238/* these are used only by md/bitmap */
239struct bitmap *bitmap_create(struct mddev *mddev, int slot); 239struct bitmap *md_bitmap_create(struct mddev *mddev, int slot);
240int bitmap_load(struct mddev *mddev); 240int md_bitmap_load(struct mddev *mddev);
241void bitmap_flush(struct mddev *mddev); 241void md_bitmap_flush(struct mddev *mddev);
242void bitmap_destroy(struct mddev *mddev); 242void md_bitmap_destroy(struct mddev *mddev);
243 243
244void bitmap_print_sb(struct bitmap *bitmap); 244void md_bitmap_print_sb(struct bitmap *bitmap);
245void bitmap_update_sb(struct bitmap *bitmap); 245void md_bitmap_update_sb(struct bitmap *bitmap);
246void bitmap_status(struct seq_file *seq, struct bitmap *bitmap); 246void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap);
247 247
248int bitmap_setallbits(struct bitmap *bitmap); 248int md_bitmap_setallbits(struct bitmap *bitmap);
249void bitmap_write_all(struct bitmap *bitmap); 249void md_bitmap_write_all(struct bitmap *bitmap);
250 250
251void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e); 251void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
252 252
253/* these are exported */ 253/* these are exported */
254int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, 254int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
255 unsigned long sectors, int behind); 255 unsigned long sectors, int behind);
256void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, 256void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
257 unsigned long sectors, int success, int behind); 257 unsigned long sectors, int success, int behind);
258int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded); 258int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int degraded);
259void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted); 259void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
260void bitmap_close_sync(struct bitmap *bitmap); 260void md_bitmap_close_sync(struct bitmap *bitmap);
261void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force); 261void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
262void bitmap_sync_with_cluster(struct mddev *mddev, 262void md_bitmap_sync_with_cluster(struct mddev *mddev,
263 sector_t old_lo, sector_t old_hi, 263 sector_t old_lo, sector_t old_hi,
264 sector_t new_lo, sector_t new_hi); 264 sector_t new_lo, sector_t new_hi);
265 265
266void bitmap_unplug(struct bitmap *bitmap); 266void md_bitmap_unplug(struct bitmap *bitmap);
267void bitmap_daemon_work(struct mddev *mddev); 267void md_bitmap_daemon_work(struct mddev *mddev);
268 268
269int bitmap_resize(struct bitmap *bitmap, sector_t blocks, 269int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
270 int chunksize, int init); 270 int chunksize, int init);
271struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot); 271struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
272int bitmap_copy_from_slot(struct mddev *mddev, int slot, 272int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
273 sector_t *lo, sector_t *hi, bool clear_bits); 273 sector_t *lo, sector_t *hi, bool clear_bits);
274void bitmap_free(struct bitmap *bitmap); 274void md_bitmap_free(struct bitmap *bitmap);
275void bitmap_wait_behind_writes(struct mddev *mddev); 275void md_bitmap_wait_behind_writes(struct mddev *mddev);
276#endif 276#endif
277 277
278#endif 278#endif
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 79bfbc840385..fdd357fb21c5 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -326,7 +326,7 @@ static void recover_bitmaps(struct md_thread *thread)
326 str, ret); 326 str, ret);
327 goto clear_bit; 327 goto clear_bit;
328 } 328 }
329 ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true); 329 ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
330 if (ret) { 330 if (ret) {
331 pr_err("md-cluster: Could not copy data from bitmap %d\n", slot); 331 pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
332 goto clear_bit; 332 goto clear_bit;
@@ -480,9 +480,7 @@ static void process_suspend_info(struct mddev *mddev,
480 * resync thread is running in another node, 480 * resync thread is running in another node,
481 * so we don't need to do the resync again 481 * so we don't need to do the resync again
482 * with the same section */ 482 * with the same section */
483 bitmap_sync_with_cluster(mddev, cinfo->sync_low, 483 md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi);
484 cinfo->sync_hi,
485 lo, hi);
486 cinfo->sync_low = lo; 484 cinfo->sync_low = lo;
487 cinfo->sync_hi = hi; 485 cinfo->sync_hi = hi;
488 486
@@ -829,7 +827,7 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
829 } 827 }
830 828
831 /* Read the disk bitmap sb and check if it needs recovery */ 829 /* Read the disk bitmap sb and check if it needs recovery */
832 ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false); 830 ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
833 if (ret) { 831 if (ret) {
834 pr_warn("md-cluster: Could not gather bitmaps from slot %d", i); 832 pr_warn("md-cluster: Could not gather bitmaps from slot %d", i);
835 lockres_free(bm_lockres); 833 lockres_free(bm_lockres);
@@ -1127,13 +1125,13 @@ static int cluster_check_sync_size(struct mddev *mddev)
1127 bm_lockres = lockres_init(mddev, str, NULL, 1); 1125 bm_lockres = lockres_init(mddev, str, NULL, 1);
1128 if (!bm_lockres) { 1126 if (!bm_lockres) {
1129 pr_err("md-cluster: Cannot initialize %s\n", str); 1127 pr_err("md-cluster: Cannot initialize %s\n", str);
1130 bitmap_free(bitmap); 1128 md_bitmap_free(bitmap);
1131 return -1; 1129 return -1;
1132 } 1130 }
1133 bm_lockres->flags |= DLM_LKF_NOQUEUE; 1131 bm_lockres->flags |= DLM_LKF_NOQUEUE;
1134 rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); 1132 rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
1135 if (!rv) 1133 if (!rv)
1136 bitmap_update_sb(bitmap); 1134 md_bitmap_update_sb(bitmap);
1137 lockres_free(bm_lockres); 1135 lockres_free(bm_lockres);
1138 1136
1139 sb = kmap_atomic(bitmap->storage.sb_page); 1137 sb = kmap_atomic(bitmap->storage.sb_page);
@@ -1141,11 +1139,11 @@ static int cluster_check_sync_size(struct mddev *mddev)
1141 sync_size = sb->sync_size; 1139 sync_size = sb->sync_size;
1142 else if (sync_size != sb->sync_size) { 1140 else if (sync_size != sb->sync_size) {
1143 kunmap_atomic(sb); 1141 kunmap_atomic(sb);
1144 bitmap_free(bitmap); 1142 md_bitmap_free(bitmap);
1145 return -1; 1143 return -1;
1146 } 1144 }
1147 kunmap_atomic(sb); 1145 kunmap_atomic(sb);
1148 bitmap_free(bitmap); 1146 md_bitmap_free(bitmap);
1149 } 1147 }
1150 1148
1151 return (my_sync_size == sync_size) ? 0 : -1; 1149 return (my_sync_size == sync_size) ? 0 : -1;
@@ -1442,7 +1440,7 @@ static int gather_bitmaps(struct md_rdev *rdev)
1442 for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) { 1440 for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
1443 if (sn == (cinfo->slot_number - 1)) 1441 if (sn == (cinfo->slot_number - 1))
1444 continue; 1442 continue;
1445 err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false); 1443 err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
1446 if (err) { 1444 if (err) {
1447 pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn); 1445 pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
1448 goto out; 1446 goto out;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c208c01f63a5..edad9f8442e6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2560,7 +2560,7 @@ repeat:
2560 if (mddev->queue) 2560 if (mddev->queue)
2561 blk_add_trace_msg(mddev->queue, "md md_update_sb"); 2561 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2562rewrite: 2562rewrite:
2563 bitmap_update_sb(mddev->bitmap); 2563 md_bitmap_update_sb(mddev->bitmap);
2564 rdev_for_each(rdev, mddev) { 2564 rdev_for_each(rdev, mddev) {
2565 char b[BDEVNAME_SIZE]; 2565 char b[BDEVNAME_SIZE];
2566 2566
@@ -4372,10 +4372,10 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4372 if (buf == end) break; 4372 if (buf == end) break;
4373 } 4373 }
4374 if (*end && !isspace(*end)) break; 4374 if (*end && !isspace(*end)) break;
4375 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 4375 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4376 buf = skip_spaces(end); 4376 buf = skip_spaces(end);
4377 } 4377 }
4378 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 4378 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4379out: 4379out:
4380 mddev_unlock(mddev); 4380 mddev_unlock(mddev);
4381 return len; 4381 return len;
@@ -5588,7 +5588,7 @@ int md_run(struct mddev *mddev)
5588 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 5588 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5589 struct bitmap *bitmap; 5589 struct bitmap *bitmap;
5590 5590
5591 bitmap = bitmap_create(mddev, -1); 5591 bitmap = md_bitmap_create(mddev, -1);
5592 if (IS_ERR(bitmap)) { 5592 if (IS_ERR(bitmap)) {
5593 err = PTR_ERR(bitmap); 5593 err = PTR_ERR(bitmap);
5594 pr_warn("%s: failed to create bitmap (%d)\n", 5594 pr_warn("%s: failed to create bitmap (%d)\n",
@@ -5603,7 +5603,7 @@ int md_run(struct mddev *mddev)
5603 pers->free(mddev, mddev->private); 5603 pers->free(mddev, mddev->private);
5604 mddev->private = NULL; 5604 mddev->private = NULL;
5605 module_put(pers->owner); 5605 module_put(pers->owner);
5606 bitmap_destroy(mddev); 5606 md_bitmap_destroy(mddev);
5607 goto abort; 5607 goto abort;
5608 } 5608 }
5609 if (mddev->queue) { 5609 if (mddev->queue) {
@@ -5688,9 +5688,9 @@ static int do_md_run(struct mddev *mddev)
5688 err = md_run(mddev); 5688 err = md_run(mddev);
5689 if (err) 5689 if (err)
5690 goto out; 5690 goto out;
5691 err = bitmap_load(mddev); 5691 err = md_bitmap_load(mddev);
5692 if (err) { 5692 if (err) {
5693 bitmap_destroy(mddev); 5693 md_bitmap_destroy(mddev);
5694 goto out; 5694 goto out;
5695 } 5695 }
5696 5696
@@ -5832,7 +5832,7 @@ static void __md_stop_writes(struct mddev *mddev)
5832 mddev->pers->quiesce(mddev, 1); 5832 mddev->pers->quiesce(mddev, 1);
5833 mddev->pers->quiesce(mddev, 0); 5833 mddev->pers->quiesce(mddev, 0);
5834 } 5834 }
5835 bitmap_flush(mddev); 5835 md_bitmap_flush(mddev);
5836 5836
5837 if (mddev->ro == 0 && 5837 if (mddev->ro == 0 &&
5838 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 5838 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
@@ -5854,7 +5854,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
5854 5854
5855static void mddev_detach(struct mddev *mddev) 5855static void mddev_detach(struct mddev *mddev)
5856{ 5856{
5857 bitmap_wait_behind_writes(mddev); 5857 md_bitmap_wait_behind_writes(mddev);
5858 if (mddev->pers && mddev->pers->quiesce) { 5858 if (mddev->pers && mddev->pers->quiesce) {
5859 mddev->pers->quiesce(mddev, 1); 5859 mddev->pers->quiesce(mddev, 1);
5860 mddev->pers->quiesce(mddev, 0); 5860 mddev->pers->quiesce(mddev, 0);
@@ -5867,7 +5867,7 @@ static void mddev_detach(struct mddev *mddev)
5867static void __md_stop(struct mddev *mddev) 5867static void __md_stop(struct mddev *mddev)
5868{ 5868{
5869 struct md_personality *pers = mddev->pers; 5869 struct md_personality *pers = mddev->pers;
5870 bitmap_destroy(mddev); 5870 md_bitmap_destroy(mddev);
5871 mddev_detach(mddev); 5871 mddev_detach(mddev);
5872 /* Ensure ->event_work is done */ 5872 /* Ensure ->event_work is done */
5873 flush_workqueue(md_misc_wq); 5873 flush_workqueue(md_misc_wq);
@@ -6681,21 +6681,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
6681 if (fd >= 0) { 6681 if (fd >= 0) {
6682 struct bitmap *bitmap; 6682 struct bitmap *bitmap;
6683 6683
6684 bitmap = bitmap_create(mddev, -1); 6684 bitmap = md_bitmap_create(mddev, -1);
6685 mddev_suspend(mddev); 6685 mddev_suspend(mddev);
6686 if (!IS_ERR(bitmap)) { 6686 if (!IS_ERR(bitmap)) {
6687 mddev->bitmap = bitmap; 6687 mddev->bitmap = bitmap;
6688 err = bitmap_load(mddev); 6688 err = md_bitmap_load(mddev);
6689 } else 6689 } else
6690 err = PTR_ERR(bitmap); 6690 err = PTR_ERR(bitmap);
6691 if (err) { 6691 if (err) {
6692 bitmap_destroy(mddev); 6692 md_bitmap_destroy(mddev);
6693 fd = -1; 6693 fd = -1;
6694 } 6694 }
6695 mddev_resume(mddev); 6695 mddev_resume(mddev);
6696 } else if (fd < 0) { 6696 } else if (fd < 0) {
6697 mddev_suspend(mddev); 6697 mddev_suspend(mddev);
6698 bitmap_destroy(mddev); 6698 md_bitmap_destroy(mddev);
6699 mddev_resume(mddev); 6699 mddev_resume(mddev);
6700 } 6700 }
6701 } 6701 }
@@ -6981,15 +6981,15 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6981 mddev->bitmap_info.default_offset; 6981 mddev->bitmap_info.default_offset;
6982 mddev->bitmap_info.space = 6982 mddev->bitmap_info.space =
6983 mddev->bitmap_info.default_space; 6983 mddev->bitmap_info.default_space;
6984 bitmap = bitmap_create(mddev, -1); 6984 bitmap = md_bitmap_create(mddev, -1);
6985 mddev_suspend(mddev); 6985 mddev_suspend(mddev);
6986 if (!IS_ERR(bitmap)) { 6986 if (!IS_ERR(bitmap)) {
6987 mddev->bitmap = bitmap; 6987 mddev->bitmap = bitmap;
6988 rv = bitmap_load(mddev); 6988 rv = md_bitmap_load(mddev);
6989 } else 6989 } else
6990 rv = PTR_ERR(bitmap); 6990 rv = PTR_ERR(bitmap);
6991 if (rv) 6991 if (rv)
6992 bitmap_destroy(mddev); 6992 md_bitmap_destroy(mddev);
6993 mddev_resume(mddev); 6993 mddev_resume(mddev);
6994 } else { 6994 } else {
6995 /* remove the bitmap */ 6995 /* remove the bitmap */
@@ -7014,7 +7014,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7014 md_cluster_ops->leave(mddev); 7014 md_cluster_ops->leave(mddev);
7015 } 7015 }
7016 mddev_suspend(mddev); 7016 mddev_suspend(mddev);
7017 bitmap_destroy(mddev); 7017 md_bitmap_destroy(mddev);
7018 mddev_resume(mddev); 7018 mddev_resume(mddev);
7019 mddev->bitmap_info.offset = 0; 7019 mddev->bitmap_info.offset = 0;
7020 } 7020 }
@@ -7877,7 +7877,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
7877 } else 7877 } else
7878 seq_printf(seq, "\n "); 7878 seq_printf(seq, "\n ");
7879 7879
7880 bitmap_status(seq, mddev->bitmap); 7880 md_bitmap_status(seq, mddev->bitmap);
7881 7881
7882 seq_printf(seq, "\n"); 7882 seq_printf(seq, "\n");
7883 } 7883 }
@@ -8748,7 +8748,7 @@ void md_check_recovery(struct mddev *mddev)
8748 return; 8748 return;
8749 8749
8750 if (mddev->bitmap) 8750 if (mddev->bitmap)
8751 bitmap_daemon_work(mddev); 8751 md_bitmap_daemon_work(mddev);
8752 8752
8753 if (signal_pending(current)) { 8753 if (signal_pending(current)) {
8754 if (mddev->pers->sync_request && !mddev->external) { 8754 if (mddev->pers->sync_request && !mddev->external) {
@@ -8885,7 +8885,7 @@ void md_check_recovery(struct mddev *mddev)
8885 * which has the bitmap stored on all devices. 8885 * which has the bitmap stored on all devices.
8886 * So make sure all bitmap pages get written 8886 * So make sure all bitmap pages get written
8887 */ 8887 */
8888 bitmap_write_all(mddev->bitmap); 8888 md_bitmap_write_all(mddev->bitmap);
8889 } 8889 }
8890 INIT_WORK(&mddev->del_work, md_start_sync); 8890 INIT_WORK(&mddev->del_work, md_start_sync);
8891 queue_work(md_misc_wq, &mddev->del_work); 8891 queue_work(md_misc_wq, &mddev->del_work);
@@ -9133,7 +9133,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9133 if (ret) 9133 if (ret)
9134 pr_info("md-cluster: resize failed\n"); 9134 pr_info("md-cluster: resize failed\n");
9135 else 9135 else
9136 bitmap_update_sb(mddev->bitmap); 9136 md_bitmap_update_sb(mddev->bitmap);
9137 } 9137 }
9138 9138
9139 /* Check for change of roles in the active devices */ 9139 /* Check for change of roles in the active devices */
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 829b4ce057d8..0a3b8ae4a29c 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -69,9 +69,9 @@ static struct dm_block_validator index_validator = {
69 */ 69 */
70#define BITMAP_CSUM_XOR 240779 70#define BITMAP_CSUM_XOR 240779
71 71
72static void bitmap_prepare_for_write(struct dm_block_validator *v, 72static void dm_bitmap_prepare_for_write(struct dm_block_validator *v,
73 struct dm_block *b, 73 struct dm_block *b,
74 size_t block_size) 74 size_t block_size)
75{ 75{
76 struct disk_bitmap_header *disk_header = dm_block_data(b); 76 struct disk_bitmap_header *disk_header = dm_block_data(b);
77 77
@@ -81,9 +81,9 @@ static void bitmap_prepare_for_write(struct dm_block_validator *v,
81 BITMAP_CSUM_XOR)); 81 BITMAP_CSUM_XOR));
82} 82}
83 83
84static int bitmap_check(struct dm_block_validator *v, 84static int dm_bitmap_check(struct dm_block_validator *v,
85 struct dm_block *b, 85 struct dm_block *b,
86 size_t block_size) 86 size_t block_size)
87{ 87{
88 struct disk_bitmap_header *disk_header = dm_block_data(b); 88 struct disk_bitmap_header *disk_header = dm_block_data(b);
89 __le32 csum_disk; 89 __le32 csum_disk;
@@ -108,8 +108,8 @@ static int bitmap_check(struct dm_block_validator *v,
108 108
109static struct dm_block_validator dm_sm_bitmap_validator = { 109static struct dm_block_validator dm_sm_bitmap_validator = {
110 .name = "sm_bitmap", 110 .name = "sm_bitmap",
111 .prepare_for_write = bitmap_prepare_for_write, 111 .prepare_for_write = dm_bitmap_prepare_for_write,
112 .check = bitmap_check 112 .check = dm_bitmap_check,
113}; 113};
114 114
115/*----------------------------------------------------------------*/ 115/*----------------------------------------------------------------*/
@@ -124,7 +124,7 @@ static void *dm_bitmap_data(struct dm_block *b)
124 124
125#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL 125#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
126 126
127static unsigned bitmap_word_used(void *addr, unsigned b) 127static unsigned dm_bitmap_word_used(void *addr, unsigned b)
128{ 128{
129 __le64 *words_le = addr; 129 __le64 *words_le = addr;
130 __le64 *w_le = words_le + (b >> ENTRIES_SHIFT); 130 __le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
@@ -170,7 +170,7 @@ static int sm_find_free(void *addr, unsigned begin, unsigned end,
170{ 170{
171 while (begin < end) { 171 while (begin < end) {
172 if (!(begin & (ENTRIES_PER_WORD - 1)) && 172 if (!(begin & (ENTRIES_PER_WORD - 1)) &&
173 bitmap_word_used(addr, begin)) { 173 dm_bitmap_word_used(addr, begin)) {
174 begin += ENTRIES_PER_WORD; 174 begin += ENTRIES_PER_WORD;
175 continue; 175 continue;
176 } 176 }
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e9e3308cb0a7..136294139d05 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -385,10 +385,10 @@ static void close_write(struct r1bio *r1_bio)
385 r1_bio->behind_master_bio = NULL; 385 r1_bio->behind_master_bio = NULL;
386 } 386 }
387 /* clear the bitmap if all writes complete successfully */ 387 /* clear the bitmap if all writes complete successfully */
388 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 388 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
389 r1_bio->sectors, 389 r1_bio->sectors,
390 !test_bit(R1BIO_Degraded, &r1_bio->state), 390 !test_bit(R1BIO_Degraded, &r1_bio->state),
391 test_bit(R1BIO_BehindIO, &r1_bio->state)); 391 test_bit(R1BIO_BehindIO, &r1_bio->state));
392 md_write_end(r1_bio->mddev); 392 md_write_end(r1_bio->mddev);
393} 393}
394 394
@@ -781,7 +781,7 @@ static int raid1_congested(struct mddev *mddev, int bits)
781static void flush_bio_list(struct r1conf *conf, struct bio *bio) 781static void flush_bio_list(struct r1conf *conf, struct bio *bio)
782{ 782{
783 /* flush any pending bitmap writes to disk before proceeding w/ I/O */ 783 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
784 bitmap_unplug(conf->mddev->bitmap); 784 md_bitmap_unplug(conf->mddev->bitmap);
785 wake_up(&conf->wait_barrier); 785 wake_up(&conf->wait_barrier);
786 786
787 while (bio) { /* submit pending writes */ 787 while (bio) { /* submit pending writes */
@@ -1470,10 +1470,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1470 alloc_behind_master_bio(r1_bio, bio); 1470 alloc_behind_master_bio(r1_bio, bio);
1471 } 1471 }
1472 1472
1473 bitmap_startwrite(bitmap, r1_bio->sector, 1473 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1474 r1_bio->sectors, 1474 test_bit(R1BIO_BehindIO, &r1_bio->state));
1475 test_bit(R1BIO_BehindIO,
1476 &r1_bio->state));
1477 first_clone = 0; 1475 first_clone = 0;
1478 } 1476 }
1479 1477
@@ -1881,8 +1879,7 @@ static void end_sync_write(struct bio *bio)
1881 long sectors_to_go = r1_bio->sectors; 1879 long sectors_to_go = r1_bio->sectors;
1882 /* make sure these bits doesn't get cleared. */ 1880 /* make sure these bits doesn't get cleared. */
1883 do { 1881 do {
1884 bitmap_end_sync(mddev->bitmap, s, 1882 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1885 &sync_blocks, 1);
1886 s += sync_blocks; 1883 s += sync_blocks;
1887 sectors_to_go -= sync_blocks; 1884 sectors_to_go -= sync_blocks;
1888 } while (sectors_to_go > 0); 1885 } while (sectors_to_go > 0);
@@ -2629,12 +2626,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2629 * We can find the current addess in mddev->curr_resync 2626 * We can find the current addess in mddev->curr_resync
2630 */ 2627 */
2631 if (mddev->curr_resync < max_sector) /* aborted */ 2628 if (mddev->curr_resync < max_sector) /* aborted */
2632 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2629 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2633 &sync_blocks, 1); 2630 &sync_blocks, 1);
2634 else /* completed sync */ 2631 else /* completed sync */
2635 conf->fullsync = 0; 2632 conf->fullsync = 0;
2636 2633
2637 bitmap_close_sync(mddev->bitmap); 2634 md_bitmap_close_sync(mddev->bitmap);
2638 close_sync(conf); 2635 close_sync(conf);
2639 2636
2640 if (mddev_is_clustered(mddev)) { 2637 if (mddev_is_clustered(mddev)) {
@@ -2654,7 +2651,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2654 /* before building a request, check if we can skip these blocks.. 2651 /* before building a request, check if we can skip these blocks..
2655 * This call the bitmap_start_sync doesn't actually record anything 2652 * This call the bitmap_start_sync doesn't actually record anything
2656 */ 2653 */
2657 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 2654 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2658 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 2655 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2659 /* We can skip this block, and probably several more */ 2656 /* We can skip this block, and probably several more */
2660 *skipped = 1; 2657 *skipped = 1;
@@ -2672,7 +2669,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2672 * sector_nr + two times RESYNC_SECTORS 2669 * sector_nr + two times RESYNC_SECTORS
2673 */ 2670 */
2674 2671
2675 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 2672 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2676 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); 2673 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2677 2674
2678 2675
@@ -2831,8 +2828,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2831 if (len == 0) 2828 if (len == 0)
2832 break; 2829 break;
2833 if (sync_blocks == 0) { 2830 if (sync_blocks == 0) {
2834 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 2831 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2835 &sync_blocks, still_degraded) && 2832 &sync_blocks, still_degraded) &&
2836 !conf->fullsync && 2833 !conf->fullsync &&
2837 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2834 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2838 break; 2835 break;
@@ -3171,7 +3168,7 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
3171 mddev->array_sectors > newsize) 3168 mddev->array_sectors > newsize)
3172 return -EINVAL; 3169 return -EINVAL;
3173 if (mddev->bitmap) { 3170 if (mddev->bitmap) {
3174 int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0); 3171 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3175 if (ret) 3172 if (ret)
3176 return ret; 3173 return ret;
3177 } 3174 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3c60774c8430..02451c0fa2e3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -438,10 +438,10 @@ static void raid10_end_read_request(struct bio *bio)
438static void close_write(struct r10bio *r10_bio) 438static void close_write(struct r10bio *r10_bio)
439{ 439{
440 /* clear the bitmap if all writes complete successfully */ 440 /* clear the bitmap if all writes complete successfully */
441 bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, 441 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
442 r10_bio->sectors, 442 r10_bio->sectors,
443 !test_bit(R10BIO_Degraded, &r10_bio->state), 443 !test_bit(R10BIO_Degraded, &r10_bio->state),
444 0); 444 0);
445 md_write_end(r10_bio->mddev); 445 md_write_end(r10_bio->mddev);
446} 446}
447 447
@@ -915,7 +915,7 @@ static void flush_pending_writes(struct r10conf *conf)
915 blk_start_plug(&plug); 915 blk_start_plug(&plug);
916 /* flush any pending bitmap writes to disk 916 /* flush any pending bitmap writes to disk
917 * before proceeding w/ I/O */ 917 * before proceeding w/ I/O */
918 bitmap_unplug(conf->mddev->bitmap); 918 md_bitmap_unplug(conf->mddev->bitmap);
919 wake_up(&conf->wait_barrier); 919 wake_up(&conf->wait_barrier);
920 920
921 while (bio) { /* submit pending writes */ 921 while (bio) { /* submit pending writes */
@@ -1100,7 +1100,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1100 1100
1101 /* we aren't scheduling, so we can do the write-out directly. */ 1101 /* we aren't scheduling, so we can do the write-out directly. */
1102 bio = bio_list_get(&plug->pending); 1102 bio = bio_list_get(&plug->pending);
1103 bitmap_unplug(mddev->bitmap); 1103 md_bitmap_unplug(mddev->bitmap);
1104 wake_up(&conf->wait_barrier); 1104 wake_up(&conf->wait_barrier);
1105 1105
1106 while (bio) { /* submit pending writes */ 1106 while (bio) { /* submit pending writes */
@@ -1517,7 +1517,7 @@ retry_write:
1517 } 1517 }
1518 1518
1519 atomic_set(&r10_bio->remaining, 1); 1519 atomic_set(&r10_bio->remaining, 1);
1520 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); 1520 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1521 1521
1522 for (i = 0; i < conf->copies; i++) { 1522 for (i = 0; i < conf->copies; i++) {
1523 if (r10_bio->devs[i].bio) 1523 if (r10_bio->devs[i].bio)
@@ -2990,13 +2990,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2990 2990
2991 if (mddev->curr_resync < max_sector) { /* aborted */ 2991 if (mddev->curr_resync < max_sector) { /* aborted */
2992 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2992 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2993 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 2993 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2994 &sync_blocks, 1); 2994 &sync_blocks, 1);
2995 else for (i = 0; i < conf->geo.raid_disks; i++) { 2995 else for (i = 0; i < conf->geo.raid_disks; i++) {
2996 sector_t sect = 2996 sector_t sect =
2997 raid10_find_virt(conf, mddev->curr_resync, i); 2997 raid10_find_virt(conf, mddev->curr_resync, i);
2998 bitmap_end_sync(mddev->bitmap, sect, 2998 md_bitmap_end_sync(mddev->bitmap, sect,
2999 &sync_blocks, 1); 2999 &sync_blocks, 1);
3000 } 3000 }
3001 } else { 3001 } else {
3002 /* completed sync */ 3002 /* completed sync */
@@ -3017,7 +3017,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3017 } 3017 }
3018 conf->fullsync = 0; 3018 conf->fullsync = 0;
3019 } 3019 }
3020 bitmap_close_sync(mddev->bitmap); 3020 md_bitmap_close_sync(mddev->bitmap);
3021 close_sync(conf); 3021 close_sync(conf);
3022 *skipped = 1; 3022 *skipped = 1;
3023 return sectors_skipped; 3023 return sectors_skipped;
@@ -3111,8 +3111,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3111 * we only need to recover the block if it is set in 3111 * we only need to recover the block if it is set in
3112 * the bitmap 3112 * the bitmap
3113 */ 3113 */
3114 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3114 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3115 &sync_blocks, 1); 3115 &sync_blocks, 1);
3116 if (sync_blocks < max_sync) 3116 if (sync_blocks < max_sync)
3117 max_sync = sync_blocks; 3117 max_sync = sync_blocks;
3118 if (!must_sync && 3118 if (!must_sync &&
@@ -3157,8 +3157,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3157 } 3157 }
3158 } 3158 }
3159 3159
3160 must_sync = bitmap_start_sync(mddev->bitmap, sect, 3160 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3161 &sync_blocks, still_degraded); 3161 &sync_blocks, still_degraded);
3162 3162
3163 any_working = 0; 3163 any_working = 0;
3164 for (j=0; j<conf->copies;j++) { 3164 for (j=0; j<conf->copies;j++) {
@@ -3334,13 +3334,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3334 * safety reason, which ensures curr_resync_completed is 3334 * safety reason, which ensures curr_resync_completed is
3335 * updated in bitmap_cond_end_sync. 3335 * updated in bitmap_cond_end_sync.
3336 */ 3336 */
3337 bitmap_cond_end_sync(mddev->bitmap, sector_nr, 3337 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3338 mddev_is_clustered(mddev) && 3338 mddev_is_clustered(mddev) &&
3339 (sector_nr + 2 * RESYNC_SECTORS > 3339 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3340 conf->cluster_sync_high));
3341 3340
3342 if (!bitmap_start_sync(mddev->bitmap, sector_nr, 3341 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3343 &sync_blocks, mddev->degraded) && 3342 &sync_blocks, mddev->degraded) &&
3344 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, 3343 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3345 &mddev->recovery)) { 3344 &mddev->recovery)) {
3346 /* We can skip this block */ 3345 /* We can skip this block */
@@ -4015,7 +4014,7 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
4015 mddev->array_sectors > size) 4014 mddev->array_sectors > size)
4016 return -EINVAL; 4015 return -EINVAL;
4017 if (mddev->bitmap) { 4016 if (mddev->bitmap) {
4018 int ret = bitmap_resize(mddev->bitmap, size, 0, 0); 4017 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
4019 if (ret) 4018 if (ret)
4020 return ret; 4019 return ret;
4021 } 4020 }
@@ -4281,10 +4280,9 @@ static int raid10_start_reshape(struct mddev *mddev)
4281 spin_unlock_irq(&conf->device_lock); 4280 spin_unlock_irq(&conf->device_lock);
4282 4281
4283 if (mddev->delta_disks && mddev->bitmap) { 4282 if (mddev->delta_disks && mddev->bitmap) {
4284 ret = bitmap_resize(mddev->bitmap, 4283 ret = md_bitmap_resize(mddev->bitmap,
4285 raid10_size(mddev, 0, 4284 raid10_size(mddev, 0, conf->geo.raid_disks),
4286 conf->geo.raid_disks), 4285 0, 0);
4287 0, 0);
4288 if (ret) 4286 if (ret)
4289 goto abort; 4287 goto abort;
4290 } 4288 }
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 3c65f52b68f5..e2564d80128f 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -324,10 +324,10 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
324 if (sh->dev[i].written) { 324 if (sh->dev[i].written) {
325 set_bit(R5_UPTODATE, &sh->dev[i].flags); 325 set_bit(R5_UPTODATE, &sh->dev[i].flags);
326 r5c_return_dev_pending_writes(conf, &sh->dev[i]); 326 r5c_return_dev_pending_writes(conf, &sh->dev[i]);
327 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 327 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
328 STRIPE_SECTORS, 328 STRIPE_SECTORS,
329 !test_bit(STRIPE_DEGRADED, &sh->state), 329 !test_bit(STRIPE_DEGRADED, &sh->state),
330 0); 330 0);
331 } 331 }
332 } 332 }
333} 333}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index be117d0a65a8..4b8f6c476e72 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3295,8 +3295,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
3295 */ 3295 */
3296 set_bit(STRIPE_BITMAP_PENDING, &sh->state); 3296 set_bit(STRIPE_BITMAP_PENDING, &sh->state);
3297 spin_unlock_irq(&sh->stripe_lock); 3297 spin_unlock_irq(&sh->stripe_lock);
3298 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3298 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
3299 STRIPE_SECTORS, 0); 3299 STRIPE_SECTORS, 0);
3300 spin_lock_irq(&sh->stripe_lock); 3300 spin_lock_irq(&sh->stripe_lock);
3301 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); 3301 clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
3302 if (!sh->batch_head) { 3302 if (!sh->batch_head) {
@@ -3386,8 +3386,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3386 bi = nextbi; 3386 bi = nextbi;
3387 } 3387 }
3388 if (bitmap_end) 3388 if (bitmap_end)
3389 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3389 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3390 STRIPE_SECTORS, 0, 0); 3390 STRIPE_SECTORS, 0, 0);
3391 bitmap_end = 0; 3391 bitmap_end = 0;
3392 /* and fail all 'written' */ 3392 /* and fail all 'written' */
3393 bi = sh->dev[i].written; 3393 bi = sh->dev[i].written;
@@ -3432,8 +3432,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3432 } 3432 }
3433 } 3433 }
3434 if (bitmap_end) 3434 if (bitmap_end)
3435 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3435 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3436 STRIPE_SECTORS, 0, 0); 3436 STRIPE_SECTORS, 0, 0);
3437 /* If we were in the middle of a write the parity block might 3437 /* If we were in the middle of a write the parity block might
3438 * still be locked - so just clear all R5_LOCKED flags 3438 * still be locked - so just clear all R5_LOCKED flags
3439 */ 3439 */
@@ -3773,10 +3773,10 @@ returnbi:
3773 bio_endio(wbi); 3773 bio_endio(wbi);
3774 wbi = wbi2; 3774 wbi = wbi2;
3775 } 3775 }
3776 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3776 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3777 STRIPE_SECTORS, 3777 STRIPE_SECTORS,
3778 !test_bit(STRIPE_DEGRADED, &sh->state), 3778 !test_bit(STRIPE_DEGRADED, &sh->state),
3779 0); 3779 0);
3780 if (head_sh->batch_head) { 3780 if (head_sh->batch_head) {
3781 sh = list_first_entry(&sh->batch_list, 3781 sh = list_first_entry(&sh->batch_list,
3782 struct stripe_head, 3782 struct stripe_head,
@@ -5533,10 +5533,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
5533 for (d = 0; 5533 for (d = 0;
5534 d < conf->raid_disks - conf->max_degraded; 5534 d < conf->raid_disks - conf->max_degraded;
5535 d++) 5535 d++)
5536 bitmap_startwrite(mddev->bitmap, 5536 md_bitmap_startwrite(mddev->bitmap,
5537 sh->sector, 5537 sh->sector,
5538 STRIPE_SECTORS, 5538 STRIPE_SECTORS,
5539 0); 5539 0);
5540 sh->bm_seq = conf->seq_flush + 1; 5540 sh->bm_seq = conf->seq_flush + 1;
5541 set_bit(STRIPE_BIT_DELAY, &sh->state); 5541 set_bit(STRIPE_BIT_DELAY, &sh->state);
5542 } 5542 }
@@ -6014,11 +6014,11 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
6014 } 6014 }
6015 6015
6016 if (mddev->curr_resync < max_sector) /* aborted */ 6016 if (mddev->curr_resync < max_sector) /* aborted */
6017 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 6017 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
6018 &sync_blocks, 1); 6018 &sync_blocks, 1);
6019 else /* completed sync */ 6019 else /* completed sync */
6020 conf->fullsync = 0; 6020 conf->fullsync = 0;
6021 bitmap_close_sync(mddev->bitmap); 6021 md_bitmap_close_sync(mddev->bitmap);
6022 6022
6023 return 0; 6023 return 0;
6024 } 6024 }
@@ -6047,7 +6047,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
6047 } 6047 }
6048 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 6048 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
6049 !conf->fullsync && 6049 !conf->fullsync &&
6050 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 6050 !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
6051 sync_blocks >= STRIPE_SECTORS) { 6051 sync_blocks >= STRIPE_SECTORS) {
6052 /* we can skip this block, and probably more */ 6052 /* we can skip this block, and probably more */
6053 sync_blocks /= STRIPE_SECTORS; 6053 sync_blocks /= STRIPE_SECTORS;
@@ -6055,7 +6055,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
6055 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 6055 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
6056 } 6056 }
6057 6057
6058 bitmap_cond_end_sync(mddev->bitmap, sector_nr, false); 6058 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
6059 6059
6060 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); 6060 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);
6061 if (sh == NULL) { 6061 if (sh == NULL) {
@@ -6078,7 +6078,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
6078 } 6078 }
6079 rcu_read_unlock(); 6079 rcu_read_unlock();
6080 6080
6081 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 6081 md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
6082 6082
6083 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 6083 set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
6084 set_bit(STRIPE_HANDLE, &sh->state); 6084 set_bit(STRIPE_HANDLE, &sh->state);
@@ -6279,7 +6279,7 @@ static void raid5d(struct md_thread *thread)
6279 /* Now is a good time to flush some bitmap updates */ 6279 /* Now is a good time to flush some bitmap updates */
6280 conf->seq_flush++; 6280 conf->seq_flush++;
6281 spin_unlock_irq(&conf->device_lock); 6281 spin_unlock_irq(&conf->device_lock);
6282 bitmap_unplug(mddev->bitmap); 6282 md_bitmap_unplug(mddev->bitmap);
6283 spin_lock_irq(&conf->device_lock); 6283 spin_lock_irq(&conf->device_lock);
6284 conf->seq_write = conf->seq_flush; 6284 conf->seq_write = conf->seq_flush;
6285 activate_bit_delay(conf, conf->temp_inactive_list); 6285 activate_bit_delay(conf, conf->temp_inactive_list);
@@ -7734,7 +7734,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
7734 mddev->array_sectors > newsize) 7734 mddev->array_sectors > newsize)
7735 return -EINVAL; 7735 return -EINVAL;
7736 if (mddev->bitmap) { 7736 if (mddev->bitmap) {
7737 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); 7737 int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
7738 if (ret) 7738 if (ret)
7739 return ret; 7739 return ret;
7740 } 7740 }
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 1b52b8557034..2060d1483043 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -419,10 +419,25 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
419 /* Verify that EC can process command */ 419 /* Verify that EC can process command */
420 for (i = 0; i < len; i++) { 420 for (i = 0; i < len; i++) {
421 rx_byte = rx_buf[i]; 421 rx_byte = rx_buf[i];
422 /*
423 * Seeing the PAST_END, RX_BAD_DATA, or NOT_READY
424 * markers are all signs that the EC didn't fully
425 * receive our command. e.g., if the EC is flashing
426 * itself, it can't respond to any commands and instead
427 * clocks out EC_SPI_PAST_END from its SPI hardware
428 * buffer. Similar occurrences can happen if the AP is
429 * too slow to clock out data after asserting CS -- the
430 * EC will abort and fill its buffer with
431 * EC_SPI_RX_BAD_DATA.
432 *
433 * In all cases, these errors should be safe to retry.
434 * Report -EAGAIN and let the caller decide what to do
435 * about that.
436 */
422 if (rx_byte == EC_SPI_PAST_END || 437 if (rx_byte == EC_SPI_PAST_END ||
423 rx_byte == EC_SPI_RX_BAD_DATA || 438 rx_byte == EC_SPI_RX_BAD_DATA ||
424 rx_byte == EC_SPI_NOT_READY) { 439 rx_byte == EC_SPI_NOT_READY) {
425 ret = -EREMOTEIO; 440 ret = -EAGAIN;
426 break; 441 break;
427 } 442 }
428 } 443 }
@@ -431,7 +446,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
431 if (!ret) 446 if (!ret)
432 ret = cros_ec_spi_receive_packet(ec_dev, 447 ret = cros_ec_spi_receive_packet(ec_dev,
433 ec_msg->insize + sizeof(*response)); 448 ec_msg->insize + sizeof(*response));
434 else 449 else if (ret != -EAGAIN)
435 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); 450 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
436 451
437 final_ret = terminate_request(ec_dev); 452 final_ret = terminate_request(ec_dev);
@@ -537,10 +552,11 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
537 /* Verify that EC can process command */ 552 /* Verify that EC can process command */
538 for (i = 0; i < len; i++) { 553 for (i = 0; i < len; i++) {
539 rx_byte = rx_buf[i]; 554 rx_byte = rx_buf[i];
555 /* See comments in cros_ec_pkt_xfer_spi() */
540 if (rx_byte == EC_SPI_PAST_END || 556 if (rx_byte == EC_SPI_PAST_END ||
541 rx_byte == EC_SPI_RX_BAD_DATA || 557 rx_byte == EC_SPI_RX_BAD_DATA ||
542 rx_byte == EC_SPI_NOT_READY) { 558 rx_byte == EC_SPI_NOT_READY) {
543 ret = -EREMOTEIO; 559 ret = -EAGAIN;
544 break; 560 break;
545 } 561 }
546 } 562 }
@@ -549,7 +565,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
549 if (!ret) 565 if (!ret)
550 ret = cros_ec_spi_receive_response(ec_dev, 566 ret = cros_ec_spi_receive_response(ec_dev,
551 ec_msg->insize + EC_MSG_TX_PROTO_BYTES); 567 ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
552 else 568 else if (ret != -EAGAIN)
553 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret); 569 dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
554 570
555 final_ret = terminate_request(ec_dev); 571 final_ret = terminate_request(ec_dev);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 9e923cd1d80e..38a7586b00cc 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2485,7 +2485,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
2485 break; 2485 break;
2486 } 2486 }
2487 2487
2488 return 0; 2488 return ret;
2489} 2489}
2490 2490
2491#ifdef CONFIG_COMPAT 2491#ifdef CONFIG_COMPAT
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0ef741bc515d..d0e83db42ae5 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
33 const struct sdhci_iproc_data *data; 33 const struct sdhci_iproc_data *data;
34 u32 shadow_cmd; 34 u32 shadow_cmd;
35 u32 shadow_blk; 35 u32 shadow_blk;
36 bool is_cmd_shadowed;
37 bool is_blk_shadowed;
36}; 38};
37 39
38#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18) 40#define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
48 50
49static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg) 51static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
50{ 52{
51 u32 val = sdhci_iproc_readl(host, (reg & ~3)); 53 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
52 u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff; 54 struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
55 u32 val;
56 u16 word;
57
58 if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
59 /* Get the saved transfer mode */
60 val = iproc_host->shadow_cmd;
61 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
62 iproc_host->is_blk_shadowed) {
63 /* Get the saved block info */
64 val = iproc_host->shadow_blk;
65 } else {
66 val = sdhci_iproc_readl(host, (reg & ~3));
67 }
68 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
53 return word; 69 return word;
54} 70}
55 71
@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
105 121
106 if (reg == SDHCI_COMMAND) { 122 if (reg == SDHCI_COMMAND) {
107 /* Write the block now as we are issuing a command */ 123 /* Write the block now as we are issuing a command */
108 if (iproc_host->shadow_blk != 0) { 124 if (iproc_host->is_blk_shadowed) {
109 sdhci_iproc_writel(host, iproc_host->shadow_blk, 125 sdhci_iproc_writel(host, iproc_host->shadow_blk,
110 SDHCI_BLOCK_SIZE); 126 SDHCI_BLOCK_SIZE);
111 iproc_host->shadow_blk = 0; 127 iproc_host->is_blk_shadowed = false;
112 } 128 }
113 oldval = iproc_host->shadow_cmd; 129 oldval = iproc_host->shadow_cmd;
114 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 130 iproc_host->is_cmd_shadowed = false;
131 } else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
132 iproc_host->is_blk_shadowed) {
115 /* Block size and count are stored in shadow reg */ 133 /* Block size and count are stored in shadow reg */
116 oldval = iproc_host->shadow_blk; 134 oldval = iproc_host->shadow_blk;
117 } else { 135 } else {
@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
123 if (reg == SDHCI_TRANSFER_MODE) { 141 if (reg == SDHCI_TRANSFER_MODE) {
124 /* Save the transfer mode until the command is issued */ 142 /* Save the transfer mode until the command is issued */
125 iproc_host->shadow_cmd = newval; 143 iproc_host->shadow_cmd = newval;
144 iproc_host->is_cmd_shadowed = true;
126 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { 145 } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
127 /* Save the block info until the command is issued */ 146 /* Save the block info until the command is issued */
128 iproc_host->shadow_blk = newval; 147 iproc_host->shadow_blk = newval;
148 iproc_host->is_blk_shadowed = true;
129 } else { 149 } else {
130 /* Command or other regular 32-bit write */ 150 /* Command or other regular 32-bit write */
131 sdhci_iproc_writel(host, newval, reg & ~3); 151 sdhci_iproc_writel(host, newval, reg & ~3);
@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
166 186
167static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = { 187static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
168 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 188 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
169 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN, 189 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
170 .ops = &sdhci_iproc_32only_ops, 190 .ops = &sdhci_iproc_32only_ops,
171}; 191};
172 192
@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
206 .caps1 = SDHCI_DRIVER_TYPE_C | 226 .caps1 = SDHCI_DRIVER_TYPE_C |
207 SDHCI_DRIVER_TYPE_D | 227 SDHCI_DRIVER_TYPE_D |
208 SDHCI_SUPPORT_DDR50, 228 SDHCI_SUPPORT_DDR50,
209 .mmc_caps = MMC_CAP_1_8V_DDR,
210}; 229};
211 230
212static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = { 231static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 78616787f2a3..3da5fca77cbd 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1712,6 +1712,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
1712 .duplex_reg = B53_DUPLEX_STAT_FE, 1712 .duplex_reg = B53_DUPLEX_STAT_FE,
1713 }, 1713 },
1714 { 1714 {
1715 .chip_id = BCM5389_DEVICE_ID,
1716 .dev_name = "BCM5389",
1717 .vlans = 4096,
1718 .enabled_ports = 0x1f,
1719 .arl_entries = 4,
1720 .cpu_port = B53_CPU_PORT,
1721 .vta_regs = B53_VTA_REGS,
1722 .duplex_reg = B53_DUPLEX_STAT_GE,
1723 .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1724 .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1725 },
1726 {
1715 .chip_id = BCM5395_DEVICE_ID, 1727 .chip_id = BCM5395_DEVICE_ID,
1716 .dev_name = "BCM5395", 1728 .dev_name = "BCM5395",
1717 .vlans = 4096, 1729 .vlans = 4096,
@@ -2034,6 +2046,7 @@ int b53_switch_detect(struct b53_device *dev)
2034 else 2046 else
2035 dev->chip_id = BCM5365_DEVICE_ID; 2047 dev->chip_id = BCM5365_DEVICE_ID;
2036 break; 2048 break;
2049 case BCM5389_DEVICE_ID:
2037 case BCM5395_DEVICE_ID: 2050 case BCM5395_DEVICE_ID:
2038 case BCM5397_DEVICE_ID: 2051 case BCM5397_DEVICE_ID:
2039 case BCM5398_DEVICE_ID: 2052 case BCM5398_DEVICE_ID:
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index fa7556f5d4fb..a533a90e3904 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = {
285#define B53_BRCM_OUI_1 0x0143bc00 285#define B53_BRCM_OUI_1 0x0143bc00
286#define B53_BRCM_OUI_2 0x03625c00 286#define B53_BRCM_OUI_2 0x03625c00
287#define B53_BRCM_OUI_3 0x00406000 287#define B53_BRCM_OUI_3 0x00406000
288#define B53_BRCM_OUI_4 0x01410c00
288 289
289static int b53_mdio_probe(struct mdio_device *mdiodev) 290static int b53_mdio_probe(struct mdio_device *mdiodev)
290{ 291{
@@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
311 */ 312 */
312 if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && 313 if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
313 (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && 314 (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
314 (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) { 315 (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
316 (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
315 dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); 317 dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
316 return -ENODEV; 318 return -ENODEV;
317 } 319 }
@@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = {
360 { .compatible = "brcm,bcm53125" }, 362 { .compatible = "brcm,bcm53125" },
361 { .compatible = "brcm,bcm53128" }, 363 { .compatible = "brcm,bcm53128" },
362 { .compatible = "brcm,bcm5365" }, 364 { .compatible = "brcm,bcm5365" },
365 { .compatible = "brcm,bcm5389" },
363 { .compatible = "brcm,bcm5395" }, 366 { .compatible = "brcm,bcm5395" },
364 { .compatible = "brcm,bcm5397" }, 367 { .compatible = "brcm,bcm5397" },
365 { .compatible = "brcm,bcm5398" }, 368 { .compatible = "brcm,bcm5398" },
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 1187ebd79287..3b57f47d0e79 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -48,6 +48,7 @@ struct b53_io_ops {
48enum { 48enum {
49 BCM5325_DEVICE_ID = 0x25, 49 BCM5325_DEVICE_ID = 0x25,
50 BCM5365_DEVICE_ID = 0x65, 50 BCM5365_DEVICE_ID = 0x65,
51 BCM5389_DEVICE_ID = 0x89,
51 BCM5395_DEVICE_ID = 0x95, 52 BCM5395_DEVICE_ID = 0x95,
52 BCM5397_DEVICE_ID = 0x97, 53 BCM5397_DEVICE_ID = 0x97,
53 BCM5398_DEVICE_ID = 0x98, 54 BCM5398_DEVICE_ID = 0x98,
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 23b45da784cb..b89acaee12d4 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
354 /* Locate the first rule available */ 354 /* Locate the first rule available */
355 if (fs->location == RX_CLS_LOC_ANY) 355 if (fs->location == RX_CLS_LOC_ANY)
356 rule_index = find_first_zero_bit(priv->cfp.used, 356 rule_index = find_first_zero_bit(priv->cfp.used,
357 bcm_sf2_cfp_rule_size(priv)); 357 priv->num_cfp_rules);
358 else 358 else
359 rule_index = fs->location; 359 rule_index = fs->location;
360 360
361 if (rule_index > bcm_sf2_cfp_rule_size(priv))
362 return -ENOSPC;
363
361 layout = &udf_tcpip4_layout; 364 layout = &udf_tcpip4_layout;
362 /* We only use one UDF slice for now */ 365 /* We only use one UDF slice for now */
363 slice_num = bcm_sf2_get_slice_number(layout, 0); 366 slice_num = bcm_sf2_get_slice_number(layout, 0);
@@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
562 * first half because the HW search is by incrementing addresses. 565 * first half because the HW search is by incrementing addresses.
563 */ 566 */
564 if (fs->location == RX_CLS_LOC_ANY) 567 if (fs->location == RX_CLS_LOC_ANY)
565 rule_index[0] = find_first_zero_bit(priv->cfp.used, 568 rule_index[1] = find_first_zero_bit(priv->cfp.used,
566 bcm_sf2_cfp_rule_size(priv)); 569 priv->num_cfp_rules);
567 else 570 else
568 rule_index[0] = fs->location; 571 rule_index[1] = fs->location;
572 if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
573 return -ENOSPC;
569 574
570 /* Flag it as used (cleared on error path) such that we can immediately 575 /* Flag it as used (cleared on error path) such that we can immediately
571 * obtain a second one to chain from. 576 * obtain a second one to chain from.
572 */ 577 */
573 set_bit(rule_index[0], priv->cfp.used); 578 set_bit(rule_index[1], priv->cfp.used);
574 579
575 rule_index[1] = find_first_zero_bit(priv->cfp.used, 580 rule_index[0] = find_first_zero_bit(priv->cfp.used,
576 bcm_sf2_cfp_rule_size(priv)); 581 priv->num_cfp_rules);
577 if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) { 582 if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
578 ret = -ENOSPC; 583 ret = -ENOSPC;
579 goto out_err; 584 goto out_err;
580 } 585 }
@@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
712 /* Flag the second half rule as being used now, return it as the 717 /* Flag the second half rule as being used now, return it as the
713 * location, and flag it as unique while dumping rules 718 * location, and flag it as unique while dumping rules
714 */ 719 */
715 set_bit(rule_index[1], priv->cfp.used); 720 set_bit(rule_index[0], priv->cfp.used);
716 set_bit(rule_index[1], priv->cfp.unique); 721 set_bit(rule_index[1], priv->cfp.unique);
717 fs->location = rule_index[1]; 722 fs->location = rule_index[1];
718 723
719 return ret; 724 return ret;
720 725
721out_err: 726out_err:
722 clear_bit(rule_index[0], priv->cfp.used); 727 clear_bit(rule_index[1], priv->cfp.used);
723 return ret; 728 return ret;
724} 729}
725 730
@@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
785 int ret; 790 int ret;
786 u32 reg; 791 u32 reg;
787 792
788 /* Refuse deletion of unused rules, and the default reserved rule */
789 if (!test_bit(loc, priv->cfp.used) || loc == 0)
790 return -EINVAL;
791
792 /* Indicate which rule we want to read */ 793 /* Indicate which rule we want to read */
793 bcm_sf2_cfp_rule_addr_set(priv, loc); 794 bcm_sf2_cfp_rule_addr_set(priv, loc);
794 795
@@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
826 u32 next_loc = 0; 827 u32 next_loc = 0;
827 int ret; 828 int ret;
828 829
830 /* Refuse deleting unused rules, and those that are not unique since
831 * that could leave IPv6 rules with one of the chained rule in the
832 * table.
833 */
834 if (!test_bit(loc, priv->cfp.unique) || loc == 0)
835 return -EINVAL;
836
829 ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); 837 ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
830 if (ret) 838 if (ret)
831 return ret; 839 return ret;
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 36c8950dbd2d..176861bd2252 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1212 vp->mii.reg_num_mask = 0x1f; 1212 vp->mii.reg_num_mask = 0x1f;
1213 1213
1214 /* Makes sure rings are at least 16 byte aligned. */ 1214 /* Makes sure rings are at least 16 byte aligned. */
1215 vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE 1215 vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1216 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 1216 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1217 &vp->rx_ring_dma); 1217 &vp->rx_ring_dma, GFP_KERNEL);
1218 retval = -ENOMEM; 1218 retval = -ENOMEM;
1219 if (!vp->rx_ring) 1219 if (!vp->rx_ring)
1220 goto free_device; 1220 goto free_device;
@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1476 return 0; 1476 return 0;
1477 1477
1478free_ring: 1478free_ring:
1479 pci_free_consistent(pdev, 1479 dma_free_coherent(&pdev->dev,
1480 sizeof(struct boom_rx_desc) * RX_RING_SIZE 1480 sizeof(struct boom_rx_desc) * RX_RING_SIZE +
1481 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 1481 sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1482 vp->rx_ring, 1482 vp->rx_ring, vp->rx_ring_dma);
1483 vp->rx_ring_dma);
1484free_device: 1483free_device:
1485 free_netdev(dev); 1484 free_netdev(dev);
1486 pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); 1485 pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
1751 break; /* Bad news! */ 1750 break; /* Bad news! */
1752 1751
1753 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ 1752 skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
1754 dma = pci_map_single(VORTEX_PCI(vp), skb->data, 1753 dma = dma_map_single(vp->gendev, skb->data,
1755 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 1754 PKT_BUF_SZ, DMA_FROM_DEVICE);
1756 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma)) 1755 if (dma_mapping_error(vp->gendev, dma))
1757 break; 1756 break;
1758 vp->rx_ring[i].addr = cpu_to_le32(dma); 1757 vp->rx_ring[i].addr = cpu_to_le32(dma);
1759 } 1758 }
@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2067 if (vp->bus_master) { 2066 if (vp->bus_master) {
2068 /* Set the bus-master controller to transfer the packet. */ 2067 /* Set the bus-master controller to transfer the packet. */
2069 int len = (skb->len + 3) & ~3; 2068 int len = (skb->len + 3) & ~3;
2070 vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, 2069 vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
2071 PCI_DMA_TODEVICE); 2070 DMA_TO_DEVICE);
2072 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) { 2071 if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
2073 dev_kfree_skb_any(skb); 2072 dev_kfree_skb_any(skb);
2074 dev->stats.tx_dropped++; 2073 dev->stats.tx_dropped++;
2075 return NETDEV_TX_OK; 2074 return NETDEV_TX_OK;
@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2168 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); 2167 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2169 2168
2170 if (!skb_shinfo(skb)->nr_frags) { 2169 if (!skb_shinfo(skb)->nr_frags) {
2171 dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, 2170 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
2172 PCI_DMA_TODEVICE); 2171 DMA_TO_DEVICE);
2173 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) 2172 if (dma_mapping_error(vp->gendev, dma_addr))
2174 goto out_dma_err; 2173 goto out_dma_err;
2175 2174
2176 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); 2175 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2178 } else { 2177 } else {
2179 int i; 2178 int i;
2180 2179
2181 dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, 2180 dma_addr = dma_map_single(vp->gendev, skb->data,
2182 skb_headlen(skb), PCI_DMA_TODEVICE); 2181 skb_headlen(skb), DMA_TO_DEVICE);
2183 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) 2182 if (dma_mapping_error(vp->gendev, dma_addr))
2184 goto out_dma_err; 2183 goto out_dma_err;
2185 2184
2186 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); 2185 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2189 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2188 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2190 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2189 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2191 2190
2192 dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, 2191 dma_addr = skb_frag_dma_map(vp->gendev, frag,
2193 0, 2192 0,
2194 frag->size, 2193 frag->size,
2195 DMA_TO_DEVICE); 2194 DMA_TO_DEVICE);
2196 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { 2195 if (dma_mapping_error(vp->gendev, dma_addr)) {
2197 for(i = i-1; i >= 0; i--) 2196 for(i = i-1; i >= 0; i--)
2198 dma_unmap_page(&VORTEX_PCI(vp)->dev, 2197 dma_unmap_page(vp->gendev,
2199 le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), 2198 le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
2200 le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), 2199 le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
2201 DMA_TO_DEVICE); 2200 DMA_TO_DEVICE);
2202 2201
2203 pci_unmap_single(VORTEX_PCI(vp), 2202 dma_unmap_single(vp->gendev,
2204 le32_to_cpu(vp->tx_ring[entry].frag[0].addr), 2203 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2205 le32_to_cpu(vp->tx_ring[entry].frag[0].length), 2204 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
2206 PCI_DMA_TODEVICE); 2205 DMA_TO_DEVICE);
2207 2206
2208 goto out_dma_err; 2207 goto out_dma_err;
2209 } 2208 }
@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2218 } 2217 }
2219 } 2218 }
2220#else 2219#else
2221 dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE); 2220 dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
2222 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) 2221 if (dma_mapping_error(vp->gendev, dma_addr))
2223 goto out_dma_err; 2222 goto out_dma_err;
2224 vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); 2223 vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
2225 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); 2224 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2254out: 2253out:
2255 return NETDEV_TX_OK; 2254 return NETDEV_TX_OK;
2256out_dma_err: 2255out_dma_err:
2257 dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); 2256 dev_err(vp->gendev, "Error mapping dma buffer\n");
2258 goto out; 2257 goto out;
2259} 2258}
2260 2259
@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
2322 if (status & DMADone) { 2321 if (status & DMADone) {
2323 if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { 2322 if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
2324 iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ 2323 iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
2325 pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); 2324 dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
2326 pkts_compl++; 2325 pkts_compl++;
2327 bytes_compl += vp->tx_skb->len; 2326 bytes_compl += vp->tx_skb->len;
2328 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ 2327 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
2459 struct sk_buff *skb = vp->tx_skbuff[entry]; 2458 struct sk_buff *skb = vp->tx_skbuff[entry];
2460#if DO_ZEROCOPY 2459#if DO_ZEROCOPY
2461 int i; 2460 int i;
2462 pci_unmap_single(VORTEX_PCI(vp), 2461 dma_unmap_single(vp->gendev,
2463 le32_to_cpu(vp->tx_ring[entry].frag[0].addr), 2462 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2464 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, 2463 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
2465 PCI_DMA_TODEVICE); 2464 DMA_TO_DEVICE);
2466 2465
2467 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) 2466 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
2468 pci_unmap_page(VORTEX_PCI(vp), 2467 dma_unmap_page(vp->gendev,
2469 le32_to_cpu(vp->tx_ring[entry].frag[i].addr), 2468 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2470 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, 2469 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2471 PCI_DMA_TODEVICE); 2470 DMA_TO_DEVICE);
2472#else 2471#else
2473 pci_unmap_single(VORTEX_PCI(vp), 2472 dma_unmap_single(vp->gendev,
2474 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); 2473 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
2475#endif 2474#endif
2476 pkts_compl++; 2475 pkts_compl++;
2477 bytes_compl += skb->len; 2476 bytes_compl += skb->len;
@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
2561 /* 'skb_put()' points to the start of sk_buff data area. */ 2560 /* 'skb_put()' points to the start of sk_buff data area. */
2562 if (vp->bus_master && 2561 if (vp->bus_master &&
2563 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { 2562 ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
2564 dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), 2563 dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
2565 pkt_len, PCI_DMA_FROMDEVICE); 2564 pkt_len, DMA_FROM_DEVICE);
2566 iowrite32(dma, ioaddr + Wn7_MasterAddr); 2565 iowrite32(dma, ioaddr + Wn7_MasterAddr);
2567 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); 2566 iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
2568 iowrite16(StartDMAUp, ioaddr + EL3_CMD); 2567 iowrite16(StartDMAUp, ioaddr + EL3_CMD);
2569 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) 2568 while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
2570 ; 2569 ;
2571 pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); 2570 dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
2572 } else { 2571 } else {
2573 ioread32_rep(ioaddr + RX_FIFO, 2572 ioread32_rep(ioaddr + RX_FIFO,
2574 skb_put(skb, pkt_len), 2573 skb_put(skb, pkt_len),
@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
2635 if (pkt_len < rx_copybreak && 2634 if (pkt_len < rx_copybreak &&
2636 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { 2635 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
2637 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2638 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2637 dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2639 /* 'skb_put()' points to the start of sk_buff data area. */ 2638 /* 'skb_put()' points to the start of sk_buff data area. */
2640 skb_put_data(skb, vp->rx_skbuff[entry]->data, 2639 skb_put_data(skb, vp->rx_skbuff[entry]->data,
2641 pkt_len); 2640 pkt_len);
2642 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2641 dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2643 vp->rx_copy++; 2642 vp->rx_copy++;
2644 } else { 2643 } else {
2645 /* Pre-allocate the replacement skb. If it or its 2644 /* Pre-allocate the replacement skb. If it or its
@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
2651 dev->stats.rx_dropped++; 2650 dev->stats.rx_dropped++;
2652 goto clear_complete; 2651 goto clear_complete;
2653 } 2652 }
2654 newdma = pci_map_single(VORTEX_PCI(vp), newskb->data, 2653 newdma = dma_map_single(vp->gendev, newskb->data,
2655 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2654 PKT_BUF_SZ, DMA_FROM_DEVICE);
2656 if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) { 2655 if (dma_mapping_error(vp->gendev, newdma)) {
2657 dev->stats.rx_dropped++; 2656 dev->stats.rx_dropped++;
2658 consume_skb(newskb); 2657 consume_skb(newskb);
2659 goto clear_complete; 2658 goto clear_complete;
@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
2664 vp->rx_skbuff[entry] = newskb; 2663 vp->rx_skbuff[entry] = newskb;
2665 vp->rx_ring[entry].addr = cpu_to_le32(newdma); 2664 vp->rx_ring[entry].addr = cpu_to_le32(newdma);
2666 skb_put(skb, pkt_len); 2665 skb_put(skb, pkt_len);
2667 pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2666 dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
2668 vp->rx_nocopy++; 2667 vp->rx_nocopy++;
2669 } 2668 }
2670 skb->protocol = eth_type_trans(skb, dev); 2669 skb->protocol = eth_type_trans(skb, dev);
@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
2761 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ 2760 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2762 for (i = 0; i < RX_RING_SIZE; i++) 2761 for (i = 0; i < RX_RING_SIZE; i++)
2763 if (vp->rx_skbuff[i]) { 2762 if (vp->rx_skbuff[i]) {
2764 pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), 2763 dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
2765 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2764 PKT_BUF_SZ, DMA_FROM_DEVICE);
2766 dev_kfree_skb(vp->rx_skbuff[i]); 2765 dev_kfree_skb(vp->rx_skbuff[i]);
2767 vp->rx_skbuff[i] = NULL; 2766 vp->rx_skbuff[i] = NULL;
2768 } 2767 }
@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
2775 int k; 2774 int k;
2776 2775
2777 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) 2776 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
2778 pci_unmap_single(VORTEX_PCI(vp), 2777 dma_unmap_single(vp->gendev,
2779 le32_to_cpu(vp->tx_ring[i].frag[k].addr), 2778 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2780 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, 2779 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2781 PCI_DMA_TODEVICE); 2780 DMA_TO_DEVICE);
2782#else 2781#else
2783 pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); 2782 dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
2784#endif 2783#endif
2785 dev_kfree_skb(skb); 2784 dev_kfree_skb(skb);
2786 vp->tx_skbuff[i] = NULL; 2785 vp->tx_skbuff[i] = NULL;
@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
3288 3287
3289 pci_iounmap(pdev, vp->ioaddr); 3288 pci_iounmap(pdev, vp->ioaddr);
3290 3289
3291 pci_free_consistent(pdev, 3290 dma_free_coherent(&pdev->dev,
3292 sizeof(struct boom_rx_desc) * RX_RING_SIZE 3291 sizeof(struct boom_rx_desc) * RX_RING_SIZE +
3293 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 3292 sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3294 vp->rx_ring, 3293 vp->rx_ring, vp->rx_ring_dma);
3295 vp->rx_ring_dma);
3296 3294
3297 pci_release_regions(pdev); 3295 pci_release_regions(pdev);
3298 3296
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index ac99d089ac72..1c97e39b478e 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -164,7 +164,9 @@ bad_clone_list[] __initdata = {
164#define NESM_START_PG 0x40 /* First page of TX buffer */ 164#define NESM_START_PG 0x40 /* First page of TX buffer */
165#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ 165#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
166 166
167#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ 167#if defined(CONFIG_MACH_TX49XX)
168# define DCR_VAL 0x48 /* 8-bit mode */
169#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */
168# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49) 170# define DCR_VAL (MACH_IS_ATARI ? 0x48 : 0x49)
169#else 171#else
170# define DCR_VAL 0x49 172# define DCR_VAL 0x49
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a561705f232c..be198cc0b10c 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1552 if (!ioaddr) { 1552 if (!ioaddr) {
1553 if (pcnet32_debug & NETIF_MSG_PROBE) 1553 if (pcnet32_debug & NETIF_MSG_PROBE)
1554 pr_err("card has no PCI IO resources, aborting\n"); 1554 pr_err("card has no PCI IO resources, aborting\n");
1555 return -ENODEV; 1555 err = -ENODEV;
1556 goto err_disable_dev;
1556 } 1557 }
1557 1558
1558 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); 1559 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
1559 if (err) { 1560 if (err) {
1560 if (pcnet32_debug & NETIF_MSG_PROBE) 1561 if (pcnet32_debug & NETIF_MSG_PROBE)
1561 pr_err("architecture does not support 32bit PCI busmaster DMA\n"); 1562 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1562 return err; 1563 goto err_disable_dev;
1563 } 1564 }
1564 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { 1565 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1565 if (pcnet32_debug & NETIF_MSG_PROBE) 1566 if (pcnet32_debug & NETIF_MSG_PROBE)
1566 pr_err("io address range already allocated\n"); 1567 pr_err("io address range already allocated\n");
1567 return -EBUSY; 1568 err = -EBUSY;
1569 goto err_disable_dev;
1568 } 1570 }
1569 1571
1570 err = pcnet32_probe1(ioaddr, 1, pdev); 1572 err = pcnet32_probe1(ioaddr, 1, pdev);
1573
1574err_disable_dev:
1571 if (err < 0) 1575 if (err < 0)
1572 pci_disable_device(pdev); 1576 pci_disable_device(pdev);
1573 1577
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index b57acb8dc35b..dc25066c59a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
419 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ 419 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
420 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ 420 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
421 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ 421 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
422 {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ 422 {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
423 {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ 423 {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
424 {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ 424 {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
425 {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ 425 {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
426 {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ 426 {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
427 {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */ 427 {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
428 {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */ 428 {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
429 {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ 429 {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
430 {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ 430 {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
431}; 431};
432 432
433static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { 433static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
@@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
444 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ 444 {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
445 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ 445 {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
446 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ 446 {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
447 {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
448 {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
449 {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
450 {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
451 {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
452 {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
453 {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
454 {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
455 {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
456 {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
457}; 447};
458 448
459static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { 449static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index db92f1858060..b76447baccaf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
836{ 836{
837 struct tp_params *tp = &adap->params.tp; 837 struct tp_params *tp = &adap->params.tp;
838 u64 hash_filter_mask = tp->hash_filter_mask; 838 u64 hash_filter_mask = tp->hash_filter_mask;
839 u32 mask; 839 u64 ntuple_mask = 0;
840 840
841 if (!is_hashfilter(adap)) 841 if (!is_hashfilter(adap))
842 return false; 842 return false;
@@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
865 if (!fs->val.fport || fs->mask.fport != 0xffff) 865 if (!fs->val.fport || fs->mask.fport != 0xffff)
866 return false; 866 return false;
867 867
868 if (tp->fcoe_shift >= 0) { 868 /* calculate tuple mask and compare with mask configured in hw */
869 mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W; 869 if (tp->fcoe_shift >= 0)
870 if (mask && !fs->mask.fcoe) 870 ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
871 return false;
872 }
873 871
874 if (tp->port_shift >= 0) { 872 if (tp->port_shift >= 0)
875 mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W; 873 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
876 if (mask && !fs->mask.iport)
877 return false;
878 }
879 874
880 if (tp->vnic_shift >= 0) { 875 if (tp->vnic_shift >= 0) {
881 mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W; 876 if ((adap->params.tp.ingress_config & VNIC_F))
882 877 ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
883 if ((adap->params.tp.ingress_config & VNIC_F)) { 878 else
884 if (mask && !fs->mask.pfvf_vld) 879 ntuple_mask |= (u64)fs->mask.ovlan_vld <<
885 return false; 880 tp->vnic_shift;
886 } else {
887 if (mask && !fs->mask.ovlan_vld)
888 return false;
889 }
890 } 881 }
891 882
892 if (tp->vlan_shift >= 0) { 883 if (tp->vlan_shift >= 0)
893 mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W; 884 ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
894 if (mask && !fs->mask.ivlan)
895 return false;
896 }
897 885
898 if (tp->tos_shift >= 0) { 886 if (tp->tos_shift >= 0)
899 mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W; 887 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
900 if (mask && !fs->mask.tos)
901 return false;
902 }
903 888
904 if (tp->protocol_shift >= 0) { 889 if (tp->protocol_shift >= 0)
905 mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W; 890 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
906 if (mask && !fs->mask.proto)
907 return false;
908 }
909 891
910 if (tp->ethertype_shift >= 0) { 892 if (tp->ethertype_shift >= 0)
911 mask = (hash_filter_mask >> tp->ethertype_shift) & 893 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
912 FT_ETHERTYPE_W;
913 if (mask && !fs->mask.ethtype)
914 return false;
915 }
916 894
917 if (tp->macmatch_shift >= 0) { 895 if (tp->macmatch_shift >= 0)
918 mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W; 896 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
919 if (mask && !fs->mask.macidx) 897
920 return false; 898 if (tp->matchtype_shift >= 0)
921 } 899 ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
900
901 if (tp->frag_shift >= 0)
902 ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
903
904 if (ntuple_mask != hash_filter_mask)
905 return false;
922 906
923 if (tp->matchtype_shift >= 0) {
924 mask = (hash_filter_mask >> tp->matchtype_shift) &
925 FT_MPSHITTYPE_W;
926 if (mask && !fs->mask.matchtype)
927 return false;
928 }
929 if (tp->frag_shift >= 0) {
930 mask = (hash_filter_mask >> tp->frag_shift) &
931 FT_FRAGMENTATION_W;
932 if (mask && !fs->mask.frag)
933 return false;
934 }
935 return true; 907 return true;
936} 908}
937 909
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 81684acf52af..8a8b12b720ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2747 pci_set_master(pdev); 2747 pci_set_master(pdev);
2748 2748
2749 /* Query PCI controller on system for DMA addressing 2749 /* Query PCI controller on system for DMA addressing
2750 * limitation for the device. Try 64-bit first, and 2750 * limitation for the device. Try 47-bit first, and
2751 * fail to 32-bit. 2751 * fail to 32-bit.
2752 */ 2752 */
2753 2753
2754 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2754 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
2755 if (err) { 2755 if (err) {
2756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2756 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2757 if (err) { 2757 if (err) {
@@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2765 goto err_out_release_regions; 2765 goto err_out_release_regions;
2766 } 2766 }
2767 } else { 2767 } else {
2768 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2768 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
2769 if (err) { 2769 if (err) {
2770 dev_err(dev, "Unable to obtain %u-bit DMA " 2770 dev_err(dev, "Unable to obtain %u-bit DMA "
2771 "for consistent allocations, aborting\n", 64); 2771 "for consistent allocations, aborting\n", 47);
2772 goto err_out_release_regions; 2772 goto err_out_release_regions;
2773 } 2773 }
2774 using_dac = 1; 2774 using_dac = 1;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c697e79e491e..8f755009ff38 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
3309 if ((val & POST_STAGE_FAT_LOG_START) 3309 if ((val & POST_STAGE_FAT_LOG_START)
3310 != POST_STAGE_FAT_LOG_START && 3310 != POST_STAGE_FAT_LOG_START &&
3311 (val & POST_STAGE_ARMFW_UE) 3311 (val & POST_STAGE_ARMFW_UE)
3312 != POST_STAGE_ARMFW_UE) 3312 != POST_STAGE_ARMFW_UE &&
3313 (val & POST_STAGE_RECOVERABLE_ERR)
3314 != POST_STAGE_RECOVERABLE_ERR)
3313 return; 3315 return;
3314 } 3316 }
3315 3317
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4604bc8eb5b..9d3eed46830d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f81439796ac7..43d973215040 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -1,20 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Fast Ethernet Controller (ENET) PTP driver for MX6x. 3 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
3 * 4 *
4 * Copyright (C) 2012 Freescale Semiconductor, Inc. 5 * Copyright (C) 2012 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 6 */
19 7
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e8d6a6f6aaf..5ec1185808e5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
192 if (adapter->fw_done_rc) { 192 if (adapter->fw_done_rc) {
193 dev_err(dev, "Couldn't map long term buffer,rc = %d\n", 193 dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
194 adapter->fw_done_rc); 194 adapter->fw_done_rc);
195 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
195 return -1; 196 return -1;
196 } 197 }
197 return 0; 198 return 0;
@@ -795,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev)
795 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 796 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
796 unsigned long timeout = msecs_to_jiffies(30000); 797 unsigned long timeout = msecs_to_jiffies(30000);
797 int retry_count = 0; 798 int retry_count = 0;
799 bool retry;
798 int rc; 800 int rc;
799 801
800 do { 802 do {
803 retry = false;
801 if (retry_count > IBMVNIC_MAX_QUEUES) { 804 if (retry_count > IBMVNIC_MAX_QUEUES) {
802 netdev_warn(netdev, "Login attempts exceeded\n"); 805 netdev_warn(netdev, "Login attempts exceeded\n");
803 return -1; 806 return -1;
@@ -821,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev)
821 retry_count++; 824 retry_count++;
822 release_sub_crqs(adapter, 1); 825 release_sub_crqs(adapter, 1);
823 826
827 retry = true;
828 netdev_dbg(netdev,
829 "Received partial success, retrying...\n");
824 adapter->init_done_rc = 0; 830 adapter->init_done_rc = 0;
825 reinit_completion(&adapter->init_done); 831 reinit_completion(&adapter->init_done);
826 send_cap_queries(adapter); 832 send_cap_queries(adapter);
@@ -848,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev)
848 netdev_warn(netdev, "Adapter login failed\n"); 854 netdev_warn(netdev, "Adapter login failed\n");
849 return -1; 855 return -1;
850 } 856 }
851 } while (adapter->init_done_rc == PARTIALSUCCESS); 857 } while (retry);
852 858
853 /* handle pending MAC address changes after successful login */ 859 /* handle pending MAC address changes after successful login */
854 if (adapter->mac_change_pending) { 860 if (adapter->mac_change_pending) {
@@ -1821,9 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1821 if (rc) 1827 if (rc)
1822 return rc; 1828 return rc;
1823 } 1829 }
1830 ibmvnic_disable_irqs(adapter);
1824 } 1831 }
1825
1826 ibmvnic_disable_irqs(adapter);
1827 adapter->state = VNIC_CLOSED; 1832 adapter->state = VNIC_CLOSED;
1828 1833
1829 if (reset_state == VNIC_CLOSED) 1834 if (reset_state == VNIC_CLOSED)
@@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2617{ 2622{
2618 struct device *dev = &adapter->vdev->dev; 2623 struct device *dev = &adapter->vdev->dev;
2619 unsigned long rc; 2624 unsigned long rc;
2620 u64 val;
2621 2625
2622 if (scrq->hw_irq > 0x100000000ULL) { 2626 if (scrq->hw_irq > 0x100000000ULL) {
2623 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); 2627 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2624 return 1; 2628 return 1;
2625 } 2629 }
2626 2630
2627 val = (0xff000000) | scrq->hw_irq; 2631 if (adapter->resetting &&
2628 rc = plpar_hcall_norets(H_EOI, val); 2632 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2629 if (rc) 2633 u64 val = (0xff000000) | scrq->hw_irq;
2630 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", 2634
2631 val, rc); 2635 rc = plpar_hcall_norets(H_EOI, val);
2636 if (rc)
2637 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2638 val, rc);
2639 }
2632 2640
2633 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, 2641 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2634 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); 2642 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
@@ -4586,14 +4594,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4586 release_crq_queue(adapter); 4594 release_crq_queue(adapter);
4587 } 4595 }
4588 4596
4589 rc = init_stats_buffers(adapter);
4590 if (rc)
4591 return rc;
4592
4593 rc = init_stats_token(adapter);
4594 if (rc)
4595 return rc;
4596
4597 return rc; 4597 return rc;
4598} 4598}
4599 4599
@@ -4662,13 +4662,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4662 goto ibmvnic_init_fail; 4662 goto ibmvnic_init_fail;
4663 } while (rc == EAGAIN); 4663 } while (rc == EAGAIN);
4664 4664
4665 rc = init_stats_buffers(adapter);
4666 if (rc)
4667 goto ibmvnic_init_fail;
4668
4669 rc = init_stats_token(adapter);
4670 if (rc)
4671 goto ibmvnic_stats_fail;
4672
4665 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4673 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4666 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; 4674 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4667 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 4675 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4668 4676
4669 rc = device_create_file(&dev->dev, &dev_attr_failover); 4677 rc = device_create_file(&dev->dev, &dev_attr_failover);
4670 if (rc) 4678 if (rc)
4671 goto ibmvnic_init_fail; 4679 goto ibmvnic_dev_file_err;
4672 4680
4673 netif_carrier_off(netdev); 4681 netif_carrier_off(netdev);
4674 rc = register_netdev(netdev); 4682 rc = register_netdev(netdev);
@@ -4687,6 +4695,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4687ibmvnic_register_fail: 4695ibmvnic_register_fail:
4688 device_remove_file(&dev->dev, &dev_attr_failover); 4696 device_remove_file(&dev->dev, &dev_attr_failover);
4689 4697
4698ibmvnic_dev_file_err:
4699 release_stats_token(adapter);
4700
4701ibmvnic_stats_fail:
4702 release_stats_buffers(adapter);
4703
4690ibmvnic_init_fail: 4704ibmvnic_init_fail:
4691 release_sub_crqs(adapter, 1); 4705 release_sub_crqs(adapter, 1);
4692 release_crq_queue(adapter); 4706 release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index afadba99f7b8..2ecd55856c50 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9054,7 +9054,6 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
9054{ 9054{
9055 const struct tc_action *a; 9055 const struct tc_action *a;
9056 LIST_HEAD(actions); 9056 LIST_HEAD(actions);
9057 int err;
9058 9057
9059 if (!tcf_exts_has_actions(exts)) 9058 if (!tcf_exts_has_actions(exts))
9060 return -EINVAL; 9059 return -EINVAL;
@@ -9075,11 +9074,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
9075 9074
9076 if (!dev) 9075 if (!dev)
9077 return -EINVAL; 9076 return -EINVAL;
9078 err = handle_redirect_action(adapter, dev->ifindex, queue, 9077 return handle_redirect_action(adapter, dev->ifindex,
9079 action); 9078 queue, action);
9080 if (err == 0)
9081 return err;
9082 } 9079 }
9080
9081 return -EINVAL;
9083 } 9082 }
9084 9083
9085 return -EINVAL; 9084 return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index a822f7a56bc5..685337d58276 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -43,12 +43,12 @@
43#include "fw.h" 43#include "fw.h"
44 44
45/* 45/*
46 * We allocate in as big chunks as we can, up to a maximum of 256 KB 46 * We allocate in page size (default 4KB on many archs) chunks to avoid high
47 * per chunk. 47 * order memory allocations in fragmented/high usage memory situation.
48 */ 48 */
49enum { 49enum {
50 MLX4_ICM_ALLOC_SIZE = 1 << 18, 50 MLX4_ICM_ALLOC_SIZE = PAGE_SIZE,
51 MLX4_TABLE_CHUNK_SIZE = 1 << 18 51 MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE,
52}; 52};
53 53
54static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) 54static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
398 u64 size; 398 u64 size;
399 399
400 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; 400 obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
401 if (WARN_ON(!obj_per_chunk))
402 return -EINVAL;
401 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; 403 num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
402 404
403 table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); 405 table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL);
404 if (!table->icm) 406 if (!table->icm)
405 return -ENOMEM; 407 return -ENOMEM;
406 table->virt = virt; 408 table->virt = virt;
@@ -446,7 +448,7 @@ err:
446 mlx4_free_icm(dev, table->icm[i], use_coherent); 448 mlx4_free_icm(dev, table->icm[i], use_coherent);
447 } 449 }
448 450
449 kfree(table->icm); 451 kvfree(table->icm);
450 452
451 return -ENOMEM; 453 return -ENOMEM;
452} 454}
@@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
462 mlx4_free_icm(dev, table->icm[i], table->coherent); 464 mlx4_free_icm(dev, table->icm[i], table->coherent);
463 } 465 }
464 466
465 kfree(table->icm); 467 kvfree(table->icm);
466} 468}
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 2edcce98ab2d..65482f004e50 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
172 list_add_tail(&dev_ctx->list, &priv->ctx_list); 172 list_add_tail(&dev_ctx->list, &priv->ctx_list);
173 spin_unlock_irqrestore(&priv->ctx_lock, flags); 173 spin_unlock_irqrestore(&priv->ctx_lock, flags);
174 174
175 mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", 175 mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
176 dev_ctx->intf->protocol, enable ? 176 dev_ctx->intf->protocol, enable ?
177 "enabled" : "disabled"); 177 "enabled" : "disabled");
178 } 178 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 211578ffc70d..60172a38c4a4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2929 mlx4_err(dev, "Failed to create file for port %d\n", port); 2929 mlx4_err(dev, "Failed to create file for port %d\n", port);
2930 devlink_port_unregister(&info->devlink_port); 2930 devlink_port_unregister(&info->devlink_port);
2931 info->port = -1; 2931 info->port = -1;
2932 return err;
2932 } 2933 }
2933 2934
2934 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2935 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
@@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2950 &info->port_attr); 2951 &info->port_attr);
2951 devlink_port_unregister(&info->devlink_port); 2952 devlink_port_unregister(&info->devlink_port);
2952 info->port = -1; 2953 info->port = -1;
2954 return err;
2953 } 2955 }
2954 2956
2955 return err; 2957 return 0;
2956} 2958}
2957 2959
2958static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2960static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 3aaf4bad6c5a..427e7a31862c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
393 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 393 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
394 struct mlx4_qp *qp; 394 struct mlx4_qp *qp;
395 395
396 spin_lock(&qp_table->lock); 396 spin_lock_irq(&qp_table->lock);
397 397
398 qp = __mlx4_qp_lookup(dev, qpn); 398 qp = __mlx4_qp_lookup(dev, qpn);
399 399
400 spin_unlock(&qp_table->lock); 400 spin_unlock_irq(&qp_table->lock);
401 return qp; 401 return qp;
402} 402}
403 403
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 176645762e49..1ff0b0e93804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
615 return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); 615 return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
616} 616}
617 617
618static __be32 mlx5e_get_fcs(struct sk_buff *skb)
619{
620 int last_frag_sz, bytes_in_prev, nr_frags;
621 u8 *fcs_p1, *fcs_p2;
622 skb_frag_t *last_frag;
623 __be32 fcs_bytes;
624
625 if (!skb_is_nonlinear(skb))
626 return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
627
628 nr_frags = skb_shinfo(skb)->nr_frags;
629 last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
630 last_frag_sz = skb_frag_size(last_frag);
631
632 /* If all FCS data is in last frag */
633 if (last_frag_sz >= ETH_FCS_LEN)
634 return *(__be32 *)(skb_frag_address(last_frag) +
635 last_frag_sz - ETH_FCS_LEN);
636
637 fcs_p2 = (u8 *)skb_frag_address(last_frag);
638 bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
639
640 /* Find where the other part of the FCS is - Linear or another frag */
641 if (nr_frags == 1) {
642 fcs_p1 = skb_tail_pointer(skb);
643 } else {
644 skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
645
646 fcs_p1 = skb_frag_address(prev_frag) +
647 skb_frag_size(prev_frag);
648 }
649 fcs_p1 -= bytes_in_prev;
650
651 memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
652 memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
653
654 return fcs_bytes;
655}
656
618static inline void mlx5e_handle_csum(struct net_device *netdev, 657static inline void mlx5e_handle_csum(struct net_device *netdev,
619 struct mlx5_cqe64 *cqe, 658 struct mlx5_cqe64 *cqe,
620 struct mlx5e_rq *rq, 659 struct mlx5e_rq *rq,
@@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
643 skb->csum = csum_partial(skb->data + ETH_HLEN, 682 skb->csum = csum_partial(skb->data + ETH_HLEN,
644 network_depth - ETH_HLEN, 683 network_depth - ETH_HLEN,
645 skb->csum); 684 skb->csum);
685 if (unlikely(netdev->features & NETIF_F_RXFCS))
686 skb->csum = csum_add(skb->csum,
687 (__force __wsum)mlx5e_get_fcs(skb));
646 rq->stats.csum_complete++; 688 rq->stats.csum_complete++;
647 return; 689 return;
648 } 690 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 0f5da499a223..fad8c2e3804e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
237 context->buf.sg[0].data = &context->command; 237 context->buf.sg[0].data = &context->command;
238 238
239 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); 239 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
240 list_add_tail(&context->list, &fdev->ipsec->pending_cmds); 240 res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
241 if (!res)
242 list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
241 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); 243 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
242 244
243 res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
244 if (res) { 245 if (res) {
245 mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n", 246 mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
246 res);
247 spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
248 list_del(&context->list);
249 spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
250 kfree(context); 247 kfree(context);
251 return ERR_PTR(res); 248 return ERR_PTR(res);
252 } 249 }
250
253 /* Context will be freed by wait func after completion */ 251 /* Context will be freed by wait func after completion */
254 return context; 252 return context;
255} 253}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index ca38a30fbe91..adc6ab2cf429 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4433,6 +4433,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4433 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); 4433 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
4434 return -EINVAL; 4434 return -EINVAL;
4435 } 4435 }
4436 if (is_vlan_dev(upper_dev) &&
4437 vlan_dev_vlan_id(upper_dev) == 1) {
4438 NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
4439 return -EINVAL;
4440 }
4436 break; 4441 break;
4437 case NETDEV_CHANGEUPPER: 4442 case NETDEV_CHANGEUPPER:
4438 upper_dev = info->upper_dev; 4443 upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 7ed08486ae23..c805dcbebd02 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -84,7 +84,7 @@ static int sonic_open(struct net_device *dev)
84 for (i = 0; i < SONIC_NUM_RRS; i++) { 84 for (i = 0; i < SONIC_NUM_RRS; i++) {
85 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE), 85 dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
86 SONIC_RBSIZE, DMA_FROM_DEVICE); 86 SONIC_RBSIZE, DMA_FROM_DEVICE);
87 if (!laddr) { 87 if (dma_mapping_error(lp->device, laddr)) {
88 while(i > 0) { /* free any that were mapped successfully */ 88 while(i > 0) { /* free any that were mapped successfully */
89 i--; 89 i--;
90 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE); 90 dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 1dc424685f4e..35fb31f682af 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
335 return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem); 335 return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
336 336
337 start = mem; 337 start = mem;
338 while (mem - start + 8 < nfp_cpp_area_size(area)) { 338 while (mem - start + 8 <= nfp_cpp_area_size(area)) {
339 u8 __iomem *value; 339 u8 __iomem *value;
340 u32 type, length; 340 u32 type, length;
341 341
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 00f41c145d4d..820b226d6ff8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -77,7 +77,7 @@
77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 77#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
78 78
79/* ILT entry structure */ 79/* ILT entry structure */
80#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL 80#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
81#define ILT_ENTRY_PHY_ADDR_SHIFT 0 81#define ILT_ENTRY_PHY_ADDR_SHIFT 0
82#define ILT_ENTRY_VALID_MASK 0x1ULL 82#define ILT_ENTRY_VALID_MASK 0x1ULL
83#define ILT_ENTRY_VALID_SHIFT 52 83#define ILT_ENTRY_VALID_SHIFT 52
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 38502815d681..468c59d2e491 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
292 struct qed_ll2_tx_packet *p_pkt = NULL; 292 struct qed_ll2_tx_packet *p_pkt = NULL;
293 struct qed_ll2_info *p_ll2_conn; 293 struct qed_ll2_info *p_ll2_conn;
294 struct qed_ll2_tx_queue *p_tx; 294 struct qed_ll2_tx_queue *p_tx;
295 unsigned long flags = 0;
295 dma_addr_t tx_frag; 296 dma_addr_t tx_frag;
296 297
297 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 298 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
300 301
301 p_tx = &p_ll2_conn->tx_queue; 302 p_tx = &p_ll2_conn->tx_queue;
302 303
304 spin_lock_irqsave(&p_tx->lock, flags);
303 while (!list_empty(&p_tx->active_descq)) { 305 while (!list_empty(&p_tx->active_descq)) {
304 p_pkt = list_first_entry(&p_tx->active_descq, 306 p_pkt = list_first_entry(&p_tx->active_descq,
305 struct qed_ll2_tx_packet, list_entry); 307 struct qed_ll2_tx_packet, list_entry);
@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
309 list_del(&p_pkt->list_entry); 311 list_del(&p_pkt->list_entry);
310 b_last_packet = list_empty(&p_tx->active_descq); 312 b_last_packet = list_empty(&p_tx->active_descq);
311 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); 313 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
314 spin_unlock_irqrestore(&p_tx->lock, flags);
312 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 315 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
313 struct qed_ooo_buffer *p_buffer; 316 struct qed_ooo_buffer *p_buffer;
314 317
@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
328 b_last_frag, 331 b_last_frag,
329 b_last_packet); 332 b_last_packet);
330 } 333 }
334 spin_lock_irqsave(&p_tx->lock, flags);
331 } 335 }
336 spin_unlock_irqrestore(&p_tx->lock, flags);
332} 337}
333 338
334static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) 339static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
556 struct qed_ll2_info *p_ll2_conn = NULL; 561 struct qed_ll2_info *p_ll2_conn = NULL;
557 struct qed_ll2_rx_packet *p_pkt = NULL; 562 struct qed_ll2_rx_packet *p_pkt = NULL;
558 struct qed_ll2_rx_queue *p_rx; 563 struct qed_ll2_rx_queue *p_rx;
564 unsigned long flags = 0;
559 565
560 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); 566 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
561 if (!p_ll2_conn) 567 if (!p_ll2_conn)
@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
563 569
564 p_rx = &p_ll2_conn->rx_queue; 570 p_rx = &p_ll2_conn->rx_queue;
565 571
572 spin_lock_irqsave(&p_rx->lock, flags);
566 while (!list_empty(&p_rx->active_descq)) { 573 while (!list_empty(&p_rx->active_descq)) {
567 p_pkt = list_first_entry(&p_rx->active_descq, 574 p_pkt = list_first_entry(&p_rx->active_descq,
568 struct qed_ll2_rx_packet, list_entry); 575 struct qed_ll2_rx_packet, list_entry);
569 if (!p_pkt) 576 if (!p_pkt)
570 break; 577 break;
571
572 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); 578 list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
579 spin_unlock_irqrestore(&p_rx->lock, flags);
573 580
574 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { 581 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
575 struct qed_ooo_buffer *p_buffer; 582 struct qed_ooo_buffer *p_buffer;
@@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
588 cookie, 595 cookie,
589 rx_buf_addr, b_last); 596 rx_buf_addr, b_last);
590 } 597 }
598 spin_lock_irqsave(&p_rx->lock, flags);
591 } 599 }
600 spin_unlock_irqrestore(&p_rx->lock, flags);
601}
602
603static bool
604qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
605 struct core_rx_slow_path_cqe *p_cqe)
606{
607 struct ooo_opaque *iscsi_ooo;
608 u32 cid;
609
610 if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
611 return false;
612
613 iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
614 if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
615 return false;
616
617 /* Need to make a flush */
618 cid = le32_to_cpu(iscsi_ooo->cid);
619 qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
620
621 return true;
592} 622}
593 623
594static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, 624static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
@@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
617 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); 647 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
618 cqe_type = cqe->rx_cqe_sp.type; 648 cqe_type = cqe->rx_cqe_sp.type;
619 649
650 if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
651 if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
652 &cqe->rx_cqe_sp))
653 continue;
654
620 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { 655 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
621 DP_NOTICE(p_hwfn, 656 DP_NOTICE(p_hwfn,
622 "Got a non-regular LB LL2 completion [type 0x%02x]\n", 657 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
@@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
794 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; 829 struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
795 int rc; 830 int rc;
796 831
832 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
833 return 0;
834
797 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); 835 rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
798 if (rc) 836 if (rc)
799 return rc; 837 return rc;
@@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
814 u16 new_idx = 0, num_bds = 0; 852 u16 new_idx = 0, num_bds = 0;
815 int rc; 853 int rc;
816 854
855 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
856 return 0;
857
817 new_idx = le16_to_cpu(*p_tx->p_fw_cons); 858 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
818 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); 859 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
819 860
@@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
1867 1908
1868 /* Stop Tx & Rx of connection, if needed */ 1909 /* Stop Tx & Rx of connection, if needed */
1869 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { 1910 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1911 p_ll2_conn->tx_queue.b_cb_registred = false;
1912 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1870 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); 1913 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1871 if (rc) 1914 if (rc)
1872 goto out; 1915 goto out;
1916
1873 qed_ll2_txq_flush(p_hwfn, connection_handle); 1917 qed_ll2_txq_flush(p_hwfn, connection_handle);
1918 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1874 } 1919 }
1875 1920
1876 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { 1921 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1922 p_ll2_conn->rx_queue.b_cb_registred = false;
1923 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1877 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); 1924 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1878 if (rc) 1925 if (rc)
1879 goto out; 1926 goto out;
1927
1880 qed_ll2_rxq_flush(p_hwfn, connection_handle); 1928 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1929 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1881 } 1930 }
1882 1931
1883 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) 1932 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
@@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
1925 if (!p_ll2_conn) 1974 if (!p_ll2_conn)
1926 return; 1975 return;
1927 1976
1928 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1929 p_ll2_conn->rx_queue.b_cb_registred = false;
1930 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1931 }
1932
1933 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1934 p_ll2_conn->tx_queue.b_cb_registred = false;
1935 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1936 }
1937
1938 kfree(p_ll2_conn->tx_queue.descq_mem); 1977 kfree(p_ll2_conn->tx_queue.descq_mem);
1939 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); 1978 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1940 1979
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index a01e7d6e5442..f6655e251bbd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1066 1066
1067 DP_INFO(edev, "Starting qede_remove\n"); 1067 DP_INFO(edev, "Starting qede_remove\n");
1068 1068
1069 qede_rdma_dev_remove(edev);
1069 unregister_netdev(ndev); 1070 unregister_netdev(ndev);
1070 cancel_delayed_work_sync(&edev->sp_task); 1071 cancel_delayed_work_sync(&edev->sp_task);
1071 1072
1072 qede_ptp_disable(edev); 1073 qede_ptp_disable(edev);
1073 1074
1074 qede_rdma_dev_remove(edev);
1075
1076 edev->ops->common->set_power_state(cdev, PCI_D0); 1075 edev->ops->common->set_power_state(cdev, PCI_D0);
1077 1076
1078 pci_set_drvdata(pdev, NULL); 1077 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a5b792ce2ae7..1bf930d4a1e5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -163,7 +163,7 @@ enum {
163}; 163};
164 164
165/* Driver's parameters */ 165/* Driver's parameters */
166#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 166#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS)
167#define SH_ETH_RX_ALIGN 32 167#define SH_ETH_RX_ALIGN 32
168#else 168#else
169#define SH_ETH_RX_ALIGN 2 169#define SH_ETH_RX_ALIGN 2
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f4c0b02ddad8..59fbf74dcada 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
1674 if (ret) 1674 if (ret)
1675 goto unreg_napi; 1675 goto unreg_napi;
1676 1676
1677 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) 1677 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
1678 dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n"); 1678 dev_warn(&pdev->dev, "Failed to set DMA mask\n");
1679 1679
1680 ret = register_netdev(ndev); 1680 ret = register_netdev(ndev);
1681 if (ret) { 1681 if (ret) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index abceea802ea1..38828ab77eb9 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1873 if (IS_ERR(priv->txchan)) { 1873 if (IS_ERR(priv->txchan)) {
1874 dev_err(&pdev->dev, "error initializing tx dma channel\n"); 1874 dev_err(&pdev->dev, "error initializing tx dma channel\n");
1875 rc = PTR_ERR(priv->txchan); 1875 rc = PTR_ERR(priv->txchan);
1876 goto no_cpdma_chan; 1876 goto err_free_dma;
1877 } 1877 }
1878 1878
1879 priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH, 1879 priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
@@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
1881 if (IS_ERR(priv->rxchan)) { 1881 if (IS_ERR(priv->rxchan)) {
1882 dev_err(&pdev->dev, "error initializing rx dma channel\n"); 1882 dev_err(&pdev->dev, "error initializing rx dma channel\n");
1883 rc = PTR_ERR(priv->rxchan); 1883 rc = PTR_ERR(priv->rxchan);
1884 goto no_cpdma_chan; 1884 goto err_free_txchan;
1885 } 1885 }
1886 1886
1887 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1887 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1888 if (!res) { 1888 if (!res) {
1889 dev_err(&pdev->dev, "error getting irq res\n"); 1889 dev_err(&pdev->dev, "error getting irq res\n");
1890 rc = -ENOENT; 1890 rc = -ENOENT;
1891 goto no_cpdma_chan; 1891 goto err_free_rxchan;
1892 } 1892 }
1893 ndev->irq = res->start; 1893 ndev->irq = res->start;
1894 1894
@@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1914 pm_runtime_put_noidle(&pdev->dev); 1914 pm_runtime_put_noidle(&pdev->dev);
1915 dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", 1915 dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
1916 __func__, rc); 1916 __func__, rc);
1917 goto no_cpdma_chan; 1917 goto err_napi_del;
1918 } 1918 }
1919 1919
1920 /* register the network device */ 1920 /* register the network device */
@@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1924 dev_err(&pdev->dev, "error in register_netdev\n"); 1924 dev_err(&pdev->dev, "error in register_netdev\n");
1925 rc = -ENODEV; 1925 rc = -ENODEV;
1926 pm_runtime_put(&pdev->dev); 1926 pm_runtime_put(&pdev->dev);
1927 goto no_cpdma_chan; 1927 goto err_napi_del;
1928 } 1928 }
1929 1929
1930 1930
@@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
1937 1937
1938 return 0; 1938 return 0;
1939 1939
1940no_cpdma_chan: 1940err_napi_del:
1941 if (priv->txchan) 1941 netif_napi_del(&priv->napi);
1942 cpdma_chan_destroy(priv->txchan); 1942err_free_rxchan:
1943 if (priv->rxchan) 1943 cpdma_chan_destroy(priv->rxchan);
1944 cpdma_chan_destroy(priv->rxchan); 1944err_free_txchan:
1945 cpdma_chan_destroy(priv->txchan);
1946err_free_dma:
1945 cpdma_ctlr_destroy(priv->dma); 1947 cpdma_ctlr_destroy(priv->dma);
1946no_pdata: 1948no_pdata:
1947 if (of_phy_is_fixed_link(np)) 1949 if (of_phy_is_fixed_link(np))
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 450eec264a5e..4377c26f714d 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused,
792 break; 792 break;
793 793
794 case NETDEV_CHANGEADDR: 794 case NETDEV_CHANGEADDR:
795 list_for_each_entry(ipvlan, &port->ipvlans, pnode) 795 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
796 ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr); 796 ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
797 call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
798 }
797 break; 799 break;
798 800
799 case NETDEV_PRE_TYPE_CHANGE: 801 case NETDEV_PRE_TYPE_CHANGE:
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 6838129839ca..e757b09f1889 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
61 return rc; 61 return rc;
62 62
63 /* make rcal=100, since rdb default is 000 */ 63 /* make rcal=100, since rdb default is 000 */
64 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); 64 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
65 if (rc < 0) 65 if (rc < 0)
66 return rc; 66 return rc;
67 67
68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ 68 /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
69 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); 69 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
70 if (rc < 0) 70 if (rc < 0)
71 return rc; 71 return rc;
72 72
73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ 73 /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
74 rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); 74 rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
75 75
76 return 0; 76 return 0;
77} 77}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 5ad130c3da43..d5e0833d69b9 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
56 /* The register must be written to both the Shadow Register Select and 56 /* The register must be written to both the Shadow Register Select and
57 * the Shadow Read Register Selector 57 * the Shadow Read Register Selector
58 */ 58 */
59 phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | 59 phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
60 regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); 60 regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
61 return phy_read(phydev, MII_BCM54XX_AUX_CTL); 61 return phy_read(phydev, MII_BCM54XX_AUX_CTL);
62} 62}
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 7c73808cbbde..81cceaa412fe 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -14,11 +14,18 @@
14#ifndef _LINUX_BCM_PHY_LIB_H 14#ifndef _LINUX_BCM_PHY_LIB_H
15#define _LINUX_BCM_PHY_LIB_H 15#define _LINUX_BCM_PHY_LIB_H
16 16
17#include <linux/brcmphy.h>
17#include <linux/phy.h> 18#include <linux/phy.h>
18 19
19int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); 20int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
20int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); 21int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
21 22
23static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
24 u16 reg, u16 val)
25{
26 return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
27}
28
22int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); 29int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
23int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); 30int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
24 31
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 29b1c88b55cc..01d2ff2f6241 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
65static void r_rc_cal_reset(struct phy_device *phydev) 65static void r_rc_cal_reset(struct phy_device *phydev)
66{ 66{
67 /* Reset R_CAL/RC_CAL Engine */ 67 /* Reset R_CAL/RC_CAL Engine */
68 bcm_phy_write_exp(phydev, 0x00b0, 0x0010); 68 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
69 69
70 /* Disable Reset R_AL/RC_CAL Engine */ 70 /* Disable Reset R_AL/RC_CAL Engine */
71 bcm_phy_write_exp(phydev, 0x00b0, 0x0000); 71 bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
72} 72}
73 73
74static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) 74static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index f41b224a9cdb..ab195f0916d6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev)
573 ksz9031_of_load_skew_values(phydev, of_node, 573 ksz9031_of_load_skew_values(phydev, of_node,
574 MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, 574 MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
575 tx_data_skews, 4); 575 tx_data_skews, 4);
576
577 /* Silicon Errata Sheet (DS80000691D or DS80000692D):
578 * When the device links in the 1000BASE-T slave mode only,
579 * the optional 125MHz reference output clock (CLK125_NDO)
580 * has wide duty cycle variation.
581 *
582 * The optional CLK125_NDO clock does not meet the RGMII
583 * 45/55 percent (min/max) duty cycle requirement and therefore
584 * cannot be used directly by the MAC side for clocking
585 * applications that have setup/hold time requirements on
586 * rising and falling clock edges.
587 *
588 * Workaround:
589 * Force the phy to be the master to receive a stable clock
590 * which meets the duty cycle requirement.
591 */
592 if (of_property_read_bool(of_node, "micrel,force-master")) {
593 result = phy_read(phydev, MII_CTRL1000);
594 if (result < 0)
595 goto err_force_master;
596
597 /* enable master mode, config & prefer master */
598 result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER;
599 result = phy_write(phydev, MII_CTRL1000, result);
600 if (result < 0)
601 goto err_force_master;
602 }
576 } 603 }
577 604
578 return ksz9031_center_flp_timing(phydev); 605 return ksz9031_center_flp_timing(phydev);
606
607err_force_master:
608 phydev_err(phydev, "failed to force the phy to master mode\n");
609 return result;
579} 610}
580 611
581#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 612#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index dc7c7ec43202..02ad03a2fab7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
605 605
606 if (cmd == PPPIOCDETACH) { 606 if (cmd == PPPIOCDETACH) {
607 /* 607 /*
608 * We have to be careful here... if the file descriptor 608 * PPPIOCDETACH is no longer supported as it was heavily broken,
609 * has been dup'd, we could have another process in the 609 * and is only known to have been used by pppd older than
610 * middle of a poll using the same file *, so we had 610 * ppp-2.4.2 (released November 2003).
611 * better not free the interface data structures -
612 * instead we fail the ioctl. Even in this case, we
613 * shut down the interface if we are the owner of it.
614 * Actually, we should get rid of PPPIOCDETACH, userland
615 * (i.e. pppd) could achieve the same effect by closing
616 * this fd and reopening /dev/ppp.
617 */ 611 */
612 pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
613 current->comm, current->pid);
618 err = -EINVAL; 614 err = -EINVAL;
619 if (pf->kind == INTERFACE) {
620 ppp = PF_TO_PPP(pf);
621 rtnl_lock();
622 if (file == ppp->owner)
623 unregister_netdevice(ppp->dev);
624 rtnl_unlock();
625 }
626 if (atomic_long_read(&file->f_count) < 2) {
627 ppp_release(NULL, file);
628 err = 0;
629 } else
630 pr_warn("PPPIOCDETACH file->f_count=%ld\n",
631 atomic_long_read(&file->f_count));
632 goto out; 615 goto out;
633 } 616 }
634 617
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ef33950a45d9..23e9eb66197f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -681,15 +681,6 @@ static void tun_queue_purge(struct tun_file *tfile)
681 skb_queue_purge(&tfile->sk.sk_error_queue); 681 skb_queue_purge(&tfile->sk.sk_error_queue);
682} 682}
683 683
684static void tun_cleanup_tx_ring(struct tun_file *tfile)
685{
686 if (tfile->tx_ring.queue) {
687 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
688 xdp_rxq_info_unreg(&tfile->xdp_rxq);
689 memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
690 }
691}
692
693static void __tun_detach(struct tun_file *tfile, bool clean) 684static void __tun_detach(struct tun_file *tfile, bool clean)
694{ 685{
695 struct tun_file *ntfile; 686 struct tun_file *ntfile;
@@ -736,7 +727,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
736 tun->dev->reg_state == NETREG_REGISTERED) 727 tun->dev->reg_state == NETREG_REGISTERED)
737 unregister_netdevice(tun->dev); 728 unregister_netdevice(tun->dev);
738 } 729 }
739 tun_cleanup_tx_ring(tfile); 730 if (tun)
731 xdp_rxq_info_unreg(&tfile->xdp_rxq);
732 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
740 sock_put(&tfile->sk); 733 sock_put(&tfile->sk);
741 } 734 }
742} 735}
@@ -783,14 +776,14 @@ static void tun_detach_all(struct net_device *dev)
783 tun_napi_del(tun, tfile); 776 tun_napi_del(tun, tfile);
784 /* Drop read queue */ 777 /* Drop read queue */
785 tun_queue_purge(tfile); 778 tun_queue_purge(tfile);
779 xdp_rxq_info_unreg(&tfile->xdp_rxq);
786 sock_put(&tfile->sk); 780 sock_put(&tfile->sk);
787 tun_cleanup_tx_ring(tfile);
788 } 781 }
789 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 782 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
790 tun_enable_queue(tfile); 783 tun_enable_queue(tfile);
791 tun_queue_purge(tfile); 784 tun_queue_purge(tfile);
785 xdp_rxq_info_unreg(&tfile->xdp_rxq);
792 sock_put(&tfile->sk); 786 sock_put(&tfile->sk);
793 tun_cleanup_tx_ring(tfile);
794 } 787 }
795 BUG_ON(tun->numdisabled != 0); 788 BUG_ON(tun->numdisabled != 0);
796 789
@@ -834,7 +827,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
834 } 827 }
835 828
836 if (!tfile->detached && 829 if (!tfile->detached &&
837 ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) { 830 ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
831 GFP_KERNEL, tun_ptr_free)) {
838 err = -ENOMEM; 832 err = -ENOMEM;
839 goto out; 833 goto out;
840 } 834 }
@@ -1429,6 +1423,13 @@ static void tun_net_init(struct net_device *dev)
1429 dev->max_mtu = MAX_MTU - dev->hard_header_len; 1423 dev->max_mtu = MAX_MTU - dev->hard_header_len;
1430} 1424}
1431 1425
1426static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1427{
1428 struct sock *sk = tfile->socket.sk;
1429
1430 return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1431}
1432
1432/* Character device part */ 1433/* Character device part */
1433 1434
1434/* Poll */ 1435/* Poll */
@@ -1451,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1451 if (!ptr_ring_empty(&tfile->tx_ring)) 1452 if (!ptr_ring_empty(&tfile->tx_ring))
1452 mask |= EPOLLIN | EPOLLRDNORM; 1453 mask |= EPOLLIN | EPOLLRDNORM;
1453 1454
1454 if (tun->dev->flags & IFF_UP && 1455 /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
1455 (sock_writeable(sk) || 1456 * guarantee EPOLLOUT to be raised by either here or
1456 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1457 * tun_sock_write_space(). Then process could get notification
1457 sock_writeable(sk)))) 1458 * after it writes to a down device and meets -EIO.
1459 */
1460 if (tun_sock_writeable(tun, tfile) ||
1461 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1462 tun_sock_writeable(tun, tfile)))
1458 mask |= EPOLLOUT | EPOLLWRNORM; 1463 mask |= EPOLLOUT | EPOLLWRNORM;
1459 1464
1460 if (tun->dev->reg_state != NETREG_REGISTERED) 1465 if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -1645,7 +1650,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1645 else 1650 else
1646 *skb_xdp = 0; 1651 *skb_xdp = 0;
1647 1652
1648 preempt_disable(); 1653 local_bh_disable();
1649 rcu_read_lock(); 1654 rcu_read_lock();
1650 xdp_prog = rcu_dereference(tun->xdp_prog); 1655 xdp_prog = rcu_dereference(tun->xdp_prog);
1651 if (xdp_prog && !*skb_xdp) { 1656 if (xdp_prog && !*skb_xdp) {
@@ -1670,7 +1675,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1670 if (err) 1675 if (err)
1671 goto err_redirect; 1676 goto err_redirect;
1672 rcu_read_unlock(); 1677 rcu_read_unlock();
1673 preempt_enable(); 1678 local_bh_enable();
1674 return NULL; 1679 return NULL;
1675 case XDP_TX: 1680 case XDP_TX:
1676 get_page(alloc_frag->page); 1681 get_page(alloc_frag->page);
@@ -1679,7 +1684,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1679 goto err_redirect; 1684 goto err_redirect;
1680 tun_xdp_flush(tun->dev); 1685 tun_xdp_flush(tun->dev);
1681 rcu_read_unlock(); 1686 rcu_read_unlock();
1682 preempt_enable(); 1687 local_bh_enable();
1683 return NULL; 1688 return NULL;
1684 case XDP_PASS: 1689 case XDP_PASS:
1685 delta = orig_data - xdp.data; 1690 delta = orig_data - xdp.data;
@@ -1698,7 +1703,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1698 skb = build_skb(buf, buflen); 1703 skb = build_skb(buf, buflen);
1699 if (!skb) { 1704 if (!skb) {
1700 rcu_read_unlock(); 1705 rcu_read_unlock();
1701 preempt_enable(); 1706 local_bh_enable();
1702 return ERR_PTR(-ENOMEM); 1707 return ERR_PTR(-ENOMEM);
1703 } 1708 }
1704 1709
@@ -1708,7 +1713,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1708 alloc_frag->offset += buflen; 1713 alloc_frag->offset += buflen;
1709 1714
1710 rcu_read_unlock(); 1715 rcu_read_unlock();
1711 preempt_enable(); 1716 local_bh_enable();
1712 1717
1713 return skb; 1718 return skb;
1714 1719
@@ -1716,7 +1721,7 @@ err_redirect:
1716 put_page(alloc_frag->page); 1721 put_page(alloc_frag->page);
1717err_xdp: 1722err_xdp:
1718 rcu_read_unlock(); 1723 rcu_read_unlock();
1719 preempt_enable(); 1724 local_bh_enable();
1720 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1725 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1721 return NULL; 1726 return NULL;
1722} 1727}
@@ -1912,16 +1917,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1912 struct bpf_prog *xdp_prog; 1917 struct bpf_prog *xdp_prog;
1913 int ret; 1918 int ret;
1914 1919
1920 local_bh_disable();
1915 rcu_read_lock(); 1921 rcu_read_lock();
1916 xdp_prog = rcu_dereference(tun->xdp_prog); 1922 xdp_prog = rcu_dereference(tun->xdp_prog);
1917 if (xdp_prog) { 1923 if (xdp_prog) {
1918 ret = do_xdp_generic(xdp_prog, skb); 1924 ret = do_xdp_generic(xdp_prog, skb);
1919 if (ret != XDP_PASS) { 1925 if (ret != XDP_PASS) {
1920 rcu_read_unlock(); 1926 rcu_read_unlock();
1927 local_bh_enable();
1921 return total_len; 1928 return total_len;
1922 } 1929 }
1923 } 1930 }
1924 rcu_read_unlock(); 1931 rcu_read_unlock();
1932 local_bh_enable();
1925 } 1933 }
1926 1934
1927 rcu_read_lock(); 1935 rcu_read_lock();
@@ -3219,6 +3227,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
3219 &tun_proto, 0); 3227 &tun_proto, 0);
3220 if (!tfile) 3228 if (!tfile)
3221 return -ENOMEM; 3229 return -ENOMEM;
3230 if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3231 sk_free(&tfile->sk);
3232 return -ENOMEM;
3233 }
3234
3222 RCU_INIT_POINTER(tfile->tun, NULL); 3235 RCU_INIT_POINTER(tfile->tun, NULL);
3223 tfile->flags = 0; 3236 tfile->flags = 0;
3224 tfile->ifindex = 0; 3237 tfile->ifindex = 0;
@@ -3239,8 +3252,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
3239 3252
3240 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 3253 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3241 3254
3242 memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
3243
3244 return 0; 3255 return 0;
3245} 3256}
3246 3257
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 7220cd620717..0362acd5cdca 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
609 */ 609 */
610static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = { 610static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
611 .description = "CDC MBIM", 611 .description = "CDC MBIM",
612 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, 612 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
613 .bind = cdc_mbim_bind, 613 .bind = cdc_mbim_bind,
614 .unbind = cdc_mbim_unbind, 614 .unbind = cdc_mbim_unbind,
615 .manage_power = cdc_mbim_manage_power, 615 .manage_power = cdc_mbim_manage_power,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 42565dd33aa6..094680871687 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
1103 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1103 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1104 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ 1104 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
1105 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, 1105 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1106 {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
1106 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 1107 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1107 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 1108 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
1108 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ 1109 {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 770422e953f7..032e1ac10a30 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
707 void *data; 707 void *data;
708 u32 act; 708 u32 act;
709 709
710 /* Transient failure which in theory could occur if
711 * in-flight packets from before XDP was enabled reach
712 * the receive path after XDP is loaded.
713 */
714 if (unlikely(hdr->hdr.gso_type))
715 goto err_xdp;
716
710 /* This happens when rx buffer size is underestimated 717 /* This happens when rx buffer size is underestimated
711 * or headroom is not enough because of the buffer 718 * or headroom is not enough because of the buffer
712 * was refilled before XDP is set. This should only 719 * was refilled before XDP is set. This should only
@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
727 xdp_page = page; 734 xdp_page = page;
728 } 735 }
729 736
730 /* Transient failure which in theory could occur if
731 * in-flight packets from before XDP was enabled reach
732 * the receive path after XDP is loaded. In practice I
733 * was not able to create this condition.
734 */
735 if (unlikely(hdr->hdr.gso_type))
736 goto err_xdp;
737
738 /* Allow consuming headroom but reserve enough space to push 737 /* Allow consuming headroom but reserve enough space to push
739 * the descriptor on if we get an XDP_TX return code. 738 * the descriptor on if we get an XDP_TX return code.
740 */ 739 */
@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
775 } 774 }
776 *xdp_xmit = true; 775 *xdp_xmit = true;
777 if (unlikely(xdp_page != page)) 776 if (unlikely(xdp_page != page))
778 goto err_xdp; 777 put_page(page);
779 rcu_read_unlock(); 778 rcu_read_unlock();
780 goto xdp_xmit; 779 goto xdp_xmit;
781 case XDP_REDIRECT: 780 case XDP_REDIRECT:
@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
787 } 786 }
788 *xdp_xmit = true; 787 *xdp_xmit = true;
789 if (unlikely(xdp_page != page)) 788 if (unlikely(xdp_page != page))
790 goto err_xdp; 789 put_page(page);
791 rcu_read_unlock(); 790 rcu_read_unlock();
792 goto xdp_xmit; 791 goto xdp_xmit;
793 default: 792 default:
@@ -875,7 +874,7 @@ err_xdp:
875 rcu_read_unlock(); 874 rcu_read_unlock();
876err_skb: 875err_skb:
877 put_page(page); 876 put_page(page);
878 while (--num_buf) { 877 while (num_buf-- > 1) {
879 buf = virtqueue_get_buf(rq->vq, &len); 878 buf = virtqueue_get_buf(rq->vq, &len);
880 if (unlikely(!buf)) { 879 if (unlikely(!buf)) {
881 pr_debug("%s: rx error: %d buffers missing\n", 880 pr_debug("%s: rx error: %d buffers missing\n",
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9ebe2a689966..27a9bb8c9611 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
369 369
370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { 371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
372 /* Prevent any &gdesc->tcd field from being (speculatively)
373 * read before (&gdesc->tcd)->gen is read.
374 */
375 dma_rmb();
376
372 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( 377 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
373 &gdesc->tcd), tq, adapter->pdev, 378 &gdesc->tcd), tq, adapter->pdev,
374 adapter); 379 adapter);
@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1103 gdesc->txd.tci = skb_vlan_tag_get(skb); 1108 gdesc->txd.tci = skb_vlan_tag_get(skb);
1104 } 1109 }
1105 1110
1111 /* Ensure that the write to (&gdesc->txd)->gen will be observed after
1112 * all other writes to &gdesc->txd.
1113 */
1114 dma_wmb();
1115
1106 /* finally flips the GEN bit of the SOP desc. */ 1116 /* finally flips the GEN bit of the SOP desc. */
1107 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ 1117 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
1108 VMXNET3_TXD_GEN); 1118 VMXNET3_TXD_GEN);
@@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1298 */ 1308 */
1299 break; 1309 break;
1300 } 1310 }
1311
1312 /* Prevent any rcd field from being (speculatively) read before
1313 * rcd->gen is read.
1314 */
1315 dma_rmb();
1316
1301 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && 1317 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
1302 rcd->rqID != rq->dataRingQid); 1318 rcd->rqID != rq->dataRingQid);
1303 idx = rcd->rxdIdx; 1319 idx = rcd->rxdIdx;
@@ -1528,6 +1544,12 @@ rcd_done:
1528 ring->next2comp = idx; 1544 ring->next2comp = idx;
1529 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); 1545 num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
1530 ring = rq->rx_ring + ring_idx; 1546 ring = rq->rx_ring + ring_idx;
1547
1548 /* Ensure that the writes to rxd->gen bits will be observed
1549 * after all other writes to rxd objects.
1550 */
1551 dma_wmb();
1552
1531 while (num_to_alloc) { 1553 while (num_to_alloc) {
1532 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, 1554 vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
1533 &rxCmdDesc); 1555 &rxCmdDesc);
@@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2688/* ==================== initialization and cleanup routines ============ */ 2710/* ==================== initialization and cleanup routines ============ */
2689 2711
2690static int 2712static int
2691vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) 2713vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
2692{ 2714{
2693 int err; 2715 int err;
2694 unsigned long mmio_start, mmio_len; 2716 unsigned long mmio_start, mmio_len;
@@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2700 return err; 2722 return err;
2701 } 2723 }
2702 2724
2703 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2704 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2705 dev_err(&pdev->dev,
2706 "pci_set_consistent_dma_mask failed\n");
2707 err = -EIO;
2708 goto err_set_mask;
2709 }
2710 *dma64 = true;
2711 } else {
2712 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2713 dev_err(&pdev->dev,
2714 "pci_set_dma_mask failed\n");
2715 err = -EIO;
2716 goto err_set_mask;
2717 }
2718 *dma64 = false;
2719 }
2720
2721 err = pci_request_selected_regions(pdev, (1 << 2) - 1, 2725 err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2722 vmxnet3_driver_name); 2726 vmxnet3_driver_name);
2723 if (err) { 2727 if (err) {
2724 dev_err(&pdev->dev, 2728 dev_err(&pdev->dev,
2725 "Failed to request region for adapter: error %d\n", err); 2729 "Failed to request region for adapter: error %d\n", err);
2726 goto err_set_mask; 2730 goto err_enable_device;
2727 } 2731 }
2728 2732
2729 pci_set_master(pdev); 2733 pci_set_master(pdev);
@@ -2751,7 +2755,7 @@ err_bar1:
2751 iounmap(adapter->hw_addr0); 2755 iounmap(adapter->hw_addr0);
2752err_ioremap: 2756err_ioremap:
2753 pci_release_selected_regions(pdev, (1 << 2) - 1); 2757 pci_release_selected_regions(pdev, (1 << 2) - 1);
2754err_set_mask: 2758err_enable_device:
2755 pci_disable_device(pdev); 2759 pci_disable_device(pdev);
2756 return err; 2760 return err;
2757} 2761}
@@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3254#endif 3258#endif
3255 }; 3259 };
3256 int err; 3260 int err;
3257 bool dma64 = false; /* stupid gcc */ 3261 bool dma64;
3258 u32 ver; 3262 u32 ver;
3259 struct net_device *netdev; 3263 struct net_device *netdev;
3260 struct vmxnet3_adapter *adapter; 3264 struct vmxnet3_adapter *adapter;
@@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3300 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; 3304 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3301 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; 3305 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3302 3306
3307 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
3308 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
3309 dev_err(&pdev->dev,
3310 "pci_set_consistent_dma_mask failed\n");
3311 err = -EIO;
3312 goto err_set_mask;
3313 }
3314 dma64 = true;
3315 } else {
3316 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
3317 dev_err(&pdev->dev,
3318 "pci_set_dma_mask failed\n");
3319 err = -EIO;
3320 goto err_set_mask;
3321 }
3322 dma64 = false;
3323 }
3324
3303 spin_lock_init(&adapter->cmd_lock); 3325 spin_lock_init(&adapter->cmd_lock);
3304 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 3326 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3305 sizeof(struct vmxnet3_adapter), 3327 sizeof(struct vmxnet3_adapter),
@@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3307 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { 3329 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3308 dev_err(&pdev->dev, "Failed to map dma\n"); 3330 dev_err(&pdev->dev, "Failed to map dma\n");
3309 err = -EFAULT; 3331 err = -EFAULT;
3310 goto err_dma_map; 3332 goto err_set_mask;
3311 } 3333 }
3312 adapter->shared = dma_alloc_coherent( 3334 adapter->shared = dma_alloc_coherent(
3313 &adapter->pdev->dev, 3335 &adapter->pdev->dev,
@@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3358 } 3380 }
3359#endif /* VMXNET3_RSS */ 3381#endif /* VMXNET3_RSS */
3360 3382
3361 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 3383 err = vmxnet3_alloc_pci_resources(adapter);
3362 if (err < 0) 3384 if (err < 0)
3363 goto err_alloc_pci; 3385 goto err_alloc_pci;
3364 3386
@@ -3504,7 +3526,7 @@ err_alloc_queue_desc:
3504err_alloc_shared: 3526err_alloc_shared:
3505 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, 3527 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
3506 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); 3528 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
3507err_dma_map: 3529err_set_mask:
3508 free_netdev(netdev); 3530 free_netdev(netdev);
3509 return err; 3531 return err;
3510} 3532}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a3326463b71f..a2c554f8a61b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,12 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* Each byte of this 32-bit integer encodes a version number in
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 75 * VMXNET3_DRIVER_VERSION_STRING.
76 */
77#define VMXNET3_DRIVER_VERSION_NUM 0x01041000
76 78
77#if defined(CONFIG_PCI_MSI) 79#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 80 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f8a0234d332c..5517ea4c2aa0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1590,14 +1590,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1590 struct iwl_trans *trans) 1590 struct iwl_trans *trans)
1591{ 1591{
1592 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1592 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1593 int max_irqs, num_irqs, i, ret, nr_online_cpus; 1593 int max_irqs, num_irqs, i, ret;
1594 u16 pci_cmd; 1594 u16 pci_cmd;
1595 1595
1596 if (!trans->cfg->mq_rx_supported) 1596 if (!trans->cfg->mq_rx_supported)
1597 goto enable_msi; 1597 goto enable_msi;
1598 1598
1599 nr_online_cpus = num_online_cpus(); 1599 max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
1600 max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
1601 for (i = 0; i < max_irqs; i++) 1600 for (i = 0; i < max_irqs; i++)
1602 trans_pcie->msix_entries[i].entry = i; 1601 trans_pcie->msix_entries[i].entry = i;
1603 1602
@@ -1623,16 +1622,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1623 * Two interrupts less: non rx causes shared with FBQ and RSS. 1622 * Two interrupts less: non rx causes shared with FBQ and RSS.
1624 * More than two interrupts: we will use fewer RSS queues. 1623 * More than two interrupts: we will use fewer RSS queues.
1625 */ 1624 */
1626 if (num_irqs <= nr_online_cpus) { 1625 if (num_irqs <= max_irqs - 2) {
1627 trans_pcie->trans->num_rx_queues = num_irqs + 1; 1626 trans_pcie->trans->num_rx_queues = num_irqs + 1;
1628 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX | 1627 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1629 IWL_SHARED_IRQ_FIRST_RSS; 1628 IWL_SHARED_IRQ_FIRST_RSS;
1630 } else if (num_irqs == nr_online_cpus + 1) { 1629 } else if (num_irqs == max_irqs - 1) {
1631 trans_pcie->trans->num_rx_queues = num_irqs; 1630 trans_pcie->trans->num_rx_queues = num_irqs;
1632 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX; 1631 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1633 } else { 1632 } else {
1634 trans_pcie->trans->num_rx_queues = num_irqs - 1; 1633 trans_pcie->trans->num_rx_queues = num_irqs - 1;
1635 } 1634 }
1635 WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1636 1636
1637 trans_pcie->alloc_vecs = num_irqs; 1637 trans_pcie->alloc_vecs = num_irqs;
1638 trans_pcie->msix_enabled = true; 1638 trans_pcie->msix_enabled = true;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4a017a0d71ea..920c23e542a5 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3340,7 +3340,7 @@ out_err:
3340static int hwsim_dump_radio_nl(struct sk_buff *skb, 3340static int hwsim_dump_radio_nl(struct sk_buff *skb,
3341 struct netlink_callback *cb) 3341 struct netlink_callback *cb)
3342{ 3342{
3343 int last_idx = cb->args[0]; 3343 int last_idx = cb->args[0] - 1;
3344 struct mac80211_hwsim_data *data = NULL; 3344 struct mac80211_hwsim_data *data = NULL;
3345 int res = 0; 3345 int res = 0;
3346 void *hdr; 3346 void *hdr;
@@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
3368 last_idx = data->idx; 3368 last_idx = data->idx;
3369 } 3369 }
3370 3370
3371 cb->args[0] = last_idx; 3371 cb->args[0] = last_idx + 1;
3372 3372
3373 /* list changed, but no new element sent, set interrupted flag */ 3373 /* list changed, but no new element sent, set interrupted flag */
3374 if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { 3374 if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index a6884e73d2ab..7ddee980048b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
372 372
373 /* 373 /*
374 * Determine IFS values 374 * Determine IFS values
375 * - Use TXOP_BACKOFF for probe and management frames except beacons 375 * - Use TXOP_BACKOFF for management frames except beacons
376 * - Use TXOP_SIFS for fragment bursts 376 * - Use TXOP_SIFS for fragment bursts
377 * - Use TXOP_HTTXOP for everything else 377 * - Use TXOP_HTTXOP for everything else
378 * 378 *
379 * Note: rt2800 devices won't use CTS protection (if used) 379 * Note: rt2800 devices won't use CTS protection (if used)
380 * for frames not transmitted with TXOP_HTTXOP 380 * for frames not transmitted with TXOP_HTTXOP
381 */ 381 */
382 if ((ieee80211_is_mgmt(hdr->frame_control) && 382 if (ieee80211_is_mgmt(hdr->frame_control) &&
383 !ieee80211_is_beacon(hdr->frame_control)) || 383 !ieee80211_is_beacon(hdr->frame_control))
384 (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
385 txdesc->u.ht.txop = TXOP_BACKOFF; 384 txdesc->u.ht.txop = TXOP_BACKOFF;
386 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) 385 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
387 txdesc->u.ht.txop = TXOP_SIFS; 386 txdesc->u.ht.txop = TXOP_SIFS;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 99b857e5a7a9..b9ca782fe82d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1447,8 +1447,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
1447 if (ns->lba_shift == 0) 1447 if (ns->lba_shift == 0)
1448 ns->lba_shift = 9; 1448 ns->lba_shift = 9;
1449 ns->noiob = le16_to_cpu(id->noiob); 1449 ns->noiob = le16_to_cpu(id->noiob);
1450 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1451 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1450 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
1451 ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
1452 /* the PI implementation requires metadata equal t10 pi tuple size */ 1452 /* the PI implementation requires metadata equal t10 pi tuple size */
1453 if (ns->ms == sizeof(struct t10_pi_tuple)) 1453 if (ns->ms == sizeof(struct t10_pi_tuple))
1454 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1454 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index e7bbdf947bbc..8350ca2311c7 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -91,6 +91,8 @@ static int send_command(struct cros_ec_device *ec_dev,
91 usleep_range(10000, 11000); 91 usleep_range(10000, 11000);
92 92
93 ret = (*xfer_fxn)(ec_dev, status_msg); 93 ret = (*xfer_fxn)(ec_dev, status_msg);
94 if (ret == -EAGAIN)
95 continue;
94 if (ret < 0) 96 if (ret < 0)
95 break; 97 break;
96 98
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a32c5c00e0e7..ffffb9909ae1 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -163,6 +163,16 @@ MODULE_LICENSE("GPL");
163 163
164static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; 164static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
165 165
166static bool ashs_present(void)
167{
168 int i = 0;
169 while (ashs_ids[i]) {
170 if (acpi_dev_found(ashs_ids[i++]))
171 return true;
172 }
173 return false;
174}
175
166struct bios_args { 176struct bios_args {
167 u32 arg0; 177 u32 arg0;
168 u32 arg1; 178 u32 arg1;
@@ -1025,6 +1035,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
1025 1035
1026static void asus_wmi_rfkill_exit(struct asus_wmi *asus) 1036static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
1027{ 1037{
1038 if (asus->driver->wlan_ctrl_by_user && ashs_present())
1039 return;
1040
1028 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5"); 1041 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
1029 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6"); 1042 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
1030 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7"); 1043 asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
@@ -2121,16 +2134,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
2121 return 0; 2134 return 0;
2122} 2135}
2123 2136
2124static bool ashs_present(void)
2125{
2126 int i = 0;
2127 while (ashs_ids[i]) {
2128 if (acpi_dev_found(ashs_ids[i++]))
2129 return true;
2130 }
2131 return false;
2132}
2133
2134/* 2137/*
2135 * WMI Driver 2138 * WMI Driver
2136 */ 2139 */
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 04143c08bd6e..02c03e418c27 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3034,7 +3034,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3034 cqr->callback_data = req; 3034 cqr->callback_data = req;
3035 cqr->status = DASD_CQR_FILLED; 3035 cqr->status = DASD_CQR_FILLED;
3036 cqr->dq = dq; 3036 cqr->dq = dq;
3037 req->completion_data = cqr; 3037 *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
3038
3038 blk_mq_start_request(req); 3039 blk_mq_start_request(req);
3039 spin_lock(&block->queue_lock); 3040 spin_lock(&block->queue_lock);
3040 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3041 list_add_tail(&cqr->blocklist, &block->ccw_queue);
@@ -3058,12 +3059,13 @@ out:
3058 */ 3059 */
3059enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3060enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
3060{ 3061{
3061 struct dasd_ccw_req *cqr = req->completion_data;
3062 struct dasd_block *block = req->q->queuedata; 3062 struct dasd_block *block = req->q->queuedata;
3063 struct dasd_device *device; 3063 struct dasd_device *device;
3064 struct dasd_ccw_req *cqr;
3064 unsigned long flags; 3065 unsigned long flags;
3065 int rc = 0; 3066 int rc = 0;
3066 3067
3068 cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
3067 if (!cqr) 3069 if (!cqr)
3068 return BLK_EH_NOT_HANDLED; 3070 return BLK_EH_NOT_HANDLED;
3069 3071
@@ -3169,6 +3171,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
3169 int rc; 3171 int rc;
3170 3172
3171 block->tag_set.ops = &dasd_mq_ops; 3173 block->tag_set.ops = &dasd_mq_ops;
3174 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
3172 block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES; 3175 block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
3173 block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV; 3176 block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
3174 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3177 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a8b831000b2d..18c4f933e8b9 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * Debug traces for zfcp. 5 * Debug traces for zfcp.
6 * 6 *
7 * Copyright IBM Corp. 2002, 2017 7 * Copyright IBM Corp. 2002, 2018
8 */ 8 */
9 9
10#define KMSG_COMPONENT "zfcp" 10#define KMSG_COMPONENT "zfcp"
@@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
308 spin_unlock_irqrestore(&dbf->rec_lock, flags); 308 spin_unlock_irqrestore(&dbf->rec_lock, flags);
309} 309}
310 310
311/**
312 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
313 * @tag: identifier for event
314 * @adapter: adapter on which the erp_action should run
315 * @port: remote port involved in the erp_action
316 * @sdev: scsi device involved in the erp_action
317 * @want: wanted erp_action
318 * @need: required erp_action
319 *
320 * The adapter->erp_lock must not be held.
321 */
322void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
323 struct zfcp_port *port, struct scsi_device *sdev,
324 u8 want, u8 need)
325{
326 unsigned long flags;
327
328 read_lock_irqsave(&adapter->erp_lock, flags);
329 zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
330 read_unlock_irqrestore(&adapter->erp_lock, flags);
331}
311 332
312/** 333/**
313 * zfcp_dbf_rec_run_lvl - trace event related to running recovery 334 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index bf8ea4df2bb8..e5eed8aac0ce 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -4,7 +4,7 @@
4 * 4 *
5 * External function declarations. 5 * External function declarations.
6 * 6 *
7 * Copyright IBM Corp. 2002, 2016 7 * Copyright IBM Corp. 2002, 2018
8 */ 8 */
9 9
10#ifndef ZFCP_EXT_H 10#ifndef ZFCP_EXT_H
@@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
35extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); 35extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
36extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, 36extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
37 struct zfcp_port *, struct scsi_device *, u8, u8); 37 struct zfcp_port *, struct scsi_device *, u8, u8);
38extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
39 struct zfcp_port *port,
40 struct scsi_device *sdev, u8 want, u8 need);
38extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); 41extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
39extern void zfcp_dbf_rec_run_lvl(int level, char *tag, 42extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
40 struct zfcp_erp_action *erp); 43 struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 4d2ba5682493..22f9562f415c 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -4,7 +4,7 @@
4 * 4 *
5 * Interface to Linux SCSI midlayer. 5 * Interface to Linux SCSI midlayer.
6 * 6 *
7 * Copyright IBM Corp. 2002, 2017 7 * Copyright IBM Corp. 2002, 2018
8 */ 8 */
9 9
10#define KMSG_COMPONENT "zfcp" 10#define KMSG_COMPONENT "zfcp"
@@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
618 ids.port_id = port->d_id; 618 ids.port_id = port->d_id;
619 ids.roles = FC_RPORT_ROLE_FCP_TARGET; 619 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
620 620
621 zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, 621 zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
622 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, 622 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
623 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); 623 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
624 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); 624 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
625 if (!rport) { 625 if (!rport) {
626 dev_err(&port->adapter->ccw_device->dev, 626 dev_err(&port->adapter->ccw_device->dev,
@@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
642 struct fc_rport *rport = port->rport; 642 struct fc_rport *rport = port->rport;
643 643
644 if (rport) { 644 if (rport) {
645 zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, 645 zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
646 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, 646 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
647 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); 647 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
648 fc_remote_port_delete(rport); 648 fc_remote_port_delete(rport);
649 port->rport = NULL; 649 port->rport = NULL;
650 } 650 }
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e29f9b8fd66d..56c940394729 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -182,7 +182,7 @@ zalon7xx-objs := zalon.o ncr53c8xx.o
182NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o 182NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
183 183
184# Files generated that shall be removed upon make clean 184# Files generated that shall be removed upon make clean
185clean-files := 53c700_d.h 53c700_u.h 185clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
186 186
187$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h 187$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
188 188
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 36f6190931bc..456ce9f19569 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -51,6 +51,8 @@ struct srp_internal {
51 struct transport_container rport_attr_cont; 51 struct transport_container rport_attr_cont;
52}; 52};
53 53
54static int scsi_is_srp_rport(const struct device *dev);
55
54#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t) 56#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
55 57
56#define dev_to_rport(d) container_of(d, struct srp_rport, dev) 58#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
@@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
60 return dev_to_shost(r->dev.parent); 62 return dev_to_shost(r->dev.parent);
61} 63}
62 64
65static int find_child_rport(struct device *dev, void *data)
66{
67 struct device **child = data;
68
69 if (scsi_is_srp_rport(dev)) {
70 WARN_ON_ONCE(*child);
71 *child = dev;
72 }
73 return 0;
74}
75
63static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost) 76static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
64{ 77{
65 return transport_class_to_srp_rport(&shost->shost_gendev); 78 struct device *child = NULL;
79
80 WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
81 find_child_rport) < 0);
82 return child ? dev_to_rport(child) : NULL;
66} 83}
67 84
68/** 85/**
@@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
600 struct srp_rport *rport = shost_to_rport(shost); 617 struct srp_rport *rport = shost_to_rport(shost);
601 618
602 pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); 619 pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
603 return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && 620 return rport && rport->fast_io_fail_tmo < 0 &&
621 rport->dev_loss_tmo < 0 &&
604 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 622 i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
605 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 623 BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
606} 624}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c198b96368dd..5c40d809830f 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1894,7 +1894,7 @@ retry:
1894 num = (rem_sz > scatter_elem_sz_prev) ? 1894 num = (rem_sz > scatter_elem_sz_prev) ?
1895 scatter_elem_sz_prev : rem_sz; 1895 scatter_elem_sz_prev : rem_sz;
1896 1896
1897 schp->pages[k] = alloc_pages(gfp_mask, order); 1897 schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
1898 if (!schp->pages[k]) 1898 if (!schp->pages[k])
1899 goto out; 1899 goto out;
1900 1900
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 2a21f2d48592..35fab1e18adc 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
188 struct scsi_device *SDev; 188 struct scsi_device *SDev;
189 struct scsi_sense_hdr sshdr; 189 struct scsi_sense_hdr sshdr;
190 int result, err = 0, retries = 0; 190 int result, err = 0, retries = 0;
191 unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
191 192
192 SDev = cd->device; 193 SDev = cd->device;
193 194
195 if (cgc->sense)
196 senseptr = sense_buffer;
197
194 retry: 198 retry:
195 if (!scsi_block_when_processing_errors(SDev)) { 199 if (!scsi_block_when_processing_errors(SDev)) {
196 err = -ENODEV; 200 err = -ENODEV;
@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
198 } 202 }
199 203
200 result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, 204 result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
201 cgc->buffer, cgc->buflen, 205 cgc->buffer, cgc->buflen, senseptr, &sshdr,
202 (unsigned char *)cgc->sense, &sshdr,
203 cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); 206 cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
204 207
208 if (cgc->sense)
209 memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
210
205 /* Minimal error checking. Ignore cases we know about, and report the rest. */ 211 /* Minimal error checking. Ignore cases we know about, and report the rest. */
206 if (driver_byte(result) != 0) { 212 if (driver_byte(result) != 0) {
207 switch (sshdr.sense_key) { 213 switch (sshdr.sense_key) {
diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c
index 8d8659463b3e..feeb17cebc25 100644
--- a/drivers/soc/lantiq/gphy.c
+++ b/drivers/soc/lantiq/gphy.c
@@ -30,7 +30,6 @@ struct xway_gphy_priv {
30 struct clk *gphy_clk_gate; 30 struct clk *gphy_clk_gate;
31 struct reset_control *gphy_reset; 31 struct reset_control *gphy_reset;
32 struct reset_control *gphy_reset2; 32 struct reset_control *gphy_reset2;
33 struct notifier_block gphy_reboot_nb;
34 void __iomem *membase; 33 void __iomem *membase;
35 char *fw_name; 34 char *fw_name;
36}; 35};
@@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
64}; 63};
65MODULE_DEVICE_TABLE(of, xway_gphy_match); 64MODULE_DEVICE_TABLE(of, xway_gphy_match);
66 65
67static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
68{
69 return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
70}
71
72static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
73 unsigned long code, void *unused)
74{
75 struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
76
77 if (priv) {
78 reset_control_assert(priv->gphy_reset);
79 reset_control_assert(priv->gphy_reset2);
80 }
81
82 return NOTIFY_DONE;
83}
84
85static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, 66static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
86 dma_addr_t *dev_addr) 67 dma_addr_t *dev_addr)
87{ 68{
@@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
205 reset_control_deassert(priv->gphy_reset); 186 reset_control_deassert(priv->gphy_reset);
206 reset_control_deassert(priv->gphy_reset2); 187 reset_control_deassert(priv->gphy_reset2);
207 188
208 /* assert the gphy reset because it can hang after a reboot: */
209 priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
210 priv->gphy_reboot_nb.priority = -1;
211
212 ret = register_reboot_notifier(&priv->gphy_reboot_nb);
213 if (ret)
214 dev_warn(dev, "Failed to register reboot notifier\n");
215
216 platform_set_drvdata(pdev, priv); 189 platform_set_drvdata(pdev, priv);
217 190
218 return ret; 191 return ret;
@@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
220 193
221static int xway_gphy_remove(struct platform_device *pdev) 194static int xway_gphy_remove(struct platform_device *pdev)
222{ 195{
223 struct device *dev = &pdev->dev;
224 struct xway_gphy_priv *priv = platform_get_drvdata(pdev); 196 struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
225 int ret;
226
227 reset_control_assert(priv->gphy_reset);
228 reset_control_assert(priv->gphy_reset2);
229 197
230 iowrite32be(0, priv->membase); 198 iowrite32be(0, priv->membase);
231 199
232 clk_disable_unprepare(priv->gphy_clk_gate); 200 clk_disable_unprepare(priv->gphy_clk_gate);
233 201
234 ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
235 if (ret)
236 dev_warn(dev, "Failed to unregister reboot notifier\n");
237
238 return 0; 202 return 0;
239} 203}
240 204
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 9371651d8017..c574dd210500 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -117,7 +117,7 @@ config SSB_SERIAL
117 117
118config SSB_DRIVER_PCICORE_POSSIBLE 118config SSB_DRIVER_PCICORE_POSSIBLE
119 bool 119 bool
120 depends on SSB_PCIHOST && SSB = y 120 depends on SSB_PCIHOST
121 default y 121 default y
122 122
123config SSB_DRIVER_PCICORE 123config SSB_DRIVER_PCICORE
@@ -131,7 +131,7 @@ config SSB_DRIVER_PCICORE
131 131
132config SSB_PCICORE_HOSTMODE 132config SSB_PCICORE_HOSTMODE
133 bool "Hostmode support for SSB PCI core" 133 bool "Hostmode support for SSB PCI core"
134 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS 134 depends on SSB_DRIVER_PCICORE && SSB_DRIVER_MIPS && SSB = y
135 help 135 help
136 PCIcore hostmode operation (external PCI bus). 136 PCIcore hostmode operation (external PCI bus).
137 137
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4ad89ea71a70..4f26bdc3d1dc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -2121,6 +2121,8 @@ static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2121 2121
2122 if (val >= 0) { 2122 if (val >= 0) {
2123 udev->qfull_time_out = val * MSEC_PER_SEC; 2123 udev->qfull_time_out = val * MSEC_PER_SEC;
2124 } else if (val == -1) {
2125 udev->qfull_time_out = val;
2124 } else { 2126 } else {
2125 printk(KERN_ERR "Invalid qfull timeout value %d\n", val); 2127 printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
2126 return -EINVAL; 2128 return -EINVAL;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 2d2ceda9aa26..500911f16498 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1255,7 +1255,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
1255 /* Map empty entries to null UUID */ 1255 /* Map empty entries to null UUID */
1256 uuid[0] = 0; 1256 uuid[0] = 0;
1257 uuid[1] = 0; 1257 uuid[1] = 0;
1258 } else { 1258 } else if (uuid[0] != 0 || uuid[1] != 0) {
1259 /* Upper two DWs are always one's */ 1259 /* Upper two DWs are always one's */
1260 uuid[2] = 0xffffffff; 1260 uuid[2] = 0xffffffff;
1261 uuid[3] = 0xffffffff; 1261 uuid[3] = 0xffffffff;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 5c212bf29640..3c082451ab1a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -404,6 +404,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
404{ 404{
405 unsigned long pfn = 0; 405 unsigned long pfn = 0;
406 long ret, pinned = 0, lock_acct = 0; 406 long ret, pinned = 0, lock_acct = 0;
407 bool rsvd;
407 dma_addr_t iova = vaddr - dma->vaddr + dma->iova; 408 dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
408 409
409 /* This code path is only user initiated */ 410 /* This code path is only user initiated */
@@ -414,23 +415,14 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
414 if (ret) 415 if (ret)
415 return ret; 416 return ret;
416 417
417 if (is_invalid_reserved_pfn(*pfn_base)) {
418 struct vm_area_struct *vma;
419
420 down_read(&current->mm->mmap_sem);
421 vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
422 pinned = min_t(long, npage, vma_pages(vma));
423 up_read(&current->mm->mmap_sem);
424 return pinned;
425 }
426
427 pinned++; 418 pinned++;
419 rsvd = is_invalid_reserved_pfn(*pfn_base);
428 420
429 /* 421 /*
430 * Reserved pages aren't counted against the user, externally pinned 422 * Reserved pages aren't counted against the user, externally pinned
431 * pages are already counted against the user. 423 * pages are already counted against the user.
432 */ 424 */
433 if (!vfio_find_vpfn(dma, iova)) { 425 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
434 if (!lock_cap && current->mm->locked_vm + 1 > limit) { 426 if (!lock_cap && current->mm->locked_vm + 1 > limit) {
435 put_pfn(*pfn_base, dma->prot); 427 put_pfn(*pfn_base, dma->prot);
436 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, 428 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
@@ -450,12 +442,13 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
450 if (ret) 442 if (ret)
451 break; 443 break;
452 444
453 if (pfn != *pfn_base + pinned) { 445 if (pfn != *pfn_base + pinned ||
446 rsvd != is_invalid_reserved_pfn(pfn)) {
454 put_pfn(pfn, dma->prot); 447 put_pfn(pfn, dma->prot);
455 break; 448 break;
456 } 449 }
457 450
458 if (!vfio_find_vpfn(dma, iova)) { 451 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
459 if (!lock_cap && 452 if (!lock_cap &&
460 current->mm->locked_vm + lock_acct + 1 > limit) { 453 current->mm->locked_vm + lock_acct + 1 > limit) {
461 put_pfn(pfn, dma->prot); 454 put_pfn(pfn, dma->prot);
@@ -473,8 +466,10 @@ out:
473 466
474unpin_out: 467unpin_out:
475 if (ret) { 468 if (ret) {
476 for (pfn = *pfn_base ; pinned ; pfn++, pinned--) 469 if (!rsvd) {
477 put_pfn(pfn, dma->prot); 470 for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
471 put_pfn(pfn, dma->prot);
472 }
478 473
479 return ret; 474 return ret;
480 } 475 }
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 986058a57917..eeaf6739215f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -105,7 +105,9 @@ struct vhost_net_virtqueue {
105 /* vhost zerocopy support fields below: */ 105 /* vhost zerocopy support fields below: */
106 /* last used idx for outstanding DMA zerocopy buffers */ 106 /* last used idx for outstanding DMA zerocopy buffers */
107 int upend_idx; 107 int upend_idx;
108 /* first used idx for DMA done zerocopy buffers */ 108 /* For TX, first used idx for DMA done zerocopy buffers
109 * For RX, number of batched heads
110 */
109 int done_idx; 111 int done_idx;
110 /* an array of userspace buffers info */ 112 /* an array of userspace buffers info */
111 struct ubuf_info *ubuf_info; 113 struct ubuf_info *ubuf_info;
@@ -626,6 +628,18 @@ static int sk_has_rx_data(struct sock *sk)
626 return skb_queue_empty(&sk->sk_receive_queue); 628 return skb_queue_empty(&sk->sk_receive_queue);
627} 629}
628 630
631static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
632{
633 struct vhost_virtqueue *vq = &nvq->vq;
634 struct vhost_dev *dev = vq->dev;
635
636 if (!nvq->done_idx)
637 return;
638
639 vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
640 nvq->done_idx = 0;
641}
642
629static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) 643static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
630{ 644{
631 struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX]; 645 struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
@@ -635,6 +649,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
635 int len = peek_head_len(rvq, sk); 649 int len = peek_head_len(rvq, sk);
636 650
637 if (!len && vq->busyloop_timeout) { 651 if (!len && vq->busyloop_timeout) {
652 /* Flush batched heads first */
653 vhost_rx_signal_used(rvq);
638 /* Both tx vq and rx socket were polled here */ 654 /* Both tx vq and rx socket were polled here */
639 mutex_lock_nested(&vq->mutex, 1); 655 mutex_lock_nested(&vq->mutex, 1);
640 vhost_disable_notify(&net->dev, vq); 656 vhost_disable_notify(&net->dev, vq);
@@ -762,7 +778,7 @@ static void handle_rx(struct vhost_net *net)
762 }; 778 };
763 size_t total_len = 0; 779 size_t total_len = 0;
764 int err, mergeable; 780 int err, mergeable;
765 s16 headcount, nheads = 0; 781 s16 headcount;
766 size_t vhost_hlen, sock_hlen; 782 size_t vhost_hlen, sock_hlen;
767 size_t vhost_len, sock_len; 783 size_t vhost_len, sock_len;
768 struct socket *sock; 784 struct socket *sock;
@@ -790,8 +806,8 @@ static void handle_rx(struct vhost_net *net)
790 while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { 806 while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
791 sock_len += sock_hlen; 807 sock_len += sock_hlen;
792 vhost_len = sock_len + vhost_hlen; 808 vhost_len = sock_len + vhost_hlen;
793 headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len, 809 headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
794 &in, vq_log, &log, 810 vhost_len, &in, vq_log, &log,
795 likely(mergeable) ? UIO_MAXIOV : 1); 811 likely(mergeable) ? UIO_MAXIOV : 1);
796 /* On error, stop handling until the next kick. */ 812 /* On error, stop handling until the next kick. */
797 if (unlikely(headcount < 0)) 813 if (unlikely(headcount < 0))
@@ -862,12 +878,9 @@ static void handle_rx(struct vhost_net *net)
862 vhost_discard_vq_desc(vq, headcount); 878 vhost_discard_vq_desc(vq, headcount);
863 goto out; 879 goto out;
864 } 880 }
865 nheads += headcount; 881 nvq->done_idx += headcount;
866 if (nheads > VHOST_RX_BATCH) { 882 if (nvq->done_idx > VHOST_RX_BATCH)
867 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, 883 vhost_rx_signal_used(nvq);
868 nheads);
869 nheads = 0;
870 }
871 if (unlikely(vq_log)) 884 if (unlikely(vq_log))
872 vhost_log_write(vq, vq_log, log, vhost_len); 885 vhost_log_write(vq, vq_log, log, vhost_len);
873 total_len += vhost_len; 886 total_len += vhost_len;
@@ -878,9 +891,7 @@ static void handle_rx(struct vhost_net *net)
878 } 891 }
879 vhost_net_enable_vq(net, vq); 892 vhost_net_enable_vq(net, vq);
880out: 893out:
881 if (nheads) 894 vhost_rx_signal_used(nvq);
882 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
883 nheads);
884 mutex_unlock(&vq->mutex); 895 mutex_unlock(&vq->mutex);
885} 896}
886 897
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f3bd8e941224..f0be5f35ab28 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
981{ 981{
982 int ret = 0; 982 int ret = 0;
983 983
984 mutex_lock(&dev->mutex);
984 vhost_dev_lock_vqs(dev); 985 vhost_dev_lock_vqs(dev);
985 switch (msg->type) { 986 switch (msg->type) {
986 case VHOST_IOTLB_UPDATE: 987 case VHOST_IOTLB_UPDATE:
@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
1016 } 1017 }
1017 1018
1018 vhost_dev_unlock_vqs(dev); 1019 vhost_dev_unlock_vqs(dev);
1020 mutex_unlock(&dev->mutex);
1021
1019 return ret; 1022 return ret;
1020} 1023}
1021ssize_t vhost_chr_write_iter(struct vhost_dev *dev, 1024ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index e1c60899fdbc..a6f9ba85dc4b 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -351,7 +351,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
351 * physical address */ 351 * physical address */
352 phys = xen_bus_to_phys(dev_addr); 352 phys = xen_bus_to_phys(dev_addr);
353 353
354 if (((dev_addr + size - 1 > dma_mask)) || 354 if (((dev_addr + size - 1 <= dma_mask)) ||
355 range_straddles_page_boundary(phys, size)) 355 range_straddles_page_boundary(phys, size))
356 xen_destroy_contiguous_region(phys, order); 356 xen_destroy_contiguous_region(phys, order);
357 357
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index d8aa0ae3d037..41c5749f4db7 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -201,14 +201,16 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
201 struct super_block *sb = dir->i_sb; 201 struct super_block *sb = dir->i_sb;
202 struct buffer_head *bh; 202 struct buffer_head *bh;
203 struct inode *inode = NULL; 203 struct inode *inode = NULL;
204 struct dentry *res;
204 205
205 pr_debug("%s(\"%pd\")\n", __func__, dentry); 206 pr_debug("%s(\"%pd\")\n", __func__, dentry);
206 207
207 affs_lock_dir(dir); 208 affs_lock_dir(dir);
208 bh = affs_find_entry(dir, dentry); 209 bh = affs_find_entry(dir, dentry);
209 affs_unlock_dir(dir); 210 if (IS_ERR(bh)) {
210 if (IS_ERR(bh)) 211 affs_unlock_dir(dir);
211 return ERR_CAST(bh); 212 return ERR_CAST(bh);
213 }
212 if (bh) { 214 if (bh) {
213 u32 ino = bh->b_blocknr; 215 u32 ino = bh->b_blocknr;
214 216
@@ -222,11 +224,12 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
222 } 224 }
223 affs_brelse(bh); 225 affs_brelse(bh);
224 inode = affs_iget(sb, ino); 226 inode = affs_iget(sb, ino);
225 if (IS_ERR(inode))
226 return ERR_CAST(inode);
227 } 227 }
228 d_add(dentry, inode); 228 res = d_splice_alias(inode, dentry);
229 return NULL; 229 if (!IS_ERR_OR_NULL(res))
230 res->d_fsdata = dentry->d_fsdata;
231 affs_unlock_dir(dir);
232 return res;
230} 233}
231 234
232int 235int
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 1992b0ffa543..81dfedb7879f 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -372,18 +372,14 @@ int afs_permission(struct inode *inode, int mask)
372 mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file"); 372 mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
373 373
374 if (S_ISDIR(inode->i_mode)) { 374 if (S_ISDIR(inode->i_mode)) {
375 if (mask & MAY_EXEC) { 375 if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) {
376 if (!(access & AFS_ACE_LOOKUP)) 376 if (!(access & AFS_ACE_LOOKUP))
377 goto permission_denied; 377 goto permission_denied;
378 } else if (mask & MAY_READ) { 378 }
379 if (!(access & AFS_ACE_LOOKUP)) 379 if (mask & MAY_WRITE) {
380 goto permission_denied;
381 } else if (mask & MAY_WRITE) {
382 if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */ 380 if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */
383 AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */ 381 AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */
384 goto permission_denied; 382 goto permission_denied;
385 } else {
386 BUG();
387 } 383 }
388 } else { 384 } else {
389 if (!(access & AFS_ACE_LOOKUP)) 385 if (!(access & AFS_ACE_LOOKUP))
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 1ed7e2fd2f35..c3b740813fc7 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -23,7 +23,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
23 struct afs_uvldbentry__xdr *uvldb; 23 struct afs_uvldbentry__xdr *uvldb;
24 struct afs_vldb_entry *entry; 24 struct afs_vldb_entry *entry;
25 bool new_only = false; 25 bool new_only = false;
26 u32 tmp, nr_servers; 26 u32 tmp, nr_servers, vlflags;
27 int i, ret; 27 int i, ret;
28 28
29 _enter(""); 29 _enter("");
@@ -55,6 +55,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
55 new_only = true; 55 new_only = true;
56 } 56 }
57 57
58 vlflags = ntohl(uvldb->flags);
58 for (i = 0; i < nr_servers; i++) { 59 for (i = 0; i < nr_servers; i++) {
59 struct afs_uuid__xdr *xdr; 60 struct afs_uuid__xdr *xdr;
60 struct afs_uuid *uuid; 61 struct afs_uuid *uuid;
@@ -64,12 +65,13 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
64 if (tmp & AFS_VLSF_DONTUSE || 65 if (tmp & AFS_VLSF_DONTUSE ||
65 (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) 66 (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
66 continue; 67 continue;
67 if (tmp & AFS_VLSF_RWVOL) 68 if (tmp & AFS_VLSF_RWVOL) {
68 entry->fs_mask[i] |= AFS_VOL_VTM_RW; 69 entry->fs_mask[i] |= AFS_VOL_VTM_RW;
70 if (vlflags & AFS_VLF_BACKEXISTS)
71 entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
72 }
69 if (tmp & AFS_VLSF_ROVOL) 73 if (tmp & AFS_VLSF_ROVOL)
70 entry->fs_mask[i] |= AFS_VOL_VTM_RO; 74 entry->fs_mask[i] |= AFS_VOL_VTM_RO;
71 if (tmp & AFS_VLSF_BACKVOL)
72 entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
73 if (!entry->fs_mask[i]) 75 if (!entry->fs_mask[i])
74 continue; 76 continue;
75 77
@@ -89,15 +91,14 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
89 for (i = 0; i < AFS_MAXTYPES; i++) 91 for (i = 0; i < AFS_MAXTYPES; i++)
90 entry->vid[i] = ntohl(uvldb->volumeId[i]); 92 entry->vid[i] = ntohl(uvldb->volumeId[i]);
91 93
92 tmp = ntohl(uvldb->flags); 94 if (vlflags & AFS_VLF_RWEXISTS)
93 if (tmp & AFS_VLF_RWEXISTS)
94 __set_bit(AFS_VLDB_HAS_RW, &entry->flags); 95 __set_bit(AFS_VLDB_HAS_RW, &entry->flags);
95 if (tmp & AFS_VLF_ROEXISTS) 96 if (vlflags & AFS_VLF_ROEXISTS)
96 __set_bit(AFS_VLDB_HAS_RO, &entry->flags); 97 __set_bit(AFS_VLDB_HAS_RO, &entry->flags);
97 if (tmp & AFS_VLF_BACKEXISTS) 98 if (vlflags & AFS_VLF_BACKEXISTS)
98 __set_bit(AFS_VLDB_HAS_BAK, &entry->flags); 99 __set_bit(AFS_VLDB_HAS_BAK, &entry->flags);
99 100
100 if (!(tmp & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) { 101 if (!(vlflags & (AFS_VLF_RWEXISTS | AFS_VLF_ROEXISTS | AFS_VLF_BACKEXISTS))) {
101 entry->error = -ENOMEDIUM; 102 entry->error = -ENOMEDIUM;
102 __set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags); 103 __set_bit(AFS_VLDB_QUERY_ERROR, &entry->flags);
103 } 104 }
diff --git a/fs/aio.c b/fs/aio.c
index 88d7927ffbc6..49f53516eef0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -634,9 +634,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
634 while (!list_empty(&ctx->active_reqs)) { 634 while (!list_empty(&ctx->active_reqs)) {
635 req = list_first_entry(&ctx->active_reqs, 635 req = list_first_entry(&ctx->active_reqs,
636 struct aio_kiocb, ki_list); 636 struct aio_kiocb, ki_list);
637
638 list_del_init(&req->ki_list);
639 kiocb_cancel(req); 637 kiocb_cancel(req);
638 list_del_init(&req->ki_list);
640 } 639 }
641 640
642 spin_unlock_irq(&ctx->ctx_lock); 641 spin_unlock_irq(&ctx->ctx_lock);
@@ -1078,8 +1077,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1078 1077
1079 ctx = rcu_dereference(table->table[id]); 1078 ctx = rcu_dereference(table->table[id]);
1080 if (ctx && ctx->user_id == ctx_id) { 1079 if (ctx && ctx->user_id == ctx_id) {
1081 percpu_ref_get(&ctx->users); 1080 if (percpu_ref_tryget_live(&ctx->users))
1082 ret = ctx; 1081 ret = ctx;
1083 } 1082 }
1084out: 1083out:
1085 rcu_read_unlock(); 1084 rcu_read_unlock();
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index af2832aaeec5..4700b4534439 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -198,23 +198,16 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
198 198
199 if (ret == BEFS_BT_NOT_FOUND) { 199 if (ret == BEFS_BT_NOT_FOUND) {
200 befs_debug(sb, "<--- %s %pd not found", __func__, dentry); 200 befs_debug(sb, "<--- %s %pd not found", __func__, dentry);
201 d_add(dentry, NULL); 201 inode = NULL;
202 return ERR_PTR(-ENOENT);
203
204 } else if (ret != BEFS_OK || offset == 0) { 202 } else if (ret != BEFS_OK || offset == 0) {
205 befs_error(sb, "<--- %s Error", __func__); 203 befs_error(sb, "<--- %s Error", __func__);
206 return ERR_PTR(-ENODATA); 204 inode = ERR_PTR(-ENODATA);
205 } else {
206 inode = befs_iget(dir->i_sb, (ino_t) offset);
207 } 207 }
208
209 inode = befs_iget(dir->i_sb, (ino_t) offset);
210 if (IS_ERR(inode))
211 return ERR_CAST(inode);
212
213 d_add(dentry, inode);
214
215 befs_debug(sb, "<--- %s", __func__); 208 befs_debug(sb, "<--- %s", __func__);
216 209
217 return NULL; 210 return d_splice_alias(inode, dentry);
218} 211}
219 212
220static int 213static int
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8e604e7071f1..0b86cf10cf2a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6586,8 +6586,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6586 goto out_unlock_inode; 6586 goto out_unlock_inode;
6587 } else { 6587 } else {
6588 btrfs_update_inode(trans, root, inode); 6588 btrfs_update_inode(trans, root, inode);
6589 unlock_new_inode(inode); 6589 d_instantiate_new(dentry, inode);
6590 d_instantiate(dentry, inode);
6591 } 6590 }
6592 6591
6593out_unlock: 6592out_unlock:
@@ -6663,8 +6662,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
6663 goto out_unlock_inode; 6662 goto out_unlock_inode;
6664 6663
6665 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 6664 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6666 unlock_new_inode(inode); 6665 d_instantiate_new(dentry, inode);
6667 d_instantiate(dentry, inode);
6668 6666
6669out_unlock: 6667out_unlock:
6670 btrfs_end_transaction(trans); 6668 btrfs_end_transaction(trans);
@@ -6809,12 +6807,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6809 if (err) 6807 if (err)
6810 goto out_fail_inode; 6808 goto out_fail_inode;
6811 6809
6812 d_instantiate(dentry, inode); 6810 d_instantiate_new(dentry, inode);
6813 /*
6814 * mkdir is special. We're unlocking after we call d_instantiate
6815 * to avoid a race with nfsd calling d_instantiate.
6816 */
6817 unlock_new_inode(inode);
6818 drop_on_err = 0; 6811 drop_on_err = 0;
6819 6812
6820out_fail: 6813out_fail:
@@ -9124,7 +9117,8 @@ static int btrfs_truncate(struct inode *inode, bool skip_writeback)
9124 BTRFS_EXTENT_DATA_KEY); 9117 BTRFS_EXTENT_DATA_KEY);
9125 trans->block_rsv = &fs_info->trans_block_rsv; 9118 trans->block_rsv = &fs_info->trans_block_rsv;
9126 if (ret != -ENOSPC && ret != -EAGAIN) { 9119 if (ret != -ENOSPC && ret != -EAGAIN) {
9127 err = ret; 9120 if (ret < 0)
9121 err = ret;
9128 break; 9122 break;
9129 } 9123 }
9130 9124
@@ -10257,8 +10251,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
10257 goto out_unlock_inode; 10251 goto out_unlock_inode;
10258 } 10252 }
10259 10253
10260 unlock_new_inode(inode); 10254 d_instantiate_new(dentry, inode);
10261 d_instantiate(dentry, inode);
10262 10255
10263out_unlock: 10256out_unlock:
10264 btrfs_end_transaction(trans); 10257 btrfs_end_transaction(trans);
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 0daa1e3fe0df..ab0bbe93b398 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -572,6 +572,11 @@ lookup_again:
572 if (ret < 0) 572 if (ret < 0)
573 goto create_error; 573 goto create_error;
574 574
575 if (unlikely(d_unhashed(next))) {
576 dput(next);
577 inode_unlock(d_inode(dir));
578 goto lookup_again;
579 }
575 ASSERT(d_backing_inode(next)); 580 ASSERT(d_backing_inode(next));
576 581
577 _debug("mkdir -> %p{%p{ino=%lu}}", 582 _debug("mkdir -> %p{%p{ino=%lu}}",
@@ -764,6 +769,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
764 /* search the current directory for the element name */ 769 /* search the current directory for the element name */
765 inode_lock(d_inode(dir)); 770 inode_lock(d_inode(dir));
766 771
772retry:
767 start = jiffies; 773 start = jiffies;
768 subdir = lookup_one_len(dirname, dir, strlen(dirname)); 774 subdir = lookup_one_len(dirname, dir, strlen(dirname));
769 cachefiles_hist(cachefiles_lookup_histogram, start); 775 cachefiles_hist(cachefiles_lookup_histogram, start);
@@ -793,6 +799,10 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
793 if (ret < 0) 799 if (ret < 0)
794 goto mkdir_error; 800 goto mkdir_error;
795 801
802 if (unlikely(d_unhashed(subdir))) {
803 dput(subdir);
804 goto retry;
805 }
796 ASSERT(d_backing_inode(subdir)); 806 ASSERT(d_backing_inode(subdir));
797 807
798 _debug("mkdir -> %p{%p{ino=%lu}}", 808 _debug("mkdir -> %p{%p{ino=%lu}}",
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 017b0ab19bc4..124b093d14e5 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -492,7 +492,7 @@ static void cramfs_kill_sb(struct super_block *sb)
492{ 492{
493 struct cramfs_sb_info *sbi = CRAMFS_SB(sb); 493 struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
494 494
495 if (IS_ENABLED(CCONFIG_CRAMFS_MTD) && sb->s_mtd) { 495 if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
496 if (sbi && sbi->mtd_point_size) 496 if (sbi && sbi->mtd_point_size)
497 mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size); 497 mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
498 kill_mtd_super(sb); 498 kill_mtd_super(sb);
diff --git a/fs/dcache.c b/fs/dcache.c
index 86d2de63461e..2acfc69878f5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1899,6 +1899,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
1899} 1899}
1900EXPORT_SYMBOL(d_instantiate); 1900EXPORT_SYMBOL(d_instantiate);
1901 1901
1902/*
1903 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1904 * with lockdep-related part of unlock_new_inode() done before
1905 * anything else. Use that instead of open-coding d_instantiate()/
1906 * unlock_new_inode() combinations.
1907 */
1908void d_instantiate_new(struct dentry *entry, struct inode *inode)
1909{
1910 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1911 BUG_ON(!inode);
1912 lockdep_annotate_inode_mutex_key(inode);
1913 security_d_instantiate(entry, inode);
1914 spin_lock(&inode->i_lock);
1915 __d_instantiate(entry, inode);
1916 WARN_ON(!(inode->i_state & I_NEW));
1917 inode->i_state &= ~I_NEW;
1918 smp_mb();
1919 wake_up_bit(&inode->i_state, __I_NEW);
1920 spin_unlock(&inode->i_lock);
1921}
1922EXPORT_SYMBOL(d_instantiate_new);
1923
1902/** 1924/**
1903 * d_instantiate_no_diralias - instantiate a non-aliased dentry 1925 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1904 * @entry: dentry to complete 1926 * @entry: dentry to complete
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 97d17eaeba07..49121e5a8de2 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -283,8 +283,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
283 iget_failed(ecryptfs_inode); 283 iget_failed(ecryptfs_inode);
284 goto out; 284 goto out;
285 } 285 }
286 unlock_new_inode(ecryptfs_inode); 286 d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
287 d_instantiate(ecryptfs_dentry, ecryptfs_inode);
288out: 287out:
289 return rc; 288 return rc;
290} 289}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 1e01fabef130..71635909df3b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1264,21 +1264,11 @@ do_indirects:
1264 1264
1265static void ext2_truncate_blocks(struct inode *inode, loff_t offset) 1265static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
1266{ 1266{
1267 /*
1268 * XXX: it seems like a bug here that we don't allow
1269 * IS_APPEND inode to have blocks-past-i_size trimmed off.
1270 * review and fix this.
1271 *
1272 * Also would be nice to be able to handle IO errors and such,
1273 * but that's probably too much to ask.
1274 */
1275 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1267 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1276 S_ISLNK(inode->i_mode))) 1268 S_ISLNK(inode->i_mode)))
1277 return; 1269 return;
1278 if (ext2_inode_is_fast_symlink(inode)) 1270 if (ext2_inode_is_fast_symlink(inode))
1279 return; 1271 return;
1280 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1281 return;
1282 1272
1283 dax_sem_down_write(EXT2_I(inode)); 1273 dax_sem_down_write(EXT2_I(inode));
1284 __ext2_truncate_blocks(inode, offset); 1274 __ext2_truncate_blocks(inode, offset);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 55f7caadb093..152453a91877 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -41,8 +41,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
41{ 41{
42 int err = ext2_add_link(dentry, inode); 42 int err = ext2_add_link(dentry, inode);
43 if (!err) { 43 if (!err) {
44 unlock_new_inode(inode); 44 d_instantiate_new(dentry, inode);
45 d_instantiate(dentry, inode);
46 return 0; 45 return 0;
47 } 46 }
48 inode_dec_link_count(inode); 47 inode_dec_link_count(inode);
@@ -255,8 +254,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
255 if (err) 254 if (err)
256 goto out_fail; 255 goto out_fail;
257 256
258 unlock_new_inode(inode); 257 d_instantiate_new(dentry, inode);
259 d_instantiate(dentry, inode);
260out: 258out:
261 return err; 259 return err;
262 260
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b1f21e3a0763..4a09063ce1d2 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2411,8 +2411,7 @@ static int ext4_add_nondir(handle_t *handle,
2411 int err = ext4_add_entry(handle, dentry, inode); 2411 int err = ext4_add_entry(handle, dentry, inode);
2412 if (!err) { 2412 if (!err) {
2413 ext4_mark_inode_dirty(handle, inode); 2413 ext4_mark_inode_dirty(handle, inode);
2414 unlock_new_inode(inode); 2414 d_instantiate_new(dentry, inode);
2415 d_instantiate(dentry, inode);
2416 return 0; 2415 return 0;
2417 } 2416 }
2418 drop_nlink(inode); 2417 drop_nlink(inode);
@@ -2651,8 +2650,7 @@ out_clear_inode:
2651 err = ext4_mark_inode_dirty(handle, dir); 2650 err = ext4_mark_inode_dirty(handle, dir);
2652 if (err) 2651 if (err)
2653 goto out_clear_inode; 2652 goto out_clear_inode;
2654 unlock_new_inode(inode); 2653 d_instantiate_new(dentry, inode);
2655 d_instantiate(dentry, inode);
2656 if (IS_DIRSYNC(dir)) 2654 if (IS_DIRSYNC(dir))
2657 ext4_handle_sync(handle); 2655 ext4_handle_sync(handle);
2658 2656
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index d5098efe577c..75e37fd720b2 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -294,8 +294,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
294 294
295 alloc_nid_done(sbi, ino); 295 alloc_nid_done(sbi, ino);
296 296
297 d_instantiate(dentry, inode); 297 d_instantiate_new(dentry, inode);
298 unlock_new_inode(inode);
299 298
300 if (IS_DIRSYNC(dir)) 299 if (IS_DIRSYNC(dir))
301 f2fs_sync_fs(sbi->sb, 1); 300 f2fs_sync_fs(sbi->sb, 1);
@@ -597,8 +596,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
597 err = page_symlink(inode, disk_link.name, disk_link.len); 596 err = page_symlink(inode, disk_link.name, disk_link.len);
598 597
599err_out: 598err_out:
600 d_instantiate(dentry, inode); 599 d_instantiate_new(dentry, inode);
601 unlock_new_inode(inode);
602 600
603 /* 601 /*
604 * Let's flush symlink data in order to avoid broken symlink as much as 602 * Let's flush symlink data in order to avoid broken symlink as much as
@@ -661,8 +659,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
661 659
662 alloc_nid_done(sbi, inode->i_ino); 660 alloc_nid_done(sbi, inode->i_ino);
663 661
664 d_instantiate(dentry, inode); 662 d_instantiate_new(dentry, inode);
665 unlock_new_inode(inode);
666 663
667 if (IS_DIRSYNC(dir)) 664 if (IS_DIRSYNC(dir))
668 f2fs_sync_fs(sbi->sb, 1); 665 f2fs_sync_fs(sbi->sb, 1);
@@ -713,8 +710,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
713 710
714 alloc_nid_done(sbi, inode->i_ino); 711 alloc_nid_done(sbi, inode->i_ino);
715 712
716 d_instantiate(dentry, inode); 713 d_instantiate_new(dentry, inode);
717 unlock_new_inode(inode);
718 714
719 if (IS_DIRSYNC(dir)) 715 if (IS_DIRSYNC(dir))
720 f2fs_sync_fs(sbi->sb, 1); 716 f2fs_sync_fs(sbi->sb, 1);
diff --git a/fs/inode.c b/fs/inode.c
index 13ceb98c3bd3..3b55391072f3 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -178,6 +178,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
178 mapping->a_ops = &empty_aops; 178 mapping->a_ops = &empty_aops;
179 mapping->host = inode; 179 mapping->host = inode;
180 mapping->flags = 0; 180 mapping->flags = 0;
181 mapping->wb_err = 0;
181 atomic_set(&mapping->i_mmap_writable, 0); 182 atomic_set(&mapping->i_mmap_writable, 0);
182 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); 183 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
183 mapping->private_data = NULL; 184 mapping->private_data = NULL;
diff --git a/fs/internal.h b/fs/internal.h
index e08972db0303..980d005b21b4 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -125,6 +125,7 @@ int do_fchmodat(int dfd, const char __user *filename, umode_t mode);
125int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, 125int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
126 int flag); 126 int flag);
127 127
128extern int open_check_o_direct(struct file *f);
128extern int vfs_open(const struct path *, struct file *, const struct cred *); 129extern int vfs_open(const struct path *, struct file *, const struct cred *);
129extern struct file *filp_clone_open(struct file *); 130extern struct file *filp_clone_open(struct file *);
130 131
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 0a754f38462e..e5a6deb38e1e 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
209 __func__, inode->i_ino, inode->i_mode, inode->i_nlink, 209 __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
210 f->inocache->pino_nlink, inode->i_mapping->nrpages); 210 f->inocache->pino_nlink, inode->i_mapping->nrpages);
211 211
212 unlock_new_inode(inode); 212 d_instantiate_new(dentry, inode);
213 d_instantiate(dentry, inode);
214 return 0; 213 return 0;
215 214
216 fail: 215 fail:
@@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
430 mutex_unlock(&dir_f->sem); 429 mutex_unlock(&dir_f->sem);
431 jffs2_complete_reservation(c); 430 jffs2_complete_reservation(c);
432 431
433 unlock_new_inode(inode); 432 d_instantiate_new(dentry, inode);
434 d_instantiate(dentry, inode);
435 return 0; 433 return 0;
436 434
437 fail: 435 fail:
@@ -575,8 +573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
575 mutex_unlock(&dir_f->sem); 573 mutex_unlock(&dir_f->sem);
576 jffs2_complete_reservation(c); 574 jffs2_complete_reservation(c);
577 575
578 unlock_new_inode(inode); 576 d_instantiate_new(dentry, inode);
579 d_instantiate(dentry, inode);
580 return 0; 577 return 0;
581 578
582 fail: 579 fail:
@@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
747 mutex_unlock(&dir_f->sem); 744 mutex_unlock(&dir_f->sem);
748 jffs2_complete_reservation(c); 745 jffs2_complete_reservation(c);
749 746
750 unlock_new_inode(inode); 747 d_instantiate_new(dentry, inode);
751 d_instantiate(dentry, inode);
752 return 0; 748 return 0;
753 749
754 fail: 750 fail:
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index b41596d71858..56c3fcbfe80e 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
178 unlock_new_inode(ip); 178 unlock_new_inode(ip);
179 iput(ip); 179 iput(ip);
180 } else { 180 } else {
181 unlock_new_inode(ip); 181 d_instantiate_new(dentry, ip);
182 d_instantiate(dentry, ip);
183 } 182 }
184 183
185 out2: 184 out2:
@@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
313 unlock_new_inode(ip); 312 unlock_new_inode(ip);
314 iput(ip); 313 iput(ip);
315 } else { 314 } else {
316 unlock_new_inode(ip); 315 d_instantiate_new(dentry, ip);
317 d_instantiate(dentry, ip);
318 } 316 }
319 317
320 out2: 318 out2:
@@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
1059 unlock_new_inode(ip); 1057 unlock_new_inode(ip);
1060 iput(ip); 1058 iput(ip);
1061 } else { 1059 } else {
1062 unlock_new_inode(ip); 1060 d_instantiate_new(dentry, ip);
1063 d_instantiate(dentry, ip);
1064 } 1061 }
1065 1062
1066 out2: 1063 out2:
@@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
1447 unlock_new_inode(ip); 1444 unlock_new_inode(ip);
1448 iput(ip); 1445 iput(ip);
1449 } else { 1446 } else {
1450 unlock_new_inode(ip); 1447 d_instantiate_new(dentry, ip);
1451 d_instantiate(dentry, ip);
1452 } 1448 }
1453 1449
1454 out1: 1450 out1:
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 26dd9a50f383..ff2716f9322e 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -316,6 +316,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
316 316
317 info->root = root; 317 info->root = root;
318 info->ns = ns; 318 info->ns = ns;
319 INIT_LIST_HEAD(&info->node);
319 320
320 sb = sget_userns(fs_type, kernfs_test_super, kernfs_set_super, flags, 321 sb = sget_userns(fs_type, kernfs_test_super, kernfs_set_super, flags,
321 &init_user_ns, info); 322 &init_user_ns, info);
diff --git a/fs/namei.c b/fs/namei.c
index 186bd2464fd5..4eb916996345 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3367,7 +3367,9 @@ finish_open_created:
3367 goto out; 3367 goto out;
3368 *opened |= FILE_OPENED; 3368 *opened |= FILE_OPENED;
3369opened: 3369opened:
3370 error = ima_file_check(file, op->acc_mode, *opened); 3370 error = open_check_o_direct(file);
3371 if (!error)
3372 error = ima_file_check(file, op->acc_mode, *opened);
3371 if (!error && will_truncate) 3373 if (!error && will_truncate)
3372 error = handle_truncate(file); 3374 error = handle_truncate(file);
3373out: 3375out:
@@ -3447,6 +3449,9 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags,
3447 error = finish_open(file, child, NULL, opened); 3449 error = finish_open(file, child, NULL, opened);
3448 if (error) 3450 if (error)
3449 goto out2; 3451 goto out2;
3452 error = open_check_o_direct(file);
3453 if (error)
3454 fput(file);
3450out2: 3455out2:
3451 mnt_drop_write(path.mnt); 3456 mnt_drop_write(path.mnt);
3452out: 3457out:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2410b093a2e6..b0555d7d8200 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1201,6 +1201,28 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
1201 break; 1201 break;
1202 case S_IFDIR: 1202 case S_IFDIR:
1203 host_err = vfs_mkdir(dirp, dchild, iap->ia_mode); 1203 host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
1204 if (!host_err && unlikely(d_unhashed(dchild))) {
1205 struct dentry *d;
1206 d = lookup_one_len(dchild->d_name.name,
1207 dchild->d_parent,
1208 dchild->d_name.len);
1209 if (IS_ERR(d)) {
1210 host_err = PTR_ERR(d);
1211 break;
1212 }
1213 if (unlikely(d_is_negative(d))) {
1214 dput(d);
1215 err = nfserr_serverfault;
1216 goto out;
1217 }
1218 dput(resfhp->fh_dentry);
1219 resfhp->fh_dentry = dget(d);
1220 err = fh_update(resfhp);
1221 dput(dchild);
1222 dchild = d;
1223 if (err)
1224 goto out;
1225 }
1204 break; 1226 break;
1205 case S_IFCHR: 1227 case S_IFCHR:
1206 case S_IFBLK: 1228 case S_IFBLK:
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 1a2894aa0194..dd52d3f82e8d 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
46 int err = nilfs_add_link(dentry, inode); 46 int err = nilfs_add_link(dentry, inode);
47 47
48 if (!err) { 48 if (!err) {
49 d_instantiate(dentry, inode); 49 d_instantiate_new(dentry, inode);
50 unlock_new_inode(inode);
51 return 0; 50 return 0;
52 } 51 }
53 inode_dec_link_count(inode); 52 inode_dec_link_count(inode);
@@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
243 goto out_fail; 242 goto out_fail;
244 243
245 nilfs_mark_inode_dirty(inode); 244 nilfs_mark_inode_dirty(inode);
246 d_instantiate(dentry, inode); 245 d_instantiate_new(dentry, inode);
247 unlock_new_inode(inode);
248out: 246out:
249 if (!err) 247 if (!err)
250 err = nilfs_transaction_commit(dir->i_sb); 248 err = nilfs_transaction_commit(dir->i_sb);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 91a8889abf9b..ea8c551bcd7e 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -570,16 +570,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
570 current_page, vec_len, vec_start); 570 current_page, vec_len, vec_start);
571 571
572 len = bio_add_page(bio, page, vec_len, vec_start); 572 len = bio_add_page(bio, page, vec_len, vec_start);
573 if (len != vec_len) { 573 if (len != vec_len) break;
574 mlog(ML_ERROR, "Adding page[%d] to bio failed, "
575 "page %p, len %d, vec_len %u, vec_start %u, "
576 "bi_sector %llu\n", current_page, page, len,
577 vec_len, vec_start,
578 (unsigned long long)bio->bi_iter.bi_sector);
579 bio_put(bio);
580 bio = ERR_PTR(-EIO);
581 return bio;
582 }
583 574
584 cs += vec_len / (PAGE_SIZE/spp); 575 cs += vec_len / (PAGE_SIZE/spp);
585 vec_start = 0; 576 vec_start = 0;
diff --git a/fs/open.c b/fs/open.c
index c5ee7cd60424..d0e955b558ad 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -724,6 +724,16 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
724 return ksys_fchown(fd, user, group); 724 return ksys_fchown(fd, user, group);
725} 725}
726 726
727int open_check_o_direct(struct file *f)
728{
729 /* NB: we're sure to have correct a_ops only after f_op->open */
730 if (f->f_flags & O_DIRECT) {
731 if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO)
732 return -EINVAL;
733 }
734 return 0;
735}
736
727static int do_dentry_open(struct file *f, 737static int do_dentry_open(struct file *f,
728 struct inode *inode, 738 struct inode *inode,
729 int (*open)(struct inode *, struct file *), 739 int (*open)(struct inode *, struct file *),
@@ -745,7 +755,7 @@ static int do_dentry_open(struct file *f,
745 if (unlikely(f->f_flags & O_PATH)) { 755 if (unlikely(f->f_flags & O_PATH)) {
746 f->f_mode = FMODE_PATH; 756 f->f_mode = FMODE_PATH;
747 f->f_op = &empty_fops; 757 f->f_op = &empty_fops;
748 goto done; 758 return 0;
749 } 759 }
750 760
751 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { 761 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
@@ -798,12 +808,7 @@ static int do_dentry_open(struct file *f,
798 f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); 808 f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
799 809
800 file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); 810 file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
801done: 811
802 /* NB: we're sure to have correct a_ops only after f_op->open */
803 error = -EINVAL;
804 if ((f->f_flags & O_DIRECT) &&
805 (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO))
806 goto out_fput;
807 return 0; 812 return 0;
808 813
809cleanup_all: 814cleanup_all:
@@ -818,9 +823,6 @@ cleanup_file:
818 f->f_path.dentry = NULL; 823 f->f_path.dentry = NULL;
819 f->f_inode = NULL; 824 f->f_inode = NULL;
820 return error; 825 return error;
821out_fput:
822 fput(f);
823 return error;
824} 826}
825 827
826/** 828/**
@@ -918,14 +920,20 @@ struct file *dentry_open(const struct path *path, int flags,
918 BUG_ON(!path->mnt); 920 BUG_ON(!path->mnt);
919 921
920 f = get_empty_filp(); 922 f = get_empty_filp();
921 if (IS_ERR(f)) 923 if (!IS_ERR(f)) {
922 return f; 924 f->f_flags = flags;
923 925 error = vfs_open(path, f, cred);
924 f->f_flags = flags; 926 if (!error) {
925 error = vfs_open(path, f, cred); 927 /* from now on we need fput() to dispose of f */
926 if (error) { 928 error = open_check_o_direct(f);
927 put_filp(f); 929 if (error) {
928 return ERR_PTR(error); 930 fput(f);
931 f = ERR_PTR(error);
932 }
933 } else {
934 put_filp(f);
935 f = ERR_PTR(error);
936 }
929 } 937 }
930 return f; 938 return f;
931} 939}
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 6e3134e6d98a..1b5707c44c3f 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -75,8 +75,7 @@ static int orangefs_create(struct inode *dir,
75 get_khandle_from_ino(inode), 75 get_khandle_from_ino(inode),
76 dentry); 76 dentry);
77 77
78 d_instantiate(dentry, inode); 78 d_instantiate_new(dentry, inode);
79 unlock_new_inode(inode);
80 orangefs_set_timeout(dentry); 79 orangefs_set_timeout(dentry);
81 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 80 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
82 ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; 81 ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
@@ -332,8 +331,7 @@ static int orangefs_symlink(struct inode *dir,
332 "Assigned symlink inode new number of %pU\n", 331 "Assigned symlink inode new number of %pU\n",
333 get_khandle_from_ino(inode)); 332 get_khandle_from_ino(inode));
334 333
335 d_instantiate(dentry, inode); 334 d_instantiate_new(dentry, inode);
336 unlock_new_inode(inode);
337 orangefs_set_timeout(dentry); 335 orangefs_set_timeout(dentry);
338 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 336 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
339 ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; 337 ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
@@ -402,8 +400,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
402 "Assigned dir inode new number of %pU\n", 400 "Assigned dir inode new number of %pU\n",
403 get_khandle_from_ino(inode)); 401 get_khandle_from_ino(inode));
404 402
405 d_instantiate(dentry, inode); 403 d_instantiate_new(dentry, inode);
406 unlock_new_inode(inode);
407 orangefs_set_timeout(dentry); 404 orangefs_set_timeout(dentry);
408 ORANGEFS_I(inode)->getattr_time = jiffies - 1; 405 ORANGEFS_I(inode)->getattr_time = jiffies - 1;
409 ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS; 406 ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ae2c807fd719..72391b3f6927 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -85,6 +85,7 @@
85#include <linux/delayacct.h> 85#include <linux/delayacct.h>
86#include <linux/seq_file.h> 86#include <linux/seq_file.h>
87#include <linux/pid_namespace.h> 87#include <linux/pid_namespace.h>
88#include <linux/prctl.h>
88#include <linux/ptrace.h> 89#include <linux/ptrace.h>
89#include <linux/tracehook.h> 90#include <linux/tracehook.h>
90#include <linux/string_helpers.h> 91#include <linux/string_helpers.h>
@@ -335,6 +336,30 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
335#ifdef CONFIG_SECCOMP 336#ifdef CONFIG_SECCOMP
336 seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode); 337 seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
337#endif 338#endif
339 seq_printf(m, "\nSpeculation_Store_Bypass:\t");
340 switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
341 case -EINVAL:
342 seq_printf(m, "unknown");
343 break;
344 case PR_SPEC_NOT_AFFECTED:
345 seq_printf(m, "not vulnerable");
346 break;
347 case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
348 seq_printf(m, "thread force mitigated");
349 break;
350 case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
351 seq_printf(m, "thread mitigated");
352 break;
353 case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
354 seq_printf(m, "thread vulnerable");
355 break;
356 case PR_SPEC_DISABLE:
357 seq_printf(m, "globally mitigated");
358 break;
359 default:
360 seq_printf(m, "vulnerable");
361 break;
362 }
338 seq_putc(m, '\n'); 363 seq_putc(m, '\n');
339} 364}
340 365
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index bd39a998843d..5089dac02660 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
687 reiserfs_update_inode_transaction(inode); 687 reiserfs_update_inode_transaction(inode);
688 reiserfs_update_inode_transaction(dir); 688 reiserfs_update_inode_transaction(dir);
689 689
690 unlock_new_inode(inode); 690 d_instantiate_new(dentry, inode);
691 d_instantiate(dentry, inode);
692 retval = journal_end(&th); 691 retval = journal_end(&th);
693 692
694out_failed: 693out_failed:
@@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
771 goto out_failed; 770 goto out_failed;
772 } 771 }
773 772
774 unlock_new_inode(inode); 773 d_instantiate_new(dentry, inode);
775 d_instantiate(dentry, inode);
776 retval = journal_end(&th); 774 retval = journal_end(&th);
777 775
778out_failed: 776out_failed:
@@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
871 /* the above add_entry did not update dir's stat data */ 869 /* the above add_entry did not update dir's stat data */
872 reiserfs_update_sd(&th, dir); 870 reiserfs_update_sd(&th, dir);
873 871
874 unlock_new_inode(inode); 872 d_instantiate_new(dentry, inode);
875 d_instantiate(dentry, inode);
876 retval = journal_end(&th); 873 retval = journal_end(&th);
877out_failed: 874out_failed:
878 reiserfs_write_unlock(dir->i_sb); 875 reiserfs_write_unlock(dir->i_sb);
@@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
1187 goto out_failed; 1184 goto out_failed;
1188 } 1185 }
1189 1186
1190 unlock_new_inode(inode); 1187 d_instantiate_new(dentry, inode);
1191 d_instantiate(dentry, inode);
1192 retval = journal_end(&th); 1188 retval = journal_end(&th);
1193out_failed: 1189out_failed:
1194 reiserfs_write_unlock(parent_dir->i_sb); 1190 reiserfs_write_unlock(parent_dir->i_sb);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index c6c27f1f9c98..4cc090b50cc5 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -709,11 +709,6 @@ void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
709 if (m->count + width >= m->size) 709 if (m->count + width >= m->size)
710 goto overflow; 710 goto overflow;
711 711
712 if (num < 10) {
713 m->buf[m->count++] = num + '0';
714 return;
715 }
716
717 len = num_to_str(m->buf + m->count, m->size - m->count, num, width); 712 len = num_to_str(m->buf + m->count, m->size - m->count, num, width);
718 if (!len) 713 if (!len)
719 goto overflow; 714 goto overflow;
diff --git a/fs/super.c b/fs/super.c
index 122c402049a2..4b5b562176d0 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -121,13 +121,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
121 sb = container_of(shrink, struct super_block, s_shrink); 121 sb = container_of(shrink, struct super_block, s_shrink);
122 122
123 /* 123 /*
124 * Don't call trylock_super as it is a potential 124 * We don't call trylock_super() here as it is a scalability bottleneck,
125 * scalability bottleneck. The counts could get updated 125 * so we're exposed to partial setup state. The shrinker rwsem does not
126 * between super_cache_count and super_cache_scan anyway. 126 * protect filesystem operations backing list_lru_shrink_count() or
127 * Call to super_cache_count with shrinker_rwsem held 127 * s_op->nr_cached_objects(). Counts can change between
128 * ensures the safety of call to list_lru_shrink_count() and 128 * super_cache_count and super_cache_scan, so we really don't need locks
129 * s_op->nr_cached_objects(). 129 * here.
130 *
131 * However, if we are currently mounting the superblock, the underlying
132 * filesystem might be in a state of partial construction and hence it
133 * is dangerous to access it. trylock_super() uses a SB_BORN check to
134 * avoid this situation, so do the same here. The memory barrier is
135 * matched with the one in mount_fs() as we don't hold locks here.
130 */ 136 */
137 if (!(sb->s_flags & SB_BORN))
138 return 0;
139 smp_rmb();
140
131 if (sb->s_op && sb->s_op->nr_cached_objects) 141 if (sb->s_op && sb->s_op->nr_cached_objects)
132 total_objects = sb->s_op->nr_cached_objects(sb, sc); 142 total_objects = sb->s_op->nr_cached_objects(sb, sc);
133 143
@@ -1272,6 +1282,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
1272 sb = root->d_sb; 1282 sb = root->d_sb;
1273 BUG_ON(!sb); 1283 BUG_ON(!sb);
1274 WARN_ON(!sb->s_bdi); 1284 WARN_ON(!sb->s_bdi);
1285
1286 /*
1287 * Write barrier is for super_cache_count(). We place it before setting
1288 * SB_BORN as the data dependency between the two functions is the
1289 * superblock structure contents that we just set up, not the SB_BORN
1290 * flag.
1291 */
1292 smp_wmb();
1275 sb->s_flags |= SB_BORN; 1293 sb->s_flags |= SB_BORN;
1276 1294
1277 error = security_sb_kern_mount(sb, flags, secdata); 1295 error = security_sb_kern_mount(sb, flags, secdata);
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index b428d317ae92..92682fcc41f6 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -25,7 +25,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
25{ 25{
26 struct dentry *root; 26 struct dentry *root;
27 void *ns; 27 void *ns;
28 bool new_sb; 28 bool new_sb = false;
29 29
30 if (!(flags & SB_KERNMOUNT)) { 30 if (!(flags & SB_KERNMOUNT)) {
31 if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET)) 31 if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
@@ -35,9 +35,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
35 ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); 35 ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
36 root = kernfs_mount_ns(fs_type, flags, sysfs_root, 36 root = kernfs_mount_ns(fs_type, flags, sysfs_root,
37 SYSFS_MAGIC, &new_sb, ns); 37 SYSFS_MAGIC, &new_sb, ns);
38 if (IS_ERR(root) || !new_sb) 38 if (!new_sb)
39 kobj_ns_drop(KOBJ_NS_TYPE_NET, ns); 39 kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
40 else if (new_sb) 40 else if (!IS_ERR(root))
41 root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE; 41 root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE;
42 42
43 return root; 43 return root;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 0458dd47e105..c586026508db 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -622,8 +622,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
622 if (fibh.sbh != fibh.ebh) 622 if (fibh.sbh != fibh.ebh)
623 brelse(fibh.ebh); 623 brelse(fibh.ebh);
624 brelse(fibh.sbh); 624 brelse(fibh.sbh);
625 unlock_new_inode(inode); 625 d_instantiate_new(dentry, inode);
626 d_instantiate(dentry, inode);
627 626
628 return 0; 627 return 0;
629} 628}
@@ -733,8 +732,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
733 inc_nlink(dir); 732 inc_nlink(dir);
734 dir->i_ctime = dir->i_mtime = current_time(dir); 733 dir->i_ctime = dir->i_mtime = current_time(dir);
735 mark_inode_dirty(dir); 734 mark_inode_dirty(dir);
736 unlock_new_inode(inode); 735 d_instantiate_new(dentry, inode);
737 d_instantiate(dentry, inode);
738 if (fibh.sbh != fibh.ebh) 736 if (fibh.sbh != fibh.ebh)
739 brelse(fibh.ebh); 737 brelse(fibh.ebh);
740 brelse(fibh.sbh); 738 brelse(fibh.sbh);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 32545cd00ceb..d5f43ba76c59 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -39,8 +39,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
39{ 39{
40 int err = ufs_add_link(dentry, inode); 40 int err = ufs_add_link(dentry, inode);
41 if (!err) { 41 if (!err) {
42 unlock_new_inode(inode); 42 d_instantiate_new(dentry, inode);
43 d_instantiate(dentry, inode);
44 return 0; 43 return 0;
45 } 44 }
46 inode_dec_link_count(inode); 45 inode_dec_link_count(inode);
@@ -193,8 +192,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
193 if (err) 192 if (err)
194 goto out_fail; 193 goto out_fail;
195 194
196 unlock_new_inode(inode); 195 d_instantiate_new(dentry, inode);
197 d_instantiate(dentry, inode);
198 return 0; 196 return 0;
199 197
200out_fail: 198out_fail:
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index dd2a8cf7d20b..ccb5aa8468e0 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -151,7 +151,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
151 struct drm_encoder *encoder, 151 struct drm_encoder *encoder,
152 const struct dw_hdmi_plat_data *plat_data); 152 const struct dw_hdmi_plat_data *plat_data);
153 153
154void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense); 154void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
155 155
156void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate); 156void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
157void dw_hdmi_audio_enable(struct dw_hdmi *hdmi); 157void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1ee46f492267..acf5e8df3504 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -105,6 +105,14 @@
105 */ 105 */
106 106
107/* 107/*
108 * Allocation and deallocation of bitmap.
109 * Provided in lib/bitmap.c to avoid circular dependency.
110 */
111extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
112extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
113extern void bitmap_free(const unsigned long *bitmap);
114
115/*
108 * lib/bitmap.c provides these functions: 116 * lib/bitmap.c provides these functions:
109 */ 117 */
110 118
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7e61c395fddf..df36b1b08af0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -142,10 +142,11 @@ struct bpf_verifier_state_list {
142struct bpf_insn_aux_data { 142struct bpf_insn_aux_data {
143 union { 143 union {
144 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ 144 enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
145 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ 145 unsigned long map_state; /* pointer/poison value for maps */
146 s32 call_imm; /* saved imm field of call insn */ 146 s32 call_imm; /* saved imm field of call insn */
147 }; 147 };
148 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ 148 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
149 int sanitize_stack_off; /* stack slot to be cleared */
149 bool seen; /* this insn was processed by the verifier */ 150 bool seen; /* this insn was processed by the verifier */
150}; 151};
151 152
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7b01bc11c692..a97a63eef59f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
53 struct device_attribute *attr, char *buf); 53 struct device_attribute *attr, char *buf);
54extern ssize_t cpu_show_spectre_v2(struct device *dev, 54extern ssize_t cpu_show_spectre_v2(struct device *dev,
55 struct device_attribute *attr, char *buf); 55 struct device_attribute *attr, char *buf);
56extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
57 struct device_attribute *attr, char *buf);
56 58
57extern __printf(4, 5) 59extern __printf(4, 5)
58struct device *cpu_device_create(struct device *parent, void *drvdata, 60struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 94acbde17bb1..66c6e17e61e5 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -224,6 +224,7 @@ extern seqlock_t rename_lock;
224 * These are the low-level FS interfaces to the dcache.. 224 * These are the low-level FS interfaces to the dcache..
225 */ 225 */
226extern void d_instantiate(struct dentry *, struct inode *); 226extern void d_instantiate(struct dentry *, struct inode *);
227extern void d_instantiate_new(struct dentry *, struct inode *);
227extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); 228extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
228extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); 229extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
229extern int d_instantiate_no_diralias(struct dentry *, struct inode *); 230extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1a4582b44d32..fc5ab85278d5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -464,7 +464,7 @@ static inline struct page *
464__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) 464__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
465{ 465{
466 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); 466 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
467 VM_WARN_ON(!node_online(nid)); 467 VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
468 468
469 return __alloc_pages(gfp_mask, order, nid); 469 return __alloc_pages(gfp_mask, order, nid);
470} 470}
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 7160df54a6fe..3f84aeb81e48 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -2,6 +2,8 @@
2#ifndef _GPIO_KEYS_H 2#ifndef _GPIO_KEYS_H
3#define _GPIO_KEYS_H 3#define _GPIO_KEYS_H
4 4
5#include <linux/types.h>
6
5struct device; 7struct device;
6 8
7/** 9/**
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index b9e22b7e2f28..d1171db23742 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
53 int (*request_update)(struct iio_buffer *buffer); 53 int (*request_update)(struct iio_buffer *buffer);
54 54
55 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd); 55 int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
56 int (*set_length)(struct iio_buffer *buffer, int length); 56 int (*set_length)(struct iio_buffer *buffer, unsigned int length);
57 57
58 int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); 58 int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
59 int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev); 59 int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
@@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
72 */ 72 */
73struct iio_buffer { 73struct iio_buffer {
74 /** @length: Number of datums in buffer. */ 74 /** @length: Number of datums in buffer. */
75 int length; 75 unsigned int length;
76 76
77 /** @bytes_per_datum: Size of individual datum including timestamp. */ 77 /** @bytes_per_datum: Size of individual datum including timestamp. */
78 int bytes_per_datum; 78 size_t bytes_per_datum;
79 79
80 /** 80 /**
81 * @access: Buffer access functions associated with the 81 * @access: Buffer access functions associated with the
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index cbf2aa9e93b9..5153f5b9294c 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -17,10 +17,6 @@
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Should you need to contact me, the author, you can do so either by
22 * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
23 * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
24 */ 20 */
25#ifndef _LINUX_JOYSTICK_H 21#ifndef _LINUX_JOYSTICK_H
26#define _LINUX_JOYSTICK_H 22#define _LINUX_JOYSTICK_H
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index e0e49b5b1ee1..2b0265265c28 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -216,6 +216,9 @@ void put_online_mems(void);
216void mem_hotplug_begin(void); 216void mem_hotplug_begin(void);
217void mem_hotplug_done(void); 217void mem_hotplug_done(void);
218 218
219extern void set_zone_contiguous(struct zone *zone);
220extern void clear_zone_contiguous(struct zone *zone);
221
219#else /* ! CONFIG_MEMORY_HOTPLUG */ 222#else /* ! CONFIG_MEMORY_HOTPLUG */
220#define pfn_to_online_page(pfn) \ 223#define pfn_to_online_page(pfn) \
221({ \ 224({ \
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2a156c5dfadd..d703774982ca 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1286,17 +1286,7 @@ enum {
1286static inline const struct cpumask * 1286static inline const struct cpumask *
1287mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) 1287mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
1288{ 1288{
1289 struct irq_desc *desc; 1289 return dev->priv.irq_info[vector].mask;
1290 unsigned int irq;
1291 int eqn;
1292 int err;
1293
1294 err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
1295 if (err)
1296 return NULL;
1297
1298 desc = irq_to_desc(irq);
1299 return desc->affinity_hint;
1300} 1290}
1301 1291
1302#endif /* MLX5_DRIVER_H */ 1292#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c6fa9a255dbf..02a616e2f17d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2109,7 +2109,6 @@ extern void setup_per_cpu_pageset(void);
2109 2109
2110extern void zone_pcp_update(struct zone *zone); 2110extern void zone_pcp_update(struct zone *zone);
2111extern void zone_pcp_reset(struct zone *zone); 2111extern void zone_pcp_reset(struct zone *zone);
2112extern void setup_zone_pageset(struct zone *zone);
2113 2112
2114/* page_alloc.c */ 2113/* page_alloc.c */
2115extern int min_free_kbytes; 2114extern int min_free_kbytes;
diff --git a/include/linux/node.h b/include/linux/node.h
index 41f171861dcc..6d336e38d155 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -32,9 +32,11 @@ extern struct node *node_devices[];
32typedef void (*node_registration_func_t)(struct node *); 32typedef void (*node_registration_func_t)(struct node *);
33 33
34#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) 34#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
35extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages); 35extern int link_mem_sections(int nid, unsigned long start_pfn,
36 unsigned long nr_pages, bool check_nid);
36#else 37#else
37static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages) 38static inline int link_mem_sections(int nid, unsigned long start_pfn,
39 unsigned long nr_pages, bool check_nid)
38{ 40{
39 return 0; 41 return 0;
40} 42}
@@ -57,7 +59,7 @@ static inline int register_one_node(int nid)
57 if (error) 59 if (error)
58 return error; 60 return error;
59 /* link memory sections under this node */ 61 /* link memory sections under this node */
60 error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages); 62 error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true);
61 } 63 }
62 64
63 return error; 65 return error;
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index e791ebc65c9c..0c5ef54fd416 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -7,6 +7,8 @@
7#define _LINUX_NOSPEC_H 7#define _LINUX_NOSPEC_H
8#include <asm/barrier.h> 8#include <asm/barrier.h>
9 9
10struct task_struct;
11
10/** 12/**
11 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise 13 * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
12 * @index: array element index 14 * @index: array element index
@@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
55 \ 57 \
56 (typeof(_i)) (_i & _mask); \ 58 (typeof(_i)) (_i & _mask); \
57}) 59})
60
61/* Speculation control prctl */
62int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
63int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
64 unsigned long ctrl);
65/* Speculation control for seccomp enforced mitigation */
66void arch_seccomp_spec_mitigate(struct task_struct *task);
67
58#endif /* _LINUX_NOSPEC_H */ 68#endif /* _LINUX_NOSPEC_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c2413703f45d..ca3f3eae8980 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1433,7 +1433,8 @@ static inline bool is_percpu_thread(void)
1433#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ 1433#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
1434#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ 1434#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1435#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ 1435#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1436 1436#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
1437#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
1437 1438
1438#define TASK_PFA_TEST(name, func) \ 1439#define TASK_PFA_TEST(name, func) \
1439 static inline bool task_##func(struct task_struct *p) \ 1440 static inline bool task_##func(struct task_struct *p) \
@@ -1458,6 +1459,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1458TASK_PFA_SET(SPREAD_SLAB, spread_slab) 1459TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1459TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) 1460TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1460 1461
1462TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1463TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1464TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1465
1466TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1467TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1468
1461static inline void 1469static inline void
1462current_restore_flags(unsigned long orig_flags, unsigned long flags) 1470current_restore_flags(unsigned long orig_flags, unsigned long flags)
1463{ 1471{
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index c723a5c4e3ff..e5320f6c8654 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -4,8 +4,9 @@
4 4
5#include <uapi/linux/seccomp.h> 5#include <uapi/linux/seccomp.h>
6 6
7#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \ 7#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
8 SECCOMP_FILTER_FLAG_LOG) 8 SECCOMP_FILTER_FLAG_LOG | \
9 SECCOMP_FILTER_FLAG_SPEC_ALLOW)
9 10
10#ifdef CONFIG_SECCOMP 11#ifdef CONFIG_SECCOMP
11 12
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index cd368d1b8cb8..a1e28dd5d0bf 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -170,6 +170,7 @@ struct nft_data_desc {
170int nft_data_init(const struct nft_ctx *ctx, 170int nft_data_init(const struct nft_ctx *ctx,
171 struct nft_data *data, unsigned int size, 171 struct nft_data *data, unsigned int size,
172 struct nft_data_desc *desc, const struct nlattr *nla); 172 struct nft_data_desc *desc, const struct nlattr *nla);
173void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
173void nft_data_release(const struct nft_data *data, enum nft_data_types type); 174void nft_data_release(const struct nft_data *data, enum nft_data_types type);
174int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, 175int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
175 enum nft_data_types type, unsigned int len); 176 enum nft_data_types type, unsigned int len);
@@ -736,6 +737,10 @@ struct nft_expr_ops {
736 int (*init)(const struct nft_ctx *ctx, 737 int (*init)(const struct nft_ctx *ctx,
737 const struct nft_expr *expr, 738 const struct nft_expr *expr,
738 const struct nlattr * const tb[]); 739 const struct nlattr * const tb[]);
740 void (*activate)(const struct nft_ctx *ctx,
741 const struct nft_expr *expr);
742 void (*deactivate)(const struct nft_ctx *ctx,
743 const struct nft_expr *expr);
739 void (*destroy)(const struct nft_ctx *ctx, 744 void (*destroy)(const struct nft_ctx *ctx,
740 const struct nft_expr *expr); 745 const struct nft_expr *expr);
741 int (*dump)(struct sk_buff *skb, 746 int (*dump)(struct sk_buff *skb,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 28b996d63490..35498e613ff5 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -103,6 +103,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
103/* 103/*
104 * sctp/socket.c 104 * sctp/socket.c
105 */ 105 */
106int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
107 int addr_len, int flags);
106int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 108int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
107int sctp_inet_listen(struct socket *sock, int backlog); 109int sctp_inet_listen(struct socket *sock, int backlog);
108void sctp_write_space(struct sock *sk); 110void sctp_write_space(struct sock *sk);
diff --git a/include/net/tls.h b/include/net/tls.h
index b400d0bb7448..f5fb16da3860 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -97,6 +97,9 @@ struct tls_sw_context {
97 u8 control; 97 u8 control;
98 bool decrypted; 98 bool decrypted;
99 99
100 char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
101 char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
102
100 /* Sending context */ 103 /* Sending context */
101 char aad_space[TLS_AAD_SPACE_SIZE]; 104 char aad_space[TLS_AAD_SPACE_SIZE];
102 105
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 23159dd5be18..a1fd63871d17 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -48,7 +48,6 @@ struct ib_umem {
48 int writable; 48 int writable;
49 int hugetlb; 49 int hugetlb;
50 struct work_struct work; 50 struct work_struct work;
51 struct pid *pid;
52 struct mm_struct *mm; 51 struct mm_struct *mm;
53 unsigned long diff; 52 unsigned long diff;
54 struct ib_umem_odp *odp_data; 53 struct ib_umem_odp *odp_data;
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 4a4201d997a7..095383a4bd1a 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -411,13 +411,13 @@ static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs
411static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle, 411static inline void *uverbs_attr_get_obj(const struct uverbs_attr_bundle *attrs_bundle,
412 u16 idx) 412 u16 idx)
413{ 413{
414 struct ib_uobject *uobj = 414 const struct uverbs_attr *attr;
415 uverbs_attr_get(attrs_bundle, idx)->obj_attr.uobject;
416 415
417 if (IS_ERR(uobj)) 416 attr = uverbs_attr_get(attrs_bundle, idx);
418 return uobj; 417 if (IS_ERR(attr))
418 return ERR_CAST(attr);
419 419
420 return uobj->object; 420 return attr->obj_attr.uobject->object;
421} 421}
422 422
423static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, 423static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index bc01e06bc716..0be866c91f62 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -435,7 +435,9 @@ TRACE_EVENT(sched_pi_setprio,
435 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); 435 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
436 __entry->pid = tsk->pid; 436 __entry->pid = tsk->pid;
437 __entry->oldprio = tsk->prio; 437 __entry->oldprio = tsk->prio;
438 __entry->newprio = pi_task ? pi_task->prio : tsk->prio; 438 __entry->newprio = pi_task ?
439 min(tsk->normal_prio, pi_task->prio) :
440 tsk->normal_prio;
439 /* XXX SCHED_DEADLINE bits missing */ 441 /* XXX SCHED_DEADLINE bits missing */
440 ), 442 ),
441 443
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c5ec89732a8d..8c317737ba3f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1017,6 +1017,7 @@ struct bpf_prog_info {
1017 __aligned_u64 map_ids; 1017 __aligned_u64 map_ids;
1018 char name[BPF_OBJ_NAME_LEN]; 1018 char name[BPF_OBJ_NAME_LEN];
1019 __u32 ifindex; 1019 __u32 ifindex;
1020 __u32 :32;
1020 __u64 netns_dev; 1021 __u64 netns_dev;
1021 __u64 netns_ino; 1022 __u64 netns_ino;
1022} __attribute__((aligned(8))); 1023} __attribute__((aligned(8)));
@@ -1030,6 +1031,7 @@ struct bpf_map_info {
1030 __u32 map_flags; 1031 __u32 map_flags;
1031 char name[BPF_OBJ_NAME_LEN]; 1032 char name[BPF_OBJ_NAME_LEN];
1032 __u32 ifindex; 1033 __u32 ifindex;
1034 __u32 :32;
1033 __u64 netns_dev; 1035 __u64 netns_dev;
1034 __u64 netns_ino; 1036 __u64 netns_ino;
1035} __attribute__((aligned(8))); 1037} __attribute__((aligned(8)));
diff --git a/include/uapi/linux/joystick.h b/include/uapi/linux/joystick.h
index 64aabb84a66d..192bf2cf182d 100644
--- a/include/uapi/linux/joystick.h
+++ b/include/uapi/linux/joystick.h
@@ -18,10 +18,6 @@
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 * Should you need to contact me, the author, you can do so either by
23 * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
24 * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
25 */ 21 */
26#ifndef _UAPI_LINUX_JOYSTICK_H 22#ifndef _UAPI_LINUX_JOYSTICK_H
27#define _UAPI_LINUX_JOYSTICK_H 23#define _UAPI_LINUX_JOYSTICK_H
diff --git a/include/uapi/linux/netfilter/nf_conntrack_tcp.h b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
index 74b91151d494..bcba72def817 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_tcp.h
@@ -46,6 +46,9 @@ enum tcp_conntrack {
46/* Marks possibility for expected RFC5961 challenge ACK */ 46/* Marks possibility for expected RFC5961 challenge ACK */
47#define IP_CT_EXP_CHALLENGE_ACK 0x40 47#define IP_CT_EXP_CHALLENGE_ACK 0x40
48 48
49/* Simultaneous open initialized */
50#define IP_CT_TCP_SIMULTANEOUS_OPEN 0x80
51
49struct nf_ct_tcp_flags { 52struct nf_ct_tcp_flags {
50 __u8 flags; 53 __u8 flags;
51 __u8 mask; 54 __u8 mask;
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 9c3630146cec..271b93783d28 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2698,7 +2698,7 @@ enum nl80211_attrs {
2698#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS 2698#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
2699#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS 2699#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
2700 2700
2701#define NL80211_WIPHY_NAME_MAXLEN 128 2701#define NL80211_WIPHY_NAME_MAXLEN 64
2702 2702
2703#define NL80211_MAX_SUPP_RATES 32 2703#define NL80211_MAX_SUPP_RATES 32
2704#define NL80211_MAX_SUPP_HT_RATES 77 2704#define NL80211_MAX_SUPP_HT_RATES 77
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index b19a9c249b15..784c2e3e572e 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -106,7 +106,7 @@ struct pppol2tp_ioc_stats {
106#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */ 106#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */
107#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */ 107#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */
108#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */ 108#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */
109#define PPPIOCDETACH _IOW('t', 60, int) /* detach from ppp unit/chan */ 109#define PPPIOCDETACH _IOW('t', 60, int) /* obsolete, do not use */
110#define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */ 110#define PPPIOCSMRRU _IOW('t', 59, int) /* set multilink MRU */
111#define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */ 111#define PPPIOCCONNECT _IOW('t', 58, int) /* connect channel to unit */
112#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */ 112#define PPPIOCDISCONN _IO('t', 57) /* disconnect channel */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index af5f8c2df87a..db9f15f5db04 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -207,4 +207,16 @@ struct prctl_mm_map {
207# define PR_SVE_VL_LEN_MASK 0xffff 207# define PR_SVE_VL_LEN_MASK 0xffff
208# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */ 208# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
209 209
210/* Per task speculation control */
211#define PR_GET_SPECULATION_CTRL 52
212#define PR_SET_SPECULATION_CTRL 53
213/* Speculation control variants */
214# define PR_SPEC_STORE_BYPASS 0
215/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
216# define PR_SPEC_NOT_AFFECTED 0
217# define PR_SPEC_PRCTL (1UL << 0)
218# define PR_SPEC_ENABLE (1UL << 1)
219# define PR_SPEC_DISABLE (1UL << 2)
220# define PR_SPEC_FORCE_DISABLE (1UL << 3)
221
210#endif /* _LINUX_PRCTL_H */ 222#endif /* _LINUX_PRCTL_H */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 2a0bd9dd104d..9efc0e73d50b 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -17,8 +17,9 @@
17#define SECCOMP_GET_ACTION_AVAIL 2 17#define SECCOMP_GET_ACTION_AVAIL 2
18 18
19/* Valid flags for SECCOMP_SET_MODE_FILTER */ 19/* Valid flags for SECCOMP_SET_MODE_FILTER */
20#define SECCOMP_FILTER_FLAG_TSYNC 1 20#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
21#define SECCOMP_FILTER_FLAG_LOG 2 21#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
22#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
22 23
23/* 24/*
24 * All BPF programs must return a 32-bit value. 25 * All BPF programs must return a 32-bit value.
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index 596578d9be3e..fdc279dc4a88 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -189,6 +189,13 @@
189 * 189 *
190 *----------------------------- Connector settings ---------------------------- 190 *----------------------------- Connector settings ----------------------------
191 * 191 *
192 * unique-id
193 * Values: <string>
194 *
195 * After device instance initialization each connector is assigned a
196 * unique ID, so it can be identified by the backend by this ID.
197 * This can be UUID or such.
198 *
192 * resolution 199 * resolution
193 * Values: <width, uint32_t>x<height, uint32_t> 200 * Values: <width, uint32_t>x<height, uint32_t>
194 * 201 *
@@ -368,6 +375,7 @@
368#define XENDISPL_FIELD_EVT_CHANNEL "evt-event-channel" 375#define XENDISPL_FIELD_EVT_CHANNEL "evt-event-channel"
369#define XENDISPL_FIELD_RESOLUTION "resolution" 376#define XENDISPL_FIELD_RESOLUTION "resolution"
370#define XENDISPL_FIELD_BE_ALLOC "be-alloc" 377#define XENDISPL_FIELD_BE_ALLOC "be-alloc"
378#define XENDISPL_FIELD_UNIQUE_ID "unique-id"
371 379
372/* 380/*
373 ****************************************************************************** 381 ******************************************************************************
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index 2a9510ade701..b4439cf48220 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -51,6 +51,18 @@
51 * corresponding entries in XenStore and puts 1 as the value of the entry. 51 * corresponding entries in XenStore and puts 1 as the value of the entry.
52 * If a feature is not supported then 0 must be set or feature entry omitted. 52 * If a feature is not supported then 0 must be set or feature entry omitted.
53 * 53 *
54 * feature-disable-keyboard
55 * Values: <uint>
56 *
57 * If there is no need to expose a virtual keyboard device by the
58 * frontend then this must be set to 1.
59 *
60 * feature-disable-pointer
61 * Values: <uint>
62 *
63 * If there is no need to expose a virtual pointer device by the
64 * frontend then this must be set to 1.
65 *
54 * feature-abs-pointer 66 * feature-abs-pointer
55 * Values: <uint> 67 * Values: <uint>
56 * 68 *
@@ -63,6 +75,22 @@
63 * Backends, which support reporting of multi-touch events 75 * Backends, which support reporting of multi-touch events
64 * should set this to 1. 76 * should set this to 1.
65 * 77 *
78 * feature-raw-pointer
79 * Values: <uint>
80 *
81 * Backends, which support reporting raw (unscaled) absolute coordinates
82 * for pointer devices should set this to 1. Raw (unscaled) values have
83 * a range of [0, 0x7fff].
84 *
85 *----------------------- Device Instance Parameters ------------------------
86 *
87 * unique-id
88 * Values: <string>
89 *
90 * After device instance initialization it is assigned a unique ID,
91 * so every instance of the frontend can be identified by the backend
92 * by this ID. This can be UUID or such.
93 *
66 *------------------------- Pointer Device Parameters ------------------------ 94 *------------------------- Pointer Device Parameters ------------------------
67 * 95 *
68 * width 96 * width
@@ -77,6 +105,25 @@
77 * Maximum Y coordinate (height) to be used by the frontend 105 * Maximum Y coordinate (height) to be used by the frontend
78 * while reporting input events, pixels, [0; UINT32_MAX]. 106 * while reporting input events, pixels, [0; UINT32_MAX].
79 * 107 *
108 *----------------------- Multi-touch Device Parameters ----------------------
109 *
110 * multi-touch-num-contacts
111 * Values: <uint>
112 *
113 * Number of simultaneous touches reported.
114 *
115 * multi-touch-width
116 * Values: <uint>
117 *
118 * Width of the touch area to be used by the frontend
119 * while reporting input events, pixels, [0; UINT32_MAX].
120 *
121 * multi-touch-height
122 * Values: <uint>
123 *
124 * Height of the touch area to be used by the frontend
125 * while reporting input events, pixels, [0; UINT32_MAX].
126 *
80 ***************************************************************************** 127 *****************************************************************************
81 * Frontend XenBus Nodes 128 * Frontend XenBus Nodes
82 ***************************************************************************** 129 *****************************************************************************
@@ -98,6 +145,13 @@
98 * 145 *
99 * Request backend to report multi-touch events. 146 * Request backend to report multi-touch events.
100 * 147 *
148 * request-raw-pointer
149 * Values: <uint>
150 *
151 * Request backend to report raw unscaled absolute pointer coordinates.
152 * This option is only valid if request-abs-pointer is also set.
153 * Raw unscaled coordinates have the range [0, 0x7fff]
154 *
101 *----------------------- Request Transport Parameters ----------------------- 155 *----------------------- Request Transport Parameters -----------------------
102 * 156 *
103 * event-channel 157 * event-channel
@@ -117,25 +171,6 @@
117 * 171 *
118 * OBSOLETE, not recommended for use. 172 * OBSOLETE, not recommended for use.
119 * PFN of the shared page. 173 * PFN of the shared page.
120 *
121 *----------------------- Multi-touch Device Parameters -----------------------
122 *
123 * multi-touch-num-contacts
124 * Values: <uint>
125 *
126 * Number of simultaneous touches reported.
127 *
128 * multi-touch-width
129 * Values: <uint>
130 *
131 * Width of the touch area to be used by the frontend
132 * while reporting input events, pixels, [0; UINT32_MAX].
133 *
134 * multi-touch-height
135 * Values: <uint>
136 *
137 * Height of the touch area to be used by the frontend
138 * while reporting input events, pixels, [0; UINT32_MAX].
139 */ 174 */
140 175
141/* 176/*
@@ -163,9 +198,13 @@
163 198
164#define XENKBD_DRIVER_NAME "vkbd" 199#define XENKBD_DRIVER_NAME "vkbd"
165 200
201#define XENKBD_FIELD_FEAT_DSBL_KEYBRD "feature-disable-keyboard"
202#define XENKBD_FIELD_FEAT_DSBL_POINTER "feature-disable-pointer"
166#define XENKBD_FIELD_FEAT_ABS_POINTER "feature-abs-pointer" 203#define XENKBD_FIELD_FEAT_ABS_POINTER "feature-abs-pointer"
204#define XENKBD_FIELD_FEAT_RAW_POINTER "feature-raw-pointer"
167#define XENKBD_FIELD_FEAT_MTOUCH "feature-multi-touch" 205#define XENKBD_FIELD_FEAT_MTOUCH "feature-multi-touch"
168#define XENKBD_FIELD_REQ_ABS_POINTER "request-abs-pointer" 206#define XENKBD_FIELD_REQ_ABS_POINTER "request-abs-pointer"
207#define XENKBD_FIELD_REQ_RAW_POINTER "request-raw-pointer"
169#define XENKBD_FIELD_REQ_MTOUCH "request-multi-touch" 208#define XENKBD_FIELD_REQ_MTOUCH "request-multi-touch"
170#define XENKBD_FIELD_RING_GREF "page-gref" 209#define XENKBD_FIELD_RING_GREF "page-gref"
171#define XENKBD_FIELD_EVT_CHANNEL "event-channel" 210#define XENKBD_FIELD_EVT_CHANNEL "event-channel"
@@ -174,6 +213,7 @@
174#define XENKBD_FIELD_MT_WIDTH "multi-touch-width" 213#define XENKBD_FIELD_MT_WIDTH "multi-touch-width"
175#define XENKBD_FIELD_MT_HEIGHT "multi-touch-height" 214#define XENKBD_FIELD_MT_HEIGHT "multi-touch-height"
176#define XENKBD_FIELD_MT_NUM_CONTACTS "multi-touch-num-contacts" 215#define XENKBD_FIELD_MT_NUM_CONTACTS "multi-touch-num-contacts"
216#define XENKBD_FIELD_UNIQUE_ID "unique-id"
177 217
178/* OBSOLETE, not recommended for use */ 218/* OBSOLETE, not recommended for use */
179#define XENKBD_FIELD_RING_REF "page-ref" 219#define XENKBD_FIELD_RING_REF "page-ref"
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 78bb5d9f8d83..2aac8f73614c 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -278,13 +278,11 @@
278 * defined under the same device. 278 * defined under the same device.
279 * 279 *
280 * unique-id 280 * unique-id
281 * Values: <uint32_t> 281 * Values: <string>
282 * 282 *
283 * After stream initialization it is assigned a unique ID (within the front 283 * After stream initialization it is assigned a unique ID, so every
284 * driver), so every stream of the frontend can be identified by the 284 * stream of the frontend can be identified by the backend by this ID.
285 * backend by this ID. This is not equal to stream-idx as the later is 285 * This can be UUID or such.
286 * zero based within the device, but this index is contigous within the
287 * driver.
288 * 286 *
289 *-------------------- Stream Request Transport Parameters -------------------- 287 *-------------------- Stream Request Transport Parameters --------------------
290 * 288 *
diff --git a/init/main.c b/init/main.c
index fd37315835b4..3b4ada11ed52 100644
--- a/init/main.c
+++ b/init/main.c
@@ -91,6 +91,7 @@
91#include <linux/cache.h> 91#include <linux/cache.h>
92#include <linux/rodata_test.h> 92#include <linux/rodata_test.h>
93#include <linux/jump_label.h> 93#include <linux/jump_label.h>
94#include <linux/mem_encrypt.h>
94 95
95#include <asm/io.h> 96#include <asm/io.h>
96#include <asm/bugs.h> 97#include <asm/bugs.h>
diff --git a/ipc/shm.c b/ipc/shm.c
index 3cf48988d68c..d73269381ec7 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1363,14 +1363,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
1363 1363
1364 if (addr) { 1364 if (addr) {
1365 if (addr & (shmlba - 1)) { 1365 if (addr & (shmlba - 1)) {
1366 /* 1366 if (shmflg & SHM_RND) {
1367 * Round down to the nearest multiple of shmlba. 1367 addr &= ~(shmlba - 1); /* round down */
1368 * For sane do_mmap_pgoff() parameters, avoid 1368
1369 * round downs that trigger nil-page and MAP_FIXED. 1369 /*
1370 */ 1370 * Ensure that the round-down is non-nil
1371 if ((shmflg & SHM_RND) && addr >= shmlba) 1371 * when remapping. This can happen for
1372 addr &= ~(shmlba - 1); 1372 * cases when addr < shmlba.
1373 else 1373 */
1374 if (!addr && (shmflg & SHM_REMAP))
1375 goto out;
1376 } else
1374#ifndef __ARCH_FORCE_SHMLBA 1377#ifndef __ARCH_FORCE_SHMLBA
1375 if (addr & ~PAGE_MASK) 1378 if (addr & ~PAGE_MASK)
1376#endif 1379#endif
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ba03ec39efb3..6ef6746a7871 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -218,47 +218,84 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
218 return 0; 218 return 0;
219} 219}
220 220
221static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta) 221static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
222 u32 curr, const bool probe_pass)
222{ 223{
224 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
225 s64 imm = insn->imm;
226
227 if (curr < pos && curr + imm + 1 > pos)
228 imm += delta;
229 else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
230 imm -= delta;
231 if (imm < imm_min || imm > imm_max)
232 return -ERANGE;
233 if (!probe_pass)
234 insn->imm = imm;
235 return 0;
236}
237
238static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
239 u32 curr, const bool probe_pass)
240{
241 const s32 off_min = S16_MIN, off_max = S16_MAX;
242 s32 off = insn->off;
243
244 if (curr < pos && curr + off + 1 > pos)
245 off += delta;
246 else if (curr > pos + delta && curr + off + 1 <= pos + delta)
247 off -= delta;
248 if (off < off_min || off > off_max)
249 return -ERANGE;
250 if (!probe_pass)
251 insn->off = off;
252 return 0;
253}
254
255static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
256 const bool probe_pass)
257{
258 u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
223 struct bpf_insn *insn = prog->insnsi; 259 struct bpf_insn *insn = prog->insnsi;
224 u32 i, insn_cnt = prog->len; 260 int ret = 0;
225 bool pseudo_call;
226 u8 code;
227 int off;
228 261
229 for (i = 0; i < insn_cnt; i++, insn++) { 262 for (i = 0; i < insn_cnt; i++, insn++) {
263 u8 code;
264
265 /* In the probing pass we still operate on the original,
266 * unpatched image in order to check overflows before we
267 * do any other adjustments. Therefore skip the patchlet.
268 */
269 if (probe_pass && i == pos) {
270 i += delta + 1;
271 insn++;
272 }
230 code = insn->code; 273 code = insn->code;
231 if (BPF_CLASS(code) != BPF_JMP) 274 if (BPF_CLASS(code) != BPF_JMP ||
232 continue; 275 BPF_OP(code) == BPF_EXIT)
233 if (BPF_OP(code) == BPF_EXIT)
234 continue; 276 continue;
277 /* Adjust offset of jmps if we cross patch boundaries. */
235 if (BPF_OP(code) == BPF_CALL) { 278 if (BPF_OP(code) == BPF_CALL) {
236 if (insn->src_reg == BPF_PSEUDO_CALL) 279 if (insn->src_reg != BPF_PSEUDO_CALL)
237 pseudo_call = true;
238 else
239 continue; 280 continue;
281 ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
282 probe_pass);
240 } else { 283 } else {
241 pseudo_call = false; 284 ret = bpf_adj_delta_to_off(insn, pos, delta, i,
285 probe_pass);
242 } 286 }
243 off = pseudo_call ? insn->imm : insn->off; 287 if (ret)
244 288 break;
245 /* Adjust offset of jmps if we cross boundaries. */
246 if (i < pos && i + off + 1 > pos)
247 off += delta;
248 else if (i > pos + delta && i + off + 1 <= pos + delta)
249 off -= delta;
250
251 if (pseudo_call)
252 insn->imm = off;
253 else
254 insn->off = off;
255 } 289 }
290
291 return ret;
256} 292}
257 293
258struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, 294struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
259 const struct bpf_insn *patch, u32 len) 295 const struct bpf_insn *patch, u32 len)
260{ 296{
261 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; 297 u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
298 const u32 cnt_max = S16_MAX;
262 struct bpf_prog *prog_adj; 299 struct bpf_prog *prog_adj;
263 300
264 /* Since our patchlet doesn't expand the image, we're done. */ 301 /* Since our patchlet doesn't expand the image, we're done. */
@@ -269,6 +306,15 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
269 306
270 insn_adj_cnt = prog->len + insn_delta; 307 insn_adj_cnt = prog->len + insn_delta;
271 308
309 /* Reject anything that would potentially let the insn->off
310 * target overflow when we have excessive program expansions.
311 * We need to probe here before we do any reallocation where
312 * we afterwards may not fail anymore.
313 */
314 if (insn_adj_cnt > cnt_max &&
315 bpf_adj_branches(prog, off, insn_delta, true))
316 return NULL;
317
272 /* Several new instructions need to be inserted. Make room 318 /* Several new instructions need to be inserted. Make room
273 * for them. Likely, there's no need for a new allocation as 319 * for them. Likely, there's no need for a new allocation as
274 * last page could have large enough tailroom. 320 * last page could have large enough tailroom.
@@ -294,7 +340,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
294 sizeof(*patch) * insn_rest); 340 sizeof(*patch) * insn_rest);
295 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); 341 memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
296 342
297 bpf_adj_branches(prog_adj, off, insn_delta); 343 /* We are guaranteed to not fail at this point, otherwise
344 * the ship has sailed to reverse to the original state. An
345 * overflow cannot happen at this point.
346 */
347 BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
298 348
299 return prog_adj; 349 return prog_adj;
300} 350}
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 098eca568c2b..95a84b2f10ce 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1703,11 +1703,11 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1703 * we increment the refcnt. If this is the case abort with an 1703 * we increment the refcnt. If this is the case abort with an
1704 * error. 1704 * error.
1705 */ 1705 */
1706 verdict = bpf_prog_inc_not_zero(stab->bpf_verdict); 1706 verdict = bpf_prog_inc_not_zero(verdict);
1707 if (IS_ERR(verdict)) 1707 if (IS_ERR(verdict))
1708 return PTR_ERR(verdict); 1708 return PTR_ERR(verdict);
1709 1709
1710 parse = bpf_prog_inc_not_zero(stab->bpf_parse); 1710 parse = bpf_prog_inc_not_zero(parse);
1711 if (IS_ERR(parse)) { 1711 if (IS_ERR(parse)) {
1712 bpf_prog_put(verdict); 1712 bpf_prog_put(verdict);
1713 return PTR_ERR(parse); 1713 return PTR_ERR(parse);
@@ -1715,12 +1715,12 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1715 } 1715 }
1716 1716
1717 if (tx_msg) { 1717 if (tx_msg) {
1718 tx_msg = bpf_prog_inc_not_zero(stab->bpf_tx_msg); 1718 tx_msg = bpf_prog_inc_not_zero(tx_msg);
1719 if (IS_ERR(tx_msg)) { 1719 if (IS_ERR(tx_msg)) {
1720 if (verdict) 1720 if (parse && verdict) {
1721 bpf_prog_put(verdict);
1722 if (parse)
1723 bpf_prog_put(parse); 1721 bpf_prog_put(parse);
1722 bpf_prog_put(verdict);
1723 }
1724 return PTR_ERR(tx_msg); 1724 return PTR_ERR(tx_msg);
1725 } 1725 }
1726 } 1726 }
@@ -1805,10 +1805,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
1805out_free: 1805out_free:
1806 smap_release_sock(psock, sock); 1806 smap_release_sock(psock, sock);
1807out_progs: 1807out_progs:
1808 if (verdict) 1808 if (parse && verdict) {
1809 bpf_prog_put(verdict);
1810 if (parse)
1811 bpf_prog_put(parse); 1809 bpf_prog_put(parse);
1810 bpf_prog_put(verdict);
1811 }
1812 if (tx_msg) 1812 if (tx_msg)
1813 bpf_prog_put(tx_msg); 1813 bpf_prog_put(tx_msg);
1814 write_unlock_bh(&sock->sk_callback_lock); 1814 write_unlock_bh(&sock->sk_callback_lock);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5dd1dcb902bf..1904e814f282 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -156,7 +156,29 @@ struct bpf_verifier_stack_elem {
156#define BPF_COMPLEXITY_LIMIT_INSNS 131072 156#define BPF_COMPLEXITY_LIMIT_INSNS 131072
157#define BPF_COMPLEXITY_LIMIT_STACK 1024 157#define BPF_COMPLEXITY_LIMIT_STACK 1024
158 158
159#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) 159#define BPF_MAP_PTR_UNPRIV 1UL
160#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
161 POISON_POINTER_DELTA))
162#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
163
164static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
165{
166 return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
167}
168
169static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
170{
171 return aux->map_state & BPF_MAP_PTR_UNPRIV;
172}
173
174static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
175 const struct bpf_map *map, bool unpriv)
176{
177 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
178 unpriv |= bpf_map_ptr_unpriv(aux);
179 aux->map_state = (unsigned long)map |
180 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
181}
160 182
161struct bpf_call_arg_meta { 183struct bpf_call_arg_meta {
162 struct bpf_map *map_ptr; 184 struct bpf_map *map_ptr;
@@ -978,7 +1000,7 @@ static bool register_is_null(struct bpf_reg_state *reg)
978 */ 1000 */
979static int check_stack_write(struct bpf_verifier_env *env, 1001static int check_stack_write(struct bpf_verifier_env *env,
980 struct bpf_func_state *state, /* func where register points to */ 1002 struct bpf_func_state *state, /* func where register points to */
981 int off, int size, int value_regno) 1003 int off, int size, int value_regno, int insn_idx)
982{ 1004{
983 struct bpf_func_state *cur; /* state of the current function */ 1005 struct bpf_func_state *cur; /* state of the current function */
984 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 1006 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
@@ -1017,8 +1039,33 @@ static int check_stack_write(struct bpf_verifier_env *env,
1017 state->stack[spi].spilled_ptr = cur->regs[value_regno]; 1039 state->stack[spi].spilled_ptr = cur->regs[value_regno];
1018 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1040 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1019 1041
1020 for (i = 0; i < BPF_REG_SIZE; i++) 1042 for (i = 0; i < BPF_REG_SIZE; i++) {
1043 if (state->stack[spi].slot_type[i] == STACK_MISC &&
1044 !env->allow_ptr_leaks) {
1045 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
1046 int soff = (-spi - 1) * BPF_REG_SIZE;
1047
1048 /* detected reuse of integer stack slot with a pointer
1049 * which means either llvm is reusing stack slot or
1050 * an attacker is trying to exploit CVE-2018-3639
1051 * (speculative store bypass)
1052 * Have to sanitize that slot with preemptive
1053 * store of zero.
1054 */
1055 if (*poff && *poff != soff) {
1056 /* disallow programs where single insn stores
1057 * into two different stack slots, since verifier
1058 * cannot sanitize them
1059 */
1060 verbose(env,
1061 "insn %d cannot access two stack slots fp%d and fp%d",
1062 insn_idx, *poff, soff);
1063 return -EINVAL;
1064 }
1065 *poff = soff;
1066 }
1021 state->stack[spi].slot_type[i] = STACK_SPILL; 1067 state->stack[spi].slot_type[i] = STACK_SPILL;
1068 }
1022 } else { 1069 } else {
1023 u8 type = STACK_MISC; 1070 u8 type = STACK_MISC;
1024 1071
@@ -1694,7 +1741,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1694 1741
1695 if (t == BPF_WRITE) 1742 if (t == BPF_WRITE)
1696 err = check_stack_write(env, state, off, size, 1743 err = check_stack_write(env, state, off, size,
1697 value_regno); 1744 value_regno, insn_idx);
1698 else 1745 else
1699 err = check_stack_read(env, state, off, size, 1746 err = check_stack_read(env, state, off, size,
1700 value_regno); 1747 value_regno);
@@ -2333,6 +2380,29 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2333 return 0; 2380 return 0;
2334} 2381}
2335 2382
2383static int
2384record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2385 int func_id, int insn_idx)
2386{
2387 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
2388
2389 if (func_id != BPF_FUNC_tail_call &&
2390 func_id != BPF_FUNC_map_lookup_elem)
2391 return 0;
2392 if (meta->map_ptr == NULL) {
2393 verbose(env, "kernel subsystem misconfigured verifier\n");
2394 return -EINVAL;
2395 }
2396
2397 if (!BPF_MAP_PTR(aux->map_state))
2398 bpf_map_ptr_store(aux, meta->map_ptr,
2399 meta->map_ptr->unpriv_array);
2400 else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
2401 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2402 meta->map_ptr->unpriv_array);
2403 return 0;
2404}
2405
2336static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 2406static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2337{ 2407{
2338 const struct bpf_func_proto *fn = NULL; 2408 const struct bpf_func_proto *fn = NULL;
@@ -2387,13 +2457,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2387 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); 2457 err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
2388 if (err) 2458 if (err)
2389 return err; 2459 return err;
2390 if (func_id == BPF_FUNC_tail_call) {
2391 if (meta.map_ptr == NULL) {
2392 verbose(env, "verifier bug\n");
2393 return -EINVAL;
2394 }
2395 env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
2396 }
2397 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); 2460 err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
2398 if (err) 2461 if (err)
2399 return err; 2462 return err;
@@ -2404,6 +2467,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2404 if (err) 2467 if (err)
2405 return err; 2468 return err;
2406 2469
2470 err = record_func_map(env, &meta, func_id, insn_idx);
2471 if (err)
2472 return err;
2473
2407 /* Mark slots with STACK_MISC in case of raw mode, stack offset 2474 /* Mark slots with STACK_MISC in case of raw mode, stack offset
2408 * is inferred from register state. 2475 * is inferred from register state.
2409 */ 2476 */
@@ -2428,8 +2495,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2428 } else if (fn->ret_type == RET_VOID) { 2495 } else if (fn->ret_type == RET_VOID) {
2429 regs[BPF_REG_0].type = NOT_INIT; 2496 regs[BPF_REG_0].type = NOT_INIT;
2430 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { 2497 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
2431 struct bpf_insn_aux_data *insn_aux;
2432
2433 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; 2498 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
2434 /* There is no offset yet applied, variable or fixed */ 2499 /* There is no offset yet applied, variable or fixed */
2435 mark_reg_known_zero(env, regs, BPF_REG_0); 2500 mark_reg_known_zero(env, regs, BPF_REG_0);
@@ -2445,11 +2510,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2445 } 2510 }
2446 regs[BPF_REG_0].map_ptr = meta.map_ptr; 2511 regs[BPF_REG_0].map_ptr = meta.map_ptr;
2447 regs[BPF_REG_0].id = ++env->id_gen; 2512 regs[BPF_REG_0].id = ++env->id_gen;
2448 insn_aux = &env->insn_aux_data[insn_idx];
2449 if (!insn_aux->map_ptr)
2450 insn_aux->map_ptr = meta.map_ptr;
2451 else if (insn_aux->map_ptr != meta.map_ptr)
2452 insn_aux->map_ptr = BPF_MAP_PTR_POISON;
2453 } else { 2513 } else {
2454 verbose(env, "unknown return type %d of func %s#%d\n", 2514 verbose(env, "unknown return type %d of func %s#%d\n",
2455 fn->ret_type, func_id_name(func_id), func_id); 2515 fn->ret_type, func_id_name(func_id), func_id);
@@ -5169,6 +5229,34 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
5169 else 5229 else
5170 continue; 5230 continue;
5171 5231
5232 if (type == BPF_WRITE &&
5233 env->insn_aux_data[i + delta].sanitize_stack_off) {
5234 struct bpf_insn patch[] = {
5235 /* Sanitize suspicious stack slot with zero.
5236 * There are no memory dependencies for this store,
5237 * since it's only using frame pointer and immediate
5238 * constant of zero
5239 */
5240 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
5241 env->insn_aux_data[i + delta].sanitize_stack_off,
5242 0),
5243 /* the original STX instruction will immediately
5244 * overwrite the same stack slot with appropriate value
5245 */
5246 *insn,
5247 };
5248
5249 cnt = ARRAY_SIZE(patch);
5250 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
5251 if (!new_prog)
5252 return -ENOMEM;
5253
5254 delta += cnt - 1;
5255 env->prog = new_prog;
5256 insn = new_prog->insnsi + i + delta;
5257 continue;
5258 }
5259
5172 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) 5260 if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
5173 continue; 5261 continue;
5174 5262
@@ -5417,6 +5505,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
5417 struct bpf_insn *insn = prog->insnsi; 5505 struct bpf_insn *insn = prog->insnsi;
5418 const struct bpf_func_proto *fn; 5506 const struct bpf_func_proto *fn;
5419 const int insn_cnt = prog->len; 5507 const int insn_cnt = prog->len;
5508 struct bpf_insn_aux_data *aux;
5420 struct bpf_insn insn_buf[16]; 5509 struct bpf_insn insn_buf[16];
5421 struct bpf_prog *new_prog; 5510 struct bpf_prog *new_prog;
5422 struct bpf_map *map_ptr; 5511 struct bpf_map *map_ptr;
@@ -5491,19 +5580,22 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
5491 insn->imm = 0; 5580 insn->imm = 0;
5492 insn->code = BPF_JMP | BPF_TAIL_CALL; 5581 insn->code = BPF_JMP | BPF_TAIL_CALL;
5493 5582
5583 aux = &env->insn_aux_data[i + delta];
5584 if (!bpf_map_ptr_unpriv(aux))
5585 continue;
5586
5494 /* instead of changing every JIT dealing with tail_call 5587 /* instead of changing every JIT dealing with tail_call
5495 * emit two extra insns: 5588 * emit two extra insns:
5496 * if (index >= max_entries) goto out; 5589 * if (index >= max_entries) goto out;
5497 * index &= array->index_mask; 5590 * index &= array->index_mask;
5498 * to avoid out-of-bounds cpu speculation 5591 * to avoid out-of-bounds cpu speculation
5499 */ 5592 */
5500 map_ptr = env->insn_aux_data[i + delta].map_ptr; 5593 if (bpf_map_ptr_poisoned(aux)) {
5501 if (map_ptr == BPF_MAP_PTR_POISON) {
5502 verbose(env, "tail_call abusing map_ptr\n"); 5594 verbose(env, "tail_call abusing map_ptr\n");
5503 return -EINVAL; 5595 return -EINVAL;
5504 } 5596 }
5505 if (!map_ptr->unpriv_array) 5597
5506 continue; 5598 map_ptr = BPF_MAP_PTR(aux->map_state);
5507 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 5599 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
5508 map_ptr->max_entries, 2); 5600 map_ptr->max_entries, 2);
5509 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 5601 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
@@ -5527,9 +5619,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
5527 */ 5619 */
5528 if (prog->jit_requested && BITS_PER_LONG == 64 && 5620 if (prog->jit_requested && BITS_PER_LONG == 64 &&
5529 insn->imm == BPF_FUNC_map_lookup_elem) { 5621 insn->imm == BPF_FUNC_map_lookup_elem) {
5530 map_ptr = env->insn_aux_data[i + delta].map_ptr; 5622 aux = &env->insn_aux_data[i + delta];
5531 if (map_ptr == BPF_MAP_PTR_POISON || 5623 if (bpf_map_ptr_poisoned(aux))
5532 !map_ptr->ops->map_gen_lookup) 5624 goto patch_call_imm;
5625
5626 map_ptr = BPF_MAP_PTR(aux->map_state);
5627 if (!map_ptr->ops->map_gen_lookup)
5533 goto patch_call_imm; 5628 goto patch_call_imm;
5534 5629
5535 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); 5630 cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2017a39ab490..481951bf091d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -193,7 +193,7 @@ EXPORT_SYMBOL_GPL(kthread_parkme);
193 193
194void kthread_park_complete(struct task_struct *k) 194void kthread_park_complete(struct task_struct *k)
195{ 195{
196 complete(&to_kthread(k)->parked); 196 complete_all(&to_kthread(k)->parked);
197} 197}
198 198
199static int kthread(void *_create) 199static int kthread(void *_create)
@@ -459,6 +459,7 @@ void kthread_unpark(struct task_struct *k)
459 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) 459 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
460 __kthread_bind(k, kthread->cpu, TASK_PARKED); 460 __kthread_bind(k, kthread->cpu, TASK_PARKED);
461 461
462 reinit_completion(&kthread->parked);
462 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 463 clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
463 wake_up_state(k, TASK_PARKED); 464 wake_up_state(k, TASK_PARKED);
464} 465}
@@ -483,9 +484,6 @@ int kthread_park(struct task_struct *k)
483 if (WARN_ON(k->flags & PF_EXITING)) 484 if (WARN_ON(k->flags & PF_EXITING))
484 return -ENOSYS; 485 return -ENOSYS;
485 486
486 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
487 return -EBUSY;
488
489 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); 487 set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
490 if (k != current) { 488 if (k != current) {
491 wake_up_process(k); 489 wake_up_process(k);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 092f7c4de903..211890edf37e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -881,6 +881,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
881} 881}
882 882
883#ifdef CONFIG_SMP 883#ifdef CONFIG_SMP
884
885static inline bool is_per_cpu_kthread(struct task_struct *p)
886{
887 if (!(p->flags & PF_KTHREAD))
888 return false;
889
890 if (p->nr_cpus_allowed != 1)
891 return false;
892
893 return true;
894}
895
896/*
897 * Per-CPU kthreads are allowed to run on !actie && online CPUs, see
898 * __set_cpus_allowed_ptr() and select_fallback_rq().
899 */
900static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
901{
902 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
903 return false;
904
905 if (is_per_cpu_kthread(p))
906 return cpu_online(cpu);
907
908 return cpu_active(cpu);
909}
910
884/* 911/*
885 * This is how migration works: 912 * This is how migration works:
886 * 913 *
@@ -938,16 +965,8 @@ struct migration_arg {
938static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, 965static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
939 struct task_struct *p, int dest_cpu) 966 struct task_struct *p, int dest_cpu)
940{ 967{
941 if (p->flags & PF_KTHREAD) {
942 if (unlikely(!cpu_online(dest_cpu)))
943 return rq;
944 } else {
945 if (unlikely(!cpu_active(dest_cpu)))
946 return rq;
947 }
948
949 /* Affinity changed (again). */ 968 /* Affinity changed (again). */
950 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) 969 if (!is_cpu_allowed(p, dest_cpu))
951 return rq; 970 return rq;
952 971
953 update_rq_clock(rq); 972 update_rq_clock(rq);
@@ -1476,10 +1495,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
1476 for (;;) { 1495 for (;;) {
1477 /* Any allowed, online CPU? */ 1496 /* Any allowed, online CPU? */
1478 for_each_cpu(dest_cpu, &p->cpus_allowed) { 1497 for_each_cpu(dest_cpu, &p->cpus_allowed) {
1479 if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu)) 1498 if (!is_cpu_allowed(p, dest_cpu))
1480 continue;
1481 if (!cpu_online(dest_cpu))
1482 continue; 1499 continue;
1500
1483 goto out; 1501 goto out;
1484 } 1502 }
1485 1503
@@ -1542,8 +1560,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1542 * [ this allows ->select_task() to simply return task_cpu(p) and 1560 * [ this allows ->select_task() to simply return task_cpu(p) and
1543 * not worry about this generic constraint ] 1561 * not worry about this generic constraint ]
1544 */ 1562 */
1545 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || 1563 if (unlikely(!is_cpu_allowed(p, cpu)))
1546 !cpu_online(cpu)))
1547 cpu = select_fallback_rq(task_cpu(p), p); 1564 cpu = select_fallback_rq(task_cpu(p), p);
1548 1565
1549 return cpu; 1566 return cpu;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 1356afd1eeb6..fbfc3f1d368a 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1259,6 +1259,9 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1259 1259
1260 rq = task_rq_lock(p, &rf); 1260 rq = task_rq_lock(p, &rf);
1261 1261
1262 sched_clock_tick();
1263 update_rq_clock(rq);
1264
1262 if (!dl_task(p) || p->state == TASK_DEAD) { 1265 if (!dl_task(p) || p->state == TASK_DEAD) {
1263 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); 1266 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1264 1267
@@ -1278,9 +1281,6 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1278 if (dl_se->dl_non_contending == 0) 1281 if (dl_se->dl_non_contending == 0)
1279 goto unlock; 1282 goto unlock;
1280 1283
1281 sched_clock_tick();
1282 update_rq_clock(rq);
1283
1284 sub_running_bw(dl_se, &rq->dl); 1284 sub_running_bw(dl_se, &rq->dl);
1285 dl_se->dl_non_contending = 0; 1285 dl_se->dl_non_contending = 0;
1286unlock: 1286unlock:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1f0a4bc6a39d..cb467c221b15 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -983,7 +983,7 @@ static inline void rq_clock_skip_update(struct rq *rq)
983} 983}
984 984
985/* 985/*
986 * See rt task throttoling, which is the only time a skip 986 * See rt task throttling, which is the only time a skip
987 * request is cancelled. 987 * request is cancelled.
988 */ 988 */
989static inline void rq_clock_cancel_skipupdate(struct rq *rq) 989static inline void rq_clock_cancel_skipupdate(struct rq *rq)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 64cc564f5255..61a1125c1ae4 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1708,7 +1708,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
1708 rcu_read_unlock(); 1708 rcu_read_unlock();
1709 1709
1710 if (rq && sched_debug_enabled) { 1710 if (rq && sched_debug_enabled) {
1711 pr_info("span: %*pbl (max cpu_capacity = %lu)\n", 1711 pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
1712 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); 1712 cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
1713 } 1713 }
1714 1714
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index dc77548167ef..e691d9a6c58d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -19,6 +19,8 @@
19#include <linux/compat.h> 19#include <linux/compat.h>
20#include <linux/coredump.h> 20#include <linux/coredump.h>
21#include <linux/kmemleak.h> 21#include <linux/kmemleak.h>
22#include <linux/nospec.h>
23#include <linux/prctl.h>
22#include <linux/sched.h> 24#include <linux/sched.h>
23#include <linux/sched/task_stack.h> 25#include <linux/sched/task_stack.h>
24#include <linux/seccomp.h> 26#include <linux/seccomp.h>
@@ -227,8 +229,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
227 return true; 229 return true;
228} 230}
229 231
232void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
233
230static inline void seccomp_assign_mode(struct task_struct *task, 234static inline void seccomp_assign_mode(struct task_struct *task,
231 unsigned long seccomp_mode) 235 unsigned long seccomp_mode,
236 unsigned long flags)
232{ 237{
233 assert_spin_locked(&task->sighand->siglock); 238 assert_spin_locked(&task->sighand->siglock);
234 239
@@ -238,6 +243,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
238 * filter) is set. 243 * filter) is set.
239 */ 244 */
240 smp_mb__before_atomic(); 245 smp_mb__before_atomic();
246 /* Assume default seccomp processes want spec flaw mitigation. */
247 if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
248 arch_seccomp_spec_mitigate(task);
241 set_tsk_thread_flag(task, TIF_SECCOMP); 249 set_tsk_thread_flag(task, TIF_SECCOMP);
242} 250}
243 251
@@ -305,7 +313,7 @@ static inline pid_t seccomp_can_sync_threads(void)
305 * without dropping the locks. 313 * without dropping the locks.
306 * 314 *
307 */ 315 */
308static inline void seccomp_sync_threads(void) 316static inline void seccomp_sync_threads(unsigned long flags)
309{ 317{
310 struct task_struct *thread, *caller; 318 struct task_struct *thread, *caller;
311 319
@@ -346,7 +354,8 @@ static inline void seccomp_sync_threads(void)
346 * allow one thread to transition the other. 354 * allow one thread to transition the other.
347 */ 355 */
348 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) 356 if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
349 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); 357 seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
358 flags);
350 } 359 }
351} 360}
352 361
@@ -469,7 +478,7 @@ static long seccomp_attach_filter(unsigned int flags,
469 478
470 /* Now that the new filter is in place, synchronize to all threads. */ 479 /* Now that the new filter is in place, synchronize to all threads. */
471 if (flags & SECCOMP_FILTER_FLAG_TSYNC) 480 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
472 seccomp_sync_threads(); 481 seccomp_sync_threads(flags);
473 482
474 return 0; 483 return 0;
475} 484}
@@ -818,7 +827,7 @@ static long seccomp_set_mode_strict(void)
818#ifdef TIF_NOTSC 827#ifdef TIF_NOTSC
819 disable_TSC(); 828 disable_TSC();
820#endif 829#endif
821 seccomp_assign_mode(current, seccomp_mode); 830 seccomp_assign_mode(current, seccomp_mode, 0);
822 ret = 0; 831 ret = 0;
823 832
824out: 833out:
@@ -876,7 +885,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
876 /* Do not free the successfully attached filter. */ 885 /* Do not free the successfully attached filter. */
877 prepared = NULL; 886 prepared = NULL;
878 887
879 seccomp_assign_mode(current, seccomp_mode); 888 seccomp_assign_mode(current, seccomp_mode, flags);
880out: 889out:
881 spin_unlock_irq(&current->sighand->siglock); 890 spin_unlock_irq(&current->sighand->siglock);
882 if (flags & SECCOMP_FILTER_FLAG_TSYNC) 891 if (flags & SECCOMP_FILTER_FLAG_TSYNC)
diff --git a/kernel/sys.c b/kernel/sys.c
index ad692183dfe9..d1b2b8d934bb 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -61,6 +61,8 @@
61#include <linux/uidgid.h> 61#include <linux/uidgid.h>
62#include <linux/cred.h> 62#include <linux/cred.h>
63 63
64#include <linux/nospec.h>
65
64#include <linux/kmsg_dump.h> 66#include <linux/kmsg_dump.h>
65/* Move somewhere else to avoid recompiling? */ 67/* Move somewhere else to avoid recompiling? */
66#include <generated/utsrelease.h> 68#include <generated/utsrelease.h>
@@ -69,6 +71,9 @@
69#include <asm/io.h> 71#include <asm/io.h>
70#include <asm/unistd.h> 72#include <asm/unistd.h>
71 73
74/* Hardening for Spectre-v1 */
75#include <linux/nospec.h>
76
72#include "uid16.h" 77#include "uid16.h"
73 78
74#ifndef SET_UNALIGN_CTL 79#ifndef SET_UNALIGN_CTL
@@ -1451,6 +1456,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1451 if (resource >= RLIM_NLIMITS) 1456 if (resource >= RLIM_NLIMITS)
1452 return -EINVAL; 1457 return -EINVAL;
1453 1458
1459 resource = array_index_nospec(resource, RLIM_NLIMITS);
1454 task_lock(current->group_leader); 1460 task_lock(current->group_leader);
1455 x = current->signal->rlim[resource]; 1461 x = current->signal->rlim[resource];
1456 task_unlock(current->group_leader); 1462 task_unlock(current->group_leader);
@@ -1470,6 +1476,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1470 if (resource >= RLIM_NLIMITS) 1476 if (resource >= RLIM_NLIMITS)
1471 return -EINVAL; 1477 return -EINVAL;
1472 1478
1479 resource = array_index_nospec(resource, RLIM_NLIMITS);
1473 task_lock(current->group_leader); 1480 task_lock(current->group_leader);
1474 r = current->signal->rlim[resource]; 1481 r = current->signal->rlim[resource];
1475 task_unlock(current->group_leader); 1482 task_unlock(current->group_leader);
@@ -2242,6 +2249,17 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2242 return 1; 2249 return 1;
2243} 2250}
2244 2251
2252int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2253{
2254 return -EINVAL;
2255}
2256
2257int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2258 unsigned long ctrl)
2259{
2260 return -EINVAL;
2261}
2262
2245SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, 2263SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2246 unsigned long, arg4, unsigned long, arg5) 2264 unsigned long, arg4, unsigned long, arg5)
2247{ 2265{
@@ -2450,6 +2468,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2450 case PR_SVE_GET_VL: 2468 case PR_SVE_GET_VL:
2451 error = SVE_GET_VL(); 2469 error = SVE_GET_VL();
2452 break; 2470 break;
2471 case PR_GET_SPECULATION_CTRL:
2472 if (arg3 || arg4 || arg5)
2473 return -EINVAL;
2474 error = arch_prctl_spec_ctrl_get(me, arg2);
2475 break;
2476 case PR_SET_SPECULATION_CTRL:
2477 if (arg4 || arg5)
2478 return -EINVAL;
2479 error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2480 break;
2453 default: 2481 default:
2454 error = -EINVAL; 2482 error = -EINVAL;
2455 break; 2483 break;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 414d7210b2ec..bcd93031d042 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -893,7 +893,7 @@ int __trace_bputs(unsigned long ip, const char *str)
893EXPORT_SYMBOL_GPL(__trace_bputs); 893EXPORT_SYMBOL_GPL(__trace_bputs);
894 894
895#ifdef CONFIG_TRACER_SNAPSHOT 895#ifdef CONFIG_TRACER_SNAPSHOT
896static void tracing_snapshot_instance(struct trace_array *tr) 896void tracing_snapshot_instance(struct trace_array *tr)
897{ 897{
898 struct tracer *tracer = tr->current_trace; 898 struct tracer *tracer = tr->current_trace;
899 unsigned long flags; 899 unsigned long flags;
@@ -949,7 +949,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
949 struct trace_buffer *size_buf, int cpu_id); 949 struct trace_buffer *size_buf, int cpu_id);
950static void set_buffer_entries(struct trace_buffer *buf, unsigned long val); 950static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
951 951
952static int alloc_snapshot(struct trace_array *tr) 952int tracing_alloc_snapshot_instance(struct trace_array *tr)
953{ 953{
954 int ret; 954 int ret;
955 955
@@ -995,7 +995,7 @@ int tracing_alloc_snapshot(void)
995 struct trace_array *tr = &global_trace; 995 struct trace_array *tr = &global_trace;
996 int ret; 996 int ret;
997 997
998 ret = alloc_snapshot(tr); 998 ret = tracing_alloc_snapshot_instance(tr);
999 WARN_ON(ret < 0); 999 WARN_ON(ret < 0);
1000 1000
1001 return ret; 1001 return ret;
@@ -5408,7 +5408,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5408 5408
5409#ifdef CONFIG_TRACER_MAX_TRACE 5409#ifdef CONFIG_TRACER_MAX_TRACE
5410 if (t->use_max_tr && !had_max_tr) { 5410 if (t->use_max_tr && !had_max_tr) {
5411 ret = alloc_snapshot(tr); 5411 ret = tracing_alloc_snapshot_instance(tr);
5412 if (ret < 0) 5412 if (ret < 0)
5413 goto out; 5413 goto out;
5414 } 5414 }
@@ -6451,7 +6451,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6451 } 6451 }
6452#endif 6452#endif
6453 if (!tr->allocated_snapshot) { 6453 if (!tr->allocated_snapshot) {
6454 ret = alloc_snapshot(tr); 6454 ret = tracing_alloc_snapshot_instance(tr);
6455 if (ret < 0) 6455 if (ret < 0)
6456 break; 6456 break;
6457 } 6457 }
@@ -7179,7 +7179,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7179 return ret; 7179 return ret;
7180 7180
7181 out_reg: 7181 out_reg:
7182 ret = alloc_snapshot(tr); 7182 ret = tracing_alloc_snapshot_instance(tr);
7183 if (ret < 0) 7183 if (ret < 0)
7184 goto out; 7184 goto out;
7185 7185
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6fb46a06c9dc..507954b4e058 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1817,6 +1817,17 @@ static inline void __init trace_event_init(void) { }
1817static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } 1817static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1818#endif 1818#endif
1819 1819
1820#ifdef CONFIG_TRACER_SNAPSHOT
1821void tracing_snapshot_instance(struct trace_array *tr);
1822int tracing_alloc_snapshot_instance(struct trace_array *tr);
1823#else
1824static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1825static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1826{
1827 return 0;
1828}
1829#endif
1830
1820extern struct trace_iterator *tracepoint_print_iter; 1831extern struct trace_iterator *tracepoint_print_iter;
1821 1832
1822#endif /* _LINUX_KERNEL_TRACE_H */ 1833#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index d251cabcf69a..8b5bdcf64871 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -483,9 +483,10 @@ clear_event_triggers(struct trace_array *tr)
483 struct trace_event_file *file; 483 struct trace_event_file *file;
484 484
485 list_for_each_entry(file, &tr->events, list) { 485 list_for_each_entry(file, &tr->events, list) {
486 struct event_trigger_data *data; 486 struct event_trigger_data *data, *n;
487 list_for_each_entry_rcu(data, &file->triggers, list) { 487 list_for_each_entry_safe(data, n, &file->triggers, list) {
488 trace_event_trigger_enable_disable(file, 0); 488 trace_event_trigger_enable_disable(file, 0);
489 list_del_rcu(&data->list);
489 if (data->ops->free) 490 if (data->ops->free)
490 data->ops->free(data->ops, data); 491 data->ops->free(data->ops, data);
491 } 492 }
@@ -642,6 +643,7 @@ event_trigger_callback(struct event_command *cmd_ops,
642 trigger_data->count = -1; 643 trigger_data->count = -1;
643 trigger_data->ops = trigger_ops; 644 trigger_data->ops = trigger_ops;
644 trigger_data->cmd_ops = cmd_ops; 645 trigger_data->cmd_ops = cmd_ops;
646 trigger_data->private_data = file;
645 INIT_LIST_HEAD(&trigger_data->list); 647 INIT_LIST_HEAD(&trigger_data->list);
646 INIT_LIST_HEAD(&trigger_data->named_list); 648 INIT_LIST_HEAD(&trigger_data->named_list);
647 649
@@ -1053,7 +1055,12 @@ static void
1053snapshot_trigger(struct event_trigger_data *data, void *rec, 1055snapshot_trigger(struct event_trigger_data *data, void *rec,
1054 struct ring_buffer_event *event) 1056 struct ring_buffer_event *event)
1055{ 1057{
1056 tracing_snapshot(); 1058 struct trace_event_file *file = data->private_data;
1059
1060 if (file)
1061 tracing_snapshot_instance(file->tr);
1062 else
1063 tracing_snapshot();
1057} 1064}
1058 1065
1059static void 1066static void
@@ -1076,7 +1083,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1076{ 1083{
1077 int ret = register_trigger(glob, ops, data, file); 1084 int ret = register_trigger(glob, ops, data, file);
1078 1085
1079 if (ret > 0 && tracing_alloc_snapshot() != 0) { 1086 if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1080 unregister_trigger(glob, ops, data, file); 1087 unregister_trigger(glob, ops, data, file);
1081 ret = 0; 1088 ret = 0;
1082 } 1089 }
diff --git a/lib/bitmap.c b/lib/bitmap.c
index a42eff7e8c48..5b476d8414be 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,7 @@
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/bug.h> 14#include <linux/bug.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/slab.h>
16#include <linux/string.h> 17#include <linux/string.h>
17#include <linux/uaccess.h> 18#include <linux/uaccess.h>
18 19
@@ -1128,6 +1129,25 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
1128EXPORT_SYMBOL(bitmap_copy_le); 1129EXPORT_SYMBOL(bitmap_copy_le);
1129#endif 1130#endif
1130 1131
1132unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
1133{
1134 return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
1135 flags);
1136}
1137EXPORT_SYMBOL(bitmap_alloc);
1138
1139unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
1140{
1141 return bitmap_alloc(nbits, flags | __GFP_ZERO);
1142}
1143EXPORT_SYMBOL(bitmap_zalloc);
1144
1145void bitmap_free(const unsigned long *bitmap)
1146{
1147 kfree(bitmap);
1148}
1149EXPORT_SYMBOL(bitmap_free);
1150
1131#if BITS_PER_LONG == 64 1151#if BITS_PER_LONG == 64
1132/** 1152/**
1133 * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap 1153 * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 970212670b6a..fdae394172fa 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1012,7 +1012,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1012} 1012}
1013EXPORT_SYMBOL(iov_iter_gap_alignment); 1013EXPORT_SYMBOL(iov_iter_gap_alignment);
1014 1014
1015static inline size_t __pipe_get_pages(struct iov_iter *i, 1015static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1016 size_t maxsize, 1016 size_t maxsize,
1017 struct page **pages, 1017 struct page **pages,
1018 int idx, 1018 int idx,
@@ -1102,7 +1102,7 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1102 size_t *start) 1102 size_t *start)
1103{ 1103{
1104 struct page **p; 1104 struct page **p;
1105 size_t n; 1105 ssize_t n;
1106 int idx; 1106 int idx;
1107 int npages; 1107 int npages;
1108 1108
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 43e0cbedc3a0..a9e41aed6de4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2034,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
2034 unsigned long index, void *item) 2034 unsigned long index, void *item)
2035{ 2035{
2036 struct radix_tree_node *node = NULL; 2036 struct radix_tree_node *node = NULL;
2037 void __rcu **slot; 2037 void __rcu **slot = NULL;
2038 void *entry; 2038 void *entry;
2039 2039
2040 entry = __radix_tree_lookup(root, index, &node, &slot); 2040 entry = __radix_tree_lookup(root, index, &node, &slot);
2041 if (!slot)
2042 return NULL;
2041 if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, 2043 if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
2042 get_slot_offset(node, slot)))) 2044 get_slot_offset(node, slot))))
2043 return NULL; 2045 return NULL;
diff --git a/mm/cma.c b/mm/cma.c
index aa40e6c7b042..5809bbe360d7 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -39,7 +39,6 @@
39#include <trace/events/cma.h> 39#include <trace/events/cma.h>
40 40
41#include "cma.h" 41#include "cma.h"
42#include "internal.h"
43 42
44struct cma cma_areas[MAX_CMA_AREAS]; 43struct cma cma_areas[MAX_CMA_AREAS];
45unsigned cma_area_count; 44unsigned cma_area_count;
@@ -110,25 +109,23 @@ static int __init cma_activate_area(struct cma *cma)
110 if (!cma->bitmap) 109 if (!cma->bitmap)
111 return -ENOMEM; 110 return -ENOMEM;
112 111
112 WARN_ON_ONCE(!pfn_valid(pfn));
113 zone = page_zone(pfn_to_page(pfn));
114
113 do { 115 do {
114 unsigned j; 116 unsigned j;
115 117
116 base_pfn = pfn; 118 base_pfn = pfn;
117 if (!pfn_valid(base_pfn))
118 goto err;
119
120 zone = page_zone(pfn_to_page(base_pfn));
121 for (j = pageblock_nr_pages; j; --j, pfn++) { 119 for (j = pageblock_nr_pages; j; --j, pfn++) {
122 if (!pfn_valid(pfn)) 120 WARN_ON_ONCE(!pfn_valid(pfn));
123 goto err;
124
125 /* 121 /*
126 * In init_cma_reserved_pageblock(), present_pages 122 * alloc_contig_range requires the pfn range
127 * is adjusted with assumption that all pages in 123 * specified to be in the same zone. Make this
128 * the pageblock come from a single zone. 124 * simple by forcing the entire CMA resv range
125 * to be in the same zone.
129 */ 126 */
130 if (page_zone(pfn_to_page(pfn)) != zone) 127 if (page_zone(pfn_to_page(pfn)) != zone)
131 goto err; 128 goto not_in_zone;
132 } 129 }
133 init_cma_reserved_pageblock(pfn_to_page(base_pfn)); 130 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
134 } while (--i); 131 } while (--i);
@@ -142,7 +139,7 @@ static int __init cma_activate_area(struct cma *cma)
142 139
143 return 0; 140 return 0;
144 141
145err: 142not_in_zone:
146 pr_err("CMA area %s could not be activated\n", cma->name); 143 pr_err("CMA area %s could not be activated\n", cma->name);
147 kfree(cma->bitmap); 144 kfree(cma->bitmap);
148 cma->count = 0; 145 cma->count = 0;
@@ -152,41 +149,6 @@ err:
152static int __init cma_init_reserved_areas(void) 149static int __init cma_init_reserved_areas(void)
153{ 150{
154 int i; 151 int i;
155 struct zone *zone;
156 pg_data_t *pgdat;
157
158 if (!cma_area_count)
159 return 0;
160
161 for_each_online_pgdat(pgdat) {
162 unsigned long start_pfn = UINT_MAX, end_pfn = 0;
163
164 zone = &pgdat->node_zones[ZONE_MOVABLE];
165
166 /*
167 * In this case, we cannot adjust the zone range
168 * since it is now maximum node span and we don't
169 * know original zone range.
170 */
171 if (populated_zone(zone))
172 continue;
173
174 for (i = 0; i < cma_area_count; i++) {
175 if (pfn_to_nid(cma_areas[i].base_pfn) !=
176 pgdat->node_id)
177 continue;
178
179 start_pfn = min(start_pfn, cma_areas[i].base_pfn);
180 end_pfn = max(end_pfn, cma_areas[i].base_pfn +
181 cma_areas[i].count);
182 }
183
184 if (!end_pfn)
185 continue;
186
187 zone->zone_start_pfn = start_pfn;
188 zone->spanned_pages = end_pfn - start_pfn;
189 }
190 152
191 for (i = 0; i < cma_area_count; i++) { 153 for (i = 0; i < cma_area_count; i++) {
192 int ret = cma_activate_area(&cma_areas[i]); 154 int ret = cma_activate_area(&cma_areas[i]);
@@ -195,32 +157,9 @@ static int __init cma_init_reserved_areas(void)
195 return ret; 157 return ret;
196 } 158 }
197 159
198 /*
199 * Reserved pages for ZONE_MOVABLE are now activated and
200 * this would change ZONE_MOVABLE's managed page counter and
201 * the other zones' present counter. We need to re-calculate
202 * various zone information that depends on this initialization.
203 */
204 build_all_zonelists(NULL);
205 for_each_populated_zone(zone) {
206 if (zone_idx(zone) == ZONE_MOVABLE) {
207 zone_pcp_reset(zone);
208 setup_zone_pageset(zone);
209 } else
210 zone_pcp_update(zone);
211
212 set_zone_contiguous(zone);
213 }
214
215 /*
216 * We need to re-init per zone wmark by calling
217 * init_per_zone_wmark_min() but doesn't call here because it is
218 * registered on core_initcall and it will be called later than us.
219 */
220
221 return 0; 160 return 0;
222} 161}
223pure_initcall(cma_init_reserved_areas); 162core_initcall(cma_init_reserved_areas);
224 163
225/** 164/**
226 * cma_init_reserved_mem() - create custom contiguous area from reserved memory 165 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
diff --git a/mm/compaction.c b/mm/compaction.c
index 028b7210a669..29bd1df18b98 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1450,12 +1450,14 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
1450 * if compaction succeeds. 1450 * if compaction succeeds.
1451 * For costly orders, we require low watermark instead of min for 1451 * For costly orders, we require low watermark instead of min for
1452 * compaction to proceed to increase its chances. 1452 * compaction to proceed to increase its chances.
1453 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1454 * suitable migration targets
1453 */ 1455 */
1454 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 1456 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1455 low_wmark_pages(zone) : min_wmark_pages(zone); 1457 low_wmark_pages(zone) : min_wmark_pages(zone);
1456 watermark += compact_gap(order); 1458 watermark += compact_gap(order);
1457 if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, 1459 if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1458 0, wmark_target)) 1460 ALLOC_CMA, wmark_target))
1459 return COMPACT_SKIPPED; 1461 return COMPACT_SKIPPED;
1460 1462
1461 return COMPACT_CONTINUE; 1463 return COMPACT_CONTINUE;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a3a1815f8e11..b9f3dbd885bd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2431,7 +2431,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2431 __split_huge_page_tail(head, i, lruvec, list); 2431 __split_huge_page_tail(head, i, lruvec, list);
2432 /* Some pages can be beyond i_size: drop them from page cache */ 2432 /* Some pages can be beyond i_size: drop them from page cache */
2433 if (head[i].index >= end) { 2433 if (head[i].index >= end) {
2434 __ClearPageDirty(head + i); 2434 ClearPageDirty(head + i);
2435 __delete_from_page_cache(head + i, NULL); 2435 __delete_from_page_cache(head + i, NULL);
2436 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) 2436 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2437 shmem_uncharge(head->mapping->host, 1); 2437 shmem_uncharge(head->mapping->host, 1);
diff --git a/mm/internal.h b/mm/internal.h
index 62d8c34e63d5..502d14189794 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -168,9 +168,6 @@ extern void post_alloc_hook(struct page *page, unsigned int order,
168 gfp_t gfp_flags); 168 gfp_t gfp_flags);
169extern int user_min_free_kbytes; 169extern int user_min_free_kbytes;
170 170
171extern void set_zone_contiguous(struct zone *zone);
172extern void clear_zone_contiguous(struct zone *zone);
173
174#if defined CONFIG_COMPACTION || defined CONFIG_CMA 171#if defined CONFIG_COMPACTION || defined CONFIG_CMA
175 172
176/* 173/*
@@ -498,6 +495,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
498#define ALLOC_HARDER 0x10 /* try to alloc harder */ 495#define ALLOC_HARDER 0x10 /* try to alloc harder */
499#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 496#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */
500#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 497#define ALLOC_CPUSET 0x40 /* check for correct cpuset */
498#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */
501 499
502enum ttu_flags; 500enum ttu_flags;
503struct tlbflush_unmap_batch; 501struct tlbflush_unmap_batch;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index bc0e68f7dc75..f185455b3406 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -792,6 +792,40 @@ DEFINE_ASAN_SET_SHADOW(f5);
792DEFINE_ASAN_SET_SHADOW(f8); 792DEFINE_ASAN_SET_SHADOW(f8);
793 793
794#ifdef CONFIG_MEMORY_HOTPLUG 794#ifdef CONFIG_MEMORY_HOTPLUG
795static bool shadow_mapped(unsigned long addr)
796{
797 pgd_t *pgd = pgd_offset_k(addr);
798 p4d_t *p4d;
799 pud_t *pud;
800 pmd_t *pmd;
801 pte_t *pte;
802
803 if (pgd_none(*pgd))
804 return false;
805 p4d = p4d_offset(pgd, addr);
806 if (p4d_none(*p4d))
807 return false;
808 pud = pud_offset(p4d, addr);
809 if (pud_none(*pud))
810 return false;
811
812 /*
813 * We can't use pud_large() or pud_huge(), the first one is
814 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
815 * pud_bad(), if pud is bad then it's bad because it's huge.
816 */
817 if (pud_bad(*pud))
818 return true;
819 pmd = pmd_offset(pud, addr);
820 if (pmd_none(*pmd))
821 return false;
822
823 if (pmd_bad(*pmd))
824 return true;
825 pte = pte_offset_kernel(pmd, addr);
826 return !pte_none(*pte);
827}
828
795static int __meminit kasan_mem_notifier(struct notifier_block *nb, 829static int __meminit kasan_mem_notifier(struct notifier_block *nb,
796 unsigned long action, void *data) 830 unsigned long action, void *data)
797{ 831{
@@ -813,6 +847,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
813 case MEM_GOING_ONLINE: { 847 case MEM_GOING_ONLINE: {
814 void *ret; 848 void *ret;
815 849
850 /*
851 * If shadow is mapped already than it must have been mapped
852 * during the boot. This could happen if we onlining previously
853 * offlined memory.
854 */
855 if (shadow_mapped(shadow_start))
856 return NOTIFY_OK;
857
816 ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start, 858 ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
817 shadow_end, GFP_KERNEL, 859 shadow_end, GFP_KERNEL,
818 PAGE_KERNEL, VM_NO_GUARD, 860 PAGE_KERNEL, VM_NO_GUARD,
@@ -824,8 +866,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
824 kmemleak_ignore(ret); 866 kmemleak_ignore(ret);
825 return NOTIFY_OK; 867 return NOTIFY_OK;
826 } 868 }
827 case MEM_OFFLINE: 869 case MEM_CANCEL_ONLINE:
828 vfree((void *)shadow_start); 870 case MEM_OFFLINE: {
871 struct vm_struct *vm;
872
873 /*
874 * shadow_start was either mapped during boot by kasan_init()
875 * or during memory online by __vmalloc_node_range().
876 * In the latter case we can use vfree() to free shadow.
877 * Non-NULL result of the find_vm_area() will tell us if
878 * that was the second case.
879 *
880 * Currently it's not possible to free shadow mapped
881 * during boot by kasan_init(). It's because the code
882 * to do that hasn't been written yet. So we'll just
883 * leak the memory.
884 */
885 vm = find_vm_area((void *)shadow_start);
886 if (vm)
887 vfree((void *)shadow_start);
888 }
829 } 889 }
830 890
831 return NOTIFY_OK; 891 return NOTIFY_OK;
@@ -838,5 +898,5 @@ static int __init kasan_memhotplug_init(void)
838 return 0; 898 return 0;
839} 899}
840 900
841module_init(kasan_memhotplug_init); 901core_initcall(kasan_memhotplug_init);
842#endif 902#endif
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f74826cdceea..25982467800b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1158,7 +1158,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
1158 * nodes have to go through register_node. 1158 * nodes have to go through register_node.
1159 * TODO clean up this mess. 1159 * TODO clean up this mess.
1160 */ 1160 */
1161 ret = link_mem_sections(nid, start_pfn, nr_pages); 1161 ret = link_mem_sections(nid, start_pfn, nr_pages, false);
1162register_fail: 1162register_fail:
1163 /* 1163 /*
1164 * If sysfs file of new node can't create, cpu on the node 1164 * If sysfs file of new node can't create, cpu on the node
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 905db9d7962f..22320ea27489 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1743,38 +1743,16 @@ void __init page_alloc_init_late(void)
1743} 1743}
1744 1744
1745#ifdef CONFIG_CMA 1745#ifdef CONFIG_CMA
1746static void __init adjust_present_page_count(struct page *page, long count)
1747{
1748 struct zone *zone = page_zone(page);
1749
1750 /* We don't need to hold a lock since it is boot-up process */
1751 zone->present_pages += count;
1752}
1753
1754/* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 1746/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
1755void __init init_cma_reserved_pageblock(struct page *page) 1747void __init init_cma_reserved_pageblock(struct page *page)
1756{ 1748{
1757 unsigned i = pageblock_nr_pages; 1749 unsigned i = pageblock_nr_pages;
1758 unsigned long pfn = page_to_pfn(page);
1759 struct page *p = page; 1750 struct page *p = page;
1760 int nid = page_to_nid(page);
1761
1762 /*
1763 * ZONE_MOVABLE will steal present pages from other zones by
1764 * changing page links so page_zone() is changed. Before that,
1765 * we need to adjust previous zone's page count first.
1766 */
1767 adjust_present_page_count(page, -pageblock_nr_pages);
1768 1751
1769 do { 1752 do {
1770 __ClearPageReserved(p); 1753 __ClearPageReserved(p);
1771 set_page_count(p, 0); 1754 set_page_count(p, 0);
1772 1755 } while (++p, --i);
1773 /* Steal pages from other zones */
1774 set_page_links(p, ZONE_MOVABLE, nid, pfn);
1775 } while (++p, ++pfn, --i);
1776
1777 adjust_present_page_count(page, pageblock_nr_pages);
1778 1756
1779 set_pageblock_migratetype(page, MIGRATE_CMA); 1757 set_pageblock_migratetype(page, MIGRATE_CMA);
1780 1758
@@ -2889,7 +2867,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
2889 * exists. 2867 * exists.
2890 */ 2868 */
2891 watermark = min_wmark_pages(zone) + (1UL << order); 2869 watermark = min_wmark_pages(zone) + (1UL << order);
2892 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 2870 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2893 return 0; 2871 return 0;
2894 2872
2895 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2873 __mod_zone_freepage_state(zone, -(1UL << order), mt);
@@ -3165,6 +3143,12 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3165 } 3143 }
3166 3144
3167 3145
3146#ifdef CONFIG_CMA
3147 /* If allocation can't use CMA areas don't use free CMA pages */
3148 if (!(alloc_flags & ALLOC_CMA))
3149 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
3150#endif
3151
3168 /* 3152 /*
3169 * Check watermarks for an order-0 allocation request. If these 3153 * Check watermarks for an order-0 allocation request. If these
3170 * are not met, then a high-order request also cannot go ahead 3154 * are not met, then a high-order request also cannot go ahead
@@ -3191,8 +3175,10 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3191 } 3175 }
3192 3176
3193#ifdef CONFIG_CMA 3177#ifdef CONFIG_CMA
3194 if (!list_empty(&area->free_list[MIGRATE_CMA])) 3178 if ((alloc_flags & ALLOC_CMA) &&
3179 !list_empty(&area->free_list[MIGRATE_CMA])) {
3195 return true; 3180 return true;
3181 }
3196#endif 3182#endif
3197 if (alloc_harder && 3183 if (alloc_harder &&
3198 !list_empty(&area->free_list[MIGRATE_HIGHATOMIC])) 3184 !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
@@ -3212,6 +3198,13 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3212 unsigned long mark, int classzone_idx, unsigned int alloc_flags) 3198 unsigned long mark, int classzone_idx, unsigned int alloc_flags)
3213{ 3199{
3214 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3200 long free_pages = zone_page_state(z, NR_FREE_PAGES);
3201 long cma_pages = 0;
3202
3203#ifdef CONFIG_CMA
3204 /* If allocation can't use CMA areas don't use free CMA pages */
3205 if (!(alloc_flags & ALLOC_CMA))
3206 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
3207#endif
3215 3208
3216 /* 3209 /*
3217 * Fast check for order-0 only. If this fails then the reserves 3210 * Fast check for order-0 only. If this fails then the reserves
@@ -3220,7 +3213,7 @@ static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3220 * the caller is !atomic then it'll uselessly search the free 3213 * the caller is !atomic then it'll uselessly search the free
3221 * list. That corner case is then slower but it is harmless. 3214 * list. That corner case is then slower but it is harmless.
3222 */ 3215 */
3223 if (!order && free_pages > mark + z->lowmem_reserve[classzone_idx]) 3216 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
3224 return true; 3217 return true;
3225 3218
3226 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 3219 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
@@ -3856,6 +3849,10 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
3856 } else if (unlikely(rt_task(current)) && !in_interrupt()) 3849 } else if (unlikely(rt_task(current)) && !in_interrupt())
3857 alloc_flags |= ALLOC_HARDER; 3850 alloc_flags |= ALLOC_HARDER;
3858 3851
3852#ifdef CONFIG_CMA
3853 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3854 alloc_flags |= ALLOC_CMA;
3855#endif
3859 return alloc_flags; 3856 return alloc_flags;
3860} 3857}
3861 3858
@@ -4322,6 +4319,9 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4322 if (should_fail_alloc_page(gfp_mask, order)) 4319 if (should_fail_alloc_page(gfp_mask, order))
4323 return false; 4320 return false;
4324 4321
4322 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
4323 *alloc_flags |= ALLOC_CMA;
4324
4325 return true; 4325 return true;
4326} 4326}
4327 4327
@@ -6204,7 +6204,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
6204{ 6204{
6205 enum zone_type j; 6205 enum zone_type j;
6206 int nid = pgdat->node_id; 6206 int nid = pgdat->node_id;
6207 unsigned long node_end_pfn = 0;
6208 6207
6209 pgdat_resize_init(pgdat); 6208 pgdat_resize_init(pgdat);
6210#ifdef CONFIG_NUMA_BALANCING 6209#ifdef CONFIG_NUMA_BALANCING
@@ -6232,13 +6231,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
6232 struct zone *zone = pgdat->node_zones + j; 6231 struct zone *zone = pgdat->node_zones + j;
6233 unsigned long size, realsize, freesize, memmap_pages; 6232 unsigned long size, realsize, freesize, memmap_pages;
6234 unsigned long zone_start_pfn = zone->zone_start_pfn; 6233 unsigned long zone_start_pfn = zone->zone_start_pfn;
6235 unsigned long movable_size = 0;
6236 6234
6237 size = zone->spanned_pages; 6235 size = zone->spanned_pages;
6238 realsize = freesize = zone->present_pages; 6236 realsize = freesize = zone->present_pages;
6239 if (zone_end_pfn(zone) > node_end_pfn)
6240 node_end_pfn = zone_end_pfn(zone);
6241
6242 6237
6243 /* 6238 /*
6244 * Adjust freesize so that it accounts for how much memory 6239 * Adjust freesize so that it accounts for how much memory
@@ -6287,30 +6282,12 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
6287 zone_seqlock_init(zone); 6282 zone_seqlock_init(zone);
6288 zone_pcp_init(zone); 6283 zone_pcp_init(zone);
6289 6284
6290 /* 6285 if (!size)
6291 * The size of the CMA area is unknown now so we need to
6292 * prepare the memory for the usemap at maximum.
6293 */
6294 if (IS_ENABLED(CONFIG_CMA) && j == ZONE_MOVABLE &&
6295 pgdat->node_spanned_pages) {
6296 movable_size = node_end_pfn - pgdat->node_start_pfn;
6297 }
6298
6299 if (!size && !movable_size)
6300 continue; 6286 continue;
6301 6287
6302 set_pageblock_order(); 6288 set_pageblock_order();
6303 if (movable_size) { 6289 setup_usemap(pgdat, zone, zone_start_pfn, size);
6304 zone->zone_start_pfn = pgdat->node_start_pfn; 6290 init_currently_empty_zone(zone, zone_start_pfn, size);
6305 zone->spanned_pages = movable_size;
6306 setup_usemap(pgdat, zone,
6307 pgdat->node_start_pfn, movable_size);
6308 init_currently_empty_zone(zone,
6309 pgdat->node_start_pfn, movable_size);
6310 } else {
6311 setup_usemap(pgdat, zone, zone_start_pfn, size);
6312 init_currently_empty_zone(zone, zone_start_pfn, size);
6313 }
6314 memmap_init(size, nid, j, zone_start_pfn); 6291 memmap_init(size, nid, j, zone_start_pfn);
6315 } 6292 }
6316} 6293}
@@ -7621,11 +7598,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7621 unsigned long pfn, iter, found; 7598 unsigned long pfn, iter, found;
7622 7599
7623 /* 7600 /*
7624 * For avoiding noise data, lru_add_drain_all() should be called 7601 * TODO we could make this much more efficient by not checking every
7625 * If ZONE_MOVABLE, the zone never contains unmovable pages 7602 * page in the range if we know all of them are in MOVABLE_ZONE and
7603 * that the movable zone guarantees that pages are migratable but
7604 * the later is not the case right now unfortunatelly. E.g. movablecore
7605 * can still lead to having bootmem allocations in zone_movable.
7626 */ 7606 */
7627 if (zone_idx(zone) == ZONE_MOVABLE)
7628 return false;
7629 7607
7630 /* 7608 /*
7631 * CMA allocations (alloc_contig_range) really need to mark isolate 7609 * CMA allocations (alloc_contig_range) really need to mark isolate
@@ -7646,7 +7624,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7646 page = pfn_to_page(check); 7624 page = pfn_to_page(check);
7647 7625
7648 if (PageReserved(page)) 7626 if (PageReserved(page))
7649 return true; 7627 goto unmovable;
7650 7628
7651 /* 7629 /*
7652 * Hugepages are not in LRU lists, but they're movable. 7630 * Hugepages are not in LRU lists, but they're movable.
@@ -7696,9 +7674,12 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7696 * page at boot. 7674 * page at boot.
7697 */ 7675 */
7698 if (found > count) 7676 if (found > count)
7699 return true; 7677 goto unmovable;
7700 } 7678 }
7701 return false; 7679 return false;
7680unmovable:
7681 WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
7682 return true;
7702} 7683}
7703 7684
7704bool is_pageblock_removable_nolock(struct page *page) 7685bool is_pageblock_removable_nolock(struct page *page)
@@ -7951,7 +7932,7 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
7951} 7932}
7952#endif 7933#endif
7953 7934
7954#if defined CONFIG_MEMORY_HOTPLUG || defined CONFIG_CMA 7935#ifdef CONFIG_MEMORY_HOTPLUG
7955/* 7936/*
7956 * The zone indicated has a new number of managed_pages; batch sizes and percpu 7937 * The zone indicated has a new number of managed_pages; batch sizes and percpu
7957 * page high values need to be recalulated. 7938 * page high values need to be recalulated.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index cc2cf04d9018..78a015fcec3b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3112,6 +3112,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3112 unsigned long *frontswap_map = NULL; 3112 unsigned long *frontswap_map = NULL;
3113 struct page *page = NULL; 3113 struct page *page = NULL;
3114 struct inode *inode = NULL; 3114 struct inode *inode = NULL;
3115 bool inced_nr_rotate_swap = false;
3115 3116
3116 if (swap_flags & ~SWAP_FLAGS_VALID) 3117 if (swap_flags & ~SWAP_FLAGS_VALID)
3117 return -EINVAL; 3118 return -EINVAL;
@@ -3215,8 +3216,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3215 cluster = per_cpu_ptr(p->percpu_cluster, cpu); 3216 cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3216 cluster_set_null(&cluster->index); 3217 cluster_set_null(&cluster->index);
3217 } 3218 }
3218 } else 3219 } else {
3219 atomic_inc(&nr_rotate_swap); 3220 atomic_inc(&nr_rotate_swap);
3221 inced_nr_rotate_swap = true;
3222 }
3220 3223
3221 error = swap_cgroup_swapon(p->type, maxpages); 3224 error = swap_cgroup_swapon(p->type, maxpages);
3222 if (error) 3225 if (error)
@@ -3307,6 +3310,8 @@ bad_swap:
3307 vfree(swap_map); 3310 vfree(swap_map);
3308 kvfree(cluster_info); 3311 kvfree(cluster_info);
3309 kvfree(frontswap_map); 3312 kvfree(frontswap_map);
3313 if (inced_nr_rotate_swap)
3314 atomic_dec(&nr_rotate_swap);
3310 if (swap_file) { 3315 if (swap_file) {
3311 if (inode && S_ISREG(inode->i_mode)) { 3316 if (inode && S_ISREG(inode->i_mode)) {
3312 inode_unlock(inode); 3317 inode_unlock(inode);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9b697323a88c..9270a4370d54 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1418,7 +1418,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1418 return ret; 1418 return ret;
1419 1419
1420 mapping = page_mapping(page); 1420 mapping = page_mapping(page);
1421 migrate_dirty = mapping && mapping->a_ops->migratepage; 1421 migrate_dirty = !mapping || mapping->a_ops->migratepage;
1422 unlock_page(page); 1422 unlock_page(page);
1423 if (!migrate_dirty) 1423 if (!migrate_dirty)
1424 return ret; 1424 return ret;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index a11d3d89f012..a35f597e8c8b 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1536,7 +1536,7 @@ out:
1536 1536
1537 if (!ret && primary_if) 1537 if (!ret && primary_if)
1538 *primary_if = hard_iface; 1538 *primary_if = hard_iface;
1539 else 1539 else if (hard_iface)
1540 batadv_hardif_put(hard_iface); 1540 batadv_hardif_put(hard_iface);
1541 1541
1542 return ret; 1542 return ret;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 0225616d5771..3986551397ca 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -862,7 +862,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
862 struct batadv_orig_node_vlan *vlan; 862 struct batadv_orig_node_vlan *vlan;
863 u8 *tt_change_ptr; 863 u8 *tt_change_ptr;
864 864
865 rcu_read_lock(); 865 spin_lock_bh(&orig_node->vlan_list_lock);
866 hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) { 866 hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
867 num_vlan++; 867 num_vlan++;
868 num_entries += atomic_read(&vlan->tt.num_entries); 868 num_entries += atomic_read(&vlan->tt.num_entries);
@@ -900,7 +900,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
900 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; 900 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
901 901
902out: 902out:
903 rcu_read_unlock(); 903 spin_unlock_bh(&orig_node->vlan_list_lock);
904 return tvlv_len; 904 return tvlv_len;
905} 905}
906 906
@@ -931,15 +931,20 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
931 struct batadv_tvlv_tt_vlan_data *tt_vlan; 931 struct batadv_tvlv_tt_vlan_data *tt_vlan;
932 struct batadv_softif_vlan *vlan; 932 struct batadv_softif_vlan *vlan;
933 u16 num_vlan = 0; 933 u16 num_vlan = 0;
934 u16 num_entries = 0; 934 u16 vlan_entries = 0;
935 u16 total_entries = 0;
935 u16 tvlv_len; 936 u16 tvlv_len;
936 u8 *tt_change_ptr; 937 u8 *tt_change_ptr;
937 int change_offset; 938 int change_offset;
938 939
939 rcu_read_lock(); 940 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
940 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { 941 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
942 vlan_entries = atomic_read(&vlan->tt.num_entries);
943 if (vlan_entries < 1)
944 continue;
945
941 num_vlan++; 946 num_vlan++;
942 num_entries += atomic_read(&vlan->tt.num_entries); 947 total_entries += vlan_entries;
943 } 948 }
944 949
945 change_offset = sizeof(**tt_data); 950 change_offset = sizeof(**tt_data);
@@ -947,7 +952,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
947 952
948 /* if tt_len is negative, allocate the space needed by the full table */ 953 /* if tt_len is negative, allocate the space needed by the full table */
949 if (*tt_len < 0) 954 if (*tt_len < 0)
950 *tt_len = batadv_tt_len(num_entries); 955 *tt_len = batadv_tt_len(total_entries);
951 956
952 tvlv_len = *tt_len; 957 tvlv_len = *tt_len;
953 tvlv_len += change_offset; 958 tvlv_len += change_offset;
@@ -964,6 +969,10 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
964 969
965 tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1); 970 tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
966 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { 971 hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
972 vlan_entries = atomic_read(&vlan->tt.num_entries);
973 if (vlan_entries < 1)
974 continue;
975
967 tt_vlan->vid = htons(vlan->vid); 976 tt_vlan->vid = htons(vlan->vid);
968 tt_vlan->crc = htonl(vlan->tt.crc); 977 tt_vlan->crc = htonl(vlan->tt.crc);
969 978
@@ -974,7 +983,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
974 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; 983 *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
975 984
976out: 985out:
977 rcu_read_unlock(); 986 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
978 return tvlv_len; 987 return tvlv_len;
979} 988}
980 989
@@ -1538,6 +1547,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
1538 * handled by a given originator 1547 * handled by a given originator
1539 * @entry: the TT global entry to check 1548 * @entry: the TT global entry to check
1540 * @orig_node: the originator to search in the list 1549 * @orig_node: the originator to search in the list
1550 * @flags: a pointer to store TT flags for the given @entry received
1551 * from @orig_node
1541 * 1552 *
1542 * find out if an orig_node is already in the list of a tt_global_entry. 1553 * find out if an orig_node is already in the list of a tt_global_entry.
1543 * 1554 *
@@ -1545,7 +1556,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
1545 */ 1556 */
1546static bool 1557static bool
1547batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, 1558batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
1548 const struct batadv_orig_node *orig_node) 1559 const struct batadv_orig_node *orig_node,
1560 u8 *flags)
1549{ 1561{
1550 struct batadv_tt_orig_list_entry *orig_entry; 1562 struct batadv_tt_orig_list_entry *orig_entry;
1551 bool found = false; 1563 bool found = false;
@@ -1553,6 +1565,10 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
1553 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node); 1565 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
1554 if (orig_entry) { 1566 if (orig_entry) {
1555 found = true; 1567 found = true;
1568
1569 if (flags)
1570 *flags = orig_entry->flags;
1571
1556 batadv_tt_orig_list_entry_put(orig_entry); 1572 batadv_tt_orig_list_entry_put(orig_entry);
1557 } 1573 }
1558 1574
@@ -1731,7 +1747,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1731 if (!(common->flags & BATADV_TT_CLIENT_TEMP)) 1747 if (!(common->flags & BATADV_TT_CLIENT_TEMP))
1732 goto out; 1748 goto out;
1733 if (batadv_tt_global_entry_has_orig(tt_global_entry, 1749 if (batadv_tt_global_entry_has_orig(tt_global_entry,
1734 orig_node)) 1750 orig_node, NULL))
1735 goto out_remove; 1751 goto out_remove;
1736 batadv_tt_global_del_orig_list(tt_global_entry); 1752 batadv_tt_global_del_orig_list(tt_global_entry);
1737 goto add_orig_entry; 1753 goto add_orig_entry;
@@ -2880,23 +2896,46 @@ unlock:
2880} 2896}
2881 2897
2882/** 2898/**
2883 * batadv_tt_local_valid() - verify that given tt entry is a valid one 2899 * batadv_tt_local_valid() - verify local tt entry and get flags
2884 * @entry_ptr: to be checked local tt entry 2900 * @entry_ptr: to be checked local tt entry
2885 * @data_ptr: not used but definition required to satisfy the callback prototype 2901 * @data_ptr: not used but definition required to satisfy the callback prototype
2902 * @flags: a pointer to store TT flags for this client to
2903 *
2904 * Checks the validity of the given local TT entry. If it is, then the provided
2905 * flags pointer is updated.
2886 * 2906 *
2887 * Return: true if the entry is a valid, false otherwise. 2907 * Return: true if the entry is a valid, false otherwise.
2888 */ 2908 */
2889static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr) 2909static bool batadv_tt_local_valid(const void *entry_ptr,
2910 const void *data_ptr,
2911 u8 *flags)
2890{ 2912{
2891 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; 2913 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
2892 2914
2893 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW) 2915 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
2894 return false; 2916 return false;
2917
2918 if (flags)
2919 *flags = tt_common_entry->flags;
2920
2895 return true; 2921 return true;
2896} 2922}
2897 2923
2924/**
2925 * batadv_tt_global_valid() - verify global tt entry and get flags
2926 * @entry_ptr: to be checked global tt entry
2927 * @data_ptr: an orig_node object (may be NULL)
2928 * @flags: a pointer to store TT flags for this client to
2929 *
2930 * Checks the validity of the given global TT entry. If it is, then the provided
2931 * flags pointer is updated either with the common (summed) TT flags if data_ptr
2932 * is NULL or the specific, per originator TT flags otherwise.
2933 *
2934 * Return: true if the entry is a valid, false otherwise.
2935 */
2898static bool batadv_tt_global_valid(const void *entry_ptr, 2936static bool batadv_tt_global_valid(const void *entry_ptr,
2899 const void *data_ptr) 2937 const void *data_ptr,
2938 u8 *flags)
2900{ 2939{
2901 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; 2940 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
2902 const struct batadv_tt_global_entry *tt_global_entry; 2941 const struct batadv_tt_global_entry *tt_global_entry;
@@ -2910,7 +2949,8 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
2910 struct batadv_tt_global_entry, 2949 struct batadv_tt_global_entry,
2911 common); 2950 common);
2912 2951
2913 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node); 2952 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
2953 flags);
2914} 2954}
2915 2955
2916/** 2956/**
@@ -2920,25 +2960,34 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
2920 * @hash: hash table containing the tt entries 2960 * @hash: hash table containing the tt entries
2921 * @tt_len: expected tvlv tt data buffer length in number of bytes 2961 * @tt_len: expected tvlv tt data buffer length in number of bytes
2922 * @tvlv_buff: pointer to the buffer to fill with the TT data 2962 * @tvlv_buff: pointer to the buffer to fill with the TT data
2923 * @valid_cb: function to filter tt change entries 2963 * @valid_cb: function to filter tt change entries and to return TT flags
2924 * @cb_data: data passed to the filter function as argument 2964 * @cb_data: data passed to the filter function as argument
2965 *
2966 * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
2967 * is not provided then this becomes a no-op.
2925 */ 2968 */
2926static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, 2969static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2927 struct batadv_hashtable *hash, 2970 struct batadv_hashtable *hash,
2928 void *tvlv_buff, u16 tt_len, 2971 void *tvlv_buff, u16 tt_len,
2929 bool (*valid_cb)(const void *, 2972 bool (*valid_cb)(const void *,
2930 const void *), 2973 const void *,
2974 u8 *flags),
2931 void *cb_data) 2975 void *cb_data)
2932{ 2976{
2933 struct batadv_tt_common_entry *tt_common_entry; 2977 struct batadv_tt_common_entry *tt_common_entry;
2934 struct batadv_tvlv_tt_change *tt_change; 2978 struct batadv_tvlv_tt_change *tt_change;
2935 struct hlist_head *head; 2979 struct hlist_head *head;
2936 u16 tt_tot, tt_num_entries = 0; 2980 u16 tt_tot, tt_num_entries = 0;
2981 u8 flags;
2982 bool ret;
2937 u32 i; 2983 u32 i;
2938 2984
2939 tt_tot = batadv_tt_entries(tt_len); 2985 tt_tot = batadv_tt_entries(tt_len);
2940 tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff; 2986 tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
2941 2987
2988 if (!valid_cb)
2989 return;
2990
2942 rcu_read_lock(); 2991 rcu_read_lock();
2943 for (i = 0; i < hash->size; i++) { 2992 for (i = 0; i < hash->size; i++) {
2944 head = &hash->table[i]; 2993 head = &hash->table[i];
@@ -2948,11 +2997,12 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2948 if (tt_tot == tt_num_entries) 2997 if (tt_tot == tt_num_entries)
2949 break; 2998 break;
2950 2999
2951 if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data))) 3000 ret = valid_cb(tt_common_entry, cb_data, &flags);
3001 if (!ret)
2952 continue; 3002 continue;
2953 3003
2954 ether_addr_copy(tt_change->addr, tt_common_entry->addr); 3004 ether_addr_copy(tt_change->addr, tt_common_entry->addr);
2955 tt_change->flags = tt_common_entry->flags; 3005 tt_change->flags = flags;
2956 tt_change->vid = htons(tt_common_entry->vid); 3006 tt_change->vid = htons(tt_common_entry->vid);
2957 memset(tt_change->reserved, 0, 3007 memset(tt_change->reserved, 0,
2958 sizeof(tt_change->reserved)); 3008 sizeof(tt_change->reserved));
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 47ba98db145d..46c1fe7637ea 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -161,8 +161,8 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
161 /* Make sure the match only receives stp frames */ 161 /* Make sure the match only receives stp frames */
162 if (!par->nft_compat && 162 if (!par->nft_compat &&
163 (!ether_addr_equal(e->destmac, eth_stp_addr) || 163 (!ether_addr_equal(e->destmac, eth_stp_addr) ||
164 !is_broadcast_ether_addr(e->destmsk) || 164 !(e->bitmask & EBT_DESTMAC) ||
165 !(e->bitmask & EBT_DESTMAC))) 165 !is_broadcast_ether_addr(e->destmsk)))
166 return -EINVAL; 166 return -EINVAL;
167 167
168 return 0; 168 return 0;
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 28a4c3490359..6ba639f6c51d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1954,7 +1954,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1954 int off, pad = 0; 1954 int off, pad = 0;
1955 unsigned int size_kern, match_size = mwt->match_size; 1955 unsigned int size_kern, match_size = mwt->match_size;
1956 1956
1957 strlcpy(name, mwt->u.name, sizeof(name)); 1957 if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
1958 return -EINVAL;
1958 1959
1959 if (state->buf_kern_start) 1960 if (state->buf_kern_start)
1960 dst = state->buf_kern_start + state->buf_kern_offset; 1961 dst = state->buf_kern_start + state->buf_kern_offset;
diff --git a/net/core/dev.c b/net/core/dev.c
index af0558b00c6c..2af787e8b130 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2124,7 +2124,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
2124 int i, j; 2124 int i, j;
2125 2125
2126 for (i = count, j = offset; i--; j++) { 2126 for (i = count, j = offset; i--; j++) {
2127 if (!remove_xps_queue(dev_maps, cpu, j)) 2127 if (!remove_xps_queue(dev_maps, tci, j))
2128 break; 2128 break;
2129 } 2129 }
2130 2130
diff --git a/net/core/filter.c b/net/core/filter.c
index e77c30ca491d..201ff36b17a8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -481,11 +481,18 @@ do_pass:
481 481
482#define BPF_EMIT_JMP \ 482#define BPF_EMIT_JMP \
483 do { \ 483 do { \
484 const s32 off_min = S16_MIN, off_max = S16_MAX; \
485 s32 off; \
486 \
484 if (target >= len || target < 0) \ 487 if (target >= len || target < 0) \
485 goto err; \ 488 goto err; \
486 insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ 489 off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
487 /* Adjust pc relative offset for 2nd or 3rd insn. */ \ 490 /* Adjust pc relative offset for 2nd or 3rd insn. */ \
488 insn->off -= insn - tmp_insns; \ 491 off -= insn - tmp_insns; \
492 /* Reject anything not fitting into insn->off. */ \
493 if (off < off_min || off > off_max) \
494 goto err; \
495 insn->off = off; \
489 } while (0) 496 } while (0)
490 497
491 case BPF_JMP | BPF_JA: 498 case BPF_JMP | BPF_JA:
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c476f0794132..bb7e80f4ced3 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1214,9 +1214,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
1214 cpumask_var_t mask; 1214 cpumask_var_t mask;
1215 unsigned long index; 1215 unsigned long index;
1216 1216
1217 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1218 return -ENOMEM;
1219
1220 index = get_netdev_queue_index(queue); 1217 index = get_netdev_queue_index(queue);
1221 1218
1222 if (dev->num_tc) { 1219 if (dev->num_tc) {
@@ -1226,6 +1223,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
1226 return -EINVAL; 1223 return -EINVAL;
1227 } 1224 }
1228 1225
1226 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1227 return -ENOMEM;
1228
1229 rcu_read_lock(); 1229 rcu_read_lock();
1230 dev_maps = rcu_dereference(dev->xps_maps); 1230 dev_maps = rcu_dereference(dev->xps_maps);
1231 if (dev_maps) { 1231 if (dev_maps) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 6444525f610c..3b6d02854e57 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1606,7 +1606,7 @@ static void __sk_free(struct sock *sk)
1606 if (likely(sk->sk_net_refcnt)) 1606 if (likely(sk->sk_net_refcnt))
1607 sock_inuse_add(sock_net(sk), -1); 1607 sock_inuse_add(sock_net(sk), -1);
1608 1608
1609 if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) 1609 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1610 sock_diag_broadcast_destroy(sk); 1610 sock_diag_broadcast_destroy(sk);
1611 else 1611 else
1612 sk_destruct(sk); 1612 sk_destruct(sk);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 84cd4e3fd01b..0d56e36a6db7 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -283,9 +283,7 @@ int dccp_disconnect(struct sock *sk, int flags)
283 283
284 dccp_clear_xmit_timers(sk); 284 dccp_clear_xmit_timers(sk);
285 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); 285 ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
286 ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
287 dp->dccps_hc_rx_ccid = NULL; 286 dp->dccps_hc_rx_ccid = NULL;
288 dp->dccps_hc_tx_ccid = NULL;
289 287
290 __skb_queue_purge(&sk->sk_receive_queue); 288 __skb_queue_purge(&sk->sk_receive_queue);
291 __skb_queue_purge(&sk->sk_write_queue); 289 __skb_queue_purge(&sk->sk_write_queue);
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index adf50fbc4c13..47725250b4ca 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -258,11 +258,13 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
258static int dsa_port_setup(struct dsa_port *dp) 258static int dsa_port_setup(struct dsa_port *dp)
259{ 259{
260 struct dsa_switch *ds = dp->ds; 260 struct dsa_switch *ds = dp->ds;
261 int err; 261 int err = 0;
262 262
263 memset(&dp->devlink_port, 0, sizeof(dp->devlink_port)); 263 memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
264 264
265 err = devlink_port_register(ds->devlink, &dp->devlink_port, dp->index); 265 if (dp->type != DSA_PORT_TYPE_UNUSED)
266 err = devlink_port_register(ds->devlink, &dp->devlink_port,
267 dp->index);
266 if (err) 268 if (err)
267 return err; 269 return err;
268 270
@@ -293,7 +295,8 @@ static int dsa_port_setup(struct dsa_port *dp)
293 295
294static void dsa_port_teardown(struct dsa_port *dp) 296static void dsa_port_teardown(struct dsa_port *dp)
295{ 297{
296 devlink_port_unregister(&dp->devlink_port); 298 if (dp->type != DSA_PORT_TYPE_UNUSED)
299 devlink_port_unregister(&dp->devlink_port);
297 300
298 switch (dp->type) { 301 switch (dp->type) {
299 case DSA_PORT_TYPE_UNUSED: 302 case DSA_PORT_TYPE_UNUSED:
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f05afaf3235c..e66172aaf241 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -326,10 +326,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
326 u8 tos, int oif, struct net_device *dev, 326 u8 tos, int oif, struct net_device *dev,
327 int rpf, struct in_device *idev, u32 *itag) 327 int rpf, struct in_device *idev, u32 *itag)
328{ 328{
329 struct net *net = dev_net(dev);
330 struct flow_keys flkeys;
329 int ret, no_addr; 331 int ret, no_addr;
330 struct fib_result res; 332 struct fib_result res;
331 struct flowi4 fl4; 333 struct flowi4 fl4;
332 struct net *net = dev_net(dev);
333 bool dev_match; 334 bool dev_match;
334 335
335 fl4.flowi4_oif = 0; 336 fl4.flowi4_oif = 0;
@@ -347,6 +348,11 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
347 no_addr = idev->ifa_list == NULL; 348 no_addr = idev->ifa_list == NULL;
348 349
349 fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0; 350 fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
351 if (!fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys)) {
352 fl4.flowi4_proto = 0;
353 fl4.fl4_sport = 0;
354 fl4.fl4_dport = 0;
355 }
350 356
351 trace_fib_validate_source(dev, &fl4); 357 trace_fib_validate_source(dev, &fl4);
352 358
@@ -643,6 +649,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
643 [RTA_ENCAP] = { .type = NLA_NESTED }, 649 [RTA_ENCAP] = { .type = NLA_NESTED },
644 [RTA_UID] = { .type = NLA_U32 }, 650 [RTA_UID] = { .type = NLA_U32 },
645 [RTA_MARK] = { .type = NLA_U32 }, 651 [RTA_MARK] = { .type = NLA_U32 },
652 [RTA_TABLE] = { .type = NLA_U32 },
646}; 653};
647 654
648static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, 655static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9c169bb2444d..f200b304f76c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -722,10 +722,12 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
722 erspan_build_header(skb, ntohl(tunnel->parms.o_key), 722 erspan_build_header(skb, ntohl(tunnel->parms.o_key),
723 tunnel->index, 723 tunnel->index,
724 truncate, true); 724 truncate, true);
725 else 725 else if (tunnel->erspan_ver == 2)
726 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), 726 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
727 tunnel->dir, tunnel->hwid, 727 tunnel->dir, tunnel->hwid,
728 truncate, true); 728 truncate, true);
729 else
730 goto free_skb;
729 731
730 tunnel->parms.o_flags &= ~TUNNEL_KEY; 732 tunnel->parms.o_flags &= ~TUNNEL_KEY;
731 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); 733 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 83c73bab2c3d..d54abc097800 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1045,7 +1045,8 @@ alloc_new_skb:
1045 if (copy > length) 1045 if (copy > length)
1046 copy = length; 1046 copy = length;
1047 1047
1048 if (!(rt->dst.dev->features&NETIF_F_SG)) { 1048 if (!(rt->dst.dev->features&NETIF_F_SG) &&
1049 skb_tailroom(skb) >= copy) {
1049 unsigned int off; 1050 unsigned int off;
1050 1051
1051 off = skb->len; 1052 off = skb->len;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5ad2d8ed3a3f..57bbb060faaf 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -505,8 +505,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
505 int err; 505 int err;
506 int copied; 506 int copied;
507 507
508 WARN_ON_ONCE(sk->sk_family == AF_INET6);
509
510 err = -EAGAIN; 508 err = -EAGAIN;
511 skb = sock_dequeue_err_skb(sk); 509 skb = sock_dequeue_err_skb(sk);
512 if (!skb) 510 if (!skb)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 6b0e362cc99b..38d906baf1df 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -328,7 +328,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
328 328
329 if (tdev) { 329 if (tdev) {
330 hlen = tdev->hard_header_len + tdev->needed_headroom; 330 hlen = tdev->hard_header_len + tdev->needed_headroom;
331 mtu = tdev->mtu; 331 mtu = min(tdev->mtu, IP_MAX_MTU);
332 } 332 }
333 333
334 dev->needed_headroom = t_hlen + hlen; 334 dev->needed_headroom = t_hlen + hlen;
@@ -362,7 +362,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
362 nt = netdev_priv(dev); 362 nt = netdev_priv(dev);
363 t_hlen = nt->hlen + sizeof(struct iphdr); 363 t_hlen = nt->hlen + sizeof(struct iphdr);
364 dev->min_mtu = ETH_MIN_MTU; 364 dev->min_mtu = ETH_MIN_MTU;
365 dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; 365 dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
366 ip_tunnel_add(itn, nt); 366 ip_tunnel_add(itn, nt);
367 return nt; 367 return nt;
368 368
@@ -930,7 +930,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
930{ 930{
931 struct ip_tunnel *tunnel = netdev_priv(dev); 931 struct ip_tunnel *tunnel = netdev_priv(dev);
932 int t_hlen = tunnel->hlen + sizeof(struct iphdr); 932 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
933 int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; 933 int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
934 934
935 if (new_mtu < ETH_MIN_MTU) 935 if (new_mtu < ETH_MIN_MTU)
936 return -EINVAL; 936 return -EINVAL;
@@ -1107,7 +1107,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
1107 1107
1108 mtu = ip_tunnel_bind_dev(dev); 1108 mtu = ip_tunnel_bind_dev(dev);
1109 if (tb[IFLA_MTU]) { 1109 if (tb[IFLA_MTU]) {
1110 unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen; 1110 unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
1111 1111
1112 mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, 1112 mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
1113 (unsigned int)(max - sizeof(struct iphdr))); 1113 (unsigned int)(max - sizeof(struct iphdr)));
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 4fe97723b53f..30221701614c 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -43,7 +43,10 @@ mr_table_alloc(struct net *net, u32 id,
43 write_pnet(&mrt->net, net); 43 write_pnet(&mrt->net, net);
44 44
45 mrt->ops = *ops; 45 mrt->ops = *ops;
46 rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params); 46 if (rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params)) {
47 kfree(mrt);
48 return NULL;
49 }
47 INIT_LIST_HEAD(&mrt->mfc_cache_list); 50 INIT_LIST_HEAD(&mrt->mfc_cache_list);
48 INIT_LIST_HEAD(&mrt->mfc_unres_queue); 51 INIT_LIST_HEAD(&mrt->mfc_unres_queue);
49 52
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 44b308d93ec2..e85f35b89c49 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -34,6 +34,7 @@
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 35MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36MODULE_DESCRIPTION("IPv4 packet filter"); 36MODULE_DESCRIPTION("IPv4 packet filter");
37MODULE_ALIAS("ipt_icmp");
37 38
38void *ipt_alloc_initial_table(const struct xt_table *info) 39void *ipt_alloc_initial_table(const struct xt_table *info)
39{ 40{
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index fd01f13c896a..12843c9ef142 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -89,10 +89,10 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
89 return true ^ invert; 89 return true ^ invert;
90 } 90 }
91 91
92 memset(&flow, 0, sizeof(flow));
92 flow.flowi4_iif = LOOPBACK_IFINDEX; 93 flow.flowi4_iif = LOOPBACK_IFINDEX;
93 flow.daddr = iph->saddr; 94 flow.daddr = iph->saddr;
94 flow.saddr = rpfilter_get_saddr(iph->daddr); 95 flow.saddr = rpfilter_get_saddr(iph->daddr);
95 flow.flowi4_oif = 0;
96 flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 96 flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
97 flow.flowi4_tos = RT_TOS(iph->tos); 97 flow.flowi4_tos = RT_TOS(iph->tos);
98 flow.flowi4_scope = RT_SCOPE_UNIVERSE; 98 flow.flowi4_scope = RT_SCOPE_UNIVERSE;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 29268efad247..2cfa1b518f8d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1961,8 +1961,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1961 fl4.saddr = saddr; 1961 fl4.saddr = saddr;
1962 fl4.flowi4_uid = sock_net_uid(net, NULL); 1962 fl4.flowi4_uid = sock_net_uid(net, NULL);
1963 1963
1964 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) 1964 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
1965 flkeys = &_flkeys; 1965 flkeys = &_flkeys;
1966 } else {
1967 fl4.flowi4_proto = 0;
1968 fl4.fl4_sport = 0;
1969 fl4.fl4_dport = 0;
1970 }
1966 1971
1967 err = fib_lookup(net, &fl4, res, 0); 1972 err = fib_lookup(net, &fl4, res, 0);
1968 if (err != 0) { 1973 if (err != 0) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383cac0ff0ec..d07e34f8e309 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2833,8 +2833,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2833 return -EBUSY; 2833 return -EBUSY;
2834 2834
2835 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2835 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2836 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2836 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
2837 BUG(); 2837 WARN_ON_ONCE(1);
2838 return -EINVAL;
2839 }
2838 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2840 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2839 return -ENOMEM; 2841 return -ENOMEM;
2840 } 2842 }
@@ -3342,6 +3344,7 @@ static void tcp_connect_init(struct sock *sk)
3342 sock_reset_flag(sk, SOCK_DONE); 3344 sock_reset_flag(sk, SOCK_DONE);
3343 tp->snd_wnd = 0; 3345 tp->snd_wnd = 0;
3344 tcp_init_wl(tp, 0); 3346 tcp_init_wl(tp, 0);
3347 tcp_write_queue_purge(sk);
3345 tp->snd_una = tp->write_seq; 3348 tp->snd_una = tp->write_seq;
3346 tp->snd_sml = tp->write_seq; 3349 tp->snd_sml = tp->write_seq;
3347 tp->snd_up = tp->write_seq; 3350 tp->snd_up = tp->write_seq;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 69727bc168cb..458de353f5d9 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -71,6 +71,7 @@ struct ip6gre_net {
71 struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE]; 71 struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
72 72
73 struct ip6_tnl __rcu *collect_md_tun; 73 struct ip6_tnl __rcu *collect_md_tun;
74 struct ip6_tnl __rcu *collect_md_tun_erspan;
74 struct net_device *fb_tunnel_dev; 75 struct net_device *fb_tunnel_dev;
75}; 76};
76 77
@@ -81,6 +82,7 @@ static int ip6gre_tunnel_init(struct net_device *dev);
81static void ip6gre_tunnel_setup(struct net_device *dev); 82static void ip6gre_tunnel_setup(struct net_device *dev);
82static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t); 83static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
83static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu); 84static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
85static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
84 86
85/* Tunnel hash table */ 87/* Tunnel hash table */
86 88
@@ -232,7 +234,12 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
232 if (cand) 234 if (cand)
233 return cand; 235 return cand;
234 236
235 t = rcu_dereference(ign->collect_md_tun); 237 if (gre_proto == htons(ETH_P_ERSPAN) ||
238 gre_proto == htons(ETH_P_ERSPAN2))
239 t = rcu_dereference(ign->collect_md_tun_erspan);
240 else
241 t = rcu_dereference(ign->collect_md_tun);
242
236 if (t && t->dev->flags & IFF_UP) 243 if (t && t->dev->flags & IFF_UP)
237 return t; 244 return t;
238 245
@@ -261,6 +268,31 @@ static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
261 return &ign->tunnels[prio][h]; 268 return &ign->tunnels[prio][h];
262} 269}
263 270
271static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
272{
273 if (t->parms.collect_md)
274 rcu_assign_pointer(ign->collect_md_tun, t);
275}
276
277static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
278{
279 if (t->parms.collect_md)
280 rcu_assign_pointer(ign->collect_md_tun_erspan, t);
281}
282
283static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
284{
285 if (t->parms.collect_md)
286 rcu_assign_pointer(ign->collect_md_tun, NULL);
287}
288
289static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
290 struct ip6_tnl *t)
291{
292 if (t->parms.collect_md)
293 rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
294}
295
264static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign, 296static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
265 const struct ip6_tnl *t) 297 const struct ip6_tnl *t)
266{ 298{
@@ -271,9 +303,6 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
271{ 303{
272 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t); 304 struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
273 305
274 if (t->parms.collect_md)
275 rcu_assign_pointer(ign->collect_md_tun, t);
276
277 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 306 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
278 rcu_assign_pointer(*tp, t); 307 rcu_assign_pointer(*tp, t);
279} 308}
@@ -283,9 +312,6 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
283 struct ip6_tnl __rcu **tp; 312 struct ip6_tnl __rcu **tp;
284 struct ip6_tnl *iter; 313 struct ip6_tnl *iter;
285 314
286 if (t->parms.collect_md)
287 rcu_assign_pointer(ign->collect_md_tun, NULL);
288
289 for (tp = ip6gre_bucket(ign, t); 315 for (tp = ip6gre_bucket(ign, t);
290 (iter = rtnl_dereference(*tp)) != NULL; 316 (iter = rtnl_dereference(*tp)) != NULL;
291 tp = &iter->next) { 317 tp = &iter->next) {
@@ -374,11 +400,23 @@ failed_free:
374 return NULL; 400 return NULL;
375} 401}
376 402
403static void ip6erspan_tunnel_uninit(struct net_device *dev)
404{
405 struct ip6_tnl *t = netdev_priv(dev);
406 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
407
408 ip6erspan_tunnel_unlink_md(ign, t);
409 ip6gre_tunnel_unlink(ign, t);
410 dst_cache_reset(&t->dst_cache);
411 dev_put(dev);
412}
413
377static void ip6gre_tunnel_uninit(struct net_device *dev) 414static void ip6gre_tunnel_uninit(struct net_device *dev)
378{ 415{
379 struct ip6_tnl *t = netdev_priv(dev); 416 struct ip6_tnl *t = netdev_priv(dev);
380 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); 417 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
381 418
419 ip6gre_tunnel_unlink_md(ign, t);
382 ip6gre_tunnel_unlink(ign, t); 420 ip6gre_tunnel_unlink(ign, t);
383 dst_cache_reset(&t->dst_cache); 421 dst_cache_reset(&t->dst_cache);
384 dev_put(dev); 422 dev_put(dev);
@@ -698,6 +736,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
698 else 736 else
699 fl6->daddr = tunnel->parms.raddr; 737 fl6->daddr = tunnel->parms.raddr;
700 738
739 if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
740 return -ENOMEM;
741
701 /* Push GRE header. */ 742 /* Push GRE header. */
702 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto; 743 protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
703 744
@@ -908,7 +949,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
908 truncate = true; 949 truncate = true;
909 } 950 }
910 951
911 if (skb_cow_head(skb, dev->needed_headroom)) 952 if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
912 goto tx_err; 953 goto tx_err;
913 954
914 t->parms.o_flags &= ~TUNNEL_KEY; 955 t->parms.o_flags &= ~TUNNEL_KEY;
@@ -979,11 +1020,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
979 erspan_build_header(skb, ntohl(t->parms.o_key), 1020 erspan_build_header(skb, ntohl(t->parms.o_key),
980 t->parms.index, 1021 t->parms.index,
981 truncate, false); 1022 truncate, false);
982 else 1023 else if (t->parms.erspan_ver == 2)
983 erspan_build_header_v2(skb, ntohl(t->parms.o_key), 1024 erspan_build_header_v2(skb, ntohl(t->parms.o_key),
984 t->parms.dir, 1025 t->parms.dir,
985 t->parms.hwid, 1026 t->parms.hwid,
986 truncate, false); 1027 truncate, false);
1028 else
1029 goto tx_err;
1030
987 fl6.daddr = t->parms.raddr; 1031 fl6.daddr = t->parms.raddr;
988 } 1032 }
989 1033
@@ -1019,12 +1063,11 @@ tx_err:
1019 return NETDEV_TX_OK; 1063 return NETDEV_TX_OK;
1020} 1064}
1021 1065
1022static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) 1066static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
1023{ 1067{
1024 struct net_device *dev = t->dev; 1068 struct net_device *dev = t->dev;
1025 struct __ip6_tnl_parm *p = &t->parms; 1069 struct __ip6_tnl_parm *p = &t->parms;
1026 struct flowi6 *fl6 = &t->fl.u.ip6; 1070 struct flowi6 *fl6 = &t->fl.u.ip6;
1027 int t_hlen;
1028 1071
1029 if (dev->type != ARPHRD_ETHER) { 1072 if (dev->type != ARPHRD_ETHER) {
1030 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr)); 1073 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1051,12 +1094,13 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1051 dev->flags |= IFF_POINTOPOINT; 1094 dev->flags |= IFF_POINTOPOINT;
1052 else 1095 else
1053 dev->flags &= ~IFF_POINTOPOINT; 1096 dev->flags &= ~IFF_POINTOPOINT;
1097}
1054 1098
1055 t->tun_hlen = gre_calc_hlen(t->parms.o_flags); 1099static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
1056 1100 int t_hlen)
1057 t->hlen = t->encap_hlen + t->tun_hlen; 1101{
1058 1102 const struct __ip6_tnl_parm *p = &t->parms;
1059 t_hlen = t->hlen + sizeof(struct ipv6hdr); 1103 struct net_device *dev = t->dev;
1060 1104
1061 if (p->flags & IP6_TNL_F_CAP_XMIT) { 1105 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1062 int strict = (ipv6_addr_type(&p->raddr) & 1106 int strict = (ipv6_addr_type(&p->raddr) &
@@ -1088,8 +1132,26 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1088 } 1132 }
1089} 1133}
1090 1134
1091static int ip6gre_tnl_change(struct ip6_tnl *t, 1135static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
1092 const struct __ip6_tnl_parm *p, int set_mtu) 1136{
1137 int t_hlen;
1138
1139 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
1140 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1141
1142 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1143 tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1144 return t_hlen;
1145}
1146
1147static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1148{
1149 ip6gre_tnl_link_config_common(t);
1150 ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
1151}
1152
1153static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
1154 const struct __ip6_tnl_parm *p)
1093{ 1155{
1094 t->parms.laddr = p->laddr; 1156 t->parms.laddr = p->laddr;
1095 t->parms.raddr = p->raddr; 1157 t->parms.raddr = p->raddr;
@@ -1105,6 +1167,12 @@ static int ip6gre_tnl_change(struct ip6_tnl *t,
1105 t->parms.o_flags = p->o_flags; 1167 t->parms.o_flags = p->o_flags;
1106 t->parms.fwmark = p->fwmark; 1168 t->parms.fwmark = p->fwmark;
1107 dst_cache_reset(&t->dst_cache); 1169 dst_cache_reset(&t->dst_cache);
1170}
1171
1172static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
1173 int set_mtu)
1174{
1175 ip6gre_tnl_copy_tnl_parm(t, p);
1108 ip6gre_tnl_link_config(t, set_mtu); 1176 ip6gre_tnl_link_config(t, set_mtu);
1109 return 0; 1177 return 0;
1110} 1178}
@@ -1381,11 +1449,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
1381 return ret; 1449 return ret;
1382 } 1450 }
1383 1451
1384 tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags); 1452 t_hlen = ip6gre_calc_hlen(tunnel);
1385 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1386 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1387
1388 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1389 dev->mtu = ETH_DATA_LEN - t_hlen; 1453 dev->mtu = ETH_DATA_LEN - t_hlen;
1390 if (dev->type == ARPHRD_ETHER) 1454 if (dev->type == ARPHRD_ETHER)
1391 dev->mtu -= ETH_HLEN; 1455 dev->mtu -= ETH_HLEN;
@@ -1728,6 +1792,19 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
1728 .ndo_get_iflink = ip6_tnl_get_iflink, 1792 .ndo_get_iflink = ip6_tnl_get_iflink,
1729}; 1793};
1730 1794
1795static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
1796{
1797 int t_hlen;
1798
1799 tunnel->tun_hlen = 8;
1800 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1801 erspan_hdr_len(tunnel->parms.erspan_ver);
1802
1803 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1804 tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1805 return t_hlen;
1806}
1807
1731static int ip6erspan_tap_init(struct net_device *dev) 1808static int ip6erspan_tap_init(struct net_device *dev)
1732{ 1809{
1733 struct ip6_tnl *tunnel; 1810 struct ip6_tnl *tunnel;
@@ -1751,12 +1828,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
1751 return ret; 1828 return ret;
1752 } 1829 }
1753 1830
1754 tunnel->tun_hlen = 8; 1831 t_hlen = ip6erspan_calc_hlen(tunnel);
1755 tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1756 erspan_hdr_len(tunnel->parms.erspan_ver);
1757 t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1758
1759 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1760 dev->mtu = ETH_DATA_LEN - t_hlen; 1832 dev->mtu = ETH_DATA_LEN - t_hlen;
1761 if (dev->type == ARPHRD_ETHER) 1833 if (dev->type == ARPHRD_ETHER)
1762 dev->mtu -= ETH_HLEN; 1834 dev->mtu -= ETH_HLEN;
@@ -1764,14 +1836,14 @@ static int ip6erspan_tap_init(struct net_device *dev)
1764 dev->mtu -= 8; 1836 dev->mtu -= 8;
1765 1837
1766 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1838 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1767 ip6gre_tnl_link_config(tunnel, 1); 1839 ip6erspan_tnl_link_config(tunnel, 1);
1768 1840
1769 return 0; 1841 return 0;
1770} 1842}
1771 1843
1772static const struct net_device_ops ip6erspan_netdev_ops = { 1844static const struct net_device_ops ip6erspan_netdev_ops = {
1773 .ndo_init = ip6erspan_tap_init, 1845 .ndo_init = ip6erspan_tap_init,
1774 .ndo_uninit = ip6gre_tunnel_uninit, 1846 .ndo_uninit = ip6erspan_tunnel_uninit,
1775 .ndo_start_xmit = ip6erspan_tunnel_xmit, 1847 .ndo_start_xmit = ip6erspan_tunnel_xmit,
1776 .ndo_set_mac_address = eth_mac_addr, 1848 .ndo_set_mac_address = eth_mac_addr,
1777 .ndo_validate_addr = eth_validate_addr, 1849 .ndo_validate_addr = eth_validate_addr,
@@ -1835,13 +1907,11 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
1835 return ret; 1907 return ret;
1836} 1908}
1837 1909
1838static int ip6gre_newlink(struct net *src_net, struct net_device *dev, 1910static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
1839 struct nlattr *tb[], struct nlattr *data[], 1911 struct nlattr *tb[], struct nlattr *data[],
1840 struct netlink_ext_ack *extack) 1912 struct netlink_ext_ack *extack)
1841{ 1913{
1842 struct ip6_tnl *nt; 1914 struct ip6_tnl *nt;
1843 struct net *net = dev_net(dev);
1844 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1845 struct ip_tunnel_encap ipencap; 1915 struct ip_tunnel_encap ipencap;
1846 int err; 1916 int err;
1847 1917
@@ -1854,16 +1924,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1854 return err; 1924 return err;
1855 } 1925 }
1856 1926
1857 ip6gre_netlink_parms(data, &nt->parms);
1858
1859 if (nt->parms.collect_md) {
1860 if (rtnl_dereference(ign->collect_md_tun))
1861 return -EEXIST;
1862 } else {
1863 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
1864 return -EEXIST;
1865 }
1866
1867 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) 1927 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1868 eth_hw_addr_random(dev); 1928 eth_hw_addr_random(dev);
1869 1929
@@ -1874,51 +1934,94 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1874 if (err) 1934 if (err)
1875 goto out; 1935 goto out;
1876 1936
1877 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1878
1879 if (tb[IFLA_MTU]) 1937 if (tb[IFLA_MTU])
1880 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); 1938 ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1881 1939
1882 dev_hold(dev); 1940 dev_hold(dev);
1883 ip6gre_tunnel_link(ign, nt);
1884 1941
1885out: 1942out:
1886 return err; 1943 return err;
1887} 1944}
1888 1945
1889static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], 1946static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1890 struct nlattr *data[], 1947 struct nlattr *tb[], struct nlattr *data[],
1891 struct netlink_ext_ack *extack) 1948 struct netlink_ext_ack *extack)
1949{
1950 struct ip6_tnl *nt = netdev_priv(dev);
1951 struct net *net = dev_net(dev);
1952 struct ip6gre_net *ign;
1953 int err;
1954
1955 ip6gre_netlink_parms(data, &nt->parms);
1956 ign = net_generic(net, ip6gre_net_id);
1957
1958 if (nt->parms.collect_md) {
1959 if (rtnl_dereference(ign->collect_md_tun))
1960 return -EEXIST;
1961 } else {
1962 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
1963 return -EEXIST;
1964 }
1965
1966 err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
1967 if (!err) {
1968 ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1969 ip6gre_tunnel_link_md(ign, nt);
1970 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
1971 }
1972 return err;
1973}
1974
1975static struct ip6_tnl *
1976ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
1977 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
1978 struct netlink_ext_ack *extack)
1892{ 1979{
1893 struct ip6_tnl *t, *nt = netdev_priv(dev); 1980 struct ip6_tnl *t, *nt = netdev_priv(dev);
1894 struct net *net = nt->net; 1981 struct net *net = nt->net;
1895 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); 1982 struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1896 struct __ip6_tnl_parm p;
1897 struct ip_tunnel_encap ipencap; 1983 struct ip_tunnel_encap ipencap;
1898 1984
1899 if (dev == ign->fb_tunnel_dev) 1985 if (dev == ign->fb_tunnel_dev)
1900 return -EINVAL; 1986 return ERR_PTR(-EINVAL);
1901 1987
1902 if (ip6gre_netlink_encap_parms(data, &ipencap)) { 1988 if (ip6gre_netlink_encap_parms(data, &ipencap)) {
1903 int err = ip6_tnl_encap_setup(nt, &ipencap); 1989 int err = ip6_tnl_encap_setup(nt, &ipencap);
1904 1990
1905 if (err < 0) 1991 if (err < 0)
1906 return err; 1992 return ERR_PTR(err);
1907 } 1993 }
1908 1994
1909 ip6gre_netlink_parms(data, &p); 1995 ip6gre_netlink_parms(data, p_p);
1910 1996
1911 t = ip6gre_tunnel_locate(net, &p, 0); 1997 t = ip6gre_tunnel_locate(net, p_p, 0);
1912 1998
1913 if (t) { 1999 if (t) {
1914 if (t->dev != dev) 2000 if (t->dev != dev)
1915 return -EEXIST; 2001 return ERR_PTR(-EEXIST);
1916 } else { 2002 } else {
1917 t = nt; 2003 t = nt;
1918 } 2004 }
1919 2005
2006 return t;
2007}
2008
2009static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
2010 struct nlattr *data[],
2011 struct netlink_ext_ack *extack)
2012{
2013 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
2014 struct __ip6_tnl_parm p;
2015 struct ip6_tnl *t;
2016
2017 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2018 if (IS_ERR(t))
2019 return PTR_ERR(t);
2020
2021 ip6gre_tunnel_unlink_md(ign, t);
1920 ip6gre_tunnel_unlink(ign, t); 2022 ip6gre_tunnel_unlink(ign, t);
1921 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); 2023 ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
2024 ip6gre_tunnel_link_md(ign, t);
1922 ip6gre_tunnel_link(ign, t); 2025 ip6gre_tunnel_link(ign, t);
1923 return 0; 2026 return 0;
1924} 2027}
@@ -2068,6 +2171,69 @@ static void ip6erspan_tap_setup(struct net_device *dev)
2068 netif_keep_dst(dev); 2171 netif_keep_dst(dev);
2069} 2172}
2070 2173
2174static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
2175 struct nlattr *tb[], struct nlattr *data[],
2176 struct netlink_ext_ack *extack)
2177{
2178 struct ip6_tnl *nt = netdev_priv(dev);
2179 struct net *net = dev_net(dev);
2180 struct ip6gre_net *ign;
2181 int err;
2182
2183 ip6gre_netlink_parms(data, &nt->parms);
2184 ign = net_generic(net, ip6gre_net_id);
2185
2186 if (nt->parms.collect_md) {
2187 if (rtnl_dereference(ign->collect_md_tun_erspan))
2188 return -EEXIST;
2189 } else {
2190 if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
2191 return -EEXIST;
2192 }
2193
2194 err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
2195 if (!err) {
2196 ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
2197 ip6erspan_tunnel_link_md(ign, nt);
2198 ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
2199 }
2200 return err;
2201}
2202
2203static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
2204{
2205 ip6gre_tnl_link_config_common(t);
2206 ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
2207}
2208
2209static int ip6erspan_tnl_change(struct ip6_tnl *t,
2210 const struct __ip6_tnl_parm *p, int set_mtu)
2211{
2212 ip6gre_tnl_copy_tnl_parm(t, p);
2213 ip6erspan_tnl_link_config(t, set_mtu);
2214 return 0;
2215}
2216
2217static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
2218 struct nlattr *data[],
2219 struct netlink_ext_ack *extack)
2220{
2221 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
2222 struct __ip6_tnl_parm p;
2223 struct ip6_tnl *t;
2224
2225 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2226 if (IS_ERR(t))
2227 return PTR_ERR(t);
2228
2229 ip6gre_tunnel_unlink_md(ign, t);
2230 ip6gre_tunnel_unlink(ign, t);
2231 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
2232 ip6erspan_tunnel_link_md(ign, t);
2233 ip6gre_tunnel_link(ign, t);
2234 return 0;
2235}
2236
2071static struct rtnl_link_ops ip6gre_link_ops __read_mostly = { 2237static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
2072 .kind = "ip6gre", 2238 .kind = "ip6gre",
2073 .maxtype = IFLA_GRE_MAX, 2239 .maxtype = IFLA_GRE_MAX,
@@ -2104,8 +2270,8 @@ static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
2104 .priv_size = sizeof(struct ip6_tnl), 2270 .priv_size = sizeof(struct ip6_tnl),
2105 .setup = ip6erspan_tap_setup, 2271 .setup = ip6erspan_tap_setup,
2106 .validate = ip6erspan_tap_validate, 2272 .validate = ip6erspan_tap_validate,
2107 .newlink = ip6gre_newlink, 2273 .newlink = ip6erspan_newlink,
2108 .changelink = ip6gre_changelink, 2274 .changelink = ip6erspan_changelink,
2109 .get_size = ip6gre_get_size, 2275 .get_size = ip6gre_get_size,
2110 .fill_info = ip6gre_fill_info, 2276 .fill_info = ip6gre_fill_info,
2111 .get_link_net = ip6_tnl_get_link_net, 2277 .get_link_net = ip6_tnl_get_link_net,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2e891d2c30ef..7b6d1689087b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1503,7 +1503,8 @@ alloc_new_skb:
1503 if (copy > length) 1503 if (copy > length)
1504 copy = length; 1504 copy = length;
1505 1505
1506 if (!(rt->dst.dev->features&NETIF_F_SG)) { 1506 if (!(rt->dst.dev->features&NETIF_F_SG) &&
1507 skb_tailroom(skb) >= copy) {
1507 unsigned int off; 1508 unsigned int off;
1508 1509
1509 off = skb->len; 1510 off = skb->len;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index da66aaac51ce..00e138a44cbb 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1692,8 +1692,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1692 if (new_mtu < ETH_MIN_MTU) 1692 if (new_mtu < ETH_MIN_MTU)
1693 return -EINVAL; 1693 return -EINVAL;
1694 } 1694 }
1695 if (new_mtu > 0xFFF8 - dev->hard_header_len) 1695 if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1696 return -EINVAL; 1696 if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1697 return -EINVAL;
1698 } else {
1699 if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1700 return -EINVAL;
1701 }
1697 dev->mtu = new_mtu; 1702 dev->mtu = new_mtu;
1698 return 0; 1703 return 0;
1699} 1704}
@@ -1841,7 +1846,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
1841 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1846 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1842 dev->mtu -= 8; 1847 dev->mtu -= 8;
1843 dev->min_mtu = ETH_MIN_MTU; 1848 dev->min_mtu = ETH_MIN_MTU;
1844 dev->max_mtu = 0xFFF8 - dev->hard_header_len; 1849 dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
1845 1850
1846 return 0; 1851 return 0;
1847 1852
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 65c9e1a58305..97f79dc943d7 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -38,6 +38,7 @@
38MODULE_LICENSE("GPL"); 38MODULE_LICENSE("GPL");
39MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); 39MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40MODULE_DESCRIPTION("IPv6 packet filter"); 40MODULE_DESCRIPTION("IPv6 packet filter");
41MODULE_ALIAS("ip6t_icmp6");
41 42
42void *ip6t_alloc_initial_table(const struct xt_table *info) 43void *ip6t_alloc_initial_table(const struct xt_table *info)
43{ 44{
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 5fe139484919..bf4763fd68c2 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
103 hdrlen = (osrh->hdrlen + 1) << 3; 103 hdrlen = (osrh->hdrlen + 1) << 3;
104 tot_len = hdrlen + sizeof(*hdr); 104 tot_len = hdrlen + sizeof(*hdr);
105 105
106 err = skb_cow_head(skb, tot_len); 106 err = skb_cow_head(skb, tot_len + skb->mac_len);
107 if (unlikely(err)) 107 if (unlikely(err))
108 return err; 108 return err;
109 109
@@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
161 161
162 hdrlen = (osrh->hdrlen + 1) << 3; 162 hdrlen = (osrh->hdrlen + 1) << 3;
163 163
164 err = skb_cow_head(skb, hdrlen); 164 err = skb_cow_head(skb, hdrlen + skb->mac_len);
165 if (unlikely(err)) 165 if (unlikely(err))
166 return err; 166 return err;
167 167
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 2afce37a7177..e9400ffa7875 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1371,7 +1371,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
1371 dev->hard_header_len = LL_MAX_HEADER + t_hlen; 1371 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1372 dev->mtu = ETH_DATA_LEN - t_hlen; 1372 dev->mtu = ETH_DATA_LEN - t_hlen;
1373 dev->min_mtu = IPV6_MIN_MTU; 1373 dev->min_mtu = IPV6_MIN_MTU;
1374 dev->max_mtu = 0xFFF8 - t_hlen; 1374 dev->max_mtu = IP6_MAX_MTU - t_hlen;
1375 dev->flags = IFF_NOARP; 1375 dev->flags = IFF_NOARP;
1376 netif_keep_dst(dev); 1376 netif_keep_dst(dev);
1377 dev->addr_len = 4; 1377 dev->addr_len = 4;
@@ -1583,7 +1583,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
1583 if (tb[IFLA_MTU]) { 1583 if (tb[IFLA_MTU]) {
1584 u32 mtu = nla_get_u32(tb[IFLA_MTU]); 1584 u32 mtu = nla_get_u32(tb[IFLA_MTU]);
1585 1585
1586 if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len) 1586 if (mtu >= IPV6_MIN_MTU &&
1587 mtu <= IP6_MAX_MTU - dev->hard_header_len)
1587 dev->mtu = mtu; 1588 dev->mtu = mtu;
1588 } 1589 }
1589 1590
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 416fe67271a9..86dba282a147 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -126,7 +126,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
126 struct flowi6 *fl6 = &fl->u.ip6; 126 struct flowi6 *fl6 = &fl->u.ip6;
127 int onlyproto = 0; 127 int onlyproto = 0;
128 const struct ipv6hdr *hdr = ipv6_hdr(skb); 128 const struct ipv6hdr *hdr = ipv6_hdr(skb);
129 u16 offset = sizeof(*hdr); 129 u32 offset = sizeof(*hdr);
130 struct ipv6_opt_hdr *exthdr; 130 struct ipv6_opt_hdr *exthdr;
131 const unsigned char *nh = skb_network_header(skb); 131 const unsigned char *nh = skb_network_header(skb);
132 u16 nhoff = IP6CB(skb)->nhoff; 132 u16 nhoff = IP6CB(skb)->nhoff;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index dc76bc346829..d3601d421571 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1671,7 +1671,7 @@ static struct file *kcm_clone(struct socket *osock)
1671 __module_get(newsock->ops->owner); 1671 __module_get(newsock->ops->owner);
1672 1672
1673 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, 1673 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1674 &kcm_proto, true); 1674 &kcm_proto, false);
1675 if (!newsk) { 1675 if (!newsk) {
1676 sock_release(newsock); 1676 sock_release(newsock);
1677 return ERR_PTR(-ENOMEM); 1677 return ERR_PTR(-ENOMEM);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f6c9ca59062..5b5b0f95ffd1 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -401,7 +401,7 @@ u32 mesh_plink_deactivate(struct sta_info *sta)
401 401
402static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, 402static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
403 struct sta_info *sta, 403 struct sta_info *sta,
404 struct ieee802_11_elems *elems, bool insert) 404 struct ieee802_11_elems *elems)
405{ 405{
406 struct ieee80211_local *local = sdata->local; 406 struct ieee80211_local *local = sdata->local;
407 struct ieee80211_supported_band *sband; 407 struct ieee80211_supported_band *sband;
@@ -447,7 +447,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
447 sta->sta.bandwidth = IEEE80211_STA_RX_BW_20; 447 sta->sta.bandwidth = IEEE80211_STA_RX_BW_20;
448 } 448 }
449 449
450 if (insert) 450 if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
451 rate_control_rate_init(sta); 451 rate_control_rate_init(sta);
452 else 452 else
453 rate_control_rate_update(local, sband, sta, changed); 453 rate_control_rate_update(local, sband, sta, changed);
@@ -551,7 +551,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
551 rcu_read_lock(); 551 rcu_read_lock();
552 sta = sta_info_get(sdata, addr); 552 sta = sta_info_get(sdata, addr);
553 if (sta) { 553 if (sta) {
554 mesh_sta_info_init(sdata, sta, elems, false); 554 mesh_sta_info_init(sdata, sta, elems);
555 } else { 555 } else {
556 rcu_read_unlock(); 556 rcu_read_unlock();
557 /* can't run atomic */ 557 /* can't run atomic */
@@ -561,7 +561,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
561 return NULL; 561 return NULL;
562 } 562 }
563 563
564 mesh_sta_info_init(sdata, sta, elems, true); 564 mesh_sta_info_init(sdata, sta, elems);
565 565
566 if (sta_info_insert_rcu(sta)) 566 if (sta_info_insert_rcu(sta))
567 return NULL; 567 return NULL;
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 8d7e849d4825..41cede4041d3 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -215,7 +215,7 @@ err:
215static int ncsi_pkg_info_all_nl(struct sk_buff *skb, 215static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
216 struct netlink_callback *cb) 216 struct netlink_callback *cb)
217{ 217{
218 struct nlattr *attrs[NCSI_ATTR_MAX]; 218 struct nlattr *attrs[NCSI_ATTR_MAX + 1];
219 struct ncsi_package *np, *package; 219 struct ncsi_package *np, *package;
220 struct ncsi_dev_priv *ndp; 220 struct ncsi_dev_priv *ndp;
221 unsigned int package_id; 221 unsigned int package_id;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 0f6b8172fb9a..206fb2c4c319 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -585,7 +585,8 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
585EXPORT_SYMBOL(nf_nat_decode_session_hook); 585EXPORT_SYMBOL(nf_nat_decode_session_hook);
586#endif 586#endif
587 587
588static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max) 588static void __net_init
589__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
589{ 590{
590 int h; 591 int h;
591 592
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 370abbf6f421..75de46576f51 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -232,7 +232,10 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
232static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp) 232static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
233{ 233{
234 unsigned int hash; 234 unsigned int hash;
235 bool ret; 235 bool ret = false;
236
237 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
238 return refcount_dec_if_one(&cp->refcnt);
236 239
237 hash = ip_vs_conn_hashkey_conn(cp); 240 hash = ip_vs_conn_hashkey_conn(cp);
238 241
@@ -240,15 +243,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
240 spin_lock(&cp->lock); 243 spin_lock(&cp->lock);
241 244
242 if (cp->flags & IP_VS_CONN_F_HASHED) { 245 if (cp->flags & IP_VS_CONN_F_HASHED) {
243 ret = false;
244 /* Decrease refcnt and unlink conn only if we are last user */ 246 /* Decrease refcnt and unlink conn only if we are last user */
245 if (refcount_dec_if_one(&cp->refcnt)) { 247 if (refcount_dec_if_one(&cp->refcnt)) {
246 hlist_del_rcu(&cp->c_list); 248 hlist_del_rcu(&cp->c_list);
247 cp->flags &= ~IP_VS_CONN_F_HASHED; 249 cp->flags &= ~IP_VS_CONN_F_HASHED;
248 ret = true; 250 ret = true;
249 } 251 }
250 } else 252 }
251 ret = refcount_read(&cp->refcnt) ? false : true;
252 253
253 spin_unlock(&cp->lock); 254 spin_unlock(&cp->lock);
254 ct_write_unlock_bh(hash); 255 ct_write_unlock_bh(hash);
@@ -454,12 +455,6 @@ ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
454} 455}
455EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto); 456EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
456 457
457static void __ip_vs_conn_put_notimer(struct ip_vs_conn *cp)
458{
459 __ip_vs_conn_put(cp);
460 ip_vs_conn_expire(&cp->timer);
461}
462
463/* 458/*
464 * Put back the conn and restart its timer with its timeout 459 * Put back the conn and restart its timer with its timeout
465 */ 460 */
@@ -478,7 +473,7 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
478 (refcount_read(&cp->refcnt) == 1) && 473 (refcount_read(&cp->refcnt) == 1) &&
479 !timer_pending(&cp->timer)) 474 !timer_pending(&cp->timer))
480 /* expire connection immediately */ 475 /* expire connection immediately */
481 __ip_vs_conn_put_notimer(cp); 476 ip_vs_conn_expire(&cp->timer);
482 else 477 else
483 __ip_vs_conn_put_timer(cp); 478 __ip_vs_conn_put_timer(cp);
484} 479}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5f6f73cf2174..0679dd101e72 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -119,6 +119,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
119 struct ip_vs_cpu_stats *s; 119 struct ip_vs_cpu_stats *s;
120 struct ip_vs_service *svc; 120 struct ip_vs_service *svc;
121 121
122 local_bh_disable();
123
122 s = this_cpu_ptr(dest->stats.cpustats); 124 s = this_cpu_ptr(dest->stats.cpustats);
123 u64_stats_update_begin(&s->syncp); 125 u64_stats_update_begin(&s->syncp);
124 s->cnt.inpkts++; 126 s->cnt.inpkts++;
@@ -137,6 +139,8 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
137 s->cnt.inpkts++; 139 s->cnt.inpkts++;
138 s->cnt.inbytes += skb->len; 140 s->cnt.inbytes += skb->len;
139 u64_stats_update_end(&s->syncp); 141 u64_stats_update_end(&s->syncp);
142
143 local_bh_enable();
140 } 144 }
141} 145}
142 146
@@ -151,6 +155,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
151 struct ip_vs_cpu_stats *s; 155 struct ip_vs_cpu_stats *s;
152 struct ip_vs_service *svc; 156 struct ip_vs_service *svc;
153 157
158 local_bh_disable();
159
154 s = this_cpu_ptr(dest->stats.cpustats); 160 s = this_cpu_ptr(dest->stats.cpustats);
155 u64_stats_update_begin(&s->syncp); 161 u64_stats_update_begin(&s->syncp);
156 s->cnt.outpkts++; 162 s->cnt.outpkts++;
@@ -169,6 +175,8 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
169 s->cnt.outpkts++; 175 s->cnt.outpkts++;
170 s->cnt.outbytes += skb->len; 176 s->cnt.outbytes += skb->len;
171 u64_stats_update_end(&s->syncp); 177 u64_stats_update_end(&s->syncp);
178
179 local_bh_enable();
172 } 180 }
173} 181}
174 182
@@ -179,6 +187,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
179 struct netns_ipvs *ipvs = svc->ipvs; 187 struct netns_ipvs *ipvs = svc->ipvs;
180 struct ip_vs_cpu_stats *s; 188 struct ip_vs_cpu_stats *s;
181 189
190 local_bh_disable();
191
182 s = this_cpu_ptr(cp->dest->stats.cpustats); 192 s = this_cpu_ptr(cp->dest->stats.cpustats);
183 u64_stats_update_begin(&s->syncp); 193 u64_stats_update_begin(&s->syncp);
184 s->cnt.conns++; 194 s->cnt.conns++;
@@ -193,6 +203,8 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
193 u64_stats_update_begin(&s->syncp); 203 u64_stats_update_begin(&s->syncp);
194 s->cnt.conns++; 204 s->cnt.conns++;
195 u64_stats_update_end(&s->syncp); 205 u64_stats_update_end(&s->syncp);
206
207 local_bh_enable();
196} 208}
197 209
198 210
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index f36098887ad0..3ecca0616d8c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2381,8 +2381,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2381 struct ipvs_sync_daemon_cfg cfg; 2381 struct ipvs_sync_daemon_cfg cfg;
2382 2382
2383 memset(&cfg, 0, sizeof(cfg)); 2383 memset(&cfg, 0, sizeof(cfg));
2384 strlcpy(cfg.mcast_ifn, dm->mcast_ifn, 2384 ret = -EINVAL;
2385 sizeof(cfg.mcast_ifn)); 2385 if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
2386 sizeof(cfg.mcast_ifn)) <= 0)
2387 goto out_dec;
2386 cfg.syncid = dm->syncid; 2388 cfg.syncid = dm->syncid;
2387 ret = start_sync_thread(ipvs, &cfg, dm->state); 2389 ret = start_sync_thread(ipvs, &cfg, dm->state);
2388 } else { 2390 } else {
@@ -2420,12 +2422,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2420 } 2422 }
2421 } 2423 }
2422 2424
2425 if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
2426 strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
2427 IP_VS_SCHEDNAME_MAXLEN) {
2428 ret = -EINVAL;
2429 goto out_unlock;
2430 }
2431
2423 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */ 2432 /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
2424 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP && 2433 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
2425 usvc.protocol != IPPROTO_SCTP) { 2434 usvc.protocol != IPPROTO_SCTP) {
2426 pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n", 2435 pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
2427 usvc.protocol, &usvc.addr.ip, 2436 usvc.protocol, &usvc.addr.ip,
2428 ntohs(usvc.port), usvc.sched_name); 2437 ntohs(usvc.port));
2429 ret = -EFAULT; 2438 ret = -EFAULT;
2430 goto out_unlock; 2439 goto out_unlock;
2431 } 2440 }
@@ -2847,7 +2856,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
2847static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = { 2856static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
2848 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 }, 2857 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
2849 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING, 2858 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
2850 .len = IP_VS_IFNAME_MAXLEN }, 2859 .len = IP_VS_IFNAME_MAXLEN - 1 },
2851 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 }, 2860 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
2852 [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 }, 2861 [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
2853 [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 }, 2862 [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
@@ -2865,7 +2874,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
2865 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 }, 2874 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
2866 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 }, 2875 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
2867 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING, 2876 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
2868 .len = IP_VS_SCHEDNAME_MAXLEN }, 2877 .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
2869 [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING, 2878 [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
2870 .len = IP_VS_PENAME_MAXLEN }, 2879 .len = IP_VS_PENAME_MAXLEN },
2871 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY, 2880 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index e97cdc1cf98c..8e67910185a0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -981,6 +981,17 @@ static int tcp_packet(struct nf_conn *ct,
981 return NF_ACCEPT; /* Don't change state */ 981 return NF_ACCEPT; /* Don't change state */
982 } 982 }
983 break; 983 break;
984 case TCP_CONNTRACK_SYN_SENT2:
985 /* tcp_conntracks table is not smart enough to handle
986 * simultaneous open.
987 */
988 ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
989 break;
990 case TCP_CONNTRACK_SYN_RECV:
991 if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
992 ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
993 new_state = TCP_CONNTRACK_ESTABLISHED;
994 break;
984 case TCP_CONNTRACK_CLOSE: 995 case TCP_CONNTRACK_CLOSE:
985 if (index == TCP_RST_SET 996 if (index == TCP_RST_SET
986 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) 997 && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 04d4e3772584..501e48a7965b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -214,6 +214,34 @@ static int nft_delchain(struct nft_ctx *ctx)
214 return err; 214 return err;
215} 215}
216 216
217static void nft_rule_expr_activate(const struct nft_ctx *ctx,
218 struct nft_rule *rule)
219{
220 struct nft_expr *expr;
221
222 expr = nft_expr_first(rule);
223 while (expr != nft_expr_last(rule) && expr->ops) {
224 if (expr->ops->activate)
225 expr->ops->activate(ctx, expr);
226
227 expr = nft_expr_next(expr);
228 }
229}
230
231static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
232 struct nft_rule *rule)
233{
234 struct nft_expr *expr;
235
236 expr = nft_expr_first(rule);
237 while (expr != nft_expr_last(rule) && expr->ops) {
238 if (expr->ops->deactivate)
239 expr->ops->deactivate(ctx, expr);
240
241 expr = nft_expr_next(expr);
242 }
243}
244
217static int 245static int
218nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) 246nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
219{ 247{
@@ -259,6 +287,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
259 nft_trans_destroy(trans); 287 nft_trans_destroy(trans);
260 return err; 288 return err;
261 } 289 }
290 nft_rule_expr_deactivate(ctx, rule);
262 291
263 return 0; 292 return 0;
264} 293}
@@ -1269,8 +1298,10 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
1269 rcu_assign_pointer(chain->stats, newstats); 1298 rcu_assign_pointer(chain->stats, newstats);
1270 synchronize_rcu(); 1299 synchronize_rcu();
1271 free_percpu(oldstats); 1300 free_percpu(oldstats);
1272 } else 1301 } else {
1273 rcu_assign_pointer(chain->stats, newstats); 1302 rcu_assign_pointer(chain->stats, newstats);
1303 static_branch_inc(&nft_counters_enabled);
1304 }
1274} 1305}
1275 1306
1276static void nf_tables_chain_destroy(struct nft_ctx *ctx) 1307static void nf_tables_chain_destroy(struct nft_ctx *ctx)
@@ -2238,6 +2269,13 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2238 kfree(rule); 2269 kfree(rule);
2239} 2270}
2240 2271
2272static void nf_tables_rule_release(const struct nft_ctx *ctx,
2273 struct nft_rule *rule)
2274{
2275 nft_rule_expr_deactivate(ctx, rule);
2276 nf_tables_rule_destroy(ctx, rule);
2277}
2278
2241#define NFT_RULE_MAXEXPRS 128 2279#define NFT_RULE_MAXEXPRS 128
2242 2280
2243static struct nft_expr_info *info; 2281static struct nft_expr_info *info;
@@ -2402,7 +2440,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2402 return 0; 2440 return 0;
2403 2441
2404err2: 2442err2:
2405 nf_tables_rule_destroy(&ctx, rule); 2443 nf_tables_rule_release(&ctx, rule);
2406err1: 2444err1:
2407 for (i = 0; i < n; i++) { 2445 for (i = 0; i < n; i++) {
2408 if (info[i].ops != NULL) 2446 if (info[i].ops != NULL)
@@ -4044,8 +4082,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
4044 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^ 4082 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
4045 nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) || 4083 nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
4046 nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^ 4084 nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
4047 nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) 4085 nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) {
4048 return -EBUSY; 4086 err = -EBUSY;
4087 goto err5;
4088 }
4049 if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) && 4089 if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
4050 nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) && 4090 nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
4051 memcmp(nft_set_ext_data(ext), 4091 memcmp(nft_set_ext_data(ext),
@@ -4130,7 +4170,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
4130 * NFT_GOTO verdicts. This function must be called on active data objects 4170 * NFT_GOTO verdicts. This function must be called on active data objects
4131 * from the second phase of the commit protocol. 4171 * from the second phase of the commit protocol.
4132 */ 4172 */
4133static void nft_data_hold(const struct nft_data *data, enum nft_data_types type) 4173void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
4134{ 4174{
4135 if (type == NFT_DATA_VERDICT) { 4175 if (type == NFT_DATA_VERDICT) {
4136 switch (data->verdict.code) { 4176 switch (data->verdict.code) {
@@ -4668,7 +4708,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
4668 if (idx > s_idx) 4708 if (idx > s_idx)
4669 memset(&cb->args[1], 0, 4709 memset(&cb->args[1], 0,
4670 sizeof(cb->args) - sizeof(cb->args[0])); 4710 sizeof(cb->args) - sizeof(cb->args[0]));
4671 if (filter && filter->table[0] && 4711 if (filter && filter->table &&
4672 strcmp(filter->table, table->name)) 4712 strcmp(filter->table, table->name))
4673 goto cont; 4713 goto cont;
4674 if (filter && 4714 if (filter &&
@@ -5342,7 +5382,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb,
5342 if (idx > s_idx) 5382 if (idx > s_idx)
5343 memset(&cb->args[1], 0, 5383 memset(&cb->args[1], 0,
5344 sizeof(cb->args) - sizeof(cb->args[0])); 5384 sizeof(cb->args) - sizeof(cb->args[0]));
5345 if (filter && filter->table[0] && 5385 if (filter && filter->table &&
5346 strcmp(filter->table, table->name)) 5386 strcmp(filter->table, table->name))
5347 goto cont; 5387 goto cont;
5348 5388
@@ -5761,7 +5801,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
5761 } 5801 }
5762} 5802}
5763 5803
5764static void nf_tables_commit_release(struct nft_trans *trans) 5804static void nft_commit_release(struct nft_trans *trans)
5765{ 5805{
5766 switch (trans->msg_type) { 5806 switch (trans->msg_type) {
5767 case NFT_MSG_DELTABLE: 5807 case NFT_MSG_DELTABLE:
@@ -5790,6 +5830,21 @@ static void nf_tables_commit_release(struct nft_trans *trans)
5790 kfree(trans); 5830 kfree(trans);
5791} 5831}
5792 5832
5833static void nf_tables_commit_release(struct net *net)
5834{
5835 struct nft_trans *trans, *next;
5836
5837 if (list_empty(&net->nft.commit_list))
5838 return;
5839
5840 synchronize_rcu();
5841
5842 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
5843 list_del(&trans->list);
5844 nft_commit_release(trans);
5845 }
5846}
5847
5793static int nf_tables_commit(struct net *net, struct sk_buff *skb) 5848static int nf_tables_commit(struct net *net, struct sk_buff *skb)
5794{ 5849{
5795 struct nft_trans *trans, *next; 5850 struct nft_trans *trans, *next;
@@ -5920,13 +5975,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
5920 } 5975 }
5921 } 5976 }
5922 5977
5923 synchronize_rcu(); 5978 nf_tables_commit_release(net);
5924
5925 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
5926 list_del(&trans->list);
5927 nf_tables_commit_release(trans);
5928 }
5929
5930 nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); 5979 nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
5931 5980
5932 return 0; 5981 return 0;
@@ -6006,10 +6055,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
6006 case NFT_MSG_NEWRULE: 6055 case NFT_MSG_NEWRULE:
6007 trans->ctx.chain->use--; 6056 trans->ctx.chain->use--;
6008 list_del_rcu(&nft_trans_rule(trans)->list); 6057 list_del_rcu(&nft_trans_rule(trans)->list);
6058 nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
6009 break; 6059 break;
6010 case NFT_MSG_DELRULE: 6060 case NFT_MSG_DELRULE:
6011 trans->ctx.chain->use++; 6061 trans->ctx.chain->use++;
6012 nft_clear(trans->ctx.net, nft_trans_rule(trans)); 6062 nft_clear(trans->ctx.net, nft_trans_rule(trans));
6063 nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
6013 nft_trans_destroy(trans); 6064 nft_trans_destroy(trans);
6014 break; 6065 break;
6015 case NFT_MSG_NEWSET: 6066 case NFT_MSG_NEWSET:
@@ -6585,7 +6636,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
6585 list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { 6636 list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
6586 list_del(&rule->list); 6637 list_del(&rule->list);
6587 ctx->chain->use--; 6638 ctx->chain->use--;
6588 nf_tables_rule_destroy(ctx, rule); 6639 nf_tables_rule_release(ctx, rule);
6589 } 6640 }
6590 list_del(&ctx->chain->list); 6641 list_del(&ctx->chain->list);
6591 ctx->table->use--; 6642 ctx->table->use--;
@@ -6623,7 +6674,7 @@ static void __nft_release_tables(struct net *net)
6623 list_for_each_entry_safe(rule, nr, &chain->rules, list) { 6674 list_for_each_entry_safe(rule, nr, &chain->rules, list) {
6624 list_del(&rule->list); 6675 list_del(&rule->list);
6625 chain->use--; 6676 chain->use--;
6626 nf_tables_rule_destroy(&ctx, rule); 6677 nf_tables_rule_release(&ctx, rule);
6627 } 6678 }
6628 } 6679 }
6629 list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { 6680 list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index dfd0bf3810d2..40e744572283 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -119,14 +119,21 @@ DEFINE_STATIC_KEY_FALSE(nft_counters_enabled);
119static noinline void nft_update_chain_stats(const struct nft_chain *chain, 119static noinline void nft_update_chain_stats(const struct nft_chain *chain,
120 const struct nft_pktinfo *pkt) 120 const struct nft_pktinfo *pkt)
121{ 121{
122 struct nft_base_chain *base_chain;
122 struct nft_stats *stats; 123 struct nft_stats *stats;
123 124
125 base_chain = nft_base_chain(chain);
126 if (!base_chain->stats)
127 return;
128
124 local_bh_disable(); 129 local_bh_disable();
125 stats = this_cpu_ptr(rcu_dereference(nft_base_chain(chain)->stats)); 130 stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
126 u64_stats_update_begin(&stats->syncp); 131 if (stats) {
127 stats->pkts++; 132 u64_stats_update_begin(&stats->syncp);
128 stats->bytes += pkt->skb->len; 133 stats->pkts++;
129 u64_stats_update_end(&stats->syncp); 134 stats->bytes += pkt->skb->len;
135 u64_stats_update_end(&stats->syncp);
136 }
130 local_bh_enable(); 137 local_bh_enable();
131} 138}
132 139
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index b9505bcd3827..a0e5adf0b3b6 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -115,7 +115,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
115 nfacct->flags = flags; 115 nfacct->flags = flags;
116 } 116 }
117 117
118 strncpy(nfacct->name, nla_data(tb[NFACCT_NAME]), NFACCT_NAME_MAX); 118 nla_strlcpy(nfacct->name, tb[NFACCT_NAME], NFACCT_NAME_MAX);
119 119
120 if (tb[NFACCT_BYTES]) { 120 if (tb[NFACCT_BYTES]) {
121 atomic64_set(&nfacct->bytes, 121 atomic64_set(&nfacct->bytes,
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 4a4b293fb2e5..cb5b5f207777 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -149,8 +149,8 @@ nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
149 !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) 149 !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
150 return -EINVAL; 150 return -EINVAL;
151 151
152 strncpy(expect_policy->name, 152 nla_strlcpy(expect_policy->name,
153 nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN); 153 tb[NFCTH_POLICY_NAME], NF_CT_HELPER_NAME_LEN);
154 expect_policy->max_expected = 154 expect_policy->max_expected =
155 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); 155 ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
156 if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT) 156 if (expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
@@ -234,7 +234,8 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
234 if (ret < 0) 234 if (ret < 0)
235 goto err1; 235 goto err1;
236 236
237 strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); 237 nla_strlcpy(helper->name,
238 tb[NFCTH_NAME], NF_CT_HELPER_NAME_LEN);
238 size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); 239 size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
239 if (size > FIELD_SIZEOF(struct nf_conn_help, data)) { 240 if (size > FIELD_SIZEOF(struct nf_conn_help, data)) {
240 ret = -ENOMEM; 241 ret = -ENOMEM;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8e23726b9081..1d99a1efdafc 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -27,14 +27,31 @@ struct nft_xt {
27 struct list_head head; 27 struct list_head head;
28 struct nft_expr_ops ops; 28 struct nft_expr_ops ops;
29 unsigned int refcnt; 29 unsigned int refcnt;
30
31 /* Unlike other expressions, ops doesn't have static storage duration.
32 * nft core assumes they do. We use kfree_rcu so that nft core can
33 * can check expr->ops->size even after nft_compat->destroy() frees
34 * the nft_xt struct that holds the ops structure.
35 */
36 struct rcu_head rcu_head;
37};
38
39/* Used for matches where *info is larger than X byte */
40#define NFT_MATCH_LARGE_THRESH 192
41
42struct nft_xt_match_priv {
43 void *info;
30}; 44};
31 45
32static void nft_xt_put(struct nft_xt *xt) 46static bool nft_xt_put(struct nft_xt *xt)
33{ 47{
34 if (--xt->refcnt == 0) { 48 if (--xt->refcnt == 0) {
35 list_del(&xt->head); 49 list_del(&xt->head);
36 kfree(xt); 50 kfree_rcu(xt, rcu_head);
51 return true;
37 } 52 }
53
54 return false;
38} 55}
39 56
40static int nft_compat_chain_validate_dependency(const char *tablename, 57static int nft_compat_chain_validate_dependency(const char *tablename,
@@ -226,6 +243,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
226 struct xt_target *target = expr->ops->data; 243 struct xt_target *target = expr->ops->data;
227 struct xt_tgchk_param par; 244 struct xt_tgchk_param par;
228 size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); 245 size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
246 struct nft_xt *nft_xt;
229 u16 proto = 0; 247 u16 proto = 0;
230 bool inv = false; 248 bool inv = false;
231 union nft_entry e = {}; 249 union nft_entry e = {};
@@ -236,25 +254,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
236 if (ctx->nla[NFTA_RULE_COMPAT]) { 254 if (ctx->nla[NFTA_RULE_COMPAT]) {
237 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv); 255 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
238 if (ret < 0) 256 if (ret < 0)
239 goto err; 257 return ret;
240 } 258 }
241 259
242 nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); 260 nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
243 261
244 ret = xt_check_target(&par, size, proto, inv); 262 ret = xt_check_target(&par, size, proto, inv);
245 if (ret < 0) 263 if (ret < 0)
246 goto err; 264 return ret;
247 265
248 /* The standard target cannot be used */ 266 /* The standard target cannot be used */
249 if (target->target == NULL) { 267 if (!target->target)
250 ret = -EINVAL; 268 return -EINVAL;
251 goto err;
252 }
253 269
270 nft_xt = container_of(expr->ops, struct nft_xt, ops);
271 nft_xt->refcnt++;
254 return 0; 272 return 0;
255err:
256 module_put(target->me);
257 return ret;
258} 273}
259 274
260static void 275static void
@@ -271,8 +286,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
271 if (par.target->destroy != NULL) 286 if (par.target->destroy != NULL)
272 par.target->destroy(&par); 287 par.target->destroy(&par);
273 288
274 nft_xt_put(container_of(expr->ops, struct nft_xt, ops)); 289 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
275 module_put(target->me); 290 module_put(target->me);
276} 291}
277 292
278static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr) 293static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -316,11 +331,11 @@ static int nft_target_validate(const struct nft_ctx *ctx,
316 return 0; 331 return 0;
317} 332}
318 333
319static void nft_match_eval(const struct nft_expr *expr, 334static void __nft_match_eval(const struct nft_expr *expr,
320 struct nft_regs *regs, 335 struct nft_regs *regs,
321 const struct nft_pktinfo *pkt) 336 const struct nft_pktinfo *pkt,
337 void *info)
322{ 338{
323 void *info = nft_expr_priv(expr);
324 struct xt_match *match = expr->ops->data; 339 struct xt_match *match = expr->ops->data;
325 struct sk_buff *skb = pkt->skb; 340 struct sk_buff *skb = pkt->skb;
326 bool ret; 341 bool ret;
@@ -344,6 +359,22 @@ static void nft_match_eval(const struct nft_expr *expr,
344 } 359 }
345} 360}
346 361
362static void nft_match_large_eval(const struct nft_expr *expr,
363 struct nft_regs *regs,
364 const struct nft_pktinfo *pkt)
365{
366 struct nft_xt_match_priv *priv = nft_expr_priv(expr);
367
368 __nft_match_eval(expr, regs, pkt, priv->info);
369}
370
371static void nft_match_eval(const struct nft_expr *expr,
372 struct nft_regs *regs,
373 const struct nft_pktinfo *pkt)
374{
375 __nft_match_eval(expr, regs, pkt, nft_expr_priv(expr));
376}
377
347static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = { 378static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
348 [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING }, 379 [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
349 [NFTA_MATCH_REV] = { .type = NLA_U32 }, 380 [NFTA_MATCH_REV] = { .type = NLA_U32 },
@@ -404,13 +435,14 @@ static void match_compat_from_user(struct xt_match *m, void *in, void *out)
404} 435}
405 436
406static int 437static int
407nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, 438__nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
408 const struct nlattr * const tb[]) 439 const struct nlattr * const tb[],
440 void *info)
409{ 441{
410 void *info = nft_expr_priv(expr);
411 struct xt_match *match = expr->ops->data; 442 struct xt_match *match = expr->ops->data;
412 struct xt_mtchk_param par; 443 struct xt_mtchk_param par;
413 size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); 444 size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
445 struct nft_xt *nft_xt;
414 u16 proto = 0; 446 u16 proto = 0;
415 bool inv = false; 447 bool inv = false;
416 union nft_entry e = {}; 448 union nft_entry e = {};
@@ -421,26 +453,50 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
421 if (ctx->nla[NFTA_RULE_COMPAT]) { 453 if (ctx->nla[NFTA_RULE_COMPAT]) {
422 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv); 454 ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
423 if (ret < 0) 455 if (ret < 0)
424 goto err; 456 return ret;
425 } 457 }
426 458
427 nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); 459 nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
428 460
429 ret = xt_check_match(&par, size, proto, inv); 461 ret = xt_check_match(&par, size, proto, inv);
430 if (ret < 0) 462 if (ret < 0)
431 goto err; 463 return ret;
432 464
465 nft_xt = container_of(expr->ops, struct nft_xt, ops);
466 nft_xt->refcnt++;
433 return 0; 467 return 0;
434err: 468}
435 module_put(match->me); 469
470static int
471nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
472 const struct nlattr * const tb[])
473{
474 return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr));
475}
476
477static int
478nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
479 const struct nlattr * const tb[])
480{
481 struct nft_xt_match_priv *priv = nft_expr_priv(expr);
482 struct xt_match *m = expr->ops->data;
483 int ret;
484
485 priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
486 if (!priv->info)
487 return -ENOMEM;
488
489 ret = __nft_match_init(ctx, expr, tb, priv->info);
490 if (ret)
491 kfree(priv->info);
436 return ret; 492 return ret;
437} 493}
438 494
439static void 495static void
440nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 496__nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
497 void *info)
441{ 498{
442 struct xt_match *match = expr->ops->data; 499 struct xt_match *match = expr->ops->data;
443 void *info = nft_expr_priv(expr);
444 struct xt_mtdtor_param par; 500 struct xt_mtdtor_param par;
445 501
446 par.net = ctx->net; 502 par.net = ctx->net;
@@ -450,13 +506,28 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
450 if (par.match->destroy != NULL) 506 if (par.match->destroy != NULL)
451 par.match->destroy(&par); 507 par.match->destroy(&par);
452 508
453 nft_xt_put(container_of(expr->ops, struct nft_xt, ops)); 509 if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
454 module_put(match->me); 510 module_put(match->me);
455} 511}
456 512
457static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr) 513static void
514nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
515{
516 __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
517}
518
519static void
520nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
521{
522 struct nft_xt_match_priv *priv = nft_expr_priv(expr);
523
524 __nft_match_destroy(ctx, expr, priv->info);
525 kfree(priv->info);
526}
527
528static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
529 void *info)
458{ 530{
459 void *info = nft_expr_priv(expr);
460 struct xt_match *match = expr->ops->data; 531 struct xt_match *match = expr->ops->data;
461 532
462 if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) || 533 if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
@@ -470,6 +541,18 @@ nla_put_failure:
470 return -1; 541 return -1;
471} 542}
472 543
544static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
545{
546 return __nft_match_dump(skb, expr, nft_expr_priv(expr));
547}
548
549static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e)
550{
551 struct nft_xt_match_priv *priv = nft_expr_priv(e);
552
553 return __nft_match_dump(skb, e, priv->info);
554}
555
473static int nft_match_validate(const struct nft_ctx *ctx, 556static int nft_match_validate(const struct nft_ctx *ctx,
474 const struct nft_expr *expr, 557 const struct nft_expr *expr,
475 const struct nft_data **data) 558 const struct nft_data **data)
@@ -637,6 +720,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
637{ 720{
638 struct nft_xt *nft_match; 721 struct nft_xt *nft_match;
639 struct xt_match *match; 722 struct xt_match *match;
723 unsigned int matchsize;
640 char *mt_name; 724 char *mt_name;
641 u32 rev, family; 725 u32 rev, family;
642 int err; 726 int err;
@@ -654,13 +738,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
654 list_for_each_entry(nft_match, &nft_match_list, head) { 738 list_for_each_entry(nft_match, &nft_match_list, head) {
655 struct xt_match *match = nft_match->ops.data; 739 struct xt_match *match = nft_match->ops.data;
656 740
657 if (nft_match_cmp(match, mt_name, rev, family)) { 741 if (nft_match_cmp(match, mt_name, rev, family))
658 if (!try_module_get(match->me))
659 return ERR_PTR(-ENOENT);
660
661 nft_match->refcnt++;
662 return &nft_match->ops; 742 return &nft_match->ops;
663 }
664 } 743 }
665 744
666 match = xt_request_find_match(family, mt_name, rev); 745 match = xt_request_find_match(family, mt_name, rev);
@@ -679,9 +758,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
679 goto err; 758 goto err;
680 } 759 }
681 760
682 nft_match->refcnt = 1; 761 nft_match->refcnt = 0;
683 nft_match->ops.type = &nft_match_type; 762 nft_match->ops.type = &nft_match_type;
684 nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
685 nft_match->ops.eval = nft_match_eval; 763 nft_match->ops.eval = nft_match_eval;
686 nft_match->ops.init = nft_match_init; 764 nft_match->ops.init = nft_match_init;
687 nft_match->ops.destroy = nft_match_destroy; 765 nft_match->ops.destroy = nft_match_destroy;
@@ -689,6 +767,18 @@ nft_match_select_ops(const struct nft_ctx *ctx,
689 nft_match->ops.validate = nft_match_validate; 767 nft_match->ops.validate = nft_match_validate;
690 nft_match->ops.data = match; 768 nft_match->ops.data = match;
691 769
770 matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
771 if (matchsize > NFT_MATCH_LARGE_THRESH) {
772 matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
773
774 nft_match->ops.eval = nft_match_large_eval;
775 nft_match->ops.init = nft_match_large_init;
776 nft_match->ops.destroy = nft_match_large_destroy;
777 nft_match->ops.dump = nft_match_large_dump;
778 }
779
780 nft_match->ops.size = matchsize;
781
692 list_add(&nft_match->head, &nft_match_list); 782 list_add(&nft_match->head, &nft_match_list);
693 783
694 return &nft_match->ops; 784 return &nft_match->ops;
@@ -739,13 +829,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
739 list_for_each_entry(nft_target, &nft_target_list, head) { 829 list_for_each_entry(nft_target, &nft_target_list, head) {
740 struct xt_target *target = nft_target->ops.data; 830 struct xt_target *target = nft_target->ops.data;
741 831
742 if (nft_target_cmp(target, tg_name, rev, family)) { 832 if (nft_target_cmp(target, tg_name, rev, family))
743 if (!try_module_get(target->me))
744 return ERR_PTR(-ENOENT);
745
746 nft_target->refcnt++;
747 return &nft_target->ops; 833 return &nft_target->ops;
748 }
749 } 834 }
750 835
751 target = xt_request_find_target(family, tg_name, rev); 836 target = xt_request_find_target(family, tg_name, rev);
@@ -764,7 +849,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
764 goto err; 849 goto err;
765 } 850 }
766 851
767 nft_target->refcnt = 1; 852 nft_target->refcnt = 0;
768 nft_target->ops.type = &nft_target_type; 853 nft_target->ops.type = &nft_target_type;
769 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 854 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
770 nft_target->ops.init = nft_target_init; 855 nft_target->ops.init = nft_target_init;
@@ -823,6 +908,32 @@ err_match:
823 908
824static void __exit nft_compat_module_exit(void) 909static void __exit nft_compat_module_exit(void)
825{ 910{
911 struct nft_xt *xt, *next;
912
913 /* list should be empty here, it can be non-empty only in case there
914 * was an error that caused nft_xt expr to not be initialized fully
915 * and noone else requested the same expression later.
916 *
917 * In this case, the lists contain 0-refcount entries that still
918 * hold module reference.
919 */
920 list_for_each_entry_safe(xt, next, &nft_target_list, head) {
921 struct xt_target *target = xt->ops.data;
922
923 if (WARN_ON_ONCE(xt->refcnt))
924 continue;
925 module_put(target->me);
926 kfree(xt);
927 }
928
929 list_for_each_entry_safe(xt, next, &nft_match_list, head) {
930 struct xt_match *match = xt->ops.data;
931
932 if (WARN_ON_ONCE(xt->refcnt))
933 continue;
934 module_put(match->me);
935 kfree(xt);
936 }
826 nfnetlink_subsys_unregister(&nfnl_compat_subsys); 937 nfnetlink_subsys_unregister(&nfnl_compat_subsys);
827 nft_unregister_expr(&nft_target_type); 938 nft_unregister_expr(&nft_target_type);
828 nft_unregister_expr(&nft_match_type); 939 nft_unregister_expr(&nft_match_type);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index ea737fd789e8..5c0de704bad5 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -880,22 +880,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb,
880 struct nft_object *obj, bool reset) 880 struct nft_object *obj, bool reset)
881{ 881{
882 const struct nft_ct_helper_obj *priv = nft_obj_data(obj); 882 const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
883 const struct nf_conntrack_helper *helper = priv->helper4; 883 const struct nf_conntrack_helper *helper;
884 u16 family; 884 u16 family;
885 885
886 if (priv->helper4 && priv->helper6) {
887 family = NFPROTO_INET;
888 helper = priv->helper4;
889 } else if (priv->helper6) {
890 family = NFPROTO_IPV6;
891 helper = priv->helper6;
892 } else {
893 family = NFPROTO_IPV4;
894 helper = priv->helper4;
895 }
896
886 if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) 897 if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
887 return -1; 898 return -1;
888 899
889 if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) 900 if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
890 return -1; 901 return -1;
891 902
892 if (priv->helper4 && priv->helper6)
893 family = NFPROTO_INET;
894 else if (priv->helper6)
895 family = NFPROTO_IPV6;
896 else
897 family = NFPROTO_IPV4;
898
899 if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) 903 if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
900 return -1; 904 return -1;
901 905
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 4717d7796927..aa87ff8beae8 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -69,8 +69,16 @@ err1:
69 return err; 69 return err;
70} 70}
71 71
72static void nft_immediate_destroy(const struct nft_ctx *ctx, 72static void nft_immediate_activate(const struct nft_ctx *ctx,
73 const struct nft_expr *expr) 73 const struct nft_expr *expr)
74{
75 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
76
77 return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
78}
79
80static void nft_immediate_deactivate(const struct nft_ctx *ctx,
81 const struct nft_expr *expr)
74{ 82{
75 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 83 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
76 84
@@ -108,7 +116,8 @@ static const struct nft_expr_ops nft_imm_ops = {
108 .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), 116 .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
109 .eval = nft_immediate_eval, 117 .eval = nft_immediate_eval,
110 .init = nft_immediate_init, 118 .init = nft_immediate_init,
111 .destroy = nft_immediate_destroy, 119 .activate = nft_immediate_activate,
120 .deactivate = nft_immediate_deactivate,
112 .dump = nft_immediate_dump, 121 .dump = nft_immediate_dump,
113 .validate = nft_immediate_validate, 122 .validate = nft_immediate_validate,
114}; 123};
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index a9fc298ef4c3..72f13a1144dd 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -51,10 +51,13 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
51 return !limit->invert; 51 return !limit->invert;
52} 52}
53 53
54/* Use same default as in iptables. */
55#define NFT_LIMIT_PKT_BURST_DEFAULT 5
56
54static int nft_limit_init(struct nft_limit *limit, 57static int nft_limit_init(struct nft_limit *limit,
55 const struct nlattr * const tb[]) 58 const struct nlattr * const tb[], bool pkts)
56{ 59{
57 u64 unit; 60 u64 unit, tokens;
58 61
59 if (tb[NFTA_LIMIT_RATE] == NULL || 62 if (tb[NFTA_LIMIT_RATE] == NULL ||
60 tb[NFTA_LIMIT_UNIT] == NULL) 63 tb[NFTA_LIMIT_UNIT] == NULL)
@@ -68,18 +71,25 @@ static int nft_limit_init(struct nft_limit *limit,
68 71
69 if (tb[NFTA_LIMIT_BURST]) 72 if (tb[NFTA_LIMIT_BURST])
70 limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST])); 73 limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
71 else 74
72 limit->burst = 0; 75 if (pkts && limit->burst == 0)
76 limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
73 77
74 if (limit->rate + limit->burst < limit->rate) 78 if (limit->rate + limit->burst < limit->rate)
75 return -EOVERFLOW; 79 return -EOVERFLOW;
76 80
77 /* The token bucket size limits the number of tokens can be 81 if (pkts) {
78 * accumulated. tokens_max specifies the bucket size. 82 tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
79 * tokens_max = unit * (rate + burst) / rate. 83 } else {
80 */ 84 /* The token bucket size limits the number of tokens can be
81 limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst), 85 * accumulated. tokens_max specifies the bucket size.
82 limit->rate); 86 * tokens_max = unit * (rate + burst) / rate.
87 */
88 tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
89 limit->rate);
90 }
91
92 limit->tokens = tokens;
83 limit->tokens_max = limit->tokens; 93 limit->tokens_max = limit->tokens;
84 94
85 if (tb[NFTA_LIMIT_FLAGS]) { 95 if (tb[NFTA_LIMIT_FLAGS]) {
@@ -144,7 +154,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
144 struct nft_limit_pkts *priv = nft_expr_priv(expr); 154 struct nft_limit_pkts *priv = nft_expr_priv(expr);
145 int err; 155 int err;
146 156
147 err = nft_limit_init(&priv->limit, tb); 157 err = nft_limit_init(&priv->limit, tb, true);
148 if (err < 0) 158 if (err < 0)
149 return err; 159 return err;
150 160
@@ -185,7 +195,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
185{ 195{
186 struct nft_limit *priv = nft_expr_priv(expr); 196 struct nft_limit *priv = nft_expr_priv(expr);
187 197
188 return nft_limit_init(priv, tb); 198 return nft_limit_init(priv, tb, false);
189} 199}
190 200
191static int nft_limit_bytes_dump(struct sk_buff *skb, 201static int nft_limit_bytes_dump(struct sk_buff *skb,
@@ -246,7 +256,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
246 struct nft_limit_pkts *priv = nft_obj_data(obj); 256 struct nft_limit_pkts *priv = nft_obj_data(obj);
247 int err; 257 int err;
248 258
249 err = nft_limit_init(&priv->limit, tb); 259 err = nft_limit_init(&priv->limit, tb, true);
250 if (err < 0) 260 if (err < 0)
251 return err; 261 return err;
252 262
@@ -289,7 +299,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
289{ 299{
290 struct nft_limit *priv = nft_obj_data(obj); 300 struct nft_limit *priv = nft_obj_data(obj);
291 301
292 return nft_limit_init(priv, tb); 302 return nft_limit_init(priv, tb, false);
293} 303}
294 304
295static int nft_limit_obj_bytes_dump(struct sk_buff *skb, 305static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 8fb91940e2e7..204af9899482 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -234,7 +234,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
234 struct sk_buff *skb = pkt->skb; 234 struct sk_buff *skb = pkt->skb;
235 u32 *sreg = &regs->data[meta->sreg]; 235 u32 *sreg = &regs->data[meta->sreg];
236 u32 value = *sreg; 236 u32 value = *sreg;
237 u8 pkt_type; 237 u8 value8;
238 238
239 switch (meta->key) { 239 switch (meta->key) {
240 case NFT_META_MARK: 240 case NFT_META_MARK:
@@ -244,15 +244,17 @@ void nft_meta_set_eval(const struct nft_expr *expr,
244 skb->priority = value; 244 skb->priority = value;
245 break; 245 break;
246 case NFT_META_PKTTYPE: 246 case NFT_META_PKTTYPE:
247 pkt_type = nft_reg_load8(sreg); 247 value8 = nft_reg_load8(sreg);
248 248
249 if (skb->pkt_type != pkt_type && 249 if (skb->pkt_type != value8 &&
250 skb_pkt_type_ok(pkt_type) && 250 skb_pkt_type_ok(value8) &&
251 skb_pkt_type_ok(skb->pkt_type)) 251 skb_pkt_type_ok(skb->pkt_type))
252 skb->pkt_type = pkt_type; 252 skb->pkt_type = value8;
253 break; 253 break;
254 case NFT_META_NFTRACE: 254 case NFT_META_NFTRACE:
255 skb->nf_trace = !!value; 255 value8 = nft_reg_load8(sreg);
256
257 skb->nf_trace = !!value8;
256 break; 258 break;
257 default: 259 default:
258 WARN_ON(1); 260 WARN_ON(1);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 71325fef647d..cb7cb300c3bc 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -183,6 +183,9 @@ struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
183 struct xt_match *m; 183 struct xt_match *m;
184 int err = -ENOENT; 184 int err = -ENOENT;
185 185
186 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
187 return ERR_PTR(-EINVAL);
188
186 mutex_lock(&xt[af].mutex); 189 mutex_lock(&xt[af].mutex);
187 list_for_each_entry(m, &xt[af].match, list) { 190 list_for_each_entry(m, &xt[af].match, list) {
188 if (strcmp(m->name, name) == 0) { 191 if (strcmp(m->name, name) == 0) {
@@ -229,6 +232,9 @@ struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
229 struct xt_target *t; 232 struct xt_target *t;
230 int err = -ENOENT; 233 int err = -ENOENT;
231 234
235 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
236 return ERR_PTR(-EINVAL);
237
232 mutex_lock(&xt[af].mutex); 238 mutex_lock(&xt[af].mutex);
233 list_for_each_entry(t, &xt[af].target, list) { 239 list_for_each_entry(t, &xt[af].target, list) {
234 if (strcmp(t->name, name) == 0) { 240 if (strcmp(t->name, name) == 0) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 01f3515cada0..acb7b86574cd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2903,13 +2903,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2903 if (skb == NULL) 2903 if (skb == NULL)
2904 goto out_unlock; 2904 goto out_unlock;
2905 2905
2906 skb_set_network_header(skb, reserve); 2906 skb_reset_network_header(skb);
2907 2907
2908 err = -EINVAL; 2908 err = -EINVAL;
2909 if (sock->type == SOCK_DGRAM) { 2909 if (sock->type == SOCK_DGRAM) {
2910 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); 2910 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2911 if (unlikely(offset < 0)) 2911 if (unlikely(offset < 0))
2912 goto out_free; 2912 goto out_free;
2913 } else if (reserve) {
2914 skb_reserve(skb, -reserve);
2913 } 2915 }
2914 2916
2915 /* Returns -EFAULT on error */ 2917 /* Returns -EFAULT on error */
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 853604685965..1fb39e1f9d07 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -161,6 +161,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
161 case htons(ETH_P_8021AD): 161 case htons(ETH_P_8021AD):
162 break; 162 break;
163 default: 163 default:
164 if (exists)
165 tcf_idr_release(*a, bind);
164 return -EPROTONOSUPPORT; 166 return -EPROTONOSUPPORT;
165 } 167 }
166 } else { 168 } else {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 963e4bf0aab8..a57e112d9b3e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1588,7 +1588,7 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
1588 return ret; 1588 return ret;
1589 ok_count = ret; 1589 ok_count = ret;
1590 1590
1591 if (!exts) 1591 if (!exts || ok_count)
1592 return ok_count; 1592 return ok_count;
1593 ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop); 1593 ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
1594 if (ret < 0) 1594 if (ret < 0)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d964e60c730e..c79f6e71512e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -977,7 +977,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
977 return 0; 977 return 0;
978 978
979errout_idr: 979errout_idr:
980 if (fnew->handle) 980 if (!fold)
981 idr_remove(&head->handle_idr, fnew->handle); 981 idr_remove(&head->handle_idr, fnew->handle);
982errout: 982errout:
983 tcf_exts_destroy(&fnew->exts); 983 tcf_exts_destroy(&fnew->exts);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 16644b3d2362..56c181c3feeb 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -222,10 +222,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
222 extack); 222 extack);
223 if (IS_ERR(child)) 223 if (IS_ERR(child))
224 return PTR_ERR(child); 224 return PTR_ERR(child);
225 }
226 225
227 if (child != &noop_qdisc) 226 /* child is fifo, no need to check for noop_qdisc */
228 qdisc_hash_add(child, true); 227 qdisc_hash_add(child, true);
228 }
229
229 sch_tree_lock(sch); 230 sch_tree_lock(sch);
230 q->flags = ctl->flags; 231 q->flags = ctl->flags;
231 q->limit = ctl->limit; 232 q->limit = ctl->limit;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 03225a8df973..6f74a426f159 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -383,6 +383,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
383 err = PTR_ERR(child); 383 err = PTR_ERR(child);
384 goto done; 384 goto done;
385 } 385 }
386
387 /* child is fifo, no need to check for noop_qdisc */
388 qdisc_hash_add(child, true);
386 } 389 }
387 390
388 sch_tree_lock(sch); 391 sch_tree_lock(sch);
@@ -391,8 +394,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
391 q->qdisc->qstats.backlog); 394 q->qdisc->qstats.backlog);
392 qdisc_destroy(q->qdisc); 395 qdisc_destroy(q->qdisc);
393 q->qdisc = child; 396 q->qdisc = child;
394 if (child != &noop_qdisc)
395 qdisc_hash_add(child, true);
396 } 397 }
397 q->limit = qopt->limit; 398 q->limit = qopt->limit;
398 if (tb[TCA_TBF_PBURST]) 399 if (tb[TCA_TBF_PBURST])
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 42247110d842..0cd2e764f47f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -1006,7 +1006,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
1006 .owner = THIS_MODULE, 1006 .owner = THIS_MODULE,
1007 .release = inet6_release, 1007 .release = inet6_release,
1008 .bind = inet6_bind, 1008 .bind = inet6_bind,
1009 .connect = inet_dgram_connect, 1009 .connect = sctp_inet_connect,
1010 .socketpair = sock_no_socketpair, 1010 .socketpair = sock_no_socketpair,
1011 .accept = inet_accept, 1011 .accept = inet_accept,
1012 .getname = sctp_getname, 1012 .getname = sctp_getname,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d685f8456762..6bf0a9971888 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1012,7 +1012,7 @@ static const struct proto_ops inet_seqpacket_ops = {
1012 .owner = THIS_MODULE, 1012 .owner = THIS_MODULE,
1013 .release = inet_release, /* Needs to be wrapped... */ 1013 .release = inet_release, /* Needs to be wrapped... */
1014 .bind = inet_bind, 1014 .bind = inet_bind,
1015 .connect = inet_dgram_connect, 1015 .connect = sctp_inet_connect,
1016 .socketpair = sock_no_socketpair, 1016 .socketpair = sock_no_socketpair,
1017 .accept = inet_accept, 1017 .accept = inet_accept,
1018 .getname = inet_getname, /* Semantics are different. */ 1018 .getname = inet_getname, /* Semantics are different. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 80835ac26d2c..ae7e7c606f72 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1086,7 +1086,7 @@ out:
1086 */ 1086 */
1087static int __sctp_connect(struct sock *sk, 1087static int __sctp_connect(struct sock *sk,
1088 struct sockaddr *kaddrs, 1088 struct sockaddr *kaddrs,
1089 int addrs_size, 1089 int addrs_size, int flags,
1090 sctp_assoc_t *assoc_id) 1090 sctp_assoc_t *assoc_id)
1091{ 1091{
1092 struct net *net = sock_net(sk); 1092 struct net *net = sock_net(sk);
@@ -1104,7 +1104,6 @@ static int __sctp_connect(struct sock *sk,
1104 union sctp_addr *sa_addr = NULL; 1104 union sctp_addr *sa_addr = NULL;
1105 void *addr_buf; 1105 void *addr_buf;
1106 unsigned short port; 1106 unsigned short port;
1107 unsigned int f_flags = 0;
1108 1107
1109 sp = sctp_sk(sk); 1108 sp = sctp_sk(sk);
1110 ep = sp->ep; 1109 ep = sp->ep;
@@ -1254,13 +1253,7 @@ static int __sctp_connect(struct sock *sk,
1254 sp->pf->to_sk_daddr(sa_addr, sk); 1253 sp->pf->to_sk_daddr(sa_addr, sk);
1255 sk->sk_err = 0; 1254 sk->sk_err = 0;
1256 1255
1257 /* in-kernel sockets don't generally have a file allocated to them 1256 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1258 * if all they do is call sock_create_kern().
1259 */
1260 if (sk->sk_socket->file)
1261 f_flags = sk->sk_socket->file->f_flags;
1262
1263 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1264 1257
1265 if (assoc_id) 1258 if (assoc_id)
1266 *assoc_id = asoc->assoc_id; 1259 *assoc_id = asoc->assoc_id;
@@ -1348,7 +1341,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1348 sctp_assoc_t *assoc_id) 1341 sctp_assoc_t *assoc_id)
1349{ 1342{
1350 struct sockaddr *kaddrs; 1343 struct sockaddr *kaddrs;
1351 int err = 0; 1344 int err = 0, flags = 0;
1352 1345
1353 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", 1346 pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
1354 __func__, sk, addrs, addrs_size); 1347 __func__, sk, addrs, addrs_size);
@@ -1367,7 +1360,13 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1367 if (err) 1360 if (err)
1368 goto out_free; 1361 goto out_free;
1369 1362
1370 err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id); 1363 /* in-kernel sockets don't generally have a file allocated to them
1364 * if all they do is call sock_create_kern().
1365 */
1366 if (sk->sk_socket->file)
1367 flags = sk->sk_socket->file->f_flags;
1368
1369 err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
1371 1370
1372out_free: 1371out_free:
1373 kvfree(kaddrs); 1372 kvfree(kaddrs);
@@ -4397,16 +4396,26 @@ out_nounlock:
4397 * len: the size of the address. 4396 * len: the size of the address.
4398 */ 4397 */
4399static int sctp_connect(struct sock *sk, struct sockaddr *addr, 4398static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4400 int addr_len) 4399 int addr_len, int flags)
4401{ 4400{
4402 int err = 0; 4401 struct inet_sock *inet = inet_sk(sk);
4403 struct sctp_af *af; 4402 struct sctp_af *af;
4403 int err = 0;
4404 4404
4405 lock_sock(sk); 4405 lock_sock(sk);
4406 4406
4407 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, 4407 pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
4408 addr, addr_len); 4408 addr, addr_len);
4409 4409
4410 /* We may need to bind the socket. */
4411 if (!inet->inet_num) {
4412 if (sk->sk_prot->get_port(sk, 0)) {
4413 release_sock(sk);
4414 return -EAGAIN;
4415 }
4416 inet->inet_sport = htons(inet->inet_num);
4417 }
4418
4410 /* Validate addr_len before calling common connect/connectx routine. */ 4419 /* Validate addr_len before calling common connect/connectx routine. */
4411 af = sctp_get_af_specific(addr->sa_family); 4420 af = sctp_get_af_specific(addr->sa_family);
4412 if (!af || addr_len < af->sockaddr_len) { 4421 if (!af || addr_len < af->sockaddr_len) {
@@ -4415,13 +4424,25 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4415 /* Pass correct addr len to common routine (so it knows there 4424 /* Pass correct addr len to common routine (so it knows there
4416 * is only one address being passed. 4425 * is only one address being passed.
4417 */ 4426 */
4418 err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); 4427 err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
4419 } 4428 }
4420 4429
4421 release_sock(sk); 4430 release_sock(sk);
4422 return err; 4431 return err;
4423} 4432}
4424 4433
4434int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
4435 int addr_len, int flags)
4436{
4437 if (addr_len < sizeof(uaddr->sa_family))
4438 return -EINVAL;
4439
4440 if (uaddr->sa_family == AF_UNSPEC)
4441 return -EOPNOTSUPP;
4442
4443 return sctp_connect(sock->sk, uaddr, addr_len, flags);
4444}
4445
4425/* FIXME: Write comments. */ 4446/* FIXME: Write comments. */
4426static int sctp_disconnect(struct sock *sk, int flags) 4447static int sctp_disconnect(struct sock *sk, int flags)
4427{ 4448{
@@ -8724,7 +8745,6 @@ struct proto sctp_prot = {
8724 .name = "SCTP", 8745 .name = "SCTP",
8725 .owner = THIS_MODULE, 8746 .owner = THIS_MODULE,
8726 .close = sctp_close, 8747 .close = sctp_close,
8727 .connect = sctp_connect,
8728 .disconnect = sctp_disconnect, 8748 .disconnect = sctp_disconnect,
8729 .accept = sctp_accept, 8749 .accept = sctp_accept,
8730 .ioctl = sctp_ioctl, 8750 .ioctl = sctp_ioctl,
@@ -8767,7 +8787,6 @@ struct proto sctpv6_prot = {
8767 .name = "SCTPv6", 8787 .name = "SCTPv6",
8768 .owner = THIS_MODULE, 8788 .owner = THIS_MODULE,
8769 .close = sctp_close, 8789 .close = sctp_close,
8770 .connect = sctp_connect,
8771 .disconnect = sctp_disconnect, 8790 .disconnect = sctp_disconnect,
8772 .accept = sctp_accept, 8791 .accept = sctp_accept,
8773 .ioctl = sctp_ioctl, 8792 .ioctl = sctp_ioctl,
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 74568cdbca70..d7b88b2d1b22 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -245,40 +245,45 @@ out:
245static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem, 245static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
246 struct nlattr *tb[]) 246 struct nlattr *tb[])
247{ 247{
248 char *string, *ibname = NULL; 248 char *string, *ibname;
249 int rc = 0; 249 int rc;
250 250
251 memset(pnetelem, 0, sizeof(*pnetelem)); 251 memset(pnetelem, 0, sizeof(*pnetelem));
252 INIT_LIST_HEAD(&pnetelem->list); 252 INIT_LIST_HEAD(&pnetelem->list);
253 if (tb[SMC_PNETID_NAME]) { 253
254 string = (char *)nla_data(tb[SMC_PNETID_NAME]); 254 rc = -EINVAL;
255 if (!smc_pnetid_valid(string, pnetelem->pnet_name)) { 255 if (!tb[SMC_PNETID_NAME])
256 rc = -EINVAL; 256 goto error;
257 goto error; 257 string = (char *)nla_data(tb[SMC_PNETID_NAME]);
258 } 258 if (!smc_pnetid_valid(string, pnetelem->pnet_name))
259 } 259 goto error;
260 if (tb[SMC_PNETID_ETHNAME]) { 260
261 string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]); 261 rc = -EINVAL;
262 pnetelem->ndev = dev_get_by_name(net, string); 262 if (!tb[SMC_PNETID_ETHNAME])
263 if (!pnetelem->ndev) 263 goto error;
264 return -ENOENT; 264 rc = -ENOENT;
265 } 265 string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
266 if (tb[SMC_PNETID_IBNAME]) { 266 pnetelem->ndev = dev_get_by_name(net, string);
267 ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]); 267 if (!pnetelem->ndev)
268 ibname = strim(ibname); 268 goto error;
269 pnetelem->smcibdev = smc_pnet_find_ib(ibname); 269
270 if (!pnetelem->smcibdev) { 270 rc = -EINVAL;
271 rc = -ENOENT; 271 if (!tb[SMC_PNETID_IBNAME])
272 goto error; 272 goto error;
273 } 273 rc = -ENOENT;
274 } 274 ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
275 if (tb[SMC_PNETID_IBPORT]) { 275 ibname = strim(ibname);
276 pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]); 276 pnetelem->smcibdev = smc_pnet_find_ib(ibname);
277 if (pnetelem->ib_port > SMC_MAX_PORTS) { 277 if (!pnetelem->smcibdev)
278 rc = -EINVAL; 278 goto error;
279 goto error; 279
280 } 280 rc = -EINVAL;
281 } 281 if (!tb[SMC_PNETID_IBPORT])
282 goto error;
283 pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
284 if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS)
285 goto error;
286
282 return 0; 287 return 0;
283 288
284error: 289error:
@@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
307 void *hdr; 312 void *hdr;
308 int rc; 313 int rc;
309 314
315 if (!info->attrs[SMC_PNETID_NAME])
316 return -EINVAL;
310 pnetelem = smc_pnet_find_pnetid( 317 pnetelem = smc_pnet_find_pnetid(
311 (char *)nla_data(info->attrs[SMC_PNETID_NAME])); 318 (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
312 if (!pnetelem) 319 if (!pnetelem)
@@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
359 366
360static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info) 367static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
361{ 368{
369 if (!info->attrs[SMC_PNETID_NAME])
370 return -EINVAL;
362 return smc_pnet_remove_by_pnetid( 371 return smc_pnet_remove_by_pnetid(
363 (char *)nla_data(info->attrs[SMC_PNETID_NAME])); 372 (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
364} 373}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 71e79597f940..e1c93ce74e0f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -680,7 +680,6 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
680 struct scatterlist *sgin = &sgin_arr[0]; 680 struct scatterlist *sgin = &sgin_arr[0];
681 struct strp_msg *rxm = strp_msg(skb); 681 struct strp_msg *rxm = strp_msg(skb);
682 int ret, nsg = ARRAY_SIZE(sgin_arr); 682 int ret, nsg = ARRAY_SIZE(sgin_arr);
683 char aad_recv[TLS_AAD_SPACE_SIZE];
684 struct sk_buff *unused; 683 struct sk_buff *unused;
685 684
686 ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE, 685 ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
@@ -698,13 +697,13 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
698 } 697 }
699 698
700 sg_init_table(sgin, nsg); 699 sg_init_table(sgin, nsg);
701 sg_set_buf(&sgin[0], aad_recv, sizeof(aad_recv)); 700 sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
702 701
703 nsg = skb_to_sgvec(skb, &sgin[1], 702 nsg = skb_to_sgvec(skb, &sgin[1],
704 rxm->offset + tls_ctx->rx.prepend_size, 703 rxm->offset + tls_ctx->rx.prepend_size,
705 rxm->full_len - tls_ctx->rx.prepend_size); 704 rxm->full_len - tls_ctx->rx.prepend_size);
706 705
707 tls_make_aad(aad_recv, 706 tls_make_aad(ctx->rx_aad_ciphertext,
708 rxm->full_len - tls_ctx->rx.overhead_size, 707 rxm->full_len - tls_ctx->rx.overhead_size,
709 tls_ctx->rx.rec_seq, 708 tls_ctx->rx.rec_seq,
710 tls_ctx->rx.rec_seq_size, 709 tls_ctx->rx.rec_seq_size,
@@ -803,12 +802,12 @@ int tls_sw_recvmsg(struct sock *sk,
803 if (to_copy <= len && page_count < MAX_SKB_FRAGS && 802 if (to_copy <= len && page_count < MAX_SKB_FRAGS &&
804 likely(!(flags & MSG_PEEK))) { 803 likely(!(flags & MSG_PEEK))) {
805 struct scatterlist sgin[MAX_SKB_FRAGS + 1]; 804 struct scatterlist sgin[MAX_SKB_FRAGS + 1];
806 char unused[21];
807 int pages = 0; 805 int pages = 0;
808 806
809 zc = true; 807 zc = true;
810 sg_init_table(sgin, MAX_SKB_FRAGS + 1); 808 sg_init_table(sgin, MAX_SKB_FRAGS + 1);
811 sg_set_buf(&sgin[0], unused, 13); 809 sg_set_buf(&sgin[0], ctx->rx_aad_plaintext,
810 TLS_AAD_SPACE_SIZE);
812 811
813 err = zerocopy_from_iter(sk, &msg->msg_iter, 812 err = zerocopy_from_iter(sk, &msg->msg_iter,
814 to_copy, &pages, 813 to_copy, &pages,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a052693c2e85..7c5135a92d76 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -15555,7 +15555,8 @@ void cfg80211_ft_event(struct net_device *netdev,
15555 if (!ft_event->target_ap) 15555 if (!ft_event->target_ap)
15556 return; 15556 return;
15557 15557
15558 msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL); 15558 msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
15559 GFP_KERNEL);
15559 if (!msg) 15560 if (!msg)
15560 return; 15561 return;
15561 15562
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ac3e12c32aa3..5fcec5c94eb7 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -916,6 +916,9 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
916 const struct fwdb_header *hdr = regdb; 916 const struct fwdb_header *hdr = regdb;
917 const struct fwdb_country *country; 917 const struct fwdb_country *country;
918 918
919 if (!regdb)
920 return -ENODATA;
921
919 if (IS_ERR(regdb)) 922 if (IS_ERR(regdb))
920 return PTR_ERR(regdb); 923 return PTR_ERR(regdb);
921 924
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 40b54cc64243..5f48251c1319 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1658,7 +1658,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1658 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; 1658 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
1659 } 1659 }
1660 1660
1661out:
1662 return &xdst0->u.dst; 1661 return &xdst0->u.dst;
1663 1662
1664put_states: 1663put_states:
@@ -1667,8 +1666,8 @@ put_states:
1667free_dst: 1666free_dst:
1668 if (xdst0) 1667 if (xdst0)
1669 dst_release_immediate(&xdst0->u.dst); 1668 dst_release_immediate(&xdst0->u.dst);
1670 xdst0 = ERR_PTR(err); 1669
1671 goto out; 1670 return ERR_PTR(err);
1672} 1671}
1673 1672
1674static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1673static int xfrm_expand_policies(const struct flowi *fl, u16 family,
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e16d6713f236..2d42eb9cd1a5 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5041,7 +5041,7 @@ sub process {
5041 $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g; 5041 $tmp_stmt =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
5042 $tmp_stmt =~ s/\#+\s*$arg\b//g; 5042 $tmp_stmt =~ s/\#+\s*$arg\b//g;
5043 $tmp_stmt =~ s/\b$arg\s*\#\#//g; 5043 $tmp_stmt =~ s/\b$arg\s*\#\#//g;
5044 my $use_cnt = $tmp_stmt =~ s/\b$arg\b//g; 5044 my $use_cnt = () = $tmp_stmt =~ /\b$arg\b/g;
5045 if ($use_cnt > 1) { 5045 if ($use_cnt > 1) {
5046 CHK("MACRO_ARG_REUSE", 5046 CHK("MACRO_ARG_REUSE",
5047 "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx"); 5047 "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx");
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index be5817df0a9d..179dd20bec0a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1568,8 +1568,15 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
1568 /* Called from d_instantiate or d_splice_alias. */ 1568 /* Called from d_instantiate or d_splice_alias. */
1569 dentry = dget(opt_dentry); 1569 dentry = dget(opt_dentry);
1570 } else { 1570 } else {
1571 /* Called from selinux_complete_init, try to find a dentry. */ 1571 /*
1572 * Called from selinux_complete_init, try to find a dentry.
1573 * Some filesystems really want a connected one, so try
1574 * that first. We could split SECURITY_FS_USE_XATTR in
1575 * two, depending upon that...
1576 */
1572 dentry = d_find_alias(inode); 1577 dentry = d_find_alias(inode);
1578 if (!dentry)
1579 dentry = d_find_any_alias(inode);
1573 } 1580 }
1574 if (!dentry) { 1581 if (!dentry) {
1575 /* 1582 /*
@@ -1674,14 +1681,19 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
1674 if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) { 1681 if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {
1675 /* We must have a dentry to determine the label on 1682 /* We must have a dentry to determine the label on
1676 * procfs inodes */ 1683 * procfs inodes */
1677 if (opt_dentry) 1684 if (opt_dentry) {
1678 /* Called from d_instantiate or 1685 /* Called from d_instantiate or
1679 * d_splice_alias. */ 1686 * d_splice_alias. */
1680 dentry = dget(opt_dentry); 1687 dentry = dget(opt_dentry);
1681 else 1688 } else {
1682 /* Called from selinux_complete_init, try to 1689 /* Called from selinux_complete_init, try to
1683 * find a dentry. */ 1690 * find a dentry. Some filesystems really want
1691 * a connected one, so try that first.
1692 */
1684 dentry = d_find_alias(inode); 1693 dentry = d_find_alias(inode);
1694 if (!dentry)
1695 dentry = d_find_any_alias(inode);
1696 }
1685 /* 1697 /*
1686 * This can be hit on boot when a file is accessed 1698 * This can be hit on boot when a file is accessed
1687 * before the policy is loaded. When we load policy we 1699 * before the policy is loaded. When we load policy we
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 8057e19dc15f..3ce225e3f142 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1494,7 +1494,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
1494 scontext_len, &context, def_sid); 1494 scontext_len, &context, def_sid);
1495 if (rc == -EINVAL && force) { 1495 if (rc == -EINVAL && force) {
1496 context.str = str; 1496 context.str = str;
1497 context.len = scontext_len; 1497 context.len = strlen(str) + 1;
1498 str = NULL; 1498 str = NULL;
1499 } else if (rc) 1499 } else if (rc)
1500 goto out_unlock; 1500 goto out_unlock;
diff --git a/sound/core/timer.c b/sound/core/timer.c
index dc87728c5b74..0ddcae495838 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
592 else 592 else
593 timeri->flags |= SNDRV_TIMER_IFLG_PAUSED; 593 timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
594 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : 594 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
595 SNDRV_TIMER_EVENT_CONTINUE); 595 SNDRV_TIMER_EVENT_PAUSE);
596 unlock: 596 unlock:
597 spin_unlock_irqrestore(&timer->lock, flags); 597 spin_unlock_irqrestore(&timer->lock, flags);
598 return result; 598 return result;
@@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
614 list_del_init(&timeri->ack_list); 614 list_del_init(&timeri->ack_list);
615 list_del_init(&timeri->active_list); 615 list_del_init(&timeri->active_list);
616 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP : 616 snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
617 SNDRV_TIMER_EVENT_CONTINUE); 617 SNDRV_TIMER_EVENT_PAUSE);
618 spin_unlock(&timeri->timer->lock); 618 spin_unlock(&timeri->timer->lock);
619 } 619 }
620 spin_unlock_irqrestore(&slave_active_lock, flags); 620 spin_unlock_irqrestore(&slave_active_lock, flags);
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 321e78baa63c..9bd935216c18 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -622,8 +622,10 @@ snd_hda_check_power_state(struct hda_codec *codec, hda_nid_t nid,
622{ 622{
623 return snd_hdac_check_power_state(&codec->core, nid, target_state); 623 return snd_hdac_check_power_state(&codec->core, nid, target_state);
624} 624}
625static inline bool snd_hda_sync_power_state(struct hda_codec *codec, 625
626 hda_nid_t nid, unsigned int target_state) 626static inline unsigned int snd_hda_sync_power_state(struct hda_codec *codec,
627 hda_nid_t nid,
628 unsigned int target_state)
627{ 629{
628 return snd_hdac_sync_power_state(&codec->core, nid, target_state); 630 return snd_hdac_sync_power_state(&codec->core, nid, target_state);
629} 631}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index c5ec89732a8d..8c317737ba3f 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1017,6 +1017,7 @@ struct bpf_prog_info {
1017 __aligned_u64 map_ids; 1017 __aligned_u64 map_ids;
1018 char name[BPF_OBJ_NAME_LEN]; 1018 char name[BPF_OBJ_NAME_LEN];
1019 __u32 ifindex; 1019 __u32 ifindex;
1020 __u32 :32;
1020 __u64 netns_dev; 1021 __u64 netns_dev;
1021 __u64 netns_ino; 1022 __u64 netns_ino;
1022} __attribute__((aligned(8))); 1023} __attribute__((aligned(8)));
@@ -1030,6 +1031,7 @@ struct bpf_map_info {
1030 __u32 map_flags; 1031 __u32 map_flags;
1031 char name[BPF_OBJ_NAME_LEN]; 1032 char name[BPF_OBJ_NAME_LEN];
1032 __u32 ifindex; 1033 __u32 ifindex;
1034 __u32 :32;
1033 __u64 netns_dev; 1035 __u64 netns_dev;
1034 __u64 netns_ino; 1036 __u64 netns_ino;
1035} __attribute__((aligned(8))); 1037} __attribute__((aligned(8)));
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5922443063f0..0f9f06df49bc 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2035,7 +2035,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2035 return -EINVAL; 2035 return -EINVAL;
2036 2036
2037 obj = bpf_object__open(attr->file); 2037 obj = bpf_object__open(attr->file);
2038 if (IS_ERR(obj)) 2038 if (IS_ERR_OR_NULL(obj))
2039 return -ENOENT; 2039 return -ENOENT;
2040 2040
2041 bpf_object__for_each_program(prog, obj) { 2041 bpf_object__for_each_program(prog, obj) {
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index d00f0d51cab8..dfb218feaad9 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -111,8 +111,8 @@ A perf_header_string with the CPU architecture (uname -m)
111A structure defining the number of CPUs. 111A structure defining the number of CPUs.
112 112
113struct nr_cpus { 113struct nr_cpus {
114 uint32_t nr_cpus_online;
115 uint32_t nr_cpus_available; /* CPUs not yet onlined */ 114 uint32_t nr_cpus_available; /* CPUs not yet onlined */
115 uint32_t nr_cpus_online;
116}; 116};
117 117
118 HEADER_CPUDESC = 8, 118 HEADER_CPUDESC = 8,
@@ -153,10 +153,18 @@ struct {
153 HEADER_CPU_TOPOLOGY = 13, 153 HEADER_CPU_TOPOLOGY = 13,
154 154
155String lists defining the core and CPU threads topology. 155String lists defining the core and CPU threads topology.
156The string lists are followed by a variable length array
157which contains core_id and socket_id of each cpu.
158The number of entries can be determined by the size of the
159section minus the sizes of both string lists.
156 160
157struct { 161struct {
158 struct perf_header_string_list cores; /* Variable length */ 162 struct perf_header_string_list cores; /* Variable length */
159 struct perf_header_string_list threads; /* Variable length */ 163 struct perf_header_string_list threads; /* Variable length */
164 struct {
165 uint32_t core_id;
166 uint32_t socket_id;
167 } cpus[nr]; /* Variable length records */
160}; 168};
161 169
162Example: 170Example:
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index 17cb1bb3448c..40e30a26b23c 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -70,6 +70,27 @@ static int check_cpu_topology(char *path, struct cpu_map *map)
70 session = perf_session__new(&data, false, NULL); 70 session = perf_session__new(&data, false, NULL);
71 TEST_ASSERT_VAL("can't get session", session); 71 TEST_ASSERT_VAL("can't get session", session);
72 72
73 /* On platforms with large numbers of CPUs process_cpu_topology()
74 * might issue an error while reading the perf.data file section
75 * HEADER_CPU_TOPOLOGY and the cpu_topology_map pointed to by member
76 * cpu is a NULL pointer.
77 * Example: On s390
78 * CPU 0 is on core_id 0 and physical_package_id 6
79 * CPU 1 is on core_id 1 and physical_package_id 3
80 *
81 * Core_id and physical_package_id are platform and architecture
82 * dependend and might have higher numbers than the CPU id.
83 * This actually depends on the configuration.
84 *
85 * In this case process_cpu_topology() prints error message:
86 * "socket_id number is too big. You may need to upgrade the
87 * perf tool."
88 *
89 * This is the reason why this test might be skipped.
90 */
91 if (!session->header.env.cpu)
92 return TEST_SKIP;
93
73 for (i = 0; i < session->header.env.nr_cpus_avail; i++) { 94 for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
74 if (!cpu_map__has(map, i)) 95 if (!cpu_map__has(map, i))
75 continue; 96 continue;
@@ -95,7 +116,7 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
95{ 116{
96 char path[PATH_MAX]; 117 char path[PATH_MAX];
97 struct cpu_map *map; 118 struct cpu_map *map;
98 int ret = -1; 119 int ret = TEST_FAIL;
99 120
100 TEST_ASSERT_VAL("can't get templ file", !get_temp(path)); 121 TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
101 122
@@ -110,12 +131,9 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
110 goto free_path; 131 goto free_path;
111 } 132 }
112 133
113 if (check_cpu_topology(path, map)) 134 ret = check_cpu_topology(path, map);
114 goto free_map;
115 ret = 0;
116
117free_map:
118 cpu_map__put(map); 135 cpu_map__put(map);
136
119free_path: 137free_path:
120 unlink(path); 138 unlink(path);
121 return ret; 139 return ret;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index af7ad814b2c3..cee658733e2c 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -66,7 +66,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
66 } 66 }
67 67
68 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name); 68 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
69 if (IS_ERR(obj)) { 69 if (IS_ERR_OR_NULL(obj)) {
70 pr_debug("bpf: failed to load buffer\n"); 70 pr_debug("bpf: failed to load buffer\n");
71 return ERR_PTR(-EINVAL); 71 return ERR_PTR(-EINVAL);
72 } 72 }
@@ -102,14 +102,14 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
102 pr_debug("bpf: successfull builtin compilation\n"); 102 pr_debug("bpf: successfull builtin compilation\n");
103 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename); 103 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
104 104
105 if (!IS_ERR(obj) && llvm_param.dump_obj) 105 if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
106 llvm__dump_obj(filename, obj_buf, obj_buf_sz); 106 llvm__dump_obj(filename, obj_buf, obj_buf_sz);
107 107
108 free(obj_buf); 108 free(obj_buf);
109 } else 109 } else
110 obj = bpf_object__open(filename); 110 obj = bpf_object__open(filename);
111 111
112 if (IS_ERR(obj)) { 112 if (IS_ERR_OR_NULL(obj)) {
113 pr_debug("bpf: failed to load %s\n", filename); 113 pr_debug("bpf: failed to load %s\n", filename);
114 return obj; 114 return obj;
115 } 115 }
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index c8b98fa22997..4d5fc374e730 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -96,11 +96,19 @@ int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
96 /* Nothing to do, might as well just return */ 96 /* Nothing to do, might as well just return */
97 if (decoder->packet_count == 0) 97 if (decoder->packet_count == 0)
98 return 0; 98 return 0;
99 /*
100 * The queueing process in function cs_etm_decoder__buffer_packet()
101 * increments the tail *before* using it. This is somewhat counter
102 * intuitive but it has the advantage of centralizing tail management
103 * at a single location. Because of that we need to follow the same
104 * heuristic with the head, i.e we increment it before using its
105 * value. Otherwise the first element of the packet queue is not
106 * used.
107 */
108 decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1);
99 109
100 *packet = decoder->packet_buffer[decoder->head]; 110 *packet = decoder->packet_buffer[decoder->head];
101 111
102 decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1);
103
104 decoder->packet_count--; 112 decoder->packet_count--;
105 113
106 return 1; 114 return 1;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 92ec009a292d..b13f5f234c8f 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -127,6 +127,7 @@ struct perf_evsel {
127 bool precise_max; 127 bool precise_max;
128 bool ignore_missing_thread; 128 bool ignore_missing_thread;
129 bool forced_leader; 129 bool forced_leader;
130 bool use_uncore_alias;
130 /* parse modifier helper */ 131 /* parse modifier helper */
131 int exclude_GH; 132 int exclude_GH;
132 int nr_members; 133 int nr_members;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index b8b8a9558d32..2fc4ee8b86c1 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1219,13 +1219,16 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
1219 1219
1220int parse_events_add_pmu(struct parse_events_state *parse_state, 1220int parse_events_add_pmu(struct parse_events_state *parse_state,
1221 struct list_head *list, char *name, 1221 struct list_head *list, char *name,
1222 struct list_head *head_config, bool auto_merge_stats) 1222 struct list_head *head_config,
1223 bool auto_merge_stats,
1224 bool use_alias)
1223{ 1225{
1224 struct perf_event_attr attr; 1226 struct perf_event_attr attr;
1225 struct perf_pmu_info info; 1227 struct perf_pmu_info info;
1226 struct perf_pmu *pmu; 1228 struct perf_pmu *pmu;
1227 struct perf_evsel *evsel; 1229 struct perf_evsel *evsel;
1228 struct parse_events_error *err = parse_state->error; 1230 struct parse_events_error *err = parse_state->error;
1231 bool use_uncore_alias;
1229 LIST_HEAD(config_terms); 1232 LIST_HEAD(config_terms);
1230 1233
1231 pmu = perf_pmu__find(name); 1234 pmu = perf_pmu__find(name);
@@ -1244,11 +1247,14 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
1244 memset(&attr, 0, sizeof(attr)); 1247 memset(&attr, 0, sizeof(attr));
1245 } 1248 }
1246 1249
1250 use_uncore_alias = (pmu->is_uncore && use_alias);
1251
1247 if (!head_config) { 1252 if (!head_config) {
1248 attr.type = pmu->type; 1253 attr.type = pmu->type;
1249 evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats); 1254 evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
1250 if (evsel) { 1255 if (evsel) {
1251 evsel->pmu_name = name; 1256 evsel->pmu_name = name;
1257 evsel->use_uncore_alias = use_uncore_alias;
1252 return 0; 1258 return 0;
1253 } else { 1259 } else {
1254 return -ENOMEM; 1260 return -ENOMEM;
@@ -1282,6 +1288,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
1282 evsel->metric_expr = info.metric_expr; 1288 evsel->metric_expr = info.metric_expr;
1283 evsel->metric_name = info.metric_name; 1289 evsel->metric_name = info.metric_name;
1284 evsel->pmu_name = name; 1290 evsel->pmu_name = name;
1291 evsel->use_uncore_alias = use_uncore_alias;
1285 } 1292 }
1286 1293
1287 return evsel ? 0 : -ENOMEM; 1294 return evsel ? 0 : -ENOMEM;
@@ -1317,7 +1324,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
1317 list_add_tail(&term->list, head); 1324 list_add_tail(&term->list, head);
1318 1325
1319 if (!parse_events_add_pmu(parse_state, list, 1326 if (!parse_events_add_pmu(parse_state, list,
1320 pmu->name, head, true)) { 1327 pmu->name, head,
1328 true, true)) {
1321 pr_debug("%s -> %s/%s/\n", str, 1329 pr_debug("%s -> %s/%s/\n", str,
1322 pmu->name, alias->str); 1330 pmu->name, alias->str);
1323 ok++; 1331 ok++;
@@ -1339,7 +1347,120 @@ int parse_events__modifier_group(struct list_head *list,
1339 return parse_events__modifier_event(list, event_mod, true); 1347 return parse_events__modifier_event(list, event_mod, true);
1340} 1348}
1341 1349
1342void parse_events__set_leader(char *name, struct list_head *list) 1350/*
1351 * Check if the two uncore PMUs are from the same uncore block
1352 * The format of the uncore PMU name is uncore_#blockname_#pmuidx
1353 */
1354static bool is_same_uncore_block(const char *pmu_name_a, const char *pmu_name_b)
1355{
1356 char *end_a, *end_b;
1357
1358 end_a = strrchr(pmu_name_a, '_');
1359 end_b = strrchr(pmu_name_b, '_');
1360
1361 if (!end_a || !end_b)
1362 return false;
1363
1364 if ((end_a - pmu_name_a) != (end_b - pmu_name_b))
1365 return false;
1366
1367 return (strncmp(pmu_name_a, pmu_name_b, end_a - pmu_name_a) == 0);
1368}
1369
1370static int
1371parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
1372 struct parse_events_state *parse_state)
1373{
1374 struct perf_evsel *evsel, *leader;
1375 uintptr_t *leaders;
1376 bool is_leader = true;
1377 int i, nr_pmu = 0, total_members, ret = 0;
1378
1379 leader = list_first_entry(list, struct perf_evsel, node);
1380 evsel = list_last_entry(list, struct perf_evsel, node);
1381 total_members = evsel->idx - leader->idx + 1;
1382
1383 leaders = calloc(total_members, sizeof(uintptr_t));
1384 if (WARN_ON(!leaders))
1385 return 0;
1386
1387 /*
1388 * Going through the whole group and doing sanity check.
1389 * All members must use alias, and be from the same uncore block.
1390 * Also, storing the leader events in an array.
1391 */
1392 __evlist__for_each_entry(list, evsel) {
1393
1394 /* Only split the uncore group which members use alias */
1395 if (!evsel->use_uncore_alias)
1396 goto out;
1397
1398 /* The events must be from the same uncore block */
1399 if (!is_same_uncore_block(leader->pmu_name, evsel->pmu_name))
1400 goto out;
1401
1402 if (!is_leader)
1403 continue;
1404 /*
1405 * If the event's PMU name starts to repeat, it must be a new
1406 * event. That can be used to distinguish the leader from
1407 * other members, even they have the same event name.
1408 */
1409 if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
1410 is_leader = false;
1411 continue;
1412 }
1413 /* The name is always alias name */
1414 WARN_ON(strcmp(leader->name, evsel->name));
1415
1416 /* Store the leader event for each PMU */
1417 leaders[nr_pmu++] = (uintptr_t) evsel;
1418 }
1419
1420 /* only one event alias */
1421 if (nr_pmu == total_members) {
1422 parse_state->nr_groups--;
1423 goto handled;
1424 }
1425
1426 /*
1427 * An uncore event alias is a joint name which means the same event
1428 * runs on all PMUs of a block.
1429 * Perf doesn't support mixed events from different PMUs in the same
1430 * group. The big group has to be split into multiple small groups
1431 * which only include the events from the same PMU.
1432 *
1433 * Here the uncore event aliases must be from the same uncore block.
1434 * The number of PMUs must be same for each alias. The number of new
1435 * small groups equals to the number of PMUs.
1436 * Setting the leader event for corresponding members in each group.
1437 */
1438 i = 0;
1439 __evlist__for_each_entry(list, evsel) {
1440 if (i >= nr_pmu)
1441 i = 0;
1442 evsel->leader = (struct perf_evsel *) leaders[i++];
1443 }
1444
1445 /* The number of members and group name are same for each group */
1446 for (i = 0; i < nr_pmu; i++) {
1447 evsel = (struct perf_evsel *) leaders[i];
1448 evsel->nr_members = total_members / nr_pmu;
1449 evsel->group_name = name ? strdup(name) : NULL;
1450 }
1451
1452 /* Take the new small groups into account */
1453 parse_state->nr_groups += nr_pmu - 1;
1454
1455handled:
1456 ret = 1;
1457out:
1458 free(leaders);
1459 return ret;
1460}
1461
1462void parse_events__set_leader(char *name, struct list_head *list,
1463 struct parse_events_state *parse_state)
1343{ 1464{
1344 struct perf_evsel *leader; 1465 struct perf_evsel *leader;
1345 1466
@@ -1348,6 +1469,9 @@ void parse_events__set_leader(char *name, struct list_head *list)
1348 return; 1469 return;
1349 } 1470 }
1350 1471
1472 if (parse_events__set_leader_for_uncore_aliase(name, list, parse_state))
1473 return;
1474
1351 __perf_evlist__set_leader(list); 1475 __perf_evlist__set_leader(list);
1352 leader = list_entry(list->next, struct perf_evsel, node); 1476 leader = list_entry(list->next, struct perf_evsel, node);
1353 leader->group_name = name ? strdup(name) : NULL; 1477 leader->group_name = name ? strdup(name) : NULL;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 5015cfd58277..4473dac27aee 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -167,7 +167,9 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
167 void *ptr, char *type, u64 len); 167 void *ptr, char *type, u64 len);
168int parse_events_add_pmu(struct parse_events_state *parse_state, 168int parse_events_add_pmu(struct parse_events_state *parse_state,
169 struct list_head *list, char *name, 169 struct list_head *list, char *name,
170 struct list_head *head_config, bool auto_merge_stats); 170 struct list_head *head_config,
171 bool auto_merge_stats,
172 bool use_alias);
171 173
172int parse_events_multi_pmu_add(struct parse_events_state *parse_state, 174int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
173 char *str, 175 char *str,
@@ -178,7 +180,8 @@ int parse_events_copy_term_list(struct list_head *old,
178 180
179enum perf_pmu_event_symbol_type 181enum perf_pmu_event_symbol_type
180perf_pmu__parse_check(const char *name); 182perf_pmu__parse_check(const char *name);
181void parse_events__set_leader(char *name, struct list_head *list); 183void parse_events__set_leader(char *name, struct list_head *list,
184 struct parse_events_state *parse_state);
182void parse_events_update_lists(struct list_head *list_event, 185void parse_events_update_lists(struct list_head *list_event,
183 struct list_head *list_all); 186 struct list_head *list_all);
184void parse_events_evlist_error(struct parse_events_state *parse_state, 187void parse_events_evlist_error(struct parse_events_state *parse_state,
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 7afeb80cc39e..e37608a87dba 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -161,7 +161,7 @@ PE_NAME '{' events '}'
161 struct list_head *list = $3; 161 struct list_head *list = $3;
162 162
163 inc_group_count(list, _parse_state); 163 inc_group_count(list, _parse_state);
164 parse_events__set_leader($1, list); 164 parse_events__set_leader($1, list, _parse_state);
165 $$ = list; 165 $$ = list;
166} 166}
167| 167|
@@ -170,7 +170,7 @@ PE_NAME '{' events '}'
170 struct list_head *list = $2; 170 struct list_head *list = $2;
171 171
172 inc_group_count(list, _parse_state); 172 inc_group_count(list, _parse_state);
173 parse_events__set_leader(NULL, list); 173 parse_events__set_leader(NULL, list, _parse_state);
174 $$ = list; 174 $$ = list;
175} 175}
176 176
@@ -232,7 +232,7 @@ PE_NAME opt_event_config
232 YYABORT; 232 YYABORT;
233 233
234 ALLOC_LIST(list); 234 ALLOC_LIST(list);
235 if (parse_events_add_pmu(_parse_state, list, $1, $2, false)) { 235 if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
236 struct perf_pmu *pmu = NULL; 236 struct perf_pmu *pmu = NULL;
237 int ok = 0; 237 int ok = 0;
238 char *pattern; 238 char *pattern;
@@ -251,7 +251,7 @@ PE_NAME opt_event_config
251 free(pattern); 251 free(pattern);
252 YYABORT; 252 YYABORT;
253 } 253 }
254 if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true)) 254 if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false))
255 ok++; 255 ok++;
256 parse_events_terms__delete(terms); 256 parse_events_terms__delete(terms);
257 } 257 }
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 10dd5fce082b..7f8afacd08ee 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -531,6 +531,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
531 PyLong_FromUnsignedLongLong(sample->period)); 531 PyLong_FromUnsignedLongLong(sample->period));
532 pydict_set_item_string_decref(dict_sample, "phys_addr", 532 pydict_set_item_string_decref(dict_sample, "phys_addr",
533 PyLong_FromUnsignedLongLong(sample->phys_addr)); 533 PyLong_FromUnsignedLongLong(sample->phys_addr));
534 pydict_set_item_string_decref(dict_sample, "addr",
535 PyLong_FromUnsignedLongLong(sample->addr));
534 set_sample_read_in_dict(dict_sample, sample, evsel); 536 set_sample_read_in_dict(dict_sample, sample, evsel);
535 pydict_set_item_string_decref(dict, "sample", dict_sample); 537 pydict_set_item_string_decref(dict, "sample", dict_sample);
536 538
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 6c645eb77d42..ee820fcc29b0 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -252,6 +252,13 @@ void idr_checks(void)
252 idr_remove(&idr, 3); 252 idr_remove(&idr, 3);
253 idr_remove(&idr, 0); 253 idr_remove(&idr, 0);
254 254
255 assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
256 idr_remove(&idr, 1);
257 for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
258 assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
259 idr_remove(&idr, 1 << 30);
260 idr_destroy(&idr);
261
255 for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) { 262 for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
256 struct item *item = item_create(i, 0); 263 struct item *item = item_create(i, 0);
257 assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i); 264 assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 983dd25d49f4..1eefe211a4a8 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -5,3 +5,5 @@ CONFIG_BPF_EVENTS=y
5CONFIG_TEST_BPF=m 5CONFIG_TEST_BPF=m
6CONFIG_CGROUP_BPF=y 6CONFIG_CGROUP_BPF=y
7CONFIG_NETDEVSIM=m 7CONFIG_NETDEVSIM=m
8CONFIG_NET_CLS_ACT=y
9CONFIG_NET_SCH_INGRESS=y
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3e7718b1a9ae..fd7de7eb329e 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -11713,6 +11713,11 @@ static void get_unpriv_disabled()
11713 FILE *fd; 11713 FILE *fd;
11714 11714
11715 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); 11715 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
11716 if (!fd) {
11717 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
11718 unpriv_disabled = true;
11719 return;
11720 }
11716 if (fgets(buf, 2, fd) == buf && atoi(buf)) 11721 if (fgets(buf, 2, fd) == buf && atoi(buf))
11717 unpriv_disabled = true; 11722 unpriv_disabled = true;
11718 fclose(fd); 11723 fclose(fd);
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 6a75a3ea44ad..7ba089b33e8b 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -7,3 +7,8 @@ CONFIG_NET_L3_MASTER_DEV=y
7CONFIG_IPV6=y 7CONFIG_IPV6=y
8CONFIG_IPV6_MULTIPLE_TABLES=y 8CONFIG_IPV6_MULTIPLE_TABLES=y
9CONFIG_VETH=y 9CONFIG_VETH=y
10CONFIG_INET_XFRM_MODE_TUNNEL=y
11CONFIG_NET_IPVTI=y
12CONFIG_INET6_XFRM_MODE_TUNNEL=y
13CONFIG_IPV6_VTI=y
14CONFIG_DUMMY=y
diff --git a/tools/testing/selftests/net/reuseport_bpf_numa.c b/tools/testing/selftests/net/reuseport_bpf_numa.c
index 365c32e84189..c9f478b40996 100644
--- a/tools/testing/selftests/net/reuseport_bpf_numa.c
+++ b/tools/testing/selftests/net/reuseport_bpf_numa.c
@@ -23,6 +23,8 @@
23#include <unistd.h> 23#include <unistd.h>
24#include <numa.h> 24#include <numa.h>
25 25
26#include "../kselftest.h"
27
26static const int PORT = 8888; 28static const int PORT = 8888;
27 29
28static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto) 30static void build_rcv_group(int *rcv_fd, size_t len, int family, int proto)
@@ -229,7 +231,7 @@ int main(void)
229 int *rcv_fd, nodes; 231 int *rcv_fd, nodes;
230 232
231 if (numa_available() < 0) 233 if (numa_available() < 0)
232 error(1, errno, "no numa api support"); 234 ksft_exit_skip("no numa api support\n");
233 235
234 nodes = numa_max_node() + 1; 236 nodes = numa_max_node() + 1;
235 237
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 168c66d74fc5..e1473234968d 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -134,11 +134,15 @@ struct seccomp_data {
134#endif 134#endif
135 135
136#ifndef SECCOMP_FILTER_FLAG_TSYNC 136#ifndef SECCOMP_FILTER_FLAG_TSYNC
137#define SECCOMP_FILTER_FLAG_TSYNC 1 137#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
138#endif 138#endif
139 139
140#ifndef SECCOMP_FILTER_FLAG_LOG 140#ifndef SECCOMP_FILTER_FLAG_LOG
141#define SECCOMP_FILTER_FLAG_LOG 2 141#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
142#endif
143
144#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
145#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
142#endif 146#endif
143 147
144#ifndef PTRACE_SECCOMP_GET_METADATA 148#ifndef PTRACE_SECCOMP_GET_METADATA
@@ -2072,14 +2076,26 @@ TEST(seccomp_syscall_mode_lock)
2072TEST(detect_seccomp_filter_flags) 2076TEST(detect_seccomp_filter_flags)
2073{ 2077{
2074 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, 2078 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
2075 SECCOMP_FILTER_FLAG_LOG }; 2079 SECCOMP_FILTER_FLAG_LOG,
2080 SECCOMP_FILTER_FLAG_SPEC_ALLOW };
2076 unsigned int flag, all_flags; 2081 unsigned int flag, all_flags;
2077 int i; 2082 int i;
2078 long ret; 2083 long ret;
2079 2084
2080 /* Test detection of known-good filter flags */ 2085 /* Test detection of known-good filter flags */
2081 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { 2086 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
2087 int bits = 0;
2088
2082 flag = flags[i]; 2089 flag = flags[i];
2090 /* Make sure the flag is a single bit! */
2091 while (flag) {
2092 if (flag & 0x1)
2093 bits ++;
2094 flag >>= 1;
2095 }
2096 ASSERT_EQ(1, bits);
2097 flag = flags[i];
2098
2083 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2099 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
2084 ASSERT_NE(ENOSYS, errno) { 2100 ASSERT_NE(ENOSYS, errno) {
2085 TH_LOG("Kernel does not support seccomp syscall!"); 2101 TH_LOG("Kernel does not support seccomp syscall!");