-rw-r--r--  Documentation/fb/efifb.txt | 6
-rw-r--r--  Documentation/gpio/gpio-legacy.txt | 2
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/arm/include/asm/tlb.h | 11
-rw-r--r--  arch/ia64/include/asm/tlb.h | 8
-rw-r--r--  arch/mips/net/ebpf_jit.c | 1950
-rw-r--r--  arch/powerpc/configs/powernv_defconfig | 3
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 3
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 3
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 60
-rw-r--r--  arch/powerpc/kernel/process.c | 4
-rw-r--r--  arch/powerpc/kernel/smp.c | 6
-rw-r--r--  arch/powerpc/kernel/watchdog.c | 49
-rw-r--r--  arch/powerpc/platforms/powernv/idle.c | 41
-rw-r--r--  arch/s390/include/asm/tlb.h | 17
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 3
-rw-r--r--  arch/sh/include/asm/tlb.h | 8
-rw-r--r--  arch/sparc/include/asm/spitfire.h | 16
-rw-r--r--  arch/sparc/kernel/cpu.c | 6
-rw-r--r--  arch/sparc/kernel/cpumap.c | 1
-rw-r--r--  arch/sparc/kernel/head_64.S | 22
-rw-r--r--  arch/sparc/kernel/setup_64.c | 15
-rw-r--r--  arch/sparc/mm/init_64.c | 14
-rw-r--r--  arch/um/include/asm/tlb.h | 13
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 10
-rw-r--r--  arch/x86/mm/init.c | 3
-rw-r--r--  arch/x86/xen/enlighten_hvm.c | 59
-rw-r--r--  arch/xtensa/include/asm/Kbuild | 2
-rw-r--r--  arch/xtensa/include/asm/device.h | 15
-rw-r--r--  arch/xtensa/include/asm/param.h | 18
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c | 2
-rw-r--r--  arch/xtensa/mm/cache.c | 16
-rw-r--r--  block/bfq-iosched.h | 22
-rw-r--r--  block/bfq-wf2q.c | 146
-rw-r--r--  block/bio-integrity.c | 6
-rw-r--r--  block/blk-mq.c | 21
-rw-r--r--  drivers/acpi/spcr.c | 36
-rw-r--r--  drivers/base/firmware_class.c | 49
-rw-r--r--  drivers/block/sunvdc.c | 61
-rw-r--r--  drivers/block/zram/zram_drv.c | 4
-rw-r--r--  drivers/char/random.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle-powernv.c | 10
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 8
-rw-r--r--  drivers/dma-buf/sync_file.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 38
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 181
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 11
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 14
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 41
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 3
-rw-r--r--  drivers/gpu/drm/stm/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/Kconfig | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 6
-rw-r--r--  drivers/i2c/i2c-core-acpi.c | 19
-rw-r--r--  drivers/i2c/i2c-core-base.c | 1
-rw-r--r--  drivers/i2c/i2c-core.h | 9
-rw-r--r--  drivers/i2c/muxes/Kconfig | 2
-rw-r--r--  drivers/iio/accel/bmc150-accel-core.c | 9
-rw-r--r--  drivers/iio/accel/st_accel_core.c | 32
-rw-r--r--  drivers/iio/adc/aspeed_adc.c | 26
-rw-r--r--  drivers/iio/adc/axp288_adc.c | 42
-rw-r--r--  drivers/iio/adc/sun4i-gpadc-iio.c | 3
-rw-r--r--  drivers/iio/adc/vf610_adc.c | 2
-rw-r--r--  drivers/iio/common/st_sensors/st_sensors_core.c | 29
-rw-r--r--  drivers/iio/light/tsl2563.c | 2
-rw-r--r--  drivers/iio/pressure/st_pressure_core.c | 2
-rw-r--r--  drivers/infiniband/core/addr.c | 62
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 3
-rw-r--r--  drivers/infiniband/core/verbs.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 25
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 19
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 33
-rw-r--r--  drivers/iommu/arm-smmu.c | 7
-rw-r--r--  drivers/isdn/hysdn/hysdn_proclog.c | 28
-rw-r--r--  drivers/misc/mei/pci-me.c | 6
-rw-r--r--  drivers/misc/mei/pci-txe.c | 6
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 2
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 1
-rw-r--r--  drivers/mtd/nand/atmel/nand-controller.c | 2
-rw-r--r--  drivers/mtd/nand/atmel/pmecc.c | 21
-rw-r--r--  drivers/mtd/nand/nand_base.c | 13
-rw-r--r--  drivers/mtd/nand/nand_timings.c | 6
-rw-r--r--  drivers/mtd/nand/sunxi_nand.c | 4
-rw-r--r--  drivers/net/dsa/mt7530.c | 38
-rw-r--r--  drivers/net/dsa/mt7530.h | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 15
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 37
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 111
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 2
-rw-r--r--  drivers/net/geneve.c | 2
-rw-r--r--  drivers/net/gtp.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r--  drivers/net/hyperv/netvsc.c | 3
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 14
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 18
-rw-r--r--  drivers/net/usb/asix.h | 1
-rw-r--r--  drivers/net/usb/asix_common.c | 53
-rw-r--r--  drivers/net/usb/asix_devices.c | 1
-rw-r--r--  drivers/net/usb/lan78xx.c | 18
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 7
-rw-r--r--  drivers/net/vxlan.c | 1
-rw-r--r--  drivers/nvme/host/core.c | 35
-rw-r--r--  drivers/nvme/host/pci.c | 18
-rw-r--r--  drivers/nvme/target/fc.c | 212
-rw-r--r--  drivers/pci/pci.c | 35
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cherryview.c | 7
-rw-r--r--  drivers/pinctrl/intel/pinctrl-merrifield.c | 6
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 25
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | 1
-rw-r--r--  drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c | 2
-rw-r--r--  drivers/pinctrl/zte/pinctrl-zx.c | 11
-rw-r--r--  drivers/ptp/ptp_clock.c | 42
-rw-r--r--  drivers/ptp/ptp_private.h | 3
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 4
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 7
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 68
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 45
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 64
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 30
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 1
-rw-r--r--  drivers/scsi/qedf/qedf.h | 3
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 20
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 30
-rw-r--r--  drivers/scsi/sg.c | 31
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 3
-rw-r--r--  drivers/staging/iio/resolver/ad2s1210.c | 2
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_cm.c | 16
-rw-r--r--  drivers/target/iscsi/cxgbit/cxgbit_target.c | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 7
-rw-r--r--  drivers/target/target_core_tpg.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 4
-rw-r--r--  drivers/target/target_core_user.c | 13
-rw-r--r--  drivers/thunderbolt/eeprom.c | 9
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 23
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 37
-rw-r--r--  drivers/usb/core/hcd.c | 4
-rw-r--r--  drivers/usb/core/hub.c | 10
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 33
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 21
-rw-r--r--  drivers/usb/host/pci-quirks.c | 37
-rw-r--r--  drivers/usb/host/pci-quirks.h | 1
-rw-r--r--  drivers/usb/host/xhci-pci.c | 7
-rw-r--r--  drivers/usb/musb/musb_host.c | 1
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 17
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 5
-rw-r--r--  drivers/usb/renesas_usbhs/rcar3.c | 9
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 2
-rw-r--r--  drivers/usb/serial/pl2303.c | 2
-rw-r--r--  drivers/usb/serial/pl2303.h | 1
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 4
-rw-r--r--  drivers/usb/storage/usb.c | 18
-rw-r--r--  drivers/video/fbdev/efifb.c | 8
-rw-r--r--  drivers/video/fbdev/imxfb.c | 10
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/core.c | 1
-rw-r--r--  drivers/xen/events/events_base.c | 2
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c | 3
-rw-r--r--  fs/fuse/file.c | 9
-rw-r--r--  fs/fuse/fuse_i.h | 1
-rw-r--r--  fs/nfs/Kconfig | 1
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayoutdev.c | 1
-rw-r--r--  fs/nfs/nfs4proc.c | 3
-rw-r--r--  fs/proc/meminfo.c | 8
-rw-r--r--  fs/proc/task_mmu.c | 7
-rw-r--r--  fs/userfaultfd.c | 4
-rw-r--r--  fs/xfs/xfs_inode.c | 12
-rw-r--r--  fs/xfs/xfs_log_cil.c | 1
-rw-r--r--  include/asm-generic/tlb.h | 7
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/cpuhotplug.h | 2
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/i2c.h | 3
-rw-r--r--  include/linux/iio/common/st_sensors.h | 7
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mlx5/qp.h | 1
-rw-r--r--  include/linux/mm_types.h | 64
-rw-r--r--  include/linux/mtd/nand.h | 6
-rw-r--r--  include/linux/nvme-fc-driver.h | 7
-rw-r--r--  include/linux/pci.h | 1
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h | 4
-rw-r--r--  include/linux/platform_data/st_sensors_pdata.h | 2
-rw-r--r--  include/linux/ptp_clock_kernel.h | 20
-rw-r--r--  include/linux/sync_file.h | 3
-rw-r--r--  include/net/tcp.h | 10
-rw-r--r--  include/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  include/uapi/drm/msm_drm.h | 6
-rw-r--r--  kernel/fork.c | 2
-rw-r--r--  kernel/futex.c | 5
-rw-r--r--  kernel/power/snapshot.c | 2
-rw-r--r--  lib/fault-inject.c | 8
-rw-r--r--  lib/test_kmod.c | 16
-rw-r--r--  mm/balloon_compaction.c | 2
-rw-r--r--  mm/debug.c | 6
-rw-r--r--  mm/huge_memory.c | 7
-rw-r--r--  mm/hugetlb.c | 2
-rw-r--r--  mm/ksm.c | 3
-rw-r--r--  mm/memory.c | 42
-rw-r--r--  mm/migrate.c | 6
-rw-r--r--  mm/mprotect.c | 4
-rw-r--r--  mm/page_alloc.c | 11
-rw-r--r--  mm/rmap.c | 52
-rw-r--r--  mm/shmem.c | 12
-rw-r--r--  mm/util.c | 2
-rw-r--r--  net/batman-adv/translation-table.c | 60
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 7
-rw-r--r--  net/ipv4/cipso_ipv4.c | 12
-rw-r--r--  net/ipv4/fou.c | 1
-rw-r--r--  net/ipv4/igmp.c | 6
-rw-r--r--  net/ipv4/ip_output.c | 8
-rw-r--r--  net/ipv4/tcp_input.c | 34
-rw-r--r--  net/ipv4/tcp_output.c | 27
-rw-r--r--  net/ipv4/tcp_timer.c | 3
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/udp_offload.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 7
-rw-r--r--  net/ipv6/route.c | 11
-rw-r--r--  net/ipv6/udp_offload.c | 2
-rw-r--r--  net/packet/af_packet.c | 13
-rw-r--r--  net/rds/ib_recv.c | 5
-rw-r--r--  net/sched/act_ipt.c | 22
-rw-r--r--  net/tipc/node.c | 4
-rwxr-xr-x  scripts/get_maintainer.pl | 91
-rw-r--r--  scripts/parse-maintainers.pl | 97
-rw-r--r--  tools/build/feature/test-bpf.c | 2
-rw-r--r--  tools/lib/bpf/bpf.c | 2
-rw-r--r--  tools/testing/selftests/bpf/test_pkt_md_access.c | 11
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 19
276 files changed, 4608 insertions, 1323 deletions
diff --git a/Documentation/fb/efifb.txt b/Documentation/fb/efifb.txt
index a59916c29b33..1a85c1bdaf38 100644
--- a/Documentation/fb/efifb.txt
+++ b/Documentation/fb/efifb.txt
@@ -27,5 +27,11 @@ You have to add the following kernel parameters in your elilo.conf:
27 Macbook Pro 17", iMac 20" : 27 Macbook Pro 17", iMac 20" :
28 video=efifb:i20 28 video=efifb:i20
29 29
30Accepted options:
31
32nowc Don't map the framebuffer write combined. This can be used
33 to workaround side-effects and slowdowns on other CPU cores
34 when large amounts of console data are written.
35
30-- 36--
31Edgar Hucek <gimli@dark-green.com> 37Edgar Hucek <gimli@dark-green.com>
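
The new nowc option is passed the same way as the mode options shown in the
hunk above; a minimal sketch of a kernel command line using it (hypothetical
setup, not taken from the patch) would be:

	video=efifb:nowc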
diff --git a/Documentation/gpio/gpio-legacy.txt b/Documentation/gpio/gpio-legacy.txt
index b34fd94f7089..5eacc147ea87 100644
--- a/Documentation/gpio/gpio-legacy.txt
+++ b/Documentation/gpio/gpio-legacy.txt
@@ -459,7 +459,7 @@ pin controller?
 
 This is done by registering "ranges" of pins, which are essentially
 cross-reference tables. These are described in
-Documentation/pinctrl.txt
+Documentation/driver-api/pinctl.rst
 
 While the pin allocation is totally managed by the pinctrl subsystem,
 gpio (under gpiolib) is still maintained by gpio drivers. It may happen
diff --git a/MAINTAINERS b/MAINTAINERS
index 44cb004c765d..6f7721d1634c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1161,7 +1161,7 @@ M: Brendan Higgins <brendanhiggins@google.com>
 R:	Benjamin Herrenschmidt <benh@kernel.crashing.org>
 R:	Joel Stanley <joel@jms.id.au>
 L:	linux-i2c@vger.kernel.org
-L:	openbmc@lists.ozlabs.org
+L:	openbmc@lists.ozlabs.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/irqchip/irq-aspeed-i2c-ic.c
 F:	drivers/i2c/busses/i2c-aspeed.c
@@ -5834,7 +5834,7 @@ F: drivers/staging/greybus/spi.c
 F:	drivers/staging/greybus/spilib.c
 F:	drivers/staging/greybus/spilib.h
 
-GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
+GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
 M:	Bryan O'Donoghue <pure.logic@nexus-software.ie>
 S:	Maintained
 F:	drivers/staging/greybus/loopback.c
@@ -10383,7 +10383,7 @@ L: linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:	Maintained
 F:	Documentation/devicetree/bindings/pinctrl/
-F:	Documentation/pinctrl.txt
+F:	Documentation/driver-api/pinctl.rst
 F:	drivers/pinctrl/
 F:	include/linux/pinctrl/
 
@@ -14004,6 +14004,7 @@ F: drivers/block/virtio_blk.c
 F:	include/linux/virtio*.h
 F:	include/uapi/linux/virtio_*.h
 F:	drivers/crypto/virtio/
+F:	mm/balloon_compaction.c
 
 VIRTIO CRYPTO DRIVER
 M:	Gonglei <arei.gonglei@huawei.com>
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 3f2eb76243e3..d5562f9ce600 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -148,7 +148,8 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->fullmm = !(start | (end+1));
@@ -166,8 +167,14 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 }
 
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force) {
+		tlb->range_start = start;
+		tlb->range_end = end;
+	}
+
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index fced197b9626..cbe5ac3699bf 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -168,7 +168,8 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
+arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+		    unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 	tlb->max = ARRAY_SIZE(tlb->local);
@@ -185,8 +186,11 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
  * collected.
  */
 static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+arch_tlb_finish_mmu(struct mmu_gather *tlb,
+		    unsigned long start, unsigned long end, bool force)
 {
+	if (force)
+		tlb->need_flush = 1;
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
 	 * tlb->end_addr.
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 000000000000..3f87b96da5c4
--- /dev/null
+++ b/arch/mips/net/ebpf_jit.c
@@ -0,0 +1,1950 @@
+/*
+ * Just-In-Time compiler for eBPF filters on MIPS
+ *
+ * Copyright (c) 2017 Cavium, Inc.
+ *
+ * Based on code from:
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO	0
+#define MIPS_R_AT	1
+#define MIPS_R_V0	2	/* BPF_R0 */
+#define MIPS_R_V1	3
+#define MIPS_R_A0	4	/* BPF_R1 */
+#define MIPS_R_A1	5	/* BPF_R2 */
+#define MIPS_R_A2	6	/* BPF_R3 */
+#define MIPS_R_A3	7	/* BPF_R4 */
+#define MIPS_R_A4	8	/* BPF_R5 */
+#define MIPS_R_T4	12	/* BPF_AX */
+#define MIPS_R_T5	13
+#define MIPS_R_T6	14
+#define MIPS_R_T7	15
+#define MIPS_R_S0	16	/* BPF_R6 */
+#define MIPS_R_S1	17	/* BPF_R7 */
+#define MIPS_R_S2	18	/* BPF_R8 */
+#define MIPS_R_S3	19	/* BPF_R9 */
+#define MIPS_R_S4	20	/* BPF_TCC */
+#define MIPS_R_S5	21
+#define MIPS_R_S6	22
+#define MIPS_R_S7	23
+#define MIPS_R_T8	24
+#define MIPS_R_T9	25
+#define MIPS_R_SP	29
+#define MIPS_R_RA	31
+
+/* eBPF flags */
+#define EBPF_SAVE_S0	BIT(0)
+#define EBPF_SAVE_S1	BIT(1)
+#define EBPF_SAVE_S2	BIT(2)
+#define EBPF_SAVE_S3	BIT(3)
+#define EBPF_SAVE_S4	BIT(4)
+#define EBPF_SAVE_RA	BIT(5)
+#define EBPF_SEEN_FP	BIT(6)
+#define EBPF_SEEN_TC	BIT(7)
+#define EBPF_TCC_IN_V1	BIT(8)
+
+/*
+ * For the mips64 ISA, we need to track the value range or type for
+ * each JIT register.  The BPF machine requires zero extended 32-bit
+ * values, but the mips64 ISA requires sign extended 32-bit values.
+ * At each point in the BPF program we track the state of every
+ * register so that we can zero extend or sign extend as the BPF
+ * semantics require.
+ */
+enum reg_val_type {
+	/* uninitialized */
+	REG_UNKNOWN,
+	/* not known to be 32-bit compatible. */
+	REG_64BIT,
+	/* 32-bit compatible, no truncation needed for 64-bit ops. */
+	REG_64BIT_32BIT,
+	/* 32-bit compatible, need truncation for 64-bit ops. */
+	REG_32BIT,
+	/* 32-bit zero extended. */
+	REG_32BIT_ZERO_EX,
+	/* 32-bit no sign/zero extension needed. */
+	REG_32BIT_POS
+};
+
+/*
+ * high bit of offsets indicates if long branch conversion done at
+ * this insn.
+ */
+#define OFFSETS_B_CONV	BIT(31)
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf:		The sk_filter
+ * @stack_size:		eBPF stack size
+ * @tmp_offset:		eBPF $sp offset to 8-byte temporary memory
+ * @idx:		Instruction index
+ * @flags:		JIT flags
+ * @offsets:		Instruction offsets
+ * @target:		Memory location for the compiled filter
+ * @reg_val_types	Packed enum reg_val_type for each register.
+ */
+struct jit_ctx {
+	const struct bpf_prog *skf;
+	int stack_size;
+	int tmp_offset;
+	u32 idx;
+	u32 flags;
+	u32 *offsets;
+	u32 *target;
+	u64 *reg_val_types;
+	unsigned int long_b_conversion:1;
+	unsigned int gen_b_offsets:1;
+};
+
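+/*
+ * Editor's note (not in the original patch): each register's value type
+ * is packed into three bits of a u64, so the state of all eleven eBPF
+ * registers at one instruction fits in a single reg_val_types word.
+ */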
+static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
+{
+	*rvt &= ~(7ull << (reg * 3));
+	*rvt |= ((u64)type << (reg * 3));
+}
+
+static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
+					  int index, int reg)
+{
+	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
+}
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...)			\
+do {							\
+	if ((ctx)->target != NULL) {			\
+		u32 *p = &(ctx)->target[ctx->idx];	\
+		uasm_i_##func(&p, ##__VA_ARGS__);	\
+	}						\
+	(ctx)->idx++;					\
+} while (0)
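+/*
+ * Editor's note (not in the original patch): emit_instr() advances
+ * ctx->idx even when ctx->target is NULL, which is presumably what lets
+ * an initial pass size the image before any memory is allocated.
+ */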
+
+static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
+{
+	unsigned long target_va, base_va;
+	unsigned int r;
+
+	if (!ctx->target)
+		return 0;
+
+	base_va = (unsigned long)ctx->target;
+	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
+
+	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
+		return (unsigned int)-1;
+	r = target_va & 0x0ffffffful;
+	return r;
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+	if (!ctx->gen_b_offsets)
+		return 0;
+
+	/*
+	 * We want a pc-relative branch.  tgt is the instruction offset
+	 * we want to jump to.
+
+	 * Branch on MIPS:
+	 * I: target_offset <- sign_extend(offset)
+	 * I+1: PC += target_offset (delay slot)
+	 *
+	 * ctx->idx currently points to the branch instruction
+	 * but the offset is added to the delay slot so we need
+	 * to subtract 4.
+	 */
+	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
+		(ctx->idx * 4) - 4;
+}
+
+int bpf_jit_enable __read_mostly;
+
+enum which_ebpf_reg {
+	src_reg,
+	src_reg_no_fp,
+	dst_reg,
+	dst_reg_fp_ok
+};
+
+/*
+ * For eBPF, the register mapping naturally falls out of the
+ * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
+ * separate frame pointer, so BPF_REG_10 relative accesses are
+ * adjusted to be $sp relative.
+ */
+int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
+		     enum which_ebpf_reg w)
+{
+	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
+		insn->src_reg : insn->dst_reg;
+
+	switch (ebpf_reg) {
+	case BPF_REG_0:
+		return MIPS_R_V0;
+	case BPF_REG_1:
+		return MIPS_R_A0;
+	case BPF_REG_2:
+		return MIPS_R_A1;
+	case BPF_REG_3:
+		return MIPS_R_A2;
+	case BPF_REG_4:
+		return MIPS_R_A3;
+	case BPF_REG_5:
+		return MIPS_R_A4;
+	case BPF_REG_6:
+		ctx->flags |= EBPF_SAVE_S0;
+		return MIPS_R_S0;
+	case BPF_REG_7:
+		ctx->flags |= EBPF_SAVE_S1;
+		return MIPS_R_S1;
+	case BPF_REG_8:
+		ctx->flags |= EBPF_SAVE_S2;
+		return MIPS_R_S2;
+	case BPF_REG_9:
+		ctx->flags |= EBPF_SAVE_S3;
+		return MIPS_R_S3;
+	case BPF_REG_10:
+		if (w == dst_reg || w == src_reg_no_fp)
+			goto bad_reg;
+		ctx->flags |= EBPF_SEEN_FP;
+		/*
+		 * Needs special handling, return something that
+		 * cannot be clobbered just in case.
+		 */
+		return MIPS_R_ZERO;
+	case BPF_REG_AX:
+		return MIPS_R_T4;
+	default:
+bad_reg:
+		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
+		return -EINVAL;
+	}
+}
+/*
+ * eBPF stack frame will be something like:
+ *
+ *  Entry $sp ------>   +--------------------------------+
+ *                      |   $ra  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s0  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s1  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s2  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s3  (optional)              |
+ *                      +--------------------------------+
+ *                      |   $s4  (optional)              |
+ *                      +--------------------------------+
+ *                      |   tmp-storage  (if $ra saved)  |
+ * $sp + tmp_offset --> +--------------------------------+  <--BPF_REG_10
+ *                      |   BPF_REG_10 relative storage  |
+ *                      |    MAX_BPF_STACK (optional)    |
+ *                      |      .                         |
+ *                      |      .                         |
+ *                      |      .                         |
+ *     $sp -------->    +--------------------------------+
+ *
+ * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
+ * area is not allocated.
+ */
+static int gen_int_prologue(struct jit_ctx *ctx)
+{
+	int stack_adjust = 0;
+	int store_offset;
+	int locals_size;
+
+	if (ctx->flags & EBPF_SAVE_RA)
+		/*
+		 * If RA we are doing a function call and may need
+		 * extra 8-byte tmp area.
+		 */
+		stack_adjust += 16;
+	if (ctx->flags & EBPF_SAVE_S0)
+		stack_adjust += 8;
+	if (ctx->flags & EBPF_SAVE_S1)
+		stack_adjust += 8;
+	if (ctx->flags & EBPF_SAVE_S2)
+		stack_adjust += 8;
+	if (ctx->flags & EBPF_SAVE_S3)
+		stack_adjust += 8;
+	if (ctx->flags & EBPF_SAVE_S4)
+		stack_adjust += 8;
+
+	BUILD_BUG_ON(MAX_BPF_STACK & 7);
+	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
+
+	stack_adjust += locals_size;
+	ctx->tmp_offset = locals_size;
+
+	ctx->stack_size = stack_adjust;
+
+	/*
+	 * First instruction initializes the tail call count (TCC).
+	 * On tail call we skip this instruction, and the TCC is
+	 * passed in $v1 from the caller.
+	 */
+	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+	if (stack_adjust)
+		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+	else
+		return 0;
+
+	store_offset = stack_adjust - 8;
+
+	if (ctx->flags & EBPF_SAVE_RA) {
+		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S0) {
+		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S1) {
+		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S2) {
+		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S3) {
+		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S4) {
+		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+
+	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
+		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+
+	return 0;
+}
+
+static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
+{
+	const struct bpf_prog *prog = ctx->skf;
+	int stack_adjust = ctx->stack_size;
+	int store_offset = stack_adjust - 8;
+	int r0 = MIPS_R_V0;
+
+	if (dest_reg == MIPS_R_RA &&
+	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+		/* Don't let zero extended value escape. */
+		emit_instr(ctx, sll, r0, r0, 0);
+
+	if (ctx->flags & EBPF_SAVE_RA) {
+		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S0) {
+		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S1) {
+		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S2) {
+		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S3) {
+		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	if (ctx->flags & EBPF_SAVE_S4) {
+		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
+		store_offset -= 8;
+	}
+	emit_instr(ctx, jr, dest_reg);
+
+	if (stack_adjust)
+		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
+	else
+		emit_instr(ctx, nop);
+
+	return 0;
+}
+
+static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
+			   struct jit_ctx *ctx)
+{
+	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
+	} else {
+		int lower = (s16)(insn->imm & 0xffff);
+		int upper = insn->imm - lower;
+
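+		/*
+		 * Editor's note (not in the original patch): addiu
+		 * sign-extends its 16-bit immediate, so the value handed
+		 * to lui (upper = imm - lower) already compensates for a
+		 * negative lower half.
+		 */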
+		emit_instr(ctx, lui, reg, upper >> 16);
+		emit_instr(ctx, addiu, reg, reg, lower);
+	}
+
+}
+
+static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+			int idx)
+{
+	int upper_bound, lower_bound;
+	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+
+	if (dst < 0)
+		return dst;
+
+	switch (BPF_OP(insn->code)) {
+	case BPF_MOV:
+	case BPF_ADD:
+		upper_bound = S16_MAX;
+		lower_bound = S16_MIN;
+		break;
+	case BPF_SUB:
+		upper_bound = -(int)S16_MIN;
+		lower_bound = -(int)S16_MAX;
+		break;
+	case BPF_AND:
+	case BPF_OR:
+	case BPF_XOR:
+		upper_bound = 0xffff;
+		lower_bound = 0;
+		break;
+	case BPF_RSH:
+	case BPF_LSH:
+	case BPF_ARSH:
+		/* Shift amounts are truncated, no need for bounds */
+		upper_bound = S32_MAX;
+		lower_bound = S32_MIN;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Immediate move clobbers the register, so no sign/zero
+	 * extension needed.
+	 */
+	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+	    BPF_OP(insn->code) != BPF_MOV &&
+	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
+		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
+	if (BPF_CLASS(insn->code) == BPF_ALU &&
+	    BPF_OP(insn->code) != BPF_LSH &&
+	    BPF_OP(insn->code) != BPF_MOV &&
+	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
+		emit_instr(ctx, sll, dst, dst, 0);
+
+	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
+		/* single insn immediate case */
+		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+		case BPF_ALU64 | BPF_MOV:
+			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
+			break;
+		case BPF_ALU64 | BPF_AND:
+		case BPF_ALU | BPF_AND:
+			emit_instr(ctx, andi, dst, dst, insn->imm);
+			break;
+		case BPF_ALU64 | BPF_OR:
+		case BPF_ALU | BPF_OR:
+			emit_instr(ctx, ori, dst, dst, insn->imm);
+			break;
+		case BPF_ALU64 | BPF_XOR:
+		case BPF_ALU | BPF_XOR:
+			emit_instr(ctx, xori, dst, dst, insn->imm);
+			break;
+		case BPF_ALU64 | BPF_ADD:
+			emit_instr(ctx, daddiu, dst, dst, insn->imm);
+			break;
+		case BPF_ALU64 | BPF_SUB:
+			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
+			break;
+		case BPF_ALU64 | BPF_RSH:
+			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
+			break;
+		case BPF_ALU | BPF_RSH:
+			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
+			break;
+		case BPF_ALU64 | BPF_LSH:
+			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
+			break;
+		case BPF_ALU | BPF_LSH:
+			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
+			break;
+		case BPF_ALU64 | BPF_ARSH:
+			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
+			break;
+		case BPF_ALU | BPF_ARSH:
+			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
+			break;
+		case BPF_ALU | BPF_MOV:
+			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
+			break;
+		case BPF_ALU | BPF_ADD:
+			emit_instr(ctx, addiu, dst, dst, insn->imm);
+			break;
+		case BPF_ALU | BPF_SUB:
+			emit_instr(ctx, addiu, dst, dst, -insn->imm);
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		/* multi insn immediate case */
+		if (BPF_OP(insn->code) == BPF_MOV) {
+			gen_imm_to_reg(insn, dst, ctx);
+		} else {
+			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+			case BPF_ALU64 | BPF_AND:
+			case BPF_ALU | BPF_AND:
+				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
+				break;
+			case BPF_ALU64 | BPF_OR:
+			case BPF_ALU | BPF_OR:
+				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
+				break;
+			case BPF_ALU64 | BPF_XOR:
+			case BPF_ALU | BPF_XOR:
+				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
+				break;
+			case BPF_ALU64 | BPF_ADD:
+				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
+				break;
+			case BPF_ALU64 | BPF_SUB:
+				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
+				break;
+			case BPF_ALU | BPF_ADD:
+				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
+				break;
+			case BPF_ALU | BPF_SUB:
+				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
+				break;
+			default:
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void * __must_check
+ool_skb_header_pointer(const struct sk_buff *skb, int offset,
+		       int len, void *buffer)
+{
+	return skb_header_pointer(skb, offset, len, buffer);
+}
+
+static int size_to_len(const struct bpf_insn *insn)
+{
+	switch (BPF_SIZE(insn->code)) {
+	case BPF_B:
+		return 1;
+	case BPF_H:
+		return 2;
+	case BPF_W:
+		return 4;
+	case BPF_DW:
+		return 8;
+	}
+	return 0;
+}
+
+static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
+{
+	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
+		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
+	} else if (value >= 0xffffffff80000000ull ||
+		   (value < 0x80000000 && value > 0xffff)) {
+		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
+		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
+	} else {
+		int i;
+		bool seen_part = false;
+		int needed_shift = 0;
+
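+		/*
+		 * Editor's note (not in the original patch): the constant
+		 * is built 16 bits at a time, most significant part first,
+		 * shifting the accumulated value left whenever a further
+		 * non-zero part is emitted.
+		 */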
+		for (i = 0; i < 4; i++) {
+			u64 part = (value >> (16 * (3 - i))) & 0xffff;
+
+			if (seen_part && needed_shift > 0 && (part || i == 3)) {
+				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
+				needed_shift = 0;
+			}
+			if (part) {
+				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
+					emit_instr(ctx, lui, dst, (s32)(s16)part);
+					needed_shift = -16;
+				} else {
+					emit_instr(ctx, ori, dst,
+						   seen_part ? dst : MIPS_R_ZERO,
+						   (unsigned int)part);
+				}
+				seen_part = true;
+			}
+			if (seen_part)
+				needed_shift += 16;
+		}
+	}
+}
+
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
+{
+	int off, b_off;
+
+	ctx->flags |= EBPF_SEEN_TC;
+	/*
+	 * if (index >= array->map.max_entries)
+	 *     goto out;
+	 */
+	off = offsetof(struct bpf_array, map.max_entries);
+	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
+	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
+	b_off = b_imm(this_idx + 1, ctx);
+	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
+	/*
+	 * if (--TCC < 0)
+	 *     goto out;
+	 */
+	/* Delay slot */
+	emit_instr(ctx, daddiu, MIPS_R_T5,
+		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+	b_off = b_imm(this_idx + 1, ctx);
+	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+	/*
+	 * prog = array->ptrs[index];
+	 * if (prog == NULL)
+	 *     goto out;
+	 */
+	/* Delay slot */
+	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
+	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
+	off = offsetof(struct bpf_array, ptrs);
+	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
+	b_off = b_imm(this_idx + 1, ctx);
+	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
+	/* Delay slot */
+	emit_instr(ctx, nop);
+
+	/* goto *(prog->bpf_func + 4); */
+	off = offsetof(struct bpf_prog, bpf_func);
+	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
+	/* All systems are go... propagate TCC */
+	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
+	/* Skip first instruction (TCC initialization) */
+	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
+	return build_int_epilogue(ctx, MIPS_R_T9);
+}
+
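+/*
+ * Editor's note (not in the original patch): Cavium Octeon cores
+ * provide bbit0/bbit1 branch-on-bit instructions, hence this check.
+ */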
+static bool use_bbit_insns(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_CAVIUM_OCTEON:
+	case CPU_CAVIUM_OCTEON_PLUS:
+	case CPU_CAVIUM_OCTEON2:
+	case CPU_CAVIUM_OCTEON3:
+		return true;
+	default:
+		return false;
+	}
+}
+
672{
673 return b_off > 0x1ffff || b_off < -0x20000;
674}
675
676/* Returns the number of insn slots consumed. */
677static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
678 int this_idx, int exit_idx)
679{
680 int src, dst, r, td, ts, mem_off, b_off;
681 bool need_swap, did_move, cmp_eq;
682 unsigned int target;
683 u64 t64;
684 s64 t64s;
685
686 switch (insn->code) {
687 case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
688 case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
689 case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
690 case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
691 case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
692 case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
693 case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
694 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
695 case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
696 case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
697 case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
698 case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
699 case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */
700 case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */
701 case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */
702 case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */
703 case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */
704 case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */
705 r = gen_imm_insn(insn, ctx, this_idx);
706 if (r < 0)
707 return r;
708 break;
709 case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
710 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
711 if (dst < 0)
712 return dst;
713 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
714 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
715 if (insn->imm == 1) /* Mult by 1 is a nop */
716 break;
717 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
718 emit_instr(ctx, dmultu, MIPS_R_AT, dst);
719 emit_instr(ctx, mflo, dst);
720 break;
721 case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
722 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
723 if (dst < 0)
724 return dst;
725 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
726 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
727 emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
728 break;
729 case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
730 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
731 if (dst < 0)
732 return dst;
733 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
734 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
735 /* sign extend */
736 emit_instr(ctx, sll, dst, dst, 0);
737 }
738 if (insn->imm == 1) /* Mult by 1 is a nop */
739 break;
740 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
741 emit_instr(ctx, multu, dst, MIPS_R_AT);
742 emit_instr(ctx, mflo, dst);
743 break;
744 case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
745 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
746 if (dst < 0)
747 return dst;
748 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
749 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
750 /* sign extend */
751 emit_instr(ctx, sll, dst, dst, 0);
752 }
753 emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
754 break;
755 case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
756 case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
757 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
758 if (dst < 0)
759 return dst;
760 if (insn->imm == 0) { /* Div by zero */
761 b_off = b_imm(exit_idx, ctx);
762 if (is_bad_offset(b_off))
763 return -E2BIG;
764 emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
765 emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
766 }
767 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
768 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
769 /* sign extend */
770 emit_instr(ctx, sll, dst, dst, 0);
771 if (insn->imm == 1) {
772 /* div by 1 is a nop, mod by 1 is zero */
773 if (BPF_OP(insn->code) == BPF_MOD)
774 emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
775 break;
776 }
777 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
778 emit_instr(ctx, divu, dst, MIPS_R_AT);
779 if (BPF_OP(insn->code) == BPF_DIV)
780 emit_instr(ctx, mflo, dst);
781 else
782 emit_instr(ctx, mfhi, dst);
783 break;
784 case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
785 case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
786 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
787 if (dst < 0)
788 return dst;
789 if (insn->imm == 0) { /* Div by zero */
790 b_off = b_imm(exit_idx, ctx);
791 if (is_bad_offset(b_off))
792 return -E2BIG;
793 emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
794 emit_instr(ctx, addu, MIPS_R_V0, MIPS_R_ZERO, MIPS_R_ZERO);
795 }
796 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
797 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
798
799 if (insn->imm == 1) {
800 /* div by 1 is a nop, mod by 1 is zero */
801 if (BPF_OP(insn->code) == BPF_MOD)
802 emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
803 break;
804 }
805 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
806 emit_instr(ctx, ddivu, dst, MIPS_R_AT);
807 if (BPF_OP(insn->code) == BPF_DIV)
808 emit_instr(ctx, mflo, dst);
809 else
810 emit_instr(ctx, mfhi, dst);
811 break;
812 case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
813 case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
814 case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
815 case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
816 case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
817 case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
818 case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
819 case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
820 case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
821 case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
822 case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
823 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
824 src = ebpf_to_mips_reg(ctx, insn, src_reg);
825 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
826 if (src < 0 || dst < 0)
827 return -EINVAL;
828 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
829 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
830 did_move = false;
831 if (insn->src_reg == BPF_REG_10) {
832 if (BPF_OP(insn->code) == BPF_MOV) {
833 emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
834 did_move = true;
835 } else {
836 emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
837 src = MIPS_R_AT;
838 }
839 } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
840 int tmp_reg = MIPS_R_AT;
841
842 if (BPF_OP(insn->code) == BPF_MOV) {
843 tmp_reg = dst;
844 did_move = true;
845 }
846 emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
847 emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
848 src = MIPS_R_AT;
849 }
850 switch (BPF_OP(insn->code)) {
851 case BPF_MOV:
852 if (!did_move)
853 emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
854 break;
855 case BPF_ADD:
856 emit_instr(ctx, daddu, dst, dst, src);
857 break;
858 case BPF_SUB:
859 emit_instr(ctx, dsubu, dst, dst, src);
860 break;
861 case BPF_XOR:
862 emit_instr(ctx, xor, dst, dst, src);
863 break;
864 case BPF_OR:
865 emit_instr(ctx, or, dst, dst, src);
866 break;
867 case BPF_AND:
868 emit_instr(ctx, and, dst, dst, src);
869 break;
870 case BPF_MUL:
871 emit_instr(ctx, dmultu, dst, src);
872 emit_instr(ctx, mflo, dst);
873 break;
874 case BPF_DIV:
875 case BPF_MOD:
876 b_off = b_imm(exit_idx, ctx);
877 if (is_bad_offset(b_off))
878 return -E2BIG;
879 emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
880 emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
881 emit_instr(ctx, ddivu, dst, src);
882 if (BPF_OP(insn->code) == BPF_DIV)
883 emit_instr(ctx, mflo, dst);
884 else
885 emit_instr(ctx, mfhi, dst);
886 break;
887 case BPF_LSH:
888 emit_instr(ctx, dsllv, dst, dst, src);
889 break;
890 case BPF_RSH:
891 emit_instr(ctx, dsrlv, dst, dst, src);
892 break;
893 case BPF_ARSH:
894 emit_instr(ctx, dsrav, dst, dst, src);
895 break;
896 default:
897 pr_err("ALU64_REG NOT HANDLED\n");
898 return -EINVAL;
899 }
900 break;
901 case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
902 case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
903 case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
904 case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
905 case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
906 case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
907 case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
908 case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
909 case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
910 case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
911 case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
912 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
913 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
914 if (src < 0 || dst < 0)
915 return -EINVAL;
916 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
917 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
918 /* sign extend */
919 emit_instr(ctx, sll, dst, dst, 0);
920 }
921 did_move = false;
922 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
923 if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
924 int tmp_reg = MIPS_R_AT;
925
926 if (BPF_OP(insn->code) == BPF_MOV) {
927 tmp_reg = dst;
928 did_move = true;
929 }
930 /* sign extend */
931 emit_instr(ctx, sll, tmp_reg, src, 0);
932 src = MIPS_R_AT;
933 }
934 switch (BPF_OP(insn->code)) {
935 case BPF_MOV:
936 if (!did_move)
937 emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
938 break;
939 case BPF_ADD:
940 emit_instr(ctx, addu, dst, dst, src);
941 break;
942 case BPF_SUB:
943 emit_instr(ctx, subu, dst, dst, src);
944 break;
945 case BPF_XOR:
946 emit_instr(ctx, xor, dst, dst, src);
947 break;
948 case BPF_OR:
949 emit_instr(ctx, or, dst, dst, src);
950 break;
951 case BPF_AND:
952 emit_instr(ctx, and, dst, dst, src);
953 break;
954 case BPF_MUL:
955 emit_instr(ctx, mul, dst, dst, src);
956 break;
957 case BPF_DIV:
958 case BPF_MOD:
959 b_off = b_imm(exit_idx, ctx);
960 if (is_bad_offset(b_off))
961 return -E2BIG;
962 emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off);
963 emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src);
964 emit_instr(ctx, divu, dst, src);
965 if (BPF_OP(insn->code) == BPF_DIV)
966 emit_instr(ctx, mflo, dst);
967 else
968 emit_instr(ctx, mfhi, dst);
969 break;
970 case BPF_LSH:
971 emit_instr(ctx, sllv, dst, dst, src);
972 break;
973 case BPF_RSH:
974 emit_instr(ctx, srlv, dst, dst, src);
975 break;
976 default:
977 pr_err("ALU_REG NOT HANDLED\n");
978 return -EINVAL;
979 }
980 break;
981 case BPF_JMP | BPF_EXIT:
982 if (this_idx + 1 < exit_idx) {
983 b_off = b_imm(exit_idx, ctx);
984 if (is_bad_offset(b_off))
985 return -E2BIG;
986 emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
987 emit_instr(ctx, nop);
988 }
989 break;
990 case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
991 case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
992 cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
993 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
994 if (dst < 0)
995 return dst;
996 if (insn->imm == 0) {
997 src = MIPS_R_ZERO;
998 } else {
999 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
1000 src = MIPS_R_AT;
1001 }
1002 goto jeq_common;
1003 case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
1004 case BPF_JMP | BPF_JNE | BPF_X:
1005 case BPF_JMP | BPF_JSGT | BPF_X:
1006 case BPF_JMP | BPF_JSGE | BPF_X:
1007 case BPF_JMP | BPF_JGT | BPF_X:
1008 case BPF_JMP | BPF_JGE | BPF_X:
1009 case BPF_JMP | BPF_JSET | BPF_X:
1010 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1011 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1012 if (src < 0 || dst < 0)
1013 return -EINVAL;
1014 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
1015 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
1016 if (td == REG_32BIT && ts != REG_32BIT) {
1017 emit_instr(ctx, sll, MIPS_R_AT, src, 0);
1018 src = MIPS_R_AT;
1019 } else if (ts == REG_32BIT && td != REG_32BIT) {
1020 emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
1021 dst = MIPS_R_AT;
1022 }
1023 if (BPF_OP(insn->code) == BPF_JSET) {
1024 emit_instr(ctx, and, MIPS_R_AT, dst, src);
1025 cmp_eq = false;
1026 dst = MIPS_R_AT;
1027 src = MIPS_R_ZERO;
1028 } else if (BPF_OP(insn->code) == BPF_JSGT) {
1029 emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
1030 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1031 b_off = b_imm(exit_idx, ctx);
1032 if (is_bad_offset(b_off))
1033 return -E2BIG;
1034 emit_instr(ctx, blez, MIPS_R_AT, b_off);
1035 emit_instr(ctx, nop);
1036 return 2; /* We consumed the exit. */
1037 }
1038 b_off = b_imm(this_idx + insn->off + 1, ctx);
1039 if (is_bad_offset(b_off))
1040 return -E2BIG;
1041 emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
1042 emit_instr(ctx, nop);
1043 break;
1044 } else if (BPF_OP(insn->code) == BPF_JSGE) {
1045 emit_instr(ctx, slt, MIPS_R_AT, dst, src);
1046 cmp_eq = true;
1047 dst = MIPS_R_AT;
1048 src = MIPS_R_ZERO;
1049 } else if (BPF_OP(insn->code) == BPF_JGT) {
1050 /* dst or src could be AT */
1051 emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
1052 emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
1053 /* SP known to be non-zero, movz becomes boolean not */
1054 emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
1055 emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
1056 emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
1057 cmp_eq = true;
1058 dst = MIPS_R_AT;
1059 src = MIPS_R_ZERO;
1060 } else if (BPF_OP(insn->code) == BPF_JGE) {
1061 emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
1062 cmp_eq = true;
1063 dst = MIPS_R_AT;
1064 src = MIPS_R_ZERO;
1065 } else { /* JNE/JEQ case */
1066 cmp_eq = (BPF_OP(insn->code) == BPF_JEQ);
1067 }
1068jeq_common:
1069 /*
1070 * If the next insn is EXIT and we are jumping arround
1071 * only it, invert the sense of the compare and
1072 * conditionally jump to the exit. Poor man's branch
1073 * chaining.
1074 */
1075 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1076 b_off = b_imm(exit_idx, ctx);
1077 if (is_bad_offset(b_off)) {
1078 target = j_target(ctx, exit_idx);
1079 if (target == (unsigned int)-1)
1080 return -E2BIG;
1081 cmp_eq = !cmp_eq;
1082 b_off = 4 * 3;
1083 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1084 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1085 ctx->long_b_conversion = 1;
1086 }
1087 }
1088
1089 if (cmp_eq)
1090 emit_instr(ctx, bne, dst, src, b_off);
1091 else
1092 emit_instr(ctx, beq, dst, src, b_off);
1093 emit_instr(ctx, nop);
1094 if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
1095 emit_instr(ctx, j, target);
1096 emit_instr(ctx, nop);
1097 }
1098 return 2; /* We consumed the exit. */
1099 }
1100 b_off = b_imm(this_idx + insn->off + 1, ctx);
1101 if (is_bad_offset(b_off)) {
1102 target = j_target(ctx, this_idx + insn->off + 1);
1103 if (target == (unsigned int)-1)
1104 return -E2BIG;
1105 cmp_eq = !cmp_eq;
1106 b_off = 4 * 3;
1107 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1108 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1109 ctx->long_b_conversion = 1;
1110 }
1111 }
1112
1113 if (cmp_eq)
1114 emit_instr(ctx, beq, dst, src, b_off);
1115 else
1116 emit_instr(ctx, bne, dst, src, b_off);
1117 emit_instr(ctx, nop);
1118 if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
1119 emit_instr(ctx, j, target);
1120 emit_instr(ctx, nop);
1121 }
1122 break;
1123 case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
1124 case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
1125 cmp_eq = (BPF_OP(insn->code) == BPF_JSGE);
1126 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1127 if (dst < 0)
1128 return dst;
1129
1130 if (insn->imm == 0) {
1131 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1132 b_off = b_imm(exit_idx, ctx);
1133 if (is_bad_offset(b_off))
1134 return -E2BIG;
1135 if (cmp_eq)
1136 emit_instr(ctx, bltz, dst, b_off);
1137 else
1138 emit_instr(ctx, blez, dst, b_off);
1139 emit_instr(ctx, nop);
1140 return 2; /* We consumed the exit. */
1141 }
1142 b_off = b_imm(this_idx + insn->off + 1, ctx);
1143 if (is_bad_offset(b_off))
1144 return -E2BIG;
1145 if (cmp_eq)
1146 emit_instr(ctx, bgez, dst, b_off);
1147 else
1148 emit_instr(ctx, bgtz, dst, b_off);
1149 emit_instr(ctx, nop);
1150 break;
1151 }
1152 /*
1153 * only "LT" compare available, so we must use imm + 1
1154 * to generate "GT"
1155 */
1156 t64s = insn->imm + (cmp_eq ? 0 : 1);
1157 if (t64s >= S16_MIN && t64s <= S16_MAX) {
1158 emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
1159 src = MIPS_R_AT;
1160 dst = MIPS_R_ZERO;
1161 cmp_eq = true;
1162 goto jeq_common;
1163 }
1164 emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
1165 emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
1166 src = MIPS_R_AT;
1167 dst = MIPS_R_ZERO;
1168 cmp_eq = true;
1169 goto jeq_common;
1170
1171 case BPF_JMP | BPF_JGT | BPF_K:
1172 case BPF_JMP | BPF_JGE | BPF_K:
1173 cmp_eq = (BPF_OP(insn->code) == BPF_JGE);
1174 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1175 if (dst < 0)
1176 return dst;
1177 /*
1178 * only "LT" compare available, so we must use imm + 1
1179 * to generate "GT"
1180 */
1181 t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1);
1182 if (t64s >= 0 && t64s <= S16_MAX) {
1183 emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s);
1184 src = MIPS_R_AT;
1185 dst = MIPS_R_ZERO;
1186 cmp_eq = true;
1187 goto jeq_common;
1188 }
1189 emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
1190 emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
1191 src = MIPS_R_AT;
1192 dst = MIPS_R_ZERO;
1193 cmp_eq = true;
1194 goto jeq_common;
1195
1196 case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
1197 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1198 if (dst < 0)
1199 return dst;
1200
1201 if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) {
1202 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1203 b_off = b_imm(exit_idx, ctx);
1204 if (is_bad_offset(b_off))
1205 return -E2BIG;
1206 emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
1207 emit_instr(ctx, nop);
1208 return 2; /* We consumed the exit. */
1209 }
1210 b_off = b_imm(this_idx + insn->off + 1, ctx);
1211 if (is_bad_offset(b_off))
1212 return -E2BIG;
1213 emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
1214 emit_instr(ctx, nop);
1215 break;
1216 }
1217 t64 = (u32)insn->imm;
1218 emit_const_to_reg(ctx, MIPS_R_AT, t64);
1219 emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
1220 src = MIPS_R_AT;
1221 dst = MIPS_R_ZERO;
1222 cmp_eq = false;
1223 goto jeq_common;
1224
1225 case BPF_JMP | BPF_JA:
1226 /*
1227 * Prefer relative branch for easier debugging, but
1228 * fall back if needed.
1229 */
1230 b_off = b_imm(this_idx + insn->off + 1, ctx);
1231 if (is_bad_offset(b_off)) {
1232 target = j_target(ctx, this_idx + insn->off + 1);
1233 if (target == (unsigned int)-1)
1234 return -E2BIG;
1235 emit_instr(ctx, j, target);
1236 } else {
1237 emit_instr(ctx, b, b_off);
1238 }
1239 emit_instr(ctx, nop);
1240 break;
1241 case BPF_LD | BPF_DW | BPF_IMM:
1242 if (insn->src_reg != 0)
1243 return -EINVAL;
1244 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1245 if (dst < 0)
1246 return dst;
1247 t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
1248 emit_const_to_reg(ctx, dst, t64);
1249 return 2; /* Double slot insn */
1250
1251 case BPF_JMP | BPF_CALL:
1252 ctx->flags |= EBPF_SAVE_RA;
1253 t64s = (s64)insn->imm + (s64)__bpf_call_base;
1254 emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
1255 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
1256 /* delay slot */
1257 emit_instr(ctx, nop);
1258 break;
1259
1260 case BPF_JMP | BPF_TAIL_CALL:
1261 if (emit_bpf_tail_call(ctx, this_idx))
1262 return -EINVAL;
1263 break;
1264
1265 case BPF_LD | BPF_B | BPF_ABS:
1266 case BPF_LD | BPF_H | BPF_ABS:
1267 case BPF_LD | BPF_W | BPF_ABS:
1268 case BPF_LD | BPF_DW | BPF_ABS:
1269 ctx->flags |= EBPF_SAVE_RA;
1270
1271 gen_imm_to_reg(insn, MIPS_R_A1, ctx);
1272 emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
1273
1274 if (insn->imm < 0) {
1275 emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
1276 } else {
1277 emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
1278 emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
1279 }
1280 goto ld_skb_common;
1281
1282 case BPF_LD | BPF_B | BPF_IND:
1283 case BPF_LD | BPF_H | BPF_IND:
1284 case BPF_LD | BPF_W | BPF_IND:
1285 case BPF_LD | BPF_DW | BPF_IND:
1286 ctx->flags |= EBPF_SAVE_RA;
1287 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1288 if (src < 0)
1289 return src;
1290 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
1291 if (ts == REG_32BIT_ZERO_EX) {
1292 /* sign extend */
1293 emit_instr(ctx, sll, MIPS_R_A1, src, 0);
1294 src = MIPS_R_A1;
1295 }
1296 if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
1297 emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
1298 } else {
1299 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
1300 emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
1301 }
1302 /* truncate to 32-bit int */
1303 emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
1304 emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
1305 emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
1306
1307 emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
1308 emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
1309 emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
1310 emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);
1311
1312ld_skb_common:
1313 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
1314 /* delay slot move */
1315 emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);
1316
1317 /* Check the error value */
1318 b_off = b_imm(exit_idx, ctx);
1319 if (is_bad_offset(b_off)) {
1320 target = j_target(ctx, exit_idx);
1321 if (target == (unsigned int)-1)
1322 return -E2BIG;
1323
1324 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1325 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1326 ctx->long_b_conversion = 1;
1327 }
1328 emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
1329 emit_instr(ctx, nop);
1330 emit_instr(ctx, j, target);
1331 emit_instr(ctx, nop);
1332 } else {
1333 emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
1334 emit_instr(ctx, nop);
1335 }
1336
1337#ifdef __BIG_ENDIAN
1338 need_swap = false;
1339#else
1340 need_swap = true;
1341#endif
1342 dst = MIPS_R_V0;
1343 switch (BPF_SIZE(insn->code)) {
1344 case BPF_B:
1345 emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
1346 break;
1347 case BPF_H:
1348 emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
1349 if (need_swap)
1350 emit_instr(ctx, wsbh, dst, dst);
1351 break;
1352 case BPF_W:
1353 emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
1354 if (need_swap) {
1355 emit_instr(ctx, wsbh, dst, dst);
1356 emit_instr(ctx, rotr, dst, dst, 16);
1357 }
1358 break;
1359 case BPF_DW:
1360 emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
1361 if (need_swap) {
1362 emit_instr(ctx, dsbh, dst, dst);
1363 emit_instr(ctx, dshd, dst, dst);
1364 }
1365 break;
1366 }
1367
1368 break;
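	/*
	 * Editor's sketch of the runtime helper selection in the BPF_IND
	 * case above (registers as in the code, flow illustrative):
	 *
	 *   A1 = (s32)(src + imm)                     # packet offset
	 *   AT = (A1 < 0)                             # slt
	 *   T8 = bpf_internal_load_pointer_neg_helper
	 *   T9 = ool_skb_header_pointer
	 *   if (AT != 0) T9 = T8                      # movn
	 *   jalr T9                                   # one call site
	 *
	 * so negative offsets (the classic SKF_NET_OFF/SKF_LL_OFF magic
	 * values) transparently take the special helper, while ordinary
	 * offsets use the header-pointer path.
	 */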
1369 case BPF_ALU | BPF_END | BPF_FROM_BE:
1370 case BPF_ALU | BPF_END | BPF_FROM_LE:
1371 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1372 if (dst < 0)
1373 return dst;
1374 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
1375 if (insn->imm == 64 && td == REG_32BIT)
1376 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
1377
1378 if (insn->imm != 64 &&
1379 (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
1380 /* sign extend */
1381 emit_instr(ctx, sll, dst, dst, 0);
1382 }
1383
1384#ifdef __BIG_ENDIAN
1385 need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
1386#else
1387 need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
1388#endif
1389 if (insn->imm == 16) {
1390 if (need_swap)
1391 emit_instr(ctx, wsbh, dst, dst);
1392 emit_instr(ctx, andi, dst, dst, 0xffff);
1393 } else if (insn->imm == 32) {
1394 if (need_swap) {
1395 emit_instr(ctx, wsbh, dst, dst);
1396 emit_instr(ctx, rotr, dst, dst, 16);
1397 }
1398		} else { /* 64-bit */
1399 if (need_swap) {
1400 emit_instr(ctx, dsbh, dst, dst);
1401 emit_instr(ctx, dshd, dst, dst);
1402 }
1403 }
1404 break;
1405
1406 case BPF_ST | BPF_B | BPF_MEM:
1407 case BPF_ST | BPF_H | BPF_MEM:
1408 case BPF_ST | BPF_W | BPF_MEM:
1409 case BPF_ST | BPF_DW | BPF_MEM:
1410 if (insn->dst_reg == BPF_REG_10) {
1411 ctx->flags |= EBPF_SEEN_FP;
1412 dst = MIPS_R_SP;
1413 mem_off = insn->off + MAX_BPF_STACK;
1414 } else {
1415 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1416 if (dst < 0)
1417 return dst;
1418 mem_off = insn->off;
1419 }
1420 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
1421 switch (BPF_SIZE(insn->code)) {
1422 case BPF_B:
1423 emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
1424 break;
1425 case BPF_H:
1426 emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
1427 break;
1428 case BPF_W:
1429 emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
1430 break;
1431 case BPF_DW:
1432 emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
1433 break;
1434 }
1435 break;
1436
1437 case BPF_LDX | BPF_B | BPF_MEM:
1438 case BPF_LDX | BPF_H | BPF_MEM:
1439 case BPF_LDX | BPF_W | BPF_MEM:
1440 case BPF_LDX | BPF_DW | BPF_MEM:
1441 if (insn->src_reg == BPF_REG_10) {
1442 ctx->flags |= EBPF_SEEN_FP;
1443 src = MIPS_R_SP;
1444 mem_off = insn->off + MAX_BPF_STACK;
1445 } else {
1446 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1447 if (src < 0)
1448 return src;
1449 mem_off = insn->off;
1450 }
1451 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1452 if (dst < 0)
1453 return dst;
1454 switch (BPF_SIZE(insn->code)) {
1455 case BPF_B:
1456 emit_instr(ctx, lbu, dst, mem_off, src);
1457 break;
1458 case BPF_H:
1459 emit_instr(ctx, lhu, dst, mem_off, src);
1460 break;
1461 case BPF_W:
1462 emit_instr(ctx, lw, dst, mem_off, src);
1463 break;
1464 case BPF_DW:
1465 emit_instr(ctx, ld, dst, mem_off, src);
1466 break;
1467 }
1468 break;
1469
1470 case BPF_STX | BPF_B | BPF_MEM:
1471 case BPF_STX | BPF_H | BPF_MEM:
1472 case BPF_STX | BPF_W | BPF_MEM:
1473 case BPF_STX | BPF_DW | BPF_MEM:
1474 case BPF_STX | BPF_W | BPF_XADD:
1475 case BPF_STX | BPF_DW | BPF_XADD:
1476 if (insn->dst_reg == BPF_REG_10) {
1477 ctx->flags |= EBPF_SEEN_FP;
1478 dst = MIPS_R_SP;
1479 mem_off = insn->off + MAX_BPF_STACK;
1480 } else {
1481 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1482 if (dst < 0)
1483 return dst;
1484 mem_off = insn->off;
1485 }
1486 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1487 if (src < 0)
1488			return src;
1489 if (BPF_MODE(insn->code) == BPF_XADD) {
1490 switch (BPF_SIZE(insn->code)) {
1491 case BPF_W:
1492 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1493 emit_instr(ctx, sll, MIPS_R_AT, src, 0);
1494 src = MIPS_R_AT;
1495 }
1496 emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
1497 emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
1498 emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
1499 /*
1500				 * On failure back up to the LL (-4
1501				 * instructions of 4 bytes each).
1502 */
1503 emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
1504 emit_instr(ctx, nop);
1505 break;
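			/*
			 * Editor's sketch of the 32-bit XADD sequence
			 * emitted above (byte offsets assumed):
			 *
			 *   ll   T8, off(dst)    # load-linked
			 *   addu T8, T8, src     # add the operand
			 *   sc   T8, off(dst)    # store-conditional
			 *   beq  T8, zero, -16   # T8 == 0: lost the race,
			 *   nop                  # retry from the ll
			 *
			 * which is the standard MIPS LL/SC retry loop for
			 * an atomic read-modify-write.
			 */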
1506 case BPF_DW:
1507 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1508 emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
1509 emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
1510 src = MIPS_R_AT;
1511 }
1512 emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
1513 emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
1514 emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
1515 emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
1516 emit_instr(ctx, nop);
1517 break;
1518 }
1519 } else { /* BPF_MEM */
1520 switch (BPF_SIZE(insn->code)) {
1521 case BPF_B:
1522 emit_instr(ctx, sb, src, mem_off, dst);
1523 break;
1524 case BPF_H:
1525 emit_instr(ctx, sh, src, mem_off, dst);
1526 break;
1527 case BPF_W:
1528 emit_instr(ctx, sw, src, mem_off, dst);
1529 break;
1530 case BPF_DW:
1531 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1532 emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
1533 emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
1534 src = MIPS_R_AT;
1535 }
1536 emit_instr(ctx, sd, src, mem_off, dst);
1537 break;
1538 }
1539 }
1540 break;
1541
1542 default:
1543 pr_err("NOT HANDLED %d - (%02x)\n",
1544 this_idx, (unsigned int)insn->code);
1545 return -EINVAL;
1546 }
1547 return 1;
1548}
1549
1550#define RVT_VISITED_MASK 0xc000000000000000ull
1551#define RVT_FALL_THROUGH 0x4000000000000000ull
1552#define RVT_BRANCH_TAKEN 0x8000000000000000ull
1553#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
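/*
 * Editor's note, illustrative: each of the 11 eBPF registers gets a
 * 3-bit value-type field inside the u64 (11 * 3 = 33 bits), which is
 * what leaves bits 62..63 free for the visited flags above. Assuming
 * register r's field sits at bits [3r, 3r+2], a read-back helper would
 * look like:
 *
 *	static u64 rvt_field(u64 rvt, int reg)
 *	{
 *		return (rvt >> (3 * reg)) & 7;
 *	}
 *
 * returning one REG_* value. set_reg_val_type()/get_reg_val_type(),
 * used throughout this file, are the real accessors; the exact packing
 * here is an assumption.
 */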
1554
1555static int build_int_body(struct jit_ctx *ctx)
1556{
1557 const struct bpf_prog *prog = ctx->skf;
1558 const struct bpf_insn *insn;
1559 int i, r;
1560
1561 for (i = 0; i < prog->len; ) {
1562 insn = prog->insnsi + i;
1563 if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
1564 /* dead instruction, don't emit it. */
1565 i++;
1566 continue;
1567 }
1568
1569 if (ctx->target == NULL)
1570 ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
1571
1572 r = build_one_insn(insn, ctx, i, prog->len);
1573 if (r < 0)
1574 return r;
1575 i += r;
1576 }
1577 /* epilogue offset */
1578 if (ctx->target == NULL)
1579 ctx->offsets[i] = ctx->idx * 4;
1580
1581 /*
1582	 * All exits branch to the epilogue; some offsets may not
1583	 * have been set due to branch-around threading, so set
1584	 * them now.
1585 */
1586 if (ctx->target == NULL)
1587 for (i = 0; i < prog->len; i++) {
1588 insn = prog->insnsi + i;
1589 if (insn->code == (BPF_JMP | BPF_EXIT))
1590 ctx->offsets[i] = ctx->idx * 4;
1591 }
1592 return 0;
1593}
1594
1595/* return the last idx processed, or negative for error */
1596static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
1597 int start_idx, bool follow_taken)
1598{
1599 const struct bpf_prog *prog = ctx->skf;
1600 const struct bpf_insn *insn;
1601 u64 exit_rvt = initial_rvt;
1602 u64 *rvt = ctx->reg_val_types;
1603 int idx;
1604 int reg;
1605
1606 for (idx = start_idx; idx < prog->len; idx++) {
1607 rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
1608 insn = prog->insnsi + idx;
1609 switch (BPF_CLASS(insn->code)) {
1610 case BPF_ALU:
1611 switch (BPF_OP(insn->code)) {
1612 case BPF_ADD:
1613 case BPF_SUB:
1614 case BPF_MUL:
1615 case BPF_DIV:
1616 case BPF_OR:
1617 case BPF_AND:
1618 case BPF_LSH:
1619 case BPF_RSH:
1620 case BPF_NEG:
1621 case BPF_MOD:
1622 case BPF_XOR:
1623 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1624 break;
1625 case BPF_MOV:
1626 if (BPF_SRC(insn->code)) {
1627 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1628 } else {
1629				/* IMM to REG move */
1630 if (insn->imm >= 0)
1631 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1632 else
1633 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1634 }
1635 break;
1636 case BPF_END:
1637 if (insn->imm == 64)
1638 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1639 else if (insn->imm == 32)
1640 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1641 else /* insn->imm == 16 */
1642 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1643 break;
1644 }
1645 rvt[idx] |= RVT_DONE;
1646 break;
1647 case BPF_ALU64:
1648 switch (BPF_OP(insn->code)) {
1649 case BPF_MOV:
1650 if (BPF_SRC(insn->code)) {
1651				/* REG to REG move */
1652 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1653 } else {
1654				/* IMM to REG move */
1655 if (insn->imm >= 0)
1656 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1657 else
1658 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
1659 }
1660 break;
1661 default:
1662 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1663 }
1664 rvt[idx] |= RVT_DONE;
1665 break;
1666 case BPF_LD:
1667 switch (BPF_SIZE(insn->code)) {
1668 case BPF_DW:
1669 if (BPF_MODE(insn->code) == BPF_IMM) {
1670 s64 val;
1671
1672 val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
1673 if (val > 0 && val <= S32_MAX)
1674 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1675 else if (val >= S32_MIN && val <= S32_MAX)
1676 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
1677 else
1678 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1679 rvt[idx] |= RVT_DONE;
1680 idx++;
1681 } else {
1682 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1683 }
1684 break;
1685 case BPF_B:
1686 case BPF_H:
1687 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1688 break;
1689 case BPF_W:
1690 if (BPF_MODE(insn->code) == BPF_IMM)
1691 set_reg_val_type(&exit_rvt, insn->dst_reg,
1692 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
1693 else
1694 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1695 break;
1696 }
1697 rvt[idx] |= RVT_DONE;
1698 break;
1699 case BPF_LDX:
1700 switch (BPF_SIZE(insn->code)) {
1701 case BPF_DW:
1702 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1703 break;
1704 case BPF_B:
1705 case BPF_H:
1706 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1707 break;
1708 case BPF_W:
1709 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1710 break;
1711 }
1712 rvt[idx] |= RVT_DONE;
1713 break;
1714 case BPF_JMP:
1715 switch (BPF_OP(insn->code)) {
1716 case BPF_EXIT:
1717 rvt[idx] = RVT_DONE | exit_rvt;
1718 rvt[prog->len] = exit_rvt;
1719 return idx;
1720 case BPF_JA:
1721 rvt[idx] |= RVT_DONE;
1722 idx += insn->off;
1723 break;
1724 case BPF_JEQ:
1725 case BPF_JGT:
1726 case BPF_JGE:
1727 case BPF_JSET:
1728 case BPF_JNE:
1729 case BPF_JSGT:
1730 case BPF_JSGE:
1731 if (follow_taken) {
1732 rvt[idx] |= RVT_BRANCH_TAKEN;
1733 idx += insn->off;
1734 follow_taken = false;
1735 } else {
1736 rvt[idx] |= RVT_FALL_THROUGH;
1737 }
1738 break;
1739 case BPF_CALL:
1740 set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
1741 /* Upon call return, argument registers are clobbered. */
1742 for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
1743 set_reg_val_type(&exit_rvt, reg, REG_64BIT);
1744
1745 rvt[idx] |= RVT_DONE;
1746 break;
1747 default:
1748 WARN(1, "Unhandled BPF_JMP case.\n");
1749 rvt[idx] |= RVT_DONE;
1750 break;
1751 }
1752 break;
1753 default:
1754 rvt[idx] |= RVT_DONE;
1755 break;
1756 }
1757 }
1758 return idx;
1759}
1760
1761/*
1762 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
1763 * each eBPF insn. This allows unneeded sign and zero extension
1764 * operations to be omitted.
1765 *
1766 * Does not yet handle confluence of control paths with conflicting
1767 * ranges, but it is good enough for most sane code.
1768 */
1769static int reg_val_propagate(struct jit_ctx *ctx)
1770{
1771 const struct bpf_prog *prog = ctx->skf;
1772 u64 exit_rvt;
1773 int reg;
1774 int i;
1775
1776 /*
1777	 * 11 registers * 3 bits/reg leaves the top bits free for other
1778	 * uses. Bits 62..63 are used to see if we have visited an insn.
1779 */
1780 exit_rvt = 0;
1781
1782 /* Upon entry, argument registers are 64-bit. */
1783 for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
1784 set_reg_val_type(&exit_rvt, reg, REG_64BIT);
1785
1786 /*
1787 * First follow all conditional branches on the fall-through
1788	 * edge of control flow.
1789 */
1790 reg_val_propagate_range(ctx, exit_rvt, 0, false);
1791restart_search:
1792 /*
1793 * Then repeatedly find the first conditional branch where
1794 * both edges of control flow have not been taken, and follow
1795 * the branch taken edge. We will end up restarting the
1796 * search once per conditional branch insn.
1797 */
1798 for (i = 0; i < prog->len; i++) {
1799 u64 rvt = ctx->reg_val_types[i];
1800
1801 if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
1802 (rvt & RVT_VISITED_MASK) == 0)
1803 continue;
1804 if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
1805 reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
1806 } else { /* RVT_BRANCH_TAKEN */
1807 WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
1808 reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
1809 }
1810 goto restart_search;
1811 }
1812 /*
1813 * Eventually all conditional branches have been followed on
1814	 * both edges and we are done. Any insn that has not been
1815 * visited at this point is dead.
1816 */
1817
1818 return 0;
1819}
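/*
 * Editor's worked example of the propagation above, on a hypothetical
 * program fragment:
 *
 *   r2 = 7         ALU64 MOV, imm >= 0   -> r2 becomes REG_32BIT_POS
 *   r2 += r1       ALU64 ADD             -> r2 becomes REG_64BIT
 *   w3 = w2        ALU MOV, reg source   -> r3 becomes REG_32BIT
 *   if w3 > 9 ...  conditional: the fall-through edge is walked first,
 *                  the taken edge on a later restart_search pass
 *
 * Knowing r3 is already a clean 32-bit value lets build_one_insn()
 * skip the sll/dinsu extension instructions it would otherwise emit.
 */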
1820
1821static void jit_fill_hole(void *area, unsigned int size)
1822{
1823 u32 *p;
1824
1825 /* We are guaranteed to have aligned memory. */
1826 for (p = area; size >= sizeof(u32); size -= sizeof(u32))
1827 uasm_i_break(&p, BRK_BUG); /* Increments p */
1828}
1829
1830struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1831{
1832 struct bpf_prog *orig_prog = prog;
1833 bool tmp_blinded = false;
1834 struct bpf_prog *tmp;
1835 struct bpf_binary_header *header = NULL;
1836 struct jit_ctx ctx;
1837 unsigned int image_size;
1838 u8 *image_ptr;
1839
1840 if (!bpf_jit_enable || !cpu_has_mips64r2)
1841 return prog;
1842
1843 tmp = bpf_jit_blind_constants(prog);
1844 /* If blinding was requested and we failed during blinding,
1845 * we must fall back to the interpreter.
1846 */
1847 if (IS_ERR(tmp))
1848 return orig_prog;
1849 if (tmp != prog) {
1850 tmp_blinded = true;
1851 prog = tmp;
1852 }
1853
1854 memset(&ctx, 0, sizeof(ctx));
1855
1856 ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
1857 if (ctx.offsets == NULL)
1858 goto out_err;
1859
1860 ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
1861 if (ctx.reg_val_types == NULL)
1862 goto out_err;
1863
1864 ctx.skf = prog;
1865
1866 if (reg_val_propagate(&ctx))
1867 goto out_err;
1868
1869 /*
1870 * First pass discovers used resources and instruction offsets
1871 * assuming short branches are used.
1872 */
1873 if (build_int_body(&ctx))
1874 goto out_err;
1875
1876 /*
1877	 * If no calls are made (no EBPF_SAVE_RA), then the tail call
1878	 * count stays in $v1, else we must save it in $s4.
1879 */
1880 if (ctx.flags & EBPF_SEEN_TC) {
1881 if (ctx.flags & EBPF_SAVE_RA)
1882 ctx.flags |= EBPF_SAVE_S4;
1883 else
1884 ctx.flags |= EBPF_TCC_IN_V1;
1885 }
1886
1887 /*
1888	 * Second pass generates offsets; if any branches are out of
1889 * range a jump-around long sequence is generated, and we have
1890 * to try again from the beginning to generate the new
1891 * offsets. This is done until no additional conversions are
1892 * necessary.
1893 */
1894 do {
1895 ctx.idx = 0;
1896 ctx.gen_b_offsets = 1;
1897 ctx.long_b_conversion = 0;
1898 if (gen_int_prologue(&ctx))
1899 goto out_err;
1900 if (build_int_body(&ctx))
1901 goto out_err;
1902 if (build_int_epilogue(&ctx, MIPS_R_RA))
1903 goto out_err;
1904 } while (ctx.long_b_conversion);
1905
1906 image_size = 4 * ctx.idx;
1907
1908 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1909 sizeof(u32), jit_fill_hole);
1910 if (header == NULL)
1911 goto out_err;
1912
1913 ctx.target = (u32 *)image_ptr;
1914
1915 /* Third pass generates the code */
1916 ctx.idx = 0;
1917 if (gen_int_prologue(&ctx))
1918 goto out_err;
1919 if (build_int_body(&ctx))
1920 goto out_err;
1921 if (build_int_epilogue(&ctx, MIPS_R_RA))
1922 goto out_err;
1923
1924 /* Update the icache */
1925 flush_icache_range((unsigned long)ctx.target,
1926			   (unsigned long)(ctx.target + ctx.idx));
1927
1928 if (bpf_jit_enable > 1)
1929 /* Dump JIT code */
1930 bpf_jit_dump(prog->len, image_size, 2, ctx.target);
1931
1932 bpf_jit_binary_lock_ro(header);
1933 prog->bpf_func = (void *)ctx.target;
1934 prog->jited = 1;
1935 prog->jited_len = image_size;
1936out_normal:
1937 if (tmp_blinded)
1938 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1939 tmp : orig_prog);
1940 kfree(ctx.offsets);
1941 kfree(ctx.reg_val_types);
1942
1943 return prog;
1944
1945out_err:
1946 prog = orig_prog;
1947 if (header)
1948 bpf_jit_binary_free(header);
1949 goto out_normal;
1950}
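/*
 * Editor's summary of the pass structure above, illustrative: the same
 * builders run in every pass and key off ctx.target:
 *
 *   pass 1: ctx.target == NULL; discovers used resources (ctx.flags)
 *           and records offsets assuming short branches everywhere
 *   pass 2: repeated until ctx.long_b_conversion stays 0, i.e. until
 *           no further branch had to be widened into a jump-around
 *   pass 3: ctx.target points at the allocated image and instructions
 *           are actually written
 *
 * At runtime, writing 2 to /proc/sys/net/core/bpf_jit_enable makes the
 * bpf_jit_dump() call above print the generated image for inspection.
 */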
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 0695ce047d56..34fc9bbfca9e 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -293,7 +293,8 @@ CONFIG_MAGIC_SYSRQ=y
293CONFIG_DEBUG_KERNEL=y 293CONFIG_DEBUG_KERNEL=y
294CONFIG_DEBUG_STACK_USAGE=y 294CONFIG_DEBUG_STACK_USAGE=y
295CONFIG_DEBUG_STACKOVERFLOW=y 295CONFIG_DEBUG_STACKOVERFLOW=y
296CONFIG_LOCKUP_DETECTOR=y 296CONFIG_SOFTLOCKUP_DETECTOR=y
297CONFIG_HARDLOCKUP_DETECTOR=y
297CONFIG_LATENCYTOP=y 298CONFIG_LATENCYTOP=y
298CONFIG_SCHED_TRACER=y 299CONFIG_SCHED_TRACER=y
299CONFIG_BLK_DEV_IO_TRACE=y 300CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5175028c56ce..c5246d29f385 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -324,7 +324,8 @@ CONFIG_MAGIC_SYSRQ=y
324CONFIG_DEBUG_KERNEL=y 324CONFIG_DEBUG_KERNEL=y
325CONFIG_DEBUG_STACK_USAGE=y 325CONFIG_DEBUG_STACK_USAGE=y
326CONFIG_DEBUG_STACKOVERFLOW=y 326CONFIG_DEBUG_STACKOVERFLOW=y
327CONFIG_LOCKUP_DETECTOR=y 327CONFIG_SOFTLOCKUP_DETECTOR=y
328CONFIG_HARDLOCKUP_DETECTOR=y
328CONFIG_DEBUG_MUTEXES=y 329CONFIG_DEBUG_MUTEXES=y
329CONFIG_LATENCYTOP=y 330CONFIG_LATENCYTOP=y
330CONFIG_SCHED_TRACER=y 331CONFIG_SCHED_TRACER=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 1a61aa20dfba..fd5d98a0b95c 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -291,7 +291,8 @@ CONFIG_MAGIC_SYSRQ=y
291CONFIG_DEBUG_KERNEL=y 291CONFIG_DEBUG_KERNEL=y
292CONFIG_DEBUG_STACK_USAGE=y 292CONFIG_DEBUG_STACK_USAGE=y
293CONFIG_DEBUG_STACKOVERFLOW=y 293CONFIG_DEBUG_STACKOVERFLOW=y
294CONFIG_LOCKUP_DETECTOR=y 294CONFIG_SOFTLOCKUP_DETECTOR=y
295CONFIG_HARDLOCKUP_DETECTOR=y
295CONFIG_LATENCYTOP=y 296CONFIG_LATENCYTOP=y
296CONFIG_SCHED_TRACER=y 297CONFIG_SCHED_TRACER=y
297CONFIG_BLK_DEV_IO_TRACE=y 298CONFIG_BLK_DEV_IO_TRACE=y
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 49d8422767b4..e925c1c99c71 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -223,17 +223,27 @@ system_call_exit:
223 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) 223 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
224 bne- .Lsyscall_exit_work 224 bne- .Lsyscall_exit_work
225 225
226 /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */ 226 andi. r0,r8,MSR_FP
227 li r7,MSR_FP 227 beq 2f
228#ifdef CONFIG_ALTIVEC 228#ifdef CONFIG_ALTIVEC
229 oris r7,r7,MSR_VEC@h 229 andis. r0,r8,MSR_VEC@h
230 bne 3f
230#endif 231#endif
231 and r0,r8,r7 2322: addi r3,r1,STACK_FRAME_OVERHEAD
232 cmpd r0,r7 233#ifdef CONFIG_PPC_BOOK3S
233 bne .Lsyscall_restore_math 234 li r10,MSR_RI
234.Lsyscall_restore_math_cont: 235 mtmsrd r10,1 /* Restore RI */
236#endif
237 bl restore_math
238#ifdef CONFIG_PPC_BOOK3S
239 li r11,0
240 mtmsrd r11,1
241#endif
242 ld r8,_MSR(r1)
243 ld r3,RESULT(r1)
244 li r11,-MAX_ERRNO
235 245
236 cmpld r3,r11 2463: cmpld r3,r11
237 ld r5,_CCR(r1) 247 ld r5,_CCR(r1)
238 bge- .Lsyscall_error 248 bge- .Lsyscall_error
239.Lsyscall_error_cont: 249.Lsyscall_error_cont:
@@ -267,40 +277,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
267 std r5,_CCR(r1) 277 std r5,_CCR(r1)
268 b .Lsyscall_error_cont 278 b .Lsyscall_error_cont
269 279
270.Lsyscall_restore_math:
271 /*
272 * Some initial tests from restore_math to avoid the heavyweight
273 * C code entry and MSR manipulations.
274 */
275 LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
276 and. r0,r0,r8
277 bne 1f
278
279 ld r7,PACACURRENT(r13)
280 lbz r0,THREAD+THREAD_LOAD_FP(r7)
281#ifdef CONFIG_ALTIVEC
282 lbz r6,THREAD+THREAD_LOAD_VEC(r7)
283 add r0,r0,r6
284#endif
285 cmpdi r0,0
286 beq .Lsyscall_restore_math_cont
287
2881: addi r3,r1,STACK_FRAME_OVERHEAD
289#ifdef CONFIG_PPC_BOOK3S
290 li r10,MSR_RI
291 mtmsrd r10,1 /* Restore RI */
292#endif
293 bl restore_math
294#ifdef CONFIG_PPC_BOOK3S
295 li r11,0
296 mtmsrd r11,1
297#endif
298 /* Restore volatiles, reload MSR from updated one */
299 ld r8,_MSR(r1)
300 ld r3,RESULT(r1)
301 li r11,-MAX_ERRNO
302 b .Lsyscall_restore_math_cont
303
304/* Traced system call support */ 280/* Traced system call support */
305.Lsyscall_dotrace: 281.Lsyscall_dotrace:
306 bl save_nvgprs 282 bl save_nvgprs
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9f3e2c932dcc..ec480966f9bf 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -511,10 +511,6 @@ void restore_math(struct pt_regs *regs)
511{ 511{
512 unsigned long msr; 512 unsigned long msr;
513 513
514 /*
515 * Syscall exit makes a similar initial check before branching
516 * to restore_math. Keep them in synch.
517 */
518 if (!msr_tm_active(regs->msr) && 514 if (!msr_tm_active(regs->msr) &&
519 !current->thread.load_fp && !loadvec(current->thread)) 515 !current->thread.load_fp && !loadvec(current->thread))
520 return; 516 return;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index cf0e1245b8cc..8d3320562c70 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -351,7 +351,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
351 hard_irq_disable(); 351 hard_irq_disable();
352 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { 352 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
353 raw_local_irq_restore(*flags); 353 raw_local_irq_restore(*flags);
354 cpu_relax(); 354 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
355 raw_local_irq_save(*flags); 355 raw_local_irq_save(*flags);
356 hard_irq_disable(); 356 hard_irq_disable();
357 } 357 }
@@ -360,7 +360,7 @@ static void nmi_ipi_lock_start(unsigned long *flags)
360static void nmi_ipi_lock(void) 360static void nmi_ipi_lock(void)
361{ 361{
362 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) 362 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
363 cpu_relax(); 363 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
364} 364}
365 365
366static void nmi_ipi_unlock(void) 366static void nmi_ipi_unlock(void)
@@ -475,7 +475,7 @@ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
475 nmi_ipi_lock_start(&flags); 475 nmi_ipi_lock_start(&flags);
476 while (nmi_ipi_busy_count) { 476 while (nmi_ipi_busy_count) {
477 nmi_ipi_unlock_end(&flags); 477 nmi_ipi_unlock_end(&flags);
478 cpu_relax(); 478 spin_until_cond(nmi_ipi_busy_count == 0);
479 nmi_ipi_lock_start(&flags); 479 nmi_ipi_lock_start(&flags);
480 } 480 }
481 481
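Editor's aside (illustrative C): the spin_until_cond() conversions
above follow the classic test-and-test-and-set pattern; the CPU spins
on a plain read, keeping the cache line shared, and only retries the
exclusive cmpxchg once the lock looks free:

	static void nmi_lock_sketch(atomic_t *lock)
	{
		while (atomic_cmpxchg(lock, 0, 1) == 1)
			/* read-only wait, no cache-line ping-pong */
			spin_until_cond(atomic_read(lock) == 0);
	}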
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index b67f8b03a32d..34721a257a77 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -71,15 +71,20 @@ static inline void wd_smp_lock(unsigned long *flags)
71 * This may be called from low level interrupt handlers at some 71 * This may be called from low level interrupt handlers at some
72 * point in future. 72 * point in future.
73 */ 73 */
74 local_irq_save(*flags); 74 raw_local_irq_save(*flags);
75 while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) 75 hard_irq_disable(); /* Make it soft-NMI safe */
76 cpu_relax(); 76 while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
77 raw_local_irq_restore(*flags);
78 spin_until_cond(!test_bit(0, &__wd_smp_lock));
79 raw_local_irq_save(*flags);
80 hard_irq_disable();
81 }
77} 82}
78 83
79static inline void wd_smp_unlock(unsigned long *flags) 84static inline void wd_smp_unlock(unsigned long *flags)
80{ 85{
81 clear_bit_unlock(0, &__wd_smp_lock); 86 clear_bit_unlock(0, &__wd_smp_lock);
82 local_irq_restore(*flags); 87 raw_local_irq_restore(*flags);
83} 88}
84 89
85static void wd_lockup_ipi(struct pt_regs *regs) 90static void wd_lockup_ipi(struct pt_regs *regs)
@@ -96,10 +101,10 @@ static void wd_lockup_ipi(struct pt_regs *regs)
96 nmi_panic(regs, "Hard LOCKUP"); 101 nmi_panic(regs, "Hard LOCKUP");
97} 102}
98 103
99static void set_cpu_stuck(int cpu, u64 tb) 104static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
100{ 105{
101 cpumask_set_cpu(cpu, &wd_smp_cpus_stuck); 106 cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
102 cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); 107 cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
103 if (cpumask_empty(&wd_smp_cpus_pending)) { 108 if (cpumask_empty(&wd_smp_cpus_pending)) {
104 wd_smp_last_reset_tb = tb; 109 wd_smp_last_reset_tb = tb;
105 cpumask_andnot(&wd_smp_cpus_pending, 110 cpumask_andnot(&wd_smp_cpus_pending,
@@ -107,6 +112,10 @@ static void set_cpu_stuck(int cpu, u64 tb)
107 &wd_smp_cpus_stuck); 112 &wd_smp_cpus_stuck);
108 } 113 }
109} 114}
115static void set_cpu_stuck(int cpu, u64 tb)
116{
117 set_cpumask_stuck(cpumask_of(cpu), tb);
118}
110 119
111static void watchdog_smp_panic(int cpu, u64 tb) 120static void watchdog_smp_panic(int cpu, u64 tb)
112{ 121{
@@ -135,11 +144,9 @@ static void watchdog_smp_panic(int cpu, u64 tb)
135 } 144 }
136 smp_flush_nmi_ipi(1000000); 145 smp_flush_nmi_ipi(1000000);
137 146
138 /* Take the stuck CPU out of the watch group */ 147 /* Take the stuck CPUs out of the watch group */
139 for_each_cpu(c, &wd_smp_cpus_pending) 148 set_cpumask_stuck(&wd_smp_cpus_pending, tb);
140 set_cpu_stuck(c, tb);
141 149
142out:
143 wd_smp_unlock(&flags); 150 wd_smp_unlock(&flags);
144 151
145 printk_safe_flush(); 152 printk_safe_flush();
@@ -152,6 +159,11 @@ out:
152 159
153 if (hardlockup_panic) 160 if (hardlockup_panic)
154 nmi_panic(NULL, "Hard LOCKUP"); 161 nmi_panic(NULL, "Hard LOCKUP");
162
163 return;
164
165out:
166 wd_smp_unlock(&flags);
155} 167}
156 168
157static void wd_smp_clear_cpu_pending(int cpu, u64 tb) 169static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
@@ -258,9 +270,11 @@ static void wd_timer_fn(unsigned long data)
258 270
259void arch_touch_nmi_watchdog(void) 271void arch_touch_nmi_watchdog(void)
260{ 272{
273 unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
261 int cpu = smp_processor_id(); 274 int cpu = smp_processor_id();
262 275
263 watchdog_timer_interrupt(cpu); 276 if (get_tb() - per_cpu(wd_timer_tb, cpu) >= ticks)
277 watchdog_timer_interrupt(cpu);
264} 278}
265EXPORT_SYMBOL(arch_touch_nmi_watchdog); 279EXPORT_SYMBOL(arch_touch_nmi_watchdog);
266 280
@@ -283,6 +297,8 @@ static void stop_watchdog_timer_on(unsigned int cpu)
283 297
284static int start_wd_on_cpu(unsigned int cpu) 298static int start_wd_on_cpu(unsigned int cpu)
285{ 299{
300 unsigned long flags;
301
286 if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { 302 if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
287 WARN_ON(1); 303 WARN_ON(1);
288 return 0; 304 return 0;
@@ -297,12 +313,14 @@ static int start_wd_on_cpu(unsigned int cpu)
297 if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) 313 if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
298 return 0; 314 return 0;
299 315
316 wd_smp_lock(&flags);
300 cpumask_set_cpu(cpu, &wd_cpus_enabled); 317 cpumask_set_cpu(cpu, &wd_cpus_enabled);
301 if (cpumask_weight(&wd_cpus_enabled) == 1) { 318 if (cpumask_weight(&wd_cpus_enabled) == 1) {
302 cpumask_set_cpu(cpu, &wd_smp_cpus_pending); 319 cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
303 wd_smp_last_reset_tb = get_tb(); 320 wd_smp_last_reset_tb = get_tb();
304 } 321 }
305 smp_wmb(); 322 wd_smp_unlock(&flags);
323
306 start_watchdog_timer_on(cpu); 324 start_watchdog_timer_on(cpu);
307 325
308 return 0; 326 return 0;
@@ -310,12 +328,17 @@ static int start_wd_on_cpu(unsigned int cpu)
310 328
311static int stop_wd_on_cpu(unsigned int cpu) 329static int stop_wd_on_cpu(unsigned int cpu)
312{ 330{
331 unsigned long flags;
332
313 if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) 333 if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
314 return 0; /* Can happen in CPU unplug case */ 334 return 0; /* Can happen in CPU unplug case */
315 335
316 stop_watchdog_timer_on(cpu); 336 stop_watchdog_timer_on(cpu);
317 337
338 wd_smp_lock(&flags);
318 cpumask_clear_cpu(cpu, &wd_cpus_enabled); 339 cpumask_clear_cpu(cpu, &wd_cpus_enabled);
340 wd_smp_unlock(&flags);
341
319 wd_smp_clear_cpu_pending(cpu, get_tb()); 342 wd_smp_clear_cpu_pending(cpu, get_tb());
320 343
321 return 0; 344 return 0;
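Editor's aside on the arch_touch_nmi_watchdog() change above, with
illustrative numbers: for a typical 512 MHz timebase,
tb_ticks_per_usec is 512, so with wd_timer_period_ms = 1000

	ticks = 512 * 1000 * 1000 = 512,000,000 timebase ticks

and a caller that touches the watchdog in a tight loop now runs the
heavier watchdog_timer_interrupt() path at most once per timer period
instead of on every single call.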
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 2abee070373f..a553aeea7af6 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -56,6 +56,7 @@ u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
56 */ 56 */
57static u64 pnv_deepest_stop_psscr_val; 57static u64 pnv_deepest_stop_psscr_val;
58static u64 pnv_deepest_stop_psscr_mask; 58static u64 pnv_deepest_stop_psscr_mask;
59static u64 pnv_deepest_stop_flag;
59static bool deepest_stop_found; 60static bool deepest_stop_found;
60 61
61static int pnv_save_sprs_for_deep_states(void) 62static int pnv_save_sprs_for_deep_states(void)
@@ -185,8 +186,40 @@ static void pnv_alloc_idle_core_states(void)
185 186
186 update_subcore_sibling_mask(); 187 update_subcore_sibling_mask();
187 188
188 if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) 189 if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
189 pnv_save_sprs_for_deep_states(); 190 int rc = pnv_save_sprs_for_deep_states();
191
192 if (likely(!rc))
193 return;
194
195 /*
196 * The stop-api is unable to restore hypervisor
197 * resources on wakeup from platform idle states which
198 * lose full context. So disable such states.
199 */
200 supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
201 pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
202 pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
203
204 if (cpu_has_feature(CPU_FTR_ARCH_300) &&
205 (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
206 /*
207 * Use the default stop state for CPU-Hotplug
208 * if available.
209 */
210 if (default_stop_found) {
211 pnv_deepest_stop_psscr_val =
212 pnv_default_stop_val;
213 pnv_deepest_stop_psscr_mask =
214 pnv_default_stop_mask;
215 pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
216 pnv_deepest_stop_psscr_val);
217 } else { /* Fallback to snooze loop for CPU-Hotplug */
218 deepest_stop_found = false;
219 pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
220 }
221 }
222 }
190} 223}
191 224
192u32 pnv_get_supported_cpuidle_states(void) 225u32 pnv_get_supported_cpuidle_states(void)
@@ -375,7 +408,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
375 pnv_deepest_stop_psscr_val; 408 pnv_deepest_stop_psscr_val;
376 srr1 = power9_idle_stop(psscr); 409 srr1 = power9_idle_stop(psscr);
377 410
378 } else if (idle_states & OPAL_PM_WINKLE_ENABLED) { 411 } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
412 (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
379 srr1 = power7_idle_insn(PNV_THREAD_WINKLE); 413 srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
380 } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) || 414 } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
381 (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { 415 (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
@@ -553,6 +587,7 @@ static int __init pnv_power9_idle_init(struct device_node *np, u32 *flags,
553 max_residency_ns = residency_ns[i]; 587 max_residency_ns = residency_ns[i];
554 pnv_deepest_stop_psscr_val = psscr_val[i]; 588 pnv_deepest_stop_psscr_val = psscr_val[i];
555 pnv_deepest_stop_psscr_mask = psscr_mask[i]; 589 pnv_deepest_stop_psscr_mask = psscr_mask[i];
590 pnv_deepest_stop_flag = flags[i];
556 deepest_stop_found = true; 591 deepest_stop_found = true;
557 } 592 }
558 593
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7317b3108a88..2eb8ff0d6fca 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -47,10 +47,9 @@ struct mmu_table_batch {
47extern void tlb_table_flush(struct mmu_gather *tlb); 47extern void tlb_table_flush(struct mmu_gather *tlb);
48extern void tlb_remove_table(struct mmu_gather *tlb, void *table); 48extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
49 49
50static inline void tlb_gather_mmu(struct mmu_gather *tlb, 50static inline void
51 struct mm_struct *mm, 51arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
52 unsigned long start, 52 unsigned long start, unsigned long end)
53 unsigned long end)
54{ 53{
55 tlb->mm = mm; 54 tlb->mm = mm;
56 tlb->start = start; 55 tlb->start = start;
@@ -76,9 +75,15 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
76 tlb_flush_mmu_free(tlb); 75 tlb_flush_mmu_free(tlb);
77} 76}
78 77
79static inline void tlb_finish_mmu(struct mmu_gather *tlb, 78static inline void
80 unsigned long start, unsigned long end) 79arch_tlb_finish_mmu(struct mmu_gather *tlb,
80 unsigned long start, unsigned long end, bool force)
81{ 81{
82 if (force) {
83 tlb->start = start;
84 tlb->end = end;
85 }
86
82 tlb_flush_mmu(tlb); 87 tlb_flush_mmu(tlb);
83} 88}
84 89
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 01c6fbc3e85b..1803797fc885 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
1253 insn_count = bpf_jit_insn(jit, fp, i); 1253 insn_count = bpf_jit_insn(jit, fp, i);
1254 if (insn_count < 0) 1254 if (insn_count < 0)
1255 return -1; 1255 return -1;
1256 jit->addrs[i + 1] = jit->prg; /* Next instruction address */ 1256 /* Next instruction address */
1257 jit->addrs[i + insn_count] = jit->prg;
1257 } 1258 }
1258 bpf_jit_epilogue(jit); 1259 bpf_jit_epilogue(jit);
1259 1260
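Editor's note, illustrative: this matters for the one eBPF instruction
that occupies two slots, BPF_LD | BPF_DW | BPF_IMM, where
bpf_jit_insn() returns insn_count == 2. For such an instruction at
index i the old code stamped addrs[i + 1] and then presumably advanced
i by insn_count, so addrs[i + 2] (the next real instruction) was never
filled in and a branch resolving through it used a stale offset; the
new

	jit->addrs[i + insn_count] = jit->prg;	/* addrs[i + 2] here */

records the correct next-instruction address.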
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 46e0d635e36f..51a8bc967e75 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,7 +36,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
36} 36}
37 37
38static inline void 38static inline void
39tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 39arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
40 unsigned long start, unsigned long end)
40{ 41{
41 tlb->mm = mm; 42 tlb->mm = mm;
42 tlb->start = start; 43 tlb->start = start;
@@ -47,9 +48,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
47} 48}
48 49
49static inline void 50static inline void
50tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 51arch_tlb_finish_mmu(struct mmu_gather *tlb,
52 unsigned long start, unsigned long end, bool force)
51{ 53{
52 if (tlb->fullmm) 54 if (tlb->fullmm || force)
53 flush_tlb_mm(tlb->mm); 55 flush_tlb_mm(tlb->mm);
54 56
55 /* keep the page table cache within bounds */ 57 /* keep the page table cache within bounds */
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h
index 1d8321c827a8..1b1286d05069 100644
--- a/arch/sparc/include/asm/spitfire.h
+++ b/arch/sparc/include/asm/spitfire.h
@@ -47,10 +47,26 @@
47#define SUN4V_CHIP_NIAGARA5 0x05 47#define SUN4V_CHIP_NIAGARA5 0x05
48#define SUN4V_CHIP_SPARC_M6 0x06 48#define SUN4V_CHIP_SPARC_M6 0x06
49#define SUN4V_CHIP_SPARC_M7 0x07 49#define SUN4V_CHIP_SPARC_M7 0x07
50#define SUN4V_CHIP_SPARC_M8 0x08
50#define SUN4V_CHIP_SPARC64X 0x8a 51#define SUN4V_CHIP_SPARC64X 0x8a
51#define SUN4V_CHIP_SPARC_SN 0x8b 52#define SUN4V_CHIP_SPARC_SN 0x8b
52#define SUN4V_CHIP_UNKNOWN 0xff 53#define SUN4V_CHIP_UNKNOWN 0xff
53 54
55/*
56 * The following CPU_ID_xxx constants are used
57 * to identify the CPU type in the setup phase
58 * (see head_64.S)
59 */
60#define CPU_ID_NIAGARA1 ('1')
61#define CPU_ID_NIAGARA2 ('2')
62#define CPU_ID_NIAGARA3 ('3')
63#define CPU_ID_NIAGARA4 ('4')
64#define CPU_ID_NIAGARA5 ('5')
65#define CPU_ID_M6 ('6')
66#define CPU_ID_M7 ('7')
67#define CPU_ID_M8 ('8')
68#define CPU_ID_SONOMA1 ('N')
69
54#ifndef __ASSEMBLY__ 70#ifndef __ASSEMBLY__
55 71
56enum ultra_tlb_layout { 72enum ultra_tlb_layout {
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 493e023a468a..ef4f18f7a674 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
506 sparc_pmu_type = "sparc-m7"; 506 sparc_pmu_type = "sparc-m7";
507 break; 507 break;
508 508
509 case SUN4V_CHIP_SPARC_M8:
510 sparc_cpu_type = "SPARC-M8";
511 sparc_fpu_type = "SPARC-M8 integrated FPU";
512 sparc_pmu_type = "sparc-m8";
513 break;
514
509 case SUN4V_CHIP_SPARC_SN: 515 case SUN4V_CHIP_SPARC_SN:
510 sparc_cpu_type = "SPARC-SN"; 516 sparc_cpu_type = "SPARC-SN";
511 sparc_fpu_type = "SPARC-SN integrated FPU"; 517 sparc_fpu_type = "SPARC-SN integrated FPU";
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 45c820e1cba5..90d550bbfeef 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
328 case SUN4V_CHIP_NIAGARA5: 328 case SUN4V_CHIP_NIAGARA5:
329 case SUN4V_CHIP_SPARC_M6: 329 case SUN4V_CHIP_SPARC_M6:
330 case SUN4V_CHIP_SPARC_M7: 330 case SUN4V_CHIP_SPARC_M7:
331 case SUN4V_CHIP_SPARC_M8:
331 case SUN4V_CHIP_SPARC_SN: 332 case SUN4V_CHIP_SPARC_SN:
332 case SUN4V_CHIP_SPARC64X: 333 case SUN4V_CHIP_SPARC64X:
333 rover_inc_table = niagara_iterate_method; 334 rover_inc_table = niagara_iterate_method;
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 41a407328667..78e0211753d2 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -424,22 +424,25 @@ EXPORT_SYMBOL(sun4v_chip_type)
424 nop 424 nop
425 425
42670: ldub [%g1 + 7], %g2 42670: ldub [%g1 + 7], %g2
427 cmp %g2, '3' 427 cmp %g2, CPU_ID_NIAGARA3
428 be,pt %xcc, 5f 428 be,pt %xcc, 5f
429 mov SUN4V_CHIP_NIAGARA3, %g4 429 mov SUN4V_CHIP_NIAGARA3, %g4
430 cmp %g2, '4' 430 cmp %g2, CPU_ID_NIAGARA4
431 be,pt %xcc, 5f 431 be,pt %xcc, 5f
432 mov SUN4V_CHIP_NIAGARA4, %g4 432 mov SUN4V_CHIP_NIAGARA4, %g4
433 cmp %g2, '5' 433 cmp %g2, CPU_ID_NIAGARA5
434 be,pt %xcc, 5f 434 be,pt %xcc, 5f
435 mov SUN4V_CHIP_NIAGARA5, %g4 435 mov SUN4V_CHIP_NIAGARA5, %g4
436 cmp %g2, '6' 436 cmp %g2, CPU_ID_M6
437 be,pt %xcc, 5f 437 be,pt %xcc, 5f
438 mov SUN4V_CHIP_SPARC_M6, %g4 438 mov SUN4V_CHIP_SPARC_M6, %g4
439 cmp %g2, '7' 439 cmp %g2, CPU_ID_M7
440 be,pt %xcc, 5f 440 be,pt %xcc, 5f
441 mov SUN4V_CHIP_SPARC_M7, %g4 441 mov SUN4V_CHIP_SPARC_M7, %g4
442 cmp %g2, 'N' 442 cmp %g2, CPU_ID_M8
443 be,pt %xcc, 5f
444 mov SUN4V_CHIP_SPARC_M8, %g4
445 cmp %g2, CPU_ID_SONOMA1
443 be,pt %xcc, 5f 446 be,pt %xcc, 5f
444 mov SUN4V_CHIP_SPARC_SN, %g4 447 mov SUN4V_CHIP_SPARC_SN, %g4
445 ba,pt %xcc, 49f 448 ba,pt %xcc, 49f
@@ -448,10 +451,10 @@ EXPORT_SYMBOL(sun4v_chip_type)
44891: sethi %hi(prom_cpu_compatible), %g1 45191: sethi %hi(prom_cpu_compatible), %g1
449 or %g1, %lo(prom_cpu_compatible), %g1 452 or %g1, %lo(prom_cpu_compatible), %g1
450 ldub [%g1 + 17], %g2 453 ldub [%g1 + 17], %g2
451 cmp %g2, '1' 454 cmp %g2, CPU_ID_NIAGARA1
452 be,pt %xcc, 5f 455 be,pt %xcc, 5f
453 mov SUN4V_CHIP_NIAGARA1, %g4 456 mov SUN4V_CHIP_NIAGARA1, %g4
454 cmp %g2, '2' 457 cmp %g2, CPU_ID_NIAGARA2
455 be,pt %xcc, 5f 458 be,pt %xcc, 5f
456 mov SUN4V_CHIP_NIAGARA2, %g4 459 mov SUN4V_CHIP_NIAGARA2, %g4
457 460
@@ -602,6 +605,9 @@ niagara_tlb_fixup:
602 cmp %g1, SUN4V_CHIP_SPARC_M7 605 cmp %g1, SUN4V_CHIP_SPARC_M7
603 be,pt %xcc, niagara4_patch 606 be,pt %xcc, niagara4_patch
604 nop 607 nop
608 cmp %g1, SUN4V_CHIP_SPARC_M8
609 be,pt %xcc, niagara4_patch
610 nop
605 cmp %g1, SUN4V_CHIP_SPARC_SN 611 cmp %g1, SUN4V_CHIP_SPARC_SN
606 be,pt %xcc, niagara4_patch 612 be,pt %xcc, niagara4_patch
607 nop 613 nop
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 4d9c3e13c150..150ee7d4b059 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -288,10 +288,17 @@ static void __init sun4v_patch(void)
288 288
289 sun4v_patch_2insn_range(&__sun4v_2insn_patch, 289 sun4v_patch_2insn_range(&__sun4v_2insn_patch,
290 &__sun4v_2insn_patch_end); 290 &__sun4v_2insn_patch_end);
291 if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 291
292 sun4v_chip_type == SUN4V_CHIP_SPARC_SN) 292 switch (sun4v_chip_type) {
293 case SUN4V_CHIP_SPARC_M7:
294 case SUN4V_CHIP_SPARC_M8:
295 case SUN4V_CHIP_SPARC_SN:
293 sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, 296 sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
294 &__sun_m7_2insn_patch_end); 297 &__sun_m7_2insn_patch_end);
298 break;
299 default:
300 break;
301 }
295 302
296 sun4v_hvapi_init(); 303 sun4v_hvapi_init();
297} 304}
@@ -529,6 +536,7 @@ static void __init init_sparc64_elf_hwcap(void)
529 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 536 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
530 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 537 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
531 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 538 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
539 sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
532 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 540 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
533 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 541 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
534 cap |= HWCAP_SPARC_BLKINIT; 542 cap |= HWCAP_SPARC_BLKINIT;
@@ -538,6 +546,7 @@ static void __init init_sparc64_elf_hwcap(void)
538 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 546 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
539 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 547 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
540 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 548 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
549 sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
541 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 550 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
542 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 551 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
543 cap |= HWCAP_SPARC_N2; 552 cap |= HWCAP_SPARC_N2;
@@ -568,6 +577,7 @@ static void __init init_sparc64_elf_hwcap(void)
568 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 577 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
569 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 578 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
570 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 579 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
580 sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
571 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 581 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
572 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 582 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
573 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | 583 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
@@ -578,6 +588,7 @@ static void __init init_sparc64_elf_hwcap(void)
578 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || 588 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
579 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || 589 sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
580 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || 590 sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
591 sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
581 sun4v_chip_type == SUN4V_CHIP_SPARC_SN || 592 sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
582 sun4v_chip_type == SUN4V_CHIP_SPARC64X) 593 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
583 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | 594 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index fed73f14aa49..afa0099f3748 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1944,12 +1944,22 @@ static void __init setup_page_offset(void)
1944 break; 1944 break;
1945 case SUN4V_CHIP_SPARC_M7: 1945 case SUN4V_CHIP_SPARC_M7:
1946 case SUN4V_CHIP_SPARC_SN: 1946 case SUN4V_CHIP_SPARC_SN:
1947 default:
1948 /* M7 and later support 52-bit virtual addresses. */ 1947 /* M7 and later support 52-bit virtual addresses. */
1949 sparc64_va_hole_top = 0xfff8000000000000UL; 1948 sparc64_va_hole_top = 0xfff8000000000000UL;
1950 sparc64_va_hole_bottom = 0x0008000000000000UL; 1949 sparc64_va_hole_bottom = 0x0008000000000000UL;
1951 max_phys_bits = 49; 1950 max_phys_bits = 49;
1952 break; 1951 break;
1952 case SUN4V_CHIP_SPARC_M8:
1953 default:
1954 /* M8 and later support 54-bit virtual addresses.
 1955 * However, we restrict M8 and above to 53 VA bits,
 1956 * as a 4-level page table cannot support more than
 1957 * 53 VA bits.
1958 */
1959 sparc64_va_hole_top = 0xfff0000000000000UL;
1960 sparc64_va_hole_bottom = 0x0010000000000000UL;
1961 max_phys_bits = 51;
1962 break;
1953 } 1963 }
1954 } 1964 }
1955 1965
@@ -2161,6 +2171,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
2161 */ 2171 */
2162 switch (sun4v_chip_type) { 2172 switch (sun4v_chip_type) {
2163 case SUN4V_CHIP_SPARC_M7: 2173 case SUN4V_CHIP_SPARC_M7:
2174 case SUN4V_CHIP_SPARC_M8:
2164 case SUN4V_CHIP_SPARC_SN: 2175 case SUN4V_CHIP_SPARC_SN:
2165 pagecv_flag = 0x00; 2176 pagecv_flag = 0x00;
2166 break; 2177 break;
@@ -2313,6 +2324,7 @@ void __init paging_init(void)
2313 */ 2324 */
2314 switch (sun4v_chip_type) { 2325 switch (sun4v_chip_type) {
2315 case SUN4V_CHIP_SPARC_M7: 2326 case SUN4V_CHIP_SPARC_M7:
2327 case SUN4V_CHIP_SPARC_M8:
2316 case SUN4V_CHIP_SPARC_SN: 2328 case SUN4V_CHIP_SPARC_SN:
2317 page_cache4v_flag = _PAGE_CP_4V; 2329 page_cache4v_flag = _PAGE_CP_4V;
2318 break; 2330 break;
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 600a2e9bfee2..344d95619d03 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,7 +45,8 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
45} 45}
46 46
47static inline void 47static inline void
48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) 48arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
49 unsigned long start, unsigned long end)
49{ 50{
50 tlb->mm = mm; 51 tlb->mm = mm;
51 tlb->start = start; 52 tlb->start = start;
@@ -80,13 +81,19 @@ tlb_flush_mmu(struct mmu_gather *tlb)
80 tlb_flush_mmu_free(tlb); 81 tlb_flush_mmu_free(tlb);
81} 82}
82 83
83/* tlb_finish_mmu 84/* arch_tlb_finish_mmu
84 * Called at the end of the shootdown operation to free up any resources 85 * Called at the end of the shootdown operation to free up any resources
85 * that were required. 86 * that were required.
86 */ 87 */
87static inline void 88static inline void
88tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 89arch_tlb_finish_mmu(struct mmu_gather *tlb,
90 unsigned long start, unsigned long end, bool force)
89{ 91{
92 if (force) {
93 tlb->start = start;
94 tlb->end = end;
95 tlb->need_flush = 1;
96 }
90 tlb_flush_mmu(tlb); 97 tlb_flush_mmu(tlb);
91 98
92 /* keep the page table cache within bounds */ 99 /* keep the page table cache within bounds */
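Editor's note: the arch_ renames above (s390, sh, um) suggest the
generic tlb_gather/finish entry points now live in common code and
pass the new "force" flag down. A minimal sketch of such a caller,
assuming this refactor and the mm_tlb_flush_nested() helper from the
same series:

	void tlb_finish_mmu(struct mmu_gather *tlb,
			    unsigned long start, unsigned long end)
	{
		/*
		 * Force a flush if another thread raced an unmap on
		 * this mm; an empty range must still be flushed then.
		 */
		bool force = mm_tlb_flush_nested(tlb->mm);

		arch_tlb_finish_mmu(tlb, start, end, force);
	}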
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 21126155a739..0ead9dbb9130 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -43,6 +43,9 @@ struct hypervisor_x86 {
43 43
44 /* pin current vcpu to specified physical cpu (run rarely) */ 44 /* pin current vcpu to specified physical cpu (run rarely) */
45 void (*pin_vcpu)(int); 45 void (*pin_vcpu)(int);
46
47 /* called during init_mem_mapping() to setup early mappings. */
48 void (*init_mem_mapping)(void);
46}; 49};
47 50
48extern const struct hypervisor_x86 *x86_hyper; 51extern const struct hypervisor_x86 *x86_hyper;
@@ -57,8 +60,15 @@ extern const struct hypervisor_x86 x86_hyper_kvm;
57extern void init_hypervisor_platform(void); 60extern void init_hypervisor_platform(void);
58extern bool hypervisor_x2apic_available(void); 61extern bool hypervisor_x2apic_available(void);
59extern void hypervisor_pin_vcpu(int cpu); 62extern void hypervisor_pin_vcpu(int cpu);
63
64static inline void hypervisor_init_mem_mapping(void)
65{
66 if (x86_hyper && x86_hyper->init_mem_mapping)
67 x86_hyper->init_mem_mapping();
68}
60#else 69#else
61static inline void init_hypervisor_platform(void) { } 70static inline void init_hypervisor_platform(void) { }
62static inline bool hypervisor_x2apic_available(void) { return false; } 71static inline bool hypervisor_x2apic_available(void) { return false; }
72static inline void hypervisor_init_mem_mapping(void) { }
63#endif /* CONFIG_HYPERVISOR_GUEST */ 73#endif /* CONFIG_HYPERVISOR_GUEST */
64#endif /* _ASM_X86_HYPERVISOR_H */ 74#endif /* _ASM_X86_HYPERVISOR_H */
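Editor's sketch of a consumer of the new hook; the hypervisor "foo"
and its functions are hypothetical, but the callback field matches the
header above:

	static void __init foo_init_mem_mapping(void)
	{
		/* fix up early mappings once the direct map exists */
	}

	const struct hypervisor_x86 x86_hyper_foo = {
		.name             = "Foo",
		.init_mem_mapping = foo_init_mem_mapping,
		/* detect, init_platform, etc. elided */
	};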
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 673541eb3b3f..bf3f1065d6ad 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -18,6 +18,7 @@
18#include <asm/dma.h> /* for MAX_DMA_PFN */ 18#include <asm/dma.h> /* for MAX_DMA_PFN */
19#include <asm/microcode.h> 19#include <asm/microcode.h>
20#include <asm/kaslr.h> 20#include <asm/kaslr.h>
21#include <asm/hypervisor.h>
21 22
22/* 23/*
23 * We need to define the tracepoints somewhere, and tlb.c 24 * We need to define the tracepoints somewhere, and tlb.c
@@ -636,6 +637,8 @@ void __init init_mem_mapping(void)
636 load_cr3(swapper_pg_dir); 637 load_cr3(swapper_pg_dir);
637 __flush_tlb_all(); 638 __flush_tlb_all();
638 639
640 hypervisor_init_mem_mapping();
641
639 early_memtest(0, max_pfn_mapped << PAGE_SHIFT); 642 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
640} 643}
641 644
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 87d791356ea9..de503c225ae1 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -12,6 +12,7 @@
12#include <asm/setup.h> 12#include <asm/setup.h>
13#include <asm/hypervisor.h> 13#include <asm/hypervisor.h>
14#include <asm/e820/api.h> 14#include <asm/e820/api.h>
15#include <asm/early_ioremap.h>
15 16
16#include <asm/xen/cpuid.h> 17#include <asm/xen/cpuid.h>
17#include <asm/xen/hypervisor.h> 18#include <asm/xen/hypervisor.h>
@@ -21,38 +22,50 @@
21#include "mmu.h" 22#include "mmu.h"
22#include "smp.h" 23#include "smp.h"
23 24
24void __ref xen_hvm_init_shared_info(void) 25static unsigned long shared_info_pfn;
26
27void xen_hvm_init_shared_info(void)
25{ 28{
26 struct xen_add_to_physmap xatp; 29 struct xen_add_to_physmap xatp;
27 u64 pa;
28
29 if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
30 /*
31 * Search for a free page starting at 4kB physical address.
32 * Low memory is preferred to avoid an EPT large page split up
33 * by the mapping.
34 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
35 * the BIOS used for HVM guests is well behaved and won't
36 * clobber memory other than the first 4kB.
37 */
38 for (pa = PAGE_SIZE;
39 !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
40 memblock_is_reserved(pa);
41 pa += PAGE_SIZE)
42 ;
43
44 memblock_reserve(pa, PAGE_SIZE);
45 HYPERVISOR_shared_info = __va(pa);
46 }
47 30
48 xatp.domid = DOMID_SELF; 31 xatp.domid = DOMID_SELF;
49 xatp.idx = 0; 32 xatp.idx = 0;
50 xatp.space = XENMAPSPACE_shared_info; 33 xatp.space = XENMAPSPACE_shared_info;
51 xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info); 34 xatp.gpfn = shared_info_pfn;
52 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 35 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
53 BUG(); 36 BUG();
54} 37}
55 38
39static void __init reserve_shared_info(void)
40{
41 u64 pa;
42
43 /*
44 * Search for a free page starting at 4kB physical address.
45 * Low memory is preferred to avoid an EPT large page split up
46 * by the mapping.
47 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
48 * the BIOS used for HVM guests is well behaved and won't
49 * clobber memory other than the first 4kB.
50 */
51 for (pa = PAGE_SIZE;
52 !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
53 memblock_is_reserved(pa);
54 pa += PAGE_SIZE)
55 ;
56
57 shared_info_pfn = PHYS_PFN(pa);
58
59 memblock_reserve(pa, PAGE_SIZE);
60 HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
61}
62
63static void __init xen_hvm_init_mem_mapping(void)
64{
65 early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
66 HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
67}
68
56static void __init init_hvm_pv_info(void) 69static void __init init_hvm_pv_info(void)
57{ 70{
58 int major, minor; 71 int major, minor;
@@ -153,6 +166,7 @@ static void __init xen_hvm_guest_init(void)
153 166
154 init_hvm_pv_info(); 167 init_hvm_pv_info();
155 168
169 reserve_shared_info();
156 xen_hvm_init_shared_info(); 170 xen_hvm_init_shared_info();
157 171
158 /* 172 /*
@@ -218,5 +232,6 @@ const struct hypervisor_x86 x86_hyper_xen_hvm = {
218 .init_platform = xen_hvm_guest_init, 232 .init_platform = xen_hvm_guest_init,
219 .pin_vcpu = xen_pin_vcpu, 233 .pin_vcpu = xen_pin_vcpu,
220 .x2apic_available = xen_x2apic_para_available, 234 .x2apic_available = xen_x2apic_para_available,
235 .init_mem_mapping = xen_hvm_init_mem_mapping,
221}; 236};
222EXPORT_SYMBOL(x86_hyper_xen_hvm); 237EXPORT_SYMBOL(x86_hyper_xen_hvm);
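Editor's note on the two-step mapping above, illustrative: the shared
info page is reserved before the kernel's direct map exists, so __va()
cannot be used yet; the flow is roughly

	reserve_shared_info();       /* memblock_reserve() + early_memremap() */
	xen_hvm_init_shared_info();  /* hand the gpfn to Xen */
	/* ... later, from init_mem_mapping() via the hypervisor hook: */
	xen_hvm_init_mem_mapping();  /* early_memunmap() + switch to __va() */

which is why the init_mem_mapping() callback added in the x86 hunks
above exists at all.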
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 2d716ebc5a5e..dff7cc39437c 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -1,5 +1,6 @@
1generic-y += bug.h 1generic-y += bug.h
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += device.h
3generic-y += div64.h 4generic-y += div64.h
4generic-y += dma-contiguous.h 5generic-y += dma-contiguous.h
5generic-y += emergency-restart.h 6generic-y += emergency-restart.h
@@ -17,6 +18,7 @@ generic-y += local.h
17generic-y += local64.h 18generic-y += local64.h
18generic-y += mcs_spinlock.h 19generic-y += mcs_spinlock.h
19generic-y += mm-arch-hooks.h 20generic-y += mm-arch-hooks.h
21generic-y += param.h
20generic-y += percpu.h 22generic-y += percpu.h
21generic-y += preempt.h 23generic-y += preempt.h
22generic-y += rwsem.h 24generic-y += rwsem.h
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h
deleted file mode 100644
index 1deeb8ebbb1b..000000000000
--- a/arch/xtensa/include/asm/device.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Arch specific extensions to struct device
3 *
4 * This file is released under the GPLv2
5 */
6#ifndef _ASM_XTENSA_DEVICE_H
7#define _ASM_XTENSA_DEVICE_H
8
9struct dev_archdata {
10};
11
12struct pdev_archdata {
13};
14
15#endif /* _ASM_XTENSA_DEVICE_H */
diff --git a/arch/xtensa/include/asm/param.h b/arch/xtensa/include/asm/param.h
deleted file mode 100644
index 0a70e780ef2a..000000000000
--- a/arch/xtensa/include/asm/param.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * include/asm-xtensa/param.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc.
9 */
10#ifndef _XTENSA_PARAM_H
11#define _XTENSA_PARAM_H
12
13#include <uapi/asm/param.h>
14
15# define HZ CONFIG_HZ /* internal timer frequency */
16# define USER_HZ 100 /* for user interfaces in "ticks" */
17# define CLOCKS_PER_SEC (USER_HZ)	/* frequency at which times() counts */
18#endif /* _XTENSA_PARAM_H */
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index d159e9b9c018..672391003e40 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
94} 94}
95EXPORT_SYMBOL(__sync_fetch_and_or_4); 95EXPORT_SYMBOL(__sync_fetch_and_or_4);
96 96
97#ifdef CONFIG_NET
98/* 97/*
99 * Networking support 98 * Networking support
100 */ 99 */
101EXPORT_SYMBOL(csum_partial); 100EXPORT_SYMBOL(csum_partial);
102EXPORT_SYMBOL(csum_partial_copy_generic); 101EXPORT_SYMBOL(csum_partial_copy_generic);
103#endif /* CONFIG_NET */
104 102
105/* 103/*
106 * Architecture-specific symbols 104 * Architecture-specific symbols
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 1a804a2f9a5b..3c75c4e597da 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
103 clear_page_alias(kvaddr, paddr); 103 clear_page_alias(kvaddr, paddr);
104 preempt_enable(); 104 preempt_enable();
105} 105}
106EXPORT_SYMBOL(clear_user_highpage);
106 107
107void copy_user_highpage(struct page *dst, struct page *src, 108void copy_user_highpage(struct page *dst, struct page *src,
108 unsigned long vaddr, struct vm_area_struct *vma) 109 unsigned long vaddr, struct vm_area_struct *vma)
@@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src,
119 copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); 120 copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
120 preempt_enable(); 121 preempt_enable();
121} 122}
122 123EXPORT_SYMBOL(copy_user_highpage);
123#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
124
125#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
126 124
127/* 125/*
128 * Any time the kernel writes to a user page cache page, or it is about to 126 * Any time the kernel writes to a user page cache page, or it is about to
@@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page)
176 174
177 /* There shouldn't be an entry in the cache for this page anymore. */ 175 /* There shouldn't be an entry in the cache for this page anymore. */
178} 176}
179 177EXPORT_SYMBOL(flush_dcache_page);
180 178
181/* 179/*
182 * For now, flush the whole cache. FIXME?? 180 * For now, flush the whole cache. FIXME??
@@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma,
188 __flush_invalidate_dcache_all(); 186 __flush_invalidate_dcache_all();
189 __invalidate_icache_all(); 187 __invalidate_icache_all();
190} 188}
189EXPORT_SYMBOL(local_flush_cache_range);
191 190
192/* 191/*
193 * Remove any entry in the cache for this page. 192 * Remove any entry in the cache for this page.
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
207 __flush_invalidate_dcache_page_alias(virt, phys); 206 __flush_invalidate_dcache_page_alias(virt, phys);
208 __invalidate_icache_page_alias(virt, phys); 207 __invalidate_icache_page_alias(virt, phys);
209} 208}
209EXPORT_SYMBOL(local_flush_cache_page);
210 210
211#endif 211#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
212 212
213void 213void
214update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) 214update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
225 225
226 flush_tlb_page(vma, addr); 226 flush_tlb_page(vma, addr);
227 227
228#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 228#if (DCACHE_WAY_SIZE > PAGE_SIZE)
229 229
230 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { 230 if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
231 unsigned long phys = page_to_phys(page); 231 unsigned long phys = page_to_phys(page);
@@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
256 * flush_dcache_page() on the page. 256 * flush_dcache_page() on the page.
257 */ 257 */
258 258
259#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 259#if (DCACHE_WAY_SIZE > PAGE_SIZE)
260 260
261void copy_to_user_page(struct vm_area_struct *vma, struct page *page, 261void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
262 unsigned long vaddr, void *dst, const void *src, 262 unsigned long vaddr, void *dst, const void *src,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 63e771ab56d8..859f0a8c97c8 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -71,17 +71,29 @@ struct bfq_service_tree {
71 * 71 *
72 * bfq_sched_data is the basic scheduler queue. It supports three 72 * bfq_sched_data is the basic scheduler queue. It supports three
73 * ioprio_classes, and can be used either as a toplevel queue or as an 73 * ioprio_classes, and can be used either as a toplevel queue or as an
74 * intermediate queue on a hierarchical setup. @next_in_service 74 * intermediate queue in a hierarchical setup.
75 * points to the active entity of the sched_data service trees that
76 * will be scheduled next. It is used to reduce the number of steps
77 * needed for each hierarchical-schedule update.
78 * 75 *
79 * The supported ioprio_classes are the same as in CFQ, in descending 76 * The supported ioprio_classes are the same as in CFQ, in descending
80 * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. 77 * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
81 * Requests from higher priority queues are served before all the 78 * Requests from higher priority queues are served before all the
82 * requests from lower priority queues; among requests of the same 79 * requests from lower priority queues; among requests of the same
83 * queue requests are served according to B-WF2Q+. 80 * queue requests are served according to B-WF2Q+.
84 * All the fields are protected by the queue lock of the containing bfqd. 81 *
82 * The schedule is implemented by the service trees, plus the field
83 * @next_in_service, which points to the entity on the active trees
84 * that will be served next, if 1) no changes in the schedule occurs
85 * before the current in-service entity is expired, 2) the in-service
86 * queue becomes idle when it expires, and 3) if the entity pointed by
87 * in_service_entity is not a queue, then the in-service child entity
88 * of the entity pointed by in_service_entity becomes idle on
89 * expiration. This peculiar definition allows for the following
90 * optimization, not yet exploited: while a given entity is still in
91 * service, we already know which is the best candidate for next
92 * service among the other active entities in the same parent
93 * entity. We can then quickly compare the timestamps of the
94 * in-service entity with those of such best candidate.
95 *
96 * All fields are protected by the lock of the containing bfqd.
85 */ 97 */
86struct bfq_sched_data { 98struct bfq_sched_data {
87 /* entity in service */ 99 /* entity in service */
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 979f8f21b7e2..911aa7431dbe 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -188,21 +188,23 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service)
188 188
189/* 189/*
190 * This function tells whether entity stops being a candidate for next 190 * This function tells whether entity stops being a candidate for next
191 * service, according to the following logic. 191 * service, according to the restrictive definition of the field
192 * next_in_service. In particular, this function is invoked for an
193 * entity that is about to be set in service.
192 * 194 *
193 * This function is invoked for an entity that is about to be set in 195 * If entity is a queue, then the entity is no longer a candidate for
194 * service. If such an entity is a queue, then the entity is no longer 196 * next service according to the that definition, because entity is
195 * a candidate for next service (i.e, a candidate entity to serve 197 * about to become the in-service queue. This function then returns
196 * after the in-service entity is expired). The function then returns 198 * true if entity is a queue.
197 * true.
198 * 199 *
199 * In contrast, the entity could stil be a candidate for next service 200 * In contrast, entity could still be a candidate for next service if
200 * if it is not a queue, and has more than one child. In fact, even if 201 * it is not a queue, and has more than one active child. In fact,
201 * one of its children is about to be set in service, other children 202 * even if one of its children is about to be set in service, other
202 * may still be the next to serve. As a consequence, a non-queue 203 * active children may still be the next to serve, for the parent
203 * entity is not a candidate for next-service only if it has only one 204 * entity, even according to the above definition. As a consequence, a
204 * child. And only if this condition holds, then the function returns 205 * non-queue entity is not a candidate for next-service only if it has
205 * true for a non-queue entity. 206 * only one active child. And only if this condition holds, then this
207 * function returns true for a non-queue entity.
206 */ 208 */
207static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) 209static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
208{ 210{
@@ -213,6 +215,18 @@ static bool bfq_no_longer_next_in_service(struct bfq_entity *entity)
213 215
214 bfqg = container_of(entity, struct bfq_group, entity); 216 bfqg = container_of(entity, struct bfq_group, entity);
215 217
218 /*
219 * The field active_entities does not always contain the
220 * actual number of active child entities: it may not
221 * account for the in-service entity when the latter is
222 * removed from its active tree (which may happen after
223 * invoking the function bfq_no_longer_next_in_service in
224 * bfq_get_next_queue). Fortunately, here, i.e., while
225 * bfq_no_longer_next_in_service is not yet completed in
226 * bfq_get_next_queue, bfq_active_extract has not yet been
227 * invoked, and thus active_entities still coincides with the
228 * actual number of active entities.
229 */
216 if (bfqg->active_entities == 1) 230 if (bfqg->active_entities == 1)
217 return true; 231 return true;
218 232
@@ -954,7 +968,7 @@ static void bfq_update_fin_time_enqueue(struct bfq_entity *entity,
954 * one of its children receives a new request. 968 * one of its children receives a new request.
955 * 969 *
956 * Basically, this function updates the timestamps of entity and 970 * Basically, this function updates the timestamps of entity and
957 * inserts entity into its active tree, ater possible extracting it 971 * inserts entity into its active tree, after possibly extracting it
958 * from its idle tree. 972 * from its idle tree.
959 */ 973 */
960static void __bfq_activate_entity(struct bfq_entity *entity, 974static void __bfq_activate_entity(struct bfq_entity *entity,
@@ -1048,7 +1062,7 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
1048 entity->start = entity->finish; 1062 entity->start = entity->finish;
1049 /* 1063 /*
1050 * In addition, if the entity had more than one child 1064 * In addition, if the entity had more than one child
1051 * when set in service, then was not extracted from 1065 * when set in service, then it was not extracted from
1052 * the active tree. This implies that the position of 1066 * the active tree. This implies that the position of
1053 * the entity in the active tree may need to be 1067 * the entity in the active tree may need to be
1054 * changed now, because we have just updated the start 1068 * changed now, because we have just updated the start
@@ -1056,9 +1070,8 @@ static void __bfq_requeue_entity(struct bfq_entity *entity)
1056 * time in a moment (the requeueing is then, more 1070 * time in a moment (the requeueing is then, more
1057 * precisely, a repositioning in this case). To 1071 * precisely, a repositioning in this case). To
1058 * implement this repositioning, we: 1) dequeue the 1072 * implement this repositioning, we: 1) dequeue the
1059 * entity here, 2) update the finish time and 1073 * entity here, 2) update the finish time and requeue
1060 * requeue the entity according to the new 1074 * the entity according to the new timestamps below.
1061 * timestamps below.
1062 */ 1075 */
1063 if (entity->tree) 1076 if (entity->tree)
1064 bfq_active_extract(st, entity); 1077 bfq_active_extract(st, entity);
@@ -1105,9 +1118,10 @@ static void __bfq_activate_requeue_entity(struct bfq_entity *entity,
1105 1118
1106 1119
1107/** 1120/**
1108 * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, 1121 * bfq_activate_requeue_entity - activate or requeue an entity representing a
1109 * and activate, requeue or reposition all ancestors 1122 * bfq_queue, and activate, requeue or reposition
1110 * for which such an update becomes necessary. 1123 * all ancestors for which such an update becomes
1124 * necessary.
1111 * @entity: the entity to activate. 1125 * @entity: the entity to activate.
1112 * @non_blocking_wait_rq: true if this entity was waiting for a request 1126 * @non_blocking_wait_rq: true if this entity was waiting for a request
1113 * @requeue: true if this is a requeue, which implies that bfqq is 1127 * @requeue: true if this is a requeue, which implies that bfqq is
@@ -1135,9 +1149,9 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1135 * @ins_into_idle_tree: if false, the entity will not be put into the 1149 * @ins_into_idle_tree: if false, the entity will not be put into the
1136 * idle tree. 1150 * idle tree.
1137 * 1151 *
1138 * Deactivates an entity, independently from its previous state. Must 1152 * Deactivates an entity, independently of its previous state. Must
1139 * be invoked only if entity is on a service tree. Extracts the entity 1153 * be invoked only if entity is on a service tree. Extracts the entity
1140 * from that tree, and if necessary and allowed, puts it on the idle 1154 * from that tree, and if necessary and allowed, puts it into the idle
1141 * tree. 1155 * tree.
1142 */ 1156 */
1143bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) 1157bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
@@ -1158,8 +1172,10 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1158 st = bfq_entity_service_tree(entity); 1172 st = bfq_entity_service_tree(entity);
1159 is_in_service = entity == sd->in_service_entity; 1173 is_in_service = entity == sd->in_service_entity;
1160 1174
1161 if (is_in_service) 1175 if (is_in_service) {
1162 bfq_calc_finish(entity, entity->service); 1176 bfq_calc_finish(entity, entity->service);
1177 sd->in_service_entity = NULL;
1178 }
1163 1179
1164 if (entity->tree == &st->active) 1180 if (entity->tree == &st->active)
1165 bfq_active_extract(st, entity); 1181 bfq_active_extract(st, entity);
@@ -1177,7 +1193,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1177/** 1193/**
1178 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. 1194 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
1179 * @entity: the entity to deactivate. 1195 * @entity: the entity to deactivate.
1180 * @ins_into_idle_tree: true if the entity can be put on the idle tree 1196 * @ins_into_idle_tree: true if the entity can be put into the idle tree
1181 */ 1197 */
1182static void bfq_deactivate_entity(struct bfq_entity *entity, 1198static void bfq_deactivate_entity(struct bfq_entity *entity,
1183 bool ins_into_idle_tree, 1199 bool ins_into_idle_tree,
@@ -1208,16 +1224,29 @@ static void bfq_deactivate_entity(struct bfq_entity *entity,
1208 */ 1224 */
1209 bfq_update_next_in_service(sd, NULL); 1225 bfq_update_next_in_service(sd, NULL);
1210 1226
1211 if (sd->next_in_service) 1227 if (sd->next_in_service || sd->in_service_entity) {
1212 /* 1228 /*
1213 * The parent entity is still backlogged, 1229 * The parent entity is still active, because
1214 * because next_in_service is not NULL. So, no 1230 * either next_in_service or in_service_entity
1215 * further upwards deactivation must be 1231 * is not NULL. So, no further upwards
1216 * performed. Yet, next_in_service has 1232 * deactivation must be performed. Yet,
1217 * changed. Then the schedule does need to be 1233 * next_in_service has changed. Then the
1218 * updated upwards. 1234 * schedule does need to be updated upwards.
1235 *
1236 * NOTE If in_service_entity is not NULL, then
1237 * next_in_service may happen to be NULL,
1238 * although the parent entity is evidently
1239 * active. This happens if 1) the entity
1240 * pointed by in_service_entity is the only
1241 * active entity in the parent entity, and 2)
1242 * according to the definition of
1243 * next_in_service, the in_service_entity
1244 * cannot be considered as
1245 * next_in_service. See the comments on the
1246 * definition of next_in_service for details.
1219 */ 1247 */
1220 break; 1248 break;
1249 }
1221 1250
1222 /* 1251 /*
1223 * If we get here, then the parent is no more 1252 * If we get here, then the parent is no more
@@ -1494,47 +1523,34 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
1494 1523
1495 /* 1524 /*
1496 * If entity is no longer a candidate for next 1525 * If entity is no longer a candidate for next
1497 * service, then we extract it from its active tree, 1526 * service, then it must be extracted from its active
1498 * for the following reason. To further boost the 1527 * tree, so as to make sure that it won't be
1499 * throughput in some special case, BFQ needs to know 1528 * considered when computing next_in_service. See the
1500 * which is the next candidate entity to serve, while 1529 * comments on the function
1501 * there is already an entity in service. In this 1530 * bfq_no_longer_next_in_service() for details.
1502 * respect, to make it easy to compute/update the next
1503 * candidate entity to serve after the current
1504 * candidate has been set in service, there is a case
1505 * where it is necessary to extract the current
1506 * candidate from its service tree. Such a case is
1507 * when the entity just set in service cannot be also
1508 * a candidate for next service. Details about when
1509 * this conditions holds are reported in the comments
1510 * on the function bfq_no_longer_next_in_service()
1511 * invoked below.
1512 */ 1531 */
1513 if (bfq_no_longer_next_in_service(entity)) 1532 if (bfq_no_longer_next_in_service(entity))
1514 bfq_active_extract(bfq_entity_service_tree(entity), 1533 bfq_active_extract(bfq_entity_service_tree(entity),
1515 entity); 1534 entity);
1516 1535
1517 /* 1536 /*
1518 * For the same reason why we may have just extracted 1537 * Even if entity is not to be extracted according to
1519 * entity from its active tree, we may need to update 1538 * the above check, a descendant entity may get
1520 * next_in_service for the sched_data of entity too, 1539 * extracted in one of the next iterations of this
1521 * regardless of whether entity has been extracted. 1540 * loop. Such an event could cause a change in
1522 * In fact, even if entity has not been extracted, a 1541 * next_in_service for the level of the descendant
1523 * descendant entity may get extracted. Such an event 1542 * entity, and thus possibly back to this level.
1524 * would cause a change in next_in_service for the
1525 * level of the descendant entity, and thus possibly
1526 * back to upper levels.
1527 * 1543 *
1528 * We cannot perform the resulting needed update 1544 * However, we cannot perform the resulting needed
1529 * before the end of this loop, because, to know which 1545 * update of next_in_service for this level before the
1530 * is the correct next-to-serve candidate entity for 1546 * end of the whole loop, because, to know which is
1531 * each level, we need first to find the leaf entity 1547 * the correct next-to-serve candidate entity for each
1532 * to set in service. In fact, only after we know 1548 * level, we need first to find the leaf entity to set
1533 * which is the next-to-serve leaf entity, we can 1549 * in service. In fact, only after we know which is
1534 * discover whether the parent entity of the leaf 1550 * the next-to-serve leaf entity, we can discover
1535 * entity becomes the next-to-serve, and so on. 1551 * whether the parent entity of the leaf entity
1552 * becomes the next-to-serve, and so on.
1536 */ 1553 */
1537
1538 } 1554 }
1539 1555
1540 bfqq = bfq_entity_to_bfqq(entity); 1556 bfqq = bfq_entity_to_bfqq(entity);
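
The predicate documented in bfq_no_longer_next_in_service() boils down to two cases: a queue about to be served is never again the next-in-service candidate, while a group stops being one only when its in-service child is its sole active child. A standalone sketch of that decision, with an invented struct entity:

#include <stdbool.h>
#include <stdio.h>

struct entity {
	bool is_queue;		/* leaf bfq_queue vs. group */
	int active_children;	/* only meaningful for groups */
};

/* Mirrors the predicate above: queues always return true, groups only
 * when the child being served is their single active child. */
static bool no_longer_next_in_service(const struct entity *e)
{
	if (e->is_queue)
		return true;
	return e->active_children == 1;
}

int main(void)
{
	struct entity q = { .is_queue = true };
	struct entity g = { .is_queue = false, .active_children = 2 };

	printf("%d %d\n", no_longer_next_in_service(&q),
	       no_longer_next_in_service(&g));
	return 0;
}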
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 83e92beb3c9f..9b1ea478577b 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -387,9 +387,11 @@ static void bio_integrity_verify_fn(struct work_struct *work)
387 */ 387 */
388bool __bio_integrity_endio(struct bio *bio) 388bool __bio_integrity_endio(struct bio *bio)
389{ 389{
390 if (bio_op(bio) == REQ_OP_READ && !bio->bi_status) { 390 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
391 struct bio_integrity_payload *bip = bio_integrity(bio); 391 struct bio_integrity_payload *bip = bio_integrity(bio);
392 392
393 if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
394 (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
393 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn); 395 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
394 queue_work(kintegrityd_wq, &bip->bip_work); 396 queue_work(kintegrityd_wq, &bip->bip_work);
395 return false; 397 return false;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 041f7b7fa0d6..535cbdf32aab 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -301,11 +301,12 @@ static struct request *blk_mq_get_request(struct request_queue *q,
301 struct elevator_queue *e = q->elevator; 301 struct elevator_queue *e = q->elevator;
302 struct request *rq; 302 struct request *rq;
303 unsigned int tag; 303 unsigned int tag;
304 struct blk_mq_ctx *local_ctx = NULL;
304 305
305 blk_queue_enter_live(q); 306 blk_queue_enter_live(q);
306 data->q = q; 307 data->q = q;
307 if (likely(!data->ctx)) 308 if (likely(!data->ctx))
308 data->ctx = blk_mq_get_ctx(q); 309 data->ctx = local_ctx = blk_mq_get_ctx(q);
309 if (likely(!data->hctx)) 310 if (likely(!data->hctx))
310 data->hctx = blk_mq_map_queue(q, data->ctx->cpu); 311 data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
311 if (op & REQ_NOWAIT) 312 if (op & REQ_NOWAIT)
@@ -324,6 +325,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
324 325
325 tag = blk_mq_get_tag(data); 326 tag = blk_mq_get_tag(data);
326 if (tag == BLK_MQ_TAG_FAIL) { 327 if (tag == BLK_MQ_TAG_FAIL) {
328 if (local_ctx) {
329 blk_mq_put_ctx(local_ctx);
330 data->ctx = NULL;
331 }
327 blk_queue_exit(q); 332 blk_queue_exit(q);
328 return NULL; 333 return NULL;
329 } 334 }
@@ -356,12 +361,12 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
356 361
357 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 362 rq = blk_mq_get_request(q, NULL, op, &alloc_data);
358 363
359 blk_mq_put_ctx(alloc_data.ctx);
360 blk_queue_exit(q);
361
362 if (!rq) 364 if (!rq)
363 return ERR_PTR(-EWOULDBLOCK); 365 return ERR_PTR(-EWOULDBLOCK);
364 366
367 blk_mq_put_ctx(alloc_data.ctx);
368 blk_queue_exit(q);
369
365 rq->__data_len = 0; 370 rq->__data_len = 0;
366 rq->__sector = (sector_t) -1; 371 rq->__sector = (sector_t) -1;
367 rq->bio = rq->biotail = NULL; 372 rq->bio = rq->biotail = NULL;
@@ -407,11 +412,11 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
407 412
408 rq = blk_mq_get_request(q, NULL, op, &alloc_data); 413 rq = blk_mq_get_request(q, NULL, op, &alloc_data);
409 414
410 blk_queue_exit(q);
411
412 if (!rq) 415 if (!rq)
413 return ERR_PTR(-EWOULDBLOCK); 416 return ERR_PTR(-EWOULDBLOCK);
414 417
418 blk_queue_exit(q);
419
415 return rq; 420 return rq;
416} 421}
417EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 422EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
@@ -679,8 +684,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list);
679void blk_mq_delay_kick_requeue_list(struct request_queue *q, 684void blk_mq_delay_kick_requeue_list(struct request_queue *q,
680 unsigned long msecs) 685 unsigned long msecs)
681{ 686{
682 kblockd_schedule_delayed_work(&q->requeue_work, 687 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
683 msecs_to_jiffies(msecs)); 688 msecs_to_jiffies(msecs));
684} 689}
685EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); 690EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
686 691
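
The blk_mq_get_request() fix above hinges on ownership: release the ctx reference on the tag-allocation failure path only if this call acquired it, and clear the pointer so the caller never sees a stale ctx. A userspace sketch of the pattern (refcounting and names invented):

#include <stdbool.h>
#include <stdio.h>

static int ctx_refs;

static int *get_ctx(void) { ctx_refs++; return &ctx_refs; }
static void put_ctx(void) { ctx_refs--; }

/* Returns false on failure; drops the ctx ref only if acquired here. */
static bool get_request(int **ctx, bool fail_alloc)
{
	int *local_ctx = NULL;

	if (!*ctx)
		*ctx = local_ctx = get_ctx();

	if (fail_alloc) {
		if (local_ctx) {	/* we took the ref, we release it */
			put_ctx();
			*ctx = NULL;	/* don't leave a dangling pointer */
		}
		return false;
	}
	return true;
}

int main(void)
{
	int *ctx = NULL;

	get_request(&ctx, true);
	printf("refs after failed alloc: %d\n", ctx_refs);	/* 0: no leak */
	return 0;
}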
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..98aa8c808a33 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -17,6 +17,16 @@
17#include <linux/serial_core.h> 17#include <linux/serial_core.h>
18 18
19/* 19/*
20 * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
21 * occasionally getting stuck as 1. To avoid the potential for a hang, check
22 * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
23 * implementations, so only do so if an affected platform is detected in
24 * parse_spcr().
25 */
26bool qdf2400_e44_present;
27EXPORT_SYMBOL(qdf2400_e44_present);
28
29/*
20 * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit. 30 * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
21 * Detect them by examining the OEM fields in the SPCR header, similar to PCI 31 * Detect them by examining the OEM fields in the SPCR header, similar to PCI
22 * quirk detection in pci_mcfg.c. 32 * quirk detection in pci_mcfg.c.
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
147 goto done; 157 goto done;
148 } 158 }
149 159
150 if (qdf2400_erratum_44_present(&table->header)) 160 /*
151 uart = "qdf2400_e44"; 161 * If the E44 erratum is required, then we need to tell the pl011
162 * driver to implement the work-around.
163 *
164 * The global variable is used by the probe function when it
165 * creates the UARTs, whether or not they're used as a console.
166 *
167 * If the user specifies "traditional" earlycon, the qdf2400_e44
168 * console name matches the EARLYCON_DECLARE() statement, and
169 * SPCR is not used. Parameter "earlycon" is false.
170 *
171 * If the user specifies "SPCR" earlycon, then we need to update
172 * the console name so that it also says "qdf2400_e44". Parameter
173 * "earlycon" is true.
174 *
175 * For consistency, if we change the console name, then we do it
176 * for everyone, not just earlycon.
177 */
178 if (qdf2400_erratum_44_present(&table->header)) {
179 qdf2400_e44_present = true;
180 if (earlycon)
181 uart = "qdf2400_e44";
182 }
183
152 if (xgene_8250_erratum_present(table)) 184 if (xgene_8250_erratum_present(table))
153 iotype = "mmio32"; 185 iotype = "mmio32";
154 186
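
On an affected platform the pl011 driver then has to poll a different flag-register bit. A sketch of what the resulting check could look like, assuming the standard PL011 FR layout (BUSY = bit 3, TXFE = bit 7):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FR_BUSY (1u << 3)
#define FR_TXFE (1u << 7)

bool qdf2400_e44_present;	/* set from SPCR quirk detection */

/* With erratum 44, BUSY can stick at 1, so wait for TXFE instead. */
static bool tx_idle(uint32_t fr)
{
	if (qdf2400_e44_present)
		return fr & FR_TXFE;
	return !(fr & FR_BUSY);
}

int main(void)
{
	qdf2400_e44_present = true;
	printf("%d\n", tx_idle(FR_BUSY | FR_TXFE));	/* 1: stuck BUSY ignored */
	return 0;
}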
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..bfbe1e154128 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,7 +30,6 @@
30#include <linux/syscore_ops.h> 30#include <linux/syscore_ops.h>
31#include <linux/reboot.h> 31#include <linux/reboot.h>
32#include <linux/security.h> 32#include <linux/security.h>
33#include <linux/swait.h>
34 33
35#include <generated/utsrelease.h> 34#include <generated/utsrelease.h>
36 35
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
112 * state of the firmware loading. 111 * state of the firmware loading.
113 */ 112 */
114struct fw_state { 113struct fw_state {
115 struct swait_queue_head wq; 114 struct completion completion;
116 enum fw_status status; 115 enum fw_status status;
117}; 116};
118 117
119static void fw_state_init(struct fw_state *fw_st) 118static void fw_state_init(struct fw_state *fw_st)
120{ 119{
121 init_swait_queue_head(&fw_st->wq); 120 init_completion(&fw_st->completion);
122 fw_st->status = FW_STATUS_UNKNOWN; 121 fw_st->status = FW_STATUS_UNKNOWN;
123} 122}
124 123
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
131{ 130{
132 long ret; 131 long ret;
133 132
134 ret = swait_event_interruptible_timeout(fw_st->wq, 133 ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
135 __fw_state_is_done(READ_ONCE(fw_st->status)),
136 timeout);
137 if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) 134 if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
138 return -ENOENT; 135 return -ENOENT;
139 if (!ret) 136 if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
148 WRITE_ONCE(fw_st->status, status); 145 WRITE_ONCE(fw_st->status, status);
149 146
150 if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) 147 if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
151 swake_up(&fw_st->wq); 148 complete_all(&fw_st->completion);
152} 149}
153 150
154#define fw_state_start(fw_st) \ 151#define fw_state_start(fw_st) \
155 __fw_state_set(fw_st, FW_STATUS_LOADING) 152 __fw_state_set(fw_st, FW_STATUS_LOADING)
156#define fw_state_done(fw_st) \ 153#define fw_state_done(fw_st) \
157 __fw_state_set(fw_st, FW_STATUS_DONE) 154 __fw_state_set(fw_st, FW_STATUS_DONE)
155#define fw_state_aborted(fw_st) \
156 __fw_state_set(fw_st, FW_STATUS_ABORTED)
158#define fw_state_wait(fw_st) \ 157#define fw_state_wait(fw_st) \
159 __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) 158 __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
160 159
161#ifndef CONFIG_FW_LOADER_USER_HELPER
162
163#define fw_state_is_aborted(fw_st) false
164
165#else /* CONFIG_FW_LOADER_USER_HELPER */
166
167static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) 160static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
168{ 161{
169 return fw_st->status == status; 162 return fw_st->status == status;
170} 163}
171 164
165#define fw_state_is_aborted(fw_st) \
166 __fw_state_check(fw_st, FW_STATUS_ABORTED)
167
168#ifdef CONFIG_FW_LOADER_USER_HELPER
169
172#define fw_state_aborted(fw_st) \ 170#define fw_state_aborted(fw_st) \
173 __fw_state_set(fw_st, FW_STATUS_ABORTED) 171 __fw_state_set(fw_st, FW_STATUS_ABORTED)
174#define fw_state_is_done(fw_st) \ 172#define fw_state_is_done(fw_st) \
175 __fw_state_check(fw_st, FW_STATUS_DONE) 173 __fw_state_check(fw_st, FW_STATUS_DONE)
176#define fw_state_is_loading(fw_st) \ 174#define fw_state_is_loading(fw_st) \
177 __fw_state_check(fw_st, FW_STATUS_LOADING) 175 __fw_state_check(fw_st, FW_STATUS_LOADING)
178#define fw_state_is_aborted(fw_st) \
179 __fw_state_check(fw_st, FW_STATUS_ABORTED)
180#define fw_state_wait_timeout(fw_st, timeout) \ 176#define fw_state_wait_timeout(fw_st, timeout) \
181 __fw_state_wait_common(fw_st, timeout) 177 __fw_state_wait_common(fw_st, timeout)
182 178
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
1200 return 1; /* need to load */ 1196 return 1; /* need to load */
1201} 1197}
1202 1198
1199/*
1200 * Batched requests need only one wake, we need to do this step last due to the
1201 * fallback mechanism. The buf is protected with kref_get(), and it won't be
1202 * released until the last user calls release_firmware().
1203 *
1204 * Failed batched requests are possible as well, in such cases we just share
1205 * the struct firmware_buf and won't release it until all requests are woken
1206 * and have gone through this same path.
1207 */
1208static void fw_abort_batch_reqs(struct firmware *fw)
1209{
1210 struct firmware_buf *buf;
1211
1212 /* Loaded directly? */
1213 if (!fw || !fw->priv)
1214 return;
1215
1216 buf = fw->priv;
1217 if (!fw_state_is_aborted(&buf->fw_st))
1218 fw_state_aborted(&buf->fw_st);
1219}
1220
1203/* called from request_firmware() and request_firmware_work_func() */ 1221/* called from request_firmware() and request_firmware_work_func() */
1204static int 1222static int
1205_request_firmware(const struct firmware **firmware_p, const char *name, 1223_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
1243 1261
1244 out: 1262 out:
1245 if (ret < 0) { 1263 if (ret < 0) {
1264 fw_abort_batch_reqs(fw);
1246 release_firmware(fw); 1265 release_firmware(fw);
1247 fw = NULL; 1266 fw = NULL;
1248 } 1267 }
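
Switching from a swait queue to a completion fits the batched-request model above: a completion carries its own "done" state, and complete_all() wakes every waiter of the batch exactly once. A pthread-based userspace sketch of those semantics (the kernel's completion is not built this way; this only mirrors the interface):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = false;
}

static void complete_all(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);	/* wake every waiter */
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)			/* predicate lives inside */
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion fw_done;

static void *waiter(void *arg)
{
	wait_for_completion(&fw_done);
	printf("request %ld woken\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	long i;

	init_completion(&fw_done);
	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, waiter, (void *)i);
	complete_all(&fw_done);		/* one event, all batched waiters wake */
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}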
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b16ead1da58..ad9749463d4f 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -875,6 +875,56 @@ static void print_version(void)
875 printk(KERN_INFO "%s", version); 875 printk(KERN_INFO "%s", version);
876} 876}
877 877
878struct vdc_check_port_data {
879 int dev_no;
880 char *type;
881};
882
883static int vdc_device_probed(struct device *dev, void *arg)
884{
885 struct vio_dev *vdev = to_vio_dev(dev);
886 struct vdc_check_port_data *port_data;
887
888 port_data = (struct vdc_check_port_data *)arg;
889
890 if ((vdev->dev_no == port_data->dev_no) &&
891 (!(strcmp((char *)&vdev->type, port_data->type))) &&
892 dev_get_drvdata(dev)) {
893 /* This device has already been configured
894 * by vdc_port_probe()
895 */
896 return 1;
897 } else {
898 return 0;
899 }
900}
901
902/* Determine whether the VIO device is part of an mpgroup
903 * by locating all the virtual-device-port nodes associated
904 * with the parent virtual-device node for the VIO device
905 * and checking whether any of these nodes are vdc-ports
906 * which have already been configured.
907 *
908 * Returns true if this device is part of an mpgroup and has
909 * already been probed.
910 */
911static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
912{
913 struct vdc_check_port_data port_data;
914 struct device *dev;
915
916 port_data.dev_no = vdev->dev_no;
917 port_data.type = (char *)&vdev->type;
918
919 dev = device_find_child(vdev->dev.parent, &port_data,
920 vdc_device_probed);
921
922 if (dev)
923 return true;
924
925 return false;
926}
927
878static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) 928static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
879{ 929{
880 struct mdesc_handle *hp; 930 struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
893 goto err_out_release_mdesc; 943 goto err_out_release_mdesc;
894 } 944 }
895 945
946 /* Check if this device is part of an mpgroup */
947 if (vdc_port_mpgroup_check(vdev)) {
948 printk(KERN_WARNING
949 "VIO: Ignoring extra vdisk port %s",
950 dev_name(&vdev->dev));
951 goto err_out_release_mdesc;
952 }
953
896 port = kzalloc(sizeof(*port), GFP_KERNEL); 954 port = kzalloc(sizeof(*port), GFP_KERNEL);
897 err = -ENOMEM; 955 err = -ENOMEM;
898 if (!port) { 956 if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
943 if (err) 1001 if (err)
944 goto err_out_free_tx_ring; 1002 goto err_out_free_tx_ring;
945 1003
1004 /* Note that the device driver_data is used to determine
1005 * whether the port has been probed.
1006 */
946 dev_set_drvdata(&vdev->dev, port); 1007 dev_set_drvdata(&vdev->dev, port);
947 1008
948 mdesc_release(hp); 1009 mdesc_release(hp);
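
device_find_child() iterates a parent's children and returns the first one for which the match callback returns nonzero, which is how vdc_port_mpgroup_check() spots an already-probed sibling port. A userspace sketch of the same callback-driven lookup, with invented structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dev {
	int dev_no;
	const char *type;
	void *drvdata;		/* non-NULL once probed */
};

struct match_data {
	int dev_no;
	const char *type;
};

static int device_probed(const struct dev *d, const void *arg)
{
	const struct match_data *m = arg;

	return d->dev_no == m->dev_no && !strcmp(d->type, m->type) && d->drvdata;
}

static const struct dev *find_child(const struct dev *children, size_t n,
				    const void *arg,
				    int (*match)(const struct dev *, const void *))
{
	for (size_t i = 0; i < n; i++)
		if (match(&children[i], arg))
			return &children[i];
	return NULL;
}

int main(void)
{
	struct dev ports[] = {
		{ 0, "vdc-port", (void *)1 },	/* already probed */
		{ 0, "vdc-port", NULL },	/* duplicate mpgroup member */
	};
	struct match_data m = { 0, "vdc-port" };

	printf("duplicate: %d\n", find_child(ports, 2, &m, device_probed) != NULL);
	return 0;
}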
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 856d5dc02451..3b1b6340ba13 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
308 struct device_attribute *attr, const char *buf, size_t len) 308 struct device_attribute *attr, const char *buf, size_t len)
309{ 309{
310 struct zram *zram = dev_to_zram(dev); 310 struct zram *zram = dev_to_zram(dev);
311 char compressor[CRYPTO_MAX_ALG_NAME]; 311 char compressor[ARRAY_SIZE(zram->compressor)];
312 size_t sz; 312 size_t sz;
313 313
314 strlcpy(compressor, buf, sizeof(compressor)); 314 strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
327 return -EBUSY; 327 return -EBUSY;
328 } 328 }
329 329
330 strlcpy(zram->compressor, compressor, sizeof(compressor)); 330 strcpy(zram->compressor, compressor);
331 up_write(&zram->init_lock); 331 up_write(&zram->init_lock);
332 return len; 332 return len;
333} 333}
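
The zram change sizes the scratch buffer from the destination array, so the copy back can rely on the two buffers matching. A sketch of the pattern (snprintf stands in for strlcpy, which glibc lacks):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static char compressor[16];	/* stand-in for zram->compressor */

int main(void)
{
	/* Scratch buffer sized from the destination, not a separate
	 * constant, so the final copy back can never overflow it. */
	char tmp[ARRAY_SIZE(compressor)];

	snprintf(tmp, sizeof(tmp), "%s", "lzo-some-very-long-name");
	snprintf(compressor, sizeof(compressor), "%s", tmp);
	printf("%s\n", compressor);
	return 0;
}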
diff --git a/drivers/char/random.c b/drivers/char/random.c
index afa3ce7d3e72..8ad92707e45f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
1492#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1492#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
1493 print_once = true; 1493 print_once = true;
1494#endif 1494#endif
1495 pr_notice("random: %s called from %pF with crng_init=%d\n", 1495 pr_notice("random: %s called from %pS with crng_init=%d\n",
1496 func_name, caller, crng_init); 1496 func_name, caller, crng_init);
1497} 1497}
1498 1498
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193..42896a67aeae 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
235 return -1; 235 return -1;
236} 236}
237 237
238extern u32 pnv_get_supported_cpuidle_states(void);
238static int powernv_add_idle_states(void) 239static int powernv_add_idle_states(void)
239{ 240{
240 struct device_node *power_mgt; 241 struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
248 const char *names[CPUIDLE_STATE_MAX]; 249 const char *names[CPUIDLE_STATE_MAX];
249 u32 has_stop_states = 0; 250 u32 has_stop_states = 0;
250 int i, rc; 251 int i, rc;
252 u32 supported_flags = pnv_get_supported_cpuidle_states();
253
251 254
252 /* Currently we have snooze statically defined */ 255 /* Currently we have snooze statically defined */
253 256
@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
362 for (i = 0; i < dt_idle_states; i++) { 365 for (i = 0; i < dt_idle_states; i++) {
363 unsigned int exit_latency, target_residency; 366 unsigned int exit_latency, target_residency;
364 bool stops_timebase = false; 367 bool stops_timebase = false;
368
369 /*
370 * Skip the platform idle state whose flag isn't in
371 * the supported_cpuidle_states flag mask.
372 */
373 if ((flags[i] & supported_flags) != flags[i])
374 continue;
365 /* 375 /*
366 * If an idle state has exit latency beyond 376 * If an idle state has exit latency beyond
367 * POWERNV_THRESHOLD_LATENCY_NS then don't use it 377 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
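
The skip condition above is a subset test: a device-tree idle state is usable only if every flag it advertises is also present in the firmware-supported mask. Spelled out:

#include <stdbool.h>
#include <stdio.h>

/* True when every bit of `flags` is also set in `supported`. */
static bool state_supported(unsigned int flags, unsigned int supported)
{
	return (flags & supported) == flags;
}

int main(void)
{
	printf("%d %d\n",
	       state_supported(0x3, 0x7),	/* 1: subset */
	       state_supported(0x9, 0x7));	/* 0: bit 3 unsupported */
	return 0;
}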
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 8527a5899a2f..3f819399cd95 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
883 if (ret) 883 if (ret)
884 return ret; 884 return ret;
885 885
886 memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); 886 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
887 memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
888
889 for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
890 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || 887 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
891 ctx->opad[i] != le32_to_cpu(ostate.state[i])) { 888 ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
892 ctx->base.needs_inv = true; 889 ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
894 } 891 }
895 } 892 }
896 893
894 memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
895 memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
896
897 return 0; 897 return 0;
898} 898}
899 899
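
The safexcel fix is purely about ordering: the cached ipad/opad must be compared against the freshly derived digests before being overwritten, otherwise the needs_inv comparison always sees equal values. A sketch of the compare-then-copy pattern:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define WORDS 5	/* SHA1_DIGEST_SIZE / sizeof(u32) */

static unsigned int cached[WORDS];

/* Compare first, copy second: copying first would erase the evidence
 * that the key actually changed. */
static bool set_key(const unsigned int fresh[WORDS])
{
	bool changed = memcmp(cached, fresh, sizeof(cached)) != 0;

	memcpy(cached, fresh, sizeof(cached));
	return changed;	/* caller uses this as needs_inv */
}

int main(void)
{
	unsigned int k1[WORDS] = { 1, 2, 3, 4, 5 };
	unsigned int k2[WORDS] = { 1, 2, 3, 4, 6 };

	set_key(k1);
	printf("needs_inv: %d\n", set_key(k2));	/* 1 */
	return 0;
}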
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d7e219d2669d..66fb40d0ebdb 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
304{ 304{
305 struct sync_file *sync_file = file->private_data; 305 struct sync_file *sync_file = file->private_data;
306 306
307 if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) 307 if (test_bit(POLL_ENABLED, &sync_file->flags))
308 dma_fence_remove_callback(sync_file->fence, &sync_file->cb); 308 dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
309 dma_fence_put(sync_file->fence); 309 dma_fence_put(sync_file->fence);
310 kfree(sync_file); 310 kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
318 318
319 poll_wait(file, &sync_file->wq, wait); 319 poll_wait(file, &sync_file->wq, wait);
320 320
321 if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { 321 if (list_empty(&sync_file->cb.node) &&
322 !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
322 if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, 323 if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
323 fence_check_cb_func) < 0) 324 fence_check_cb_func) < 0)
324 wake_up_all(&sync_file->wq); 325 wake_up_all(&sync_file->wq);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 5c26488e7a2d..0529e500c534 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
1255 1255
1256 /* port@2 is the output port */ 1256 /* port@2 is the output port */
1257 ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); 1257 ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
1258 if (ret) 1258 if (ret && ret != -ENODEV)
1259 return ret; 1259 return ret;
1260 1260
1261 /* Shut down GPIO is optional */ 1261 /* Shut down GPIO is optional */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2..6463fc2c736f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
270 if (ret) 270 if (ret)
271 return ret; 271 return ret;
272 272
273 if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { 273 if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
274 DRM_ERROR("relocation %u outside object", i); 274 DRM_ERROR("relocation %u outside object\n", i);
275 return -EINVAL; 275 return -EINVAL;
276 } 276 }
277 277
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d48fd7c918f8..73217c281c9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
145exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 145exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
146 const struct drm_mode_fb_cmd2 *mode_cmd) 146 const struct drm_mode_fb_cmd2 *mode_cmd)
147{ 147{
148 const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
148 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 149 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
149 struct drm_gem_object *obj; 150 struct drm_gem_object *obj;
150 struct drm_framebuffer *fb; 151 struct drm_framebuffer *fb;
151 int i; 152 int i;
152 int ret; 153 int ret;
153 154
154 for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { 155 for (i = 0; i < info->num_planes; i++) {
156 unsigned int height = (i == 0) ? mode_cmd->height :
157 DIV_ROUND_UP(mode_cmd->height, info->vsub);
158 unsigned long size = height * mode_cmd->pitches[i] +
159 mode_cmd->offsets[i];
160
155 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); 161 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
156 if (!obj) { 162 if (!obj) {
157 DRM_ERROR("failed to lookup gem object\n"); 163 DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
160 } 166 }
161 167
162 exynos_gem[i] = to_exynos_gem(obj); 168 exynos_gem[i] = to_exynos_gem(obj);
169
170 if (size > exynos_gem[i]->size) {
171 i++;
172 ret = -EINVAL;
173 goto err;
174 }
163 } 175 }
164 176
165 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); 177 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
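
The exynos check computes, per plane, how many bytes the mode actually addresses and rejects GEM buffers that are too small; chroma planes of subsampled formats use a vertically scaled height. A sketch of the size computation with simplified format fields:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Bytes plane `i` touches: scaled height x pitch, plus the start offset. */
static unsigned long plane_size(int i, unsigned int height, unsigned int vsub,
				unsigned int pitch, unsigned int offset)
{
	unsigned int h = (i == 0) ? height : DIV_ROUND_UP(height, vsub);

	return (unsigned long)h * pitch + offset;
}

int main(void)
{
	/* NV12-like layout: full-res luma, half-height chroma. */
	printf("luma %lu chroma %lu\n",
	       plane_size(0, 1080, 2, 1920, 0),
	       plane_size(1, 1080, 2, 1920, 0));
	return 0;
}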
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242..1648887d3f55 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
46#define same_context(a, b) (((a)->context_id == (b)->context_id) && \ 46#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
47 ((a)->lrca == (b)->lrca)) 47 ((a)->lrca == (b)->lrca))
48 48
49static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
50
49static int context_switch_events[] = { 51static int context_switch_events[] = {
50 [RCS] = RCS_AS_CONTEXT_SWITCH, 52 [RCS] = RCS_AS_CONTEXT_SWITCH,
51 [BCS] = BCS_AS_CONTEXT_SWITCH, 53 [BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
499static int complete_execlist_workload(struct intel_vgpu_workload *workload) 501static int complete_execlist_workload(struct intel_vgpu_workload *workload)
500{ 502{
501 struct intel_vgpu *vgpu = workload->vgpu; 503 struct intel_vgpu *vgpu = workload->vgpu;
502 struct intel_vgpu_execlist *execlist = 504 int ring_id = workload->ring_id;
503 &vgpu->execlist[workload->ring_id]; 505 struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
504 struct intel_vgpu_workload *next_workload; 506 struct intel_vgpu_workload *next_workload;
505 struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next; 507 struct list_head *next = workload_q_head(vgpu, ring_id)->next;
506 bool lite_restore = false; 508 bool lite_restore = false;
507 int ret; 509 int ret;
508 510
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
512 release_shadow_batch_buffer(workload); 514 release_shadow_batch_buffer(workload);
513 release_shadow_wa_ctx(&workload->wa_ctx); 515 release_shadow_wa_ctx(&workload->wa_ctx);
514 516
515 if (workload->status || vgpu->resetting) 517 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
518	/* If workload->status is not successful, the HW GPU has
519	 * hung or something has gone wrong in i915/GVT, and GVT
520	 * won't inject a context switch interrupt into the guest.
521	 * To the guest, this error is therefore effectively a vGPU
522	 * hang, so we should emulate one. If there are pending
523	 * workloads already submitted by the guest, clean them up
524	 * the way the HW GPU does.
525	 *
526	 * If an engine reset is in progress, the pending workloads
527	 * won't be submitted to the HW GPU and will be cleaned up
528	 * later during the reset, so doing the cleanup here has no
529	 * impact.
530	 */
531 clean_workloads(vgpu, ENGINE_MASK(ring_id));
516 goto out; 532 goto out;
533 }
517 534
518 if (!list_empty(workload_q_head(vgpu, workload->ring_id))) { 535 if (!list_empty(workload_q_head(vgpu, ring_id))) {
519 struct execlist_ctx_descriptor_format *this_desc, *next_desc; 536 struct execlist_ctx_descriptor_format *this_desc, *next_desc;
520 537
521 next_workload = container_of(next, 538 next_workload = container_of(next,
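
Replacing the boolean resetting with an engine bitmask lets the completion path ask the narrower question "is this ring being reset?". A sketch of the mask test, modeling ENGINE_MASK() as 1 << ring_id:

#include <stdbool.h>
#include <stdio.h>

#define ENGINE_MASK(id) (1u << (id))
#define ALL_ENGINES 0xffu

/* Per-engine check: only workloads on a resetting ring are skipped. */
static bool ring_resetting(unsigned int resetting_eng, int ring_id)
{
	return resetting_eng & ENGINE_MASK(ring_id);
}

int main(void)
{
	unsigned int resetting_eng = ENGINE_MASK(2);	/* e.g. one ring only */

	printf("%d %d\n",
	       ring_resetting(resetting_eng, 2),	/* 1 */
	       ring_resetting(resetting_eng, 0));	/* 0: ring 0 unaffected */
	return 0;
}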
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
72 struct intel_gvt_device_info *info = &gvt->device_info; 72 struct intel_gvt_device_info *info = &gvt->device_info;
73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev; 73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
74 struct intel_gvt_mmio_info *e; 74 struct intel_gvt_mmio_info *e;
75 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
76 int num = gvt->mmio.num_mmio_block;
75 struct gvt_firmware_header *h; 77 struct gvt_firmware_header *h;
76 void *firmware; 78 void *firmware;
77 void *p; 79 void *p;
78 unsigned long size, crc32_start; 80 unsigned long size, crc32_start;
79 int i; 81 int i, j;
80 int ret; 82 int ret;
81 83
82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size; 84 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
105 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) 107 hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
106 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); 108 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
107 109
110 for (i = 0; i < num; i++, block++) {
111 for (j = 0; j < block->size; j += 4)
112 *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
113 I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
114 block->offset) + j));
115 }
116
108 memcpy(gvt->firmware.mmio, p, info->mmio_size); 117 memcpy(gvt->firmware.mmio, p, info->mmio_size);
109 118
110 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; 119 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..2964a4d01a66 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
149 bool active; 149 bool active;
150 bool pv_notified; 150 bool pv_notified;
151 bool failsafe; 151 bool failsafe;
152 bool resetting; 152 unsigned int resetting_eng;
153 void *sched_data; 153 void *sched_data;
154 struct vgpu_sched_ctl sched_ctl; 154 struct vgpu_sched_ctl sched_ctl;
155 155
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
195 unsigned long vgpu_allocated_fence_num; 195 unsigned long vgpu_allocated_fence_num;
196}; 196};
197 197
198/* Special MMIO blocks. */
199struct gvt_mmio_block {
200 unsigned int device;
201 i915_reg_t offset;
202 unsigned int size;
203 gvt_mmio_func read;
204 gvt_mmio_func write;
205};
206
198#define INTEL_GVT_MMIO_HASH_BITS 11 207#define INTEL_GVT_MMIO_HASH_BITS 11
199 208
200struct intel_gvt_mmio { 209struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
214/* This reg could be accessed by unaligned address */ 223/* This reg could be accessed by unaligned address */
215#define F_UNALIGN (1 << 6) 224#define F_UNALIGN (1 << 6)
216 225
226 struct gvt_mmio_block *mmio_block;
227 unsigned int num_mmio_block;
228
217 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); 229 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
218 unsigned int num_tracked_mmio; 230 unsigned int num_tracked_mmio;
219}; 231};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6..feed9921b3b3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2857 return 0; 2857 return 0;
2858} 2858}
2859 2859
2860/* Special MMIO blocks. */
2861static struct gvt_mmio_block {
2862 unsigned int device;
2863 i915_reg_t offset;
2864 unsigned int size;
2865 gvt_mmio_func read;
2866 gvt_mmio_func write;
2867} gvt_mmio_blocks[] = {
2868 {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
2869 {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
2870 {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
2871 pvinfo_mmio_read, pvinfo_mmio_write},
2872 {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
2873 {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
2874 {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
2875};
2876
2877static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, 2860static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
2878 unsigned int offset) 2861 unsigned int offset)
2879{ 2862{
2880 unsigned long device = intel_gvt_get_device_type(gvt); 2863 unsigned long device = intel_gvt_get_device_type(gvt);
2881 struct gvt_mmio_block *block = gvt_mmio_blocks; 2864 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2865 int num = gvt->mmio.num_mmio_block;
2882 int i; 2866 int i;
2883 2867
2884 for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) { 2868 for (i = 0; i < num; i++, block++) {
2885 if (!(device & block->device)) 2869 if (!(device & block->device))
2886 continue; 2870 continue;
2887 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && 2871 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
2912 gvt->mmio.mmio_attribute = NULL; 2896 gvt->mmio.mmio_attribute = NULL;
2913} 2897}
2914 2898
2899/* Special MMIO blocks. */
2900static struct gvt_mmio_block mmio_blocks[] = {
2901 {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
2902 {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
2903 {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
2904 pvinfo_mmio_read, pvinfo_mmio_write},
2905 {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
2906 {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
2907 {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
2908};
2909
2915/** 2910/**
2916 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device 2911 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
2917 * @gvt: GVT device 2912 * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
2951 goto err; 2946 goto err;
2952 } 2947 }
2953 2948
2949 gvt->mmio.mmio_block = mmio_blocks;
2950 gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
2951
2954 gvt_dbg_mmio("traced %u virtual mmio registers\n", 2952 gvt_dbg_mmio("traced %u virtual mmio registers\n",
2955 gvt->mmio.num_tracked_mmio); 2953 gvt->mmio.num_tracked_mmio);
2956 return 0; 2954 return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3030 gvt_mmio_func func; 3028 gvt_mmio_func func;
3031 int ret; 3029 int ret;
3032 3030
3033 if (WARN_ON(bytes > 4)) 3031 if (WARN_ON(bytes > 8))
3034 return -EINVAL; 3032 return -EINVAL;
3035 3033
3036 /* 3034 /*
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88..22e08eb2d0b7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
432 432
433 i915_gem_request_put(fetch_and_zero(&workload->req)); 433 i915_gem_request_put(fetch_and_zero(&workload->req));
434 434
435 if (!workload->status && !vgpu->resetting) { 435 if (!workload->status && !(vgpu->resetting_eng &
436 ENGINE_MASK(ring_id))) {
436 update_guest_context(workload); 437 update_guest_context(workload);
437 438
438 for_each_set_bit(event, workload->pending_events, 439 for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea0..3deadcbd5a24 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
480{ 480{
481 struct intel_gvt *gvt = vgpu->gvt; 481 struct intel_gvt *gvt = vgpu->gvt;
482 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 482 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
483 unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
483 484
484 gvt_dbg_core("------------------------------------------\n"); 485 gvt_dbg_core("------------------------------------------\n");
485 gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", 486 gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
486 vgpu->id, dmlr, engine_mask); 487 vgpu->id, dmlr, engine_mask);
487 vgpu->resetting = true; 488
489 vgpu->resetting_eng = resetting_eng;
488 490
489 intel_vgpu_stop_schedule(vgpu); 491 intel_vgpu_stop_schedule(vgpu);
490 /* 492 /*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
497 mutex_lock(&gvt->lock); 499 mutex_lock(&gvt->lock);
498 } 500 }
499 501
500 intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); 502 intel_vgpu_reset_execlist(vgpu, resetting_eng);
501 503
502 /* full GPU reset or device model level reset */ 504 /* full GPU reset or device model level reset */
503 if (engine_mask == ALL_ENGINES || dmlr) { 505 if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
520 } 522 }
521 } 523 }
522 524
523 vgpu->resetting = false; 525 vgpu->resetting_eng = 0;
524 gvt_dbg_core("reset vgpu%d done\n", vgpu->id); 526 gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
525 gvt_dbg_core("------------------------------------------\n"); 527 gvt_dbg_core("------------------------------------------\n");
526} 528}
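
The scheduler.c and vgpu.c hunks above replace the single vgpu->resetting flag with a per-engine resetting_eng mask, so a completing workload only skips the guest-context update when its own ring is among the engines being reset. A minimal user-space sketch of that mask logic (the engine IDs and ENGINE_MASK() here are simplified stand-ins for the i915 definitions):

#include <stdio.h>
#include <stdbool.h>

#define ENGINE_MASK(id) (1u << (id))
#define ALL_ENGINES     0xffffffffu

static unsigned int resetting_eng;

/* Begin a reset: a device-model-level reset (dmlr) covers every engine. */
static void begin_reset(bool dmlr, unsigned int engine_mask)
{
    resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
}

/* A completed workload may update the guest context only if its own
 * ring is not among the engines currently being reset. */
static bool may_update_guest_context(int ring_id)
{
    return !(resetting_eng & ENGINE_MASK(ring_id));
}

int main(void)
{
    begin_reset(false, ENGINE_MASK(2));                   /* reset ring 2 only */
    printf("ring 0: %d\n", may_update_guest_context(0));  /* 1: unaffected */
    printf("ring 2: %d\n", may_update_guest_context(2));  /* 0: being reset */
    return 0;
}

A dmlr reset still covers every engine, exactly as the resetting_eng = dmlr ? ALL_ENGINES : engine_mask assignment in the hunk reads.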
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add11..77fb39808131 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
43 return true; 43 return true;
44 44
45 case MUTEX_TRYLOCK_FAILED: 45 case MUTEX_TRYLOCK_FAILED:
46 *unlock = false;
47 preempt_disable();
46 do { 48 do {
47 cpu_relax(); 49 cpu_relax();
48 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 50 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
49 case MUTEX_TRYLOCK_SUCCESS:
50 *unlock = true; 51 *unlock = true;
51 return true; 52 break;
52 } 53 }
53 } while (!need_resched()); 54 } while (!need_resched());
55 preempt_enable();
56 return *unlock;
54 57
55 return false; 58 case MUTEX_TRYLOCK_SUCCESS:
59 *unlock = true;
60 return true;
56 } 61 }
57 62
58 BUG(); 63 BUG();
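
The shrinker_lock() rework above stops jumping to a case label inside the retry loop: on MUTEX_TRYLOCK_FAILED it now disables preemption, spins on mutex_trylock() until it succeeds or need_resched() fires, and reports the outcome through *unlock. A rough user-space analogue of the bounded trylock spin, with pthreads standing in for struct mutex and a fixed retry budget standing in for need_resched():

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

/* Spin briefly on trylock; give up once the spin budget is spent, the
 * way the kernel version gives up when need_resched() becomes true. */
static bool shrinker_lock_demo(pthread_mutex_t *lock, bool *unlock)
{
    int budget = 1000;

    if (pthread_mutex_trylock(lock) == 0) {
        *unlock = true;
        return true;
    }

    *unlock = false;
    do {
        sched_yield();  /* stands in for cpu_relax() */
        if (pthread_mutex_trylock(lock) == 0) {
            *unlock = true;
            break;
        }
    } while (--budget > 0);

    return *unlock;
}

int main(void)
{
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    bool unlock;

    if (shrinker_lock_demo(&lock, &unlock) && unlock)
        pthread_mutex_unlock(&lock);
    return 0;
}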
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cf..f33d90226704 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
1601 u32 *cs; 1601 u32 *cs;
1602 int i; 1602 int i;
1603 1603
1604 cs = intel_ring_begin(req, n_flex_regs * 2 + 4); 1604 cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
1605 if (IS_ERR(cs)) 1605 if (IS_ERR(cs))
1606 return PTR_ERR(cs); 1606 return PTR_ERR(cs);
1607 1607
1608 *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); 1608 *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
1609 1609
1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..17c4ae7e4e7c 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
398 } 398 }
399 399
400 /* Program the max register to clamp values > 1.0. */ 400 /* Program the max register to clamp values > 1.0. */
401 i = lut_size - 1;
401 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), 402 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
402 drm_color_lut_extract(lut[i].red, 16)); 403 drm_color_lut_extract(lut[i].red, 16));
403 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), 404 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869..593349be8b9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
469 469
470 if (i915.invert_brightness > 0 || 470 if (i915.invert_brightness > 0 ||
471 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 471 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
472 return panel->backlight.max - val; 472 return panel->backlight.max - val + panel->backlight.min;
473 } 473 }
474 474
475 return val; 475 return val;
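
The intel_panel.c fix keeps the inverted brightness inside the panel's [min, max] range: with min = 10 and max = 100, inverting val = 100 used to yield max - val = 0, below the hardware minimum, whereas max - val + min = 10 stays legal. A short check of the fixed formula over the whole range:

#include <assert.h>

int main(void)
{
    unsigned int min = 10, max = 100;
    unsigned int val;

    for (val = min; val <= max; val++) {
        unsigned int inv = max - val + min;  /* fixed formula */
        assert(inv >= min && inv <= max);    /* plain max - val could fall below min */
    }
    return 0;
}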
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e..99d39b2aefa6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,7 +5,7 @@ config DRM_MSM
5 depends on ARCH_QCOM || (ARM && COMPILE_TEST) 5 depends on ARCH_QCOM || (ARM && COMPILE_TEST)
6 depends on OF && COMMON_CLK 6 depends on OF && COMMON_CLK
7 depends on MMU 7 depends on MMU
8 select QCOM_MDT_LOADER 8 select QCOM_MDT_LOADER if ARCH_QCOM
9 select REGULATOR 9 select REGULATOR
10 select DRM_KMS_HELPER 10 select DRM_KMS_HELPER
11 select DRM_PANEL 11 select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc..f9eae03aa1dc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,7 @@
15#include <linux/cpumask.h> 15#include <linux/cpumask.h>
16#include <linux/qcom_scm.h> 16#include <linux/qcom_scm.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/of_reserved_mem.h> 18#include <linux/of_address.h>
19#include <linux/soc/qcom/mdt_loader.h> 19#include <linux/soc/qcom/mdt_loader.h>
20#include "msm_gem.h" 20#include "msm_gem.h"
21#include "msm_mmu.h" 21#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
26 26
27#define GPU_PAS_ID 13 27#define GPU_PAS_ID 13
28 28
29#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
30
31static int zap_shader_load_mdt(struct device *dev, const char *fwname) 29static int zap_shader_load_mdt(struct device *dev, const char *fwname)
32{ 30{
33 const struct firmware *fw; 31 const struct firmware *fw;
32 struct device_node *np;
33 struct resource r;
34 phys_addr_t mem_phys; 34 phys_addr_t mem_phys;
35 ssize_t mem_size; 35 ssize_t mem_size;
36 void *mem_region = NULL; 36 void *mem_region = NULL;
37 int ret; 37 int ret;
38 38
39 if (!IS_ENABLED(CONFIG_ARCH_QCOM))
40 return -EINVAL;
41
42 np = of_get_child_by_name(dev->of_node, "zap-shader");
43 if (!np)
44 return -ENODEV;
45
46 np = of_parse_phandle(np, "memory-region", 0);
47 if (!np)
48 return -EINVAL;
49
50 ret = of_address_to_resource(np, 0, &r);
51 if (ret)
52 return ret;
53
54 mem_phys = r.start;
55 mem_size = resource_size(&r);
56
39 /* Request the MDT file for the firmware */ 57 /* Request the MDT file for the firmware */
40 ret = request_firmware(&fw, fwname, dev); 58 ret = request_firmware(&fw, fwname, dev);
41 if (ret) { 59 if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
51 } 69 }
52 70
53 /* Allocate memory for the firmware image */ 71 /* Allocate memory for the firmware image */
54 mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL); 72 mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
55 if (!mem_region) { 73 if (!mem_region) {
56 ret = -ENOMEM; 74 ret = -ENOMEM;
57 goto out; 75 goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
69 DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); 87 DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
70 88
71out: 89out:
90 if (mem_region)
91 memunmap(mem_region);
92
72 release_firmware(fw); 93 release_firmware(fw);
73 94
74 return ret; 95 return ret;
75} 96}
76#else
77static int zap_shader_load_mdt(struct device *dev, const char *fwname)
78{
79 return -ENODEV;
80}
81#endif
82 97
83static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 98static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
84 struct msm_file_private *ctx) 99 struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
117 gpu->funcs->flush(gpu); 132 gpu->funcs->flush(gpu);
118} 133}
119 134
120struct a5xx_hwcg { 135static const struct {
121 u32 offset; 136 u32 offset;
122 u32 value; 137 u32 value;
123}; 138} a5xx_hwcg[] = {
124
125static const struct a5xx_hwcg a530_hwcg[] = {
126 {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, 139 {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
127 {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, 140 {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
128 {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, 141 {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
217 {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} 230 {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
218}; 231};
219 232
220static const struct { 233void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
221 int (*test)(struct adreno_gpu *gpu);
222 const struct a5xx_hwcg *regs;
223 unsigned int count;
224} a5xx_hwcg_regs[] = {
225 { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
226};
227
228static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
229 const struct a5xx_hwcg *regs, unsigned int count)
230{ 234{
231 unsigned int i; 235 unsigned int i;
232 236
233 for (i = 0; i < count; i++) 237 for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
234 gpu_write(gpu, regs[i].offset, regs[i].value); 238 gpu_write(gpu, a5xx_hwcg[i].offset,
239 state ? a5xx_hwcg[i].value : 0);
235 240
236 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); 241 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
237 gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); 242 gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
238}
239
240static void a5xx_enable_hwcg(struct msm_gpu *gpu)
241{
242 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
243 unsigned int i;
244
245 for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
246 if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
247 _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
248 a5xx_hwcg_regs[i].count);
249 return;
250 }
251 }
252} 243}
253 244
254static int a5xx_me_init(struct msm_gpu *gpu) 245static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
377 return ret; 368 return ret;
378} 369}
379 370
380/* Set up a child device to "own" the zap shader */
381static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
382{
383 struct device_node *node;
384 int ret;
385
386 if (dev->parent)
387 return 0;
388
389 /* Find the sub-node for the zap shader */
390 node = of_get_child_by_name(parent->of_node, "zap-shader");
391 if (!node) {
392 DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
393 return -ENODEV;
394 }
395
396 dev->parent = parent;
397 dev->of_node = node;
398 dev_set_name(dev, "adreno_zap_shader");
399
400 ret = device_register(dev);
401 if (ret) {
402 DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
403 goto out;
404 }
405
406 ret = of_reserved_mem_device_init(dev);
407 if (ret) {
408 DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
409 device_unregister(dev);
410 }
411
412out:
413 if (ret)
414 dev->parent = NULL;
415
416 return ret;
417}
418
419static int a5xx_zap_shader_init(struct msm_gpu *gpu) 371static int a5xx_zap_shader_init(struct msm_gpu *gpu)
420{ 372{
421 static bool loaded; 373 static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
444 return -ENODEV; 396 return -ENODEV;
445 } 397 }
446 398
447 ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev); 399 ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
448
449 if (!ret)
450 ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
451 adreno_gpu->info->zapfw);
452 400
453 loaded = !ret; 401 loaded = !ret;
454 402
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
545 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); 493 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
546 494
547 /* Enable HWCG */ 495 /* Enable HWCG */
548 a5xx_enable_hwcg(gpu); 496 a5xx_set_hwcg(gpu, true);
549 497
550 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); 498 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
551 499
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
691 639
692 DBG("%s", gpu->name); 640 DBG("%s", gpu->name);
693 641
694 if (a5xx_gpu->zap_dev.parent)
695 device_unregister(&a5xx_gpu->zap_dev);
696
697 if (a5xx_gpu->pm4_bo) { 642 if (a5xx_gpu->pm4_bo) {
698 if (a5xx_gpu->pm4_iova) 643 if (a5xx_gpu->pm4_iova)
699 msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); 644 msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
920 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 865 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
921 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 866 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
922 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, 867 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
923 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, 868 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
924 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, 869 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
925 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 870 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
926 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, 871 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
927 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 872 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
928 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 873 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
929 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 874 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
930 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, 875 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
931 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, 876 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
932 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, 877 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
933 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 878 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
934 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, 879 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
935 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 880 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
936 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, 881 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
937 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 882 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
938 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, 883 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
939 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, 884 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
940 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, 885 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
941 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 886 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
942 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, 887 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
943 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, 888 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
944 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, 889 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
945 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 890 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
946 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, 891 0xB9A0, 0xB9BF, ~0
947 ~0
948}; 892};
949 893
950static void a5xx_dump(struct msm_gpu *gpu) 894static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
1020{ 964{
1021 seq_printf(m, "status: %08x\n", 965 seq_printf(m, "status: %08x\n",
1022 gpu_read(gpu, REG_A5XX_RBBM_STATUS)); 966 gpu_read(gpu, REG_A5XX_RBBM_STATUS));
967
968 /*
969 * Temporarily disable hardware clock gating before going into
970 * adreno_show to avoid issues while reading the registers
971 */
972 a5xx_set_hwcg(gpu, false);
1023 adreno_show(gpu, m); 973 adreno_show(gpu, m);
974 a5xx_set_hwcg(gpu, true);
1024} 975}
1025#endif 976#endif
1026 977
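
The zap-shader rework in a5xx_gpu.c drops the dedicated child device and DMA allocation and instead resolves a firmware carveout straight from the device tree: find the zap-shader child node, follow its memory-region phandle, and memremap() the physical range write-combined. A condensed kernel-style sketch of just that lookup (error codes, of_node_put() calls and the MDT load itself are trimmed):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Resolve the zap-shader carveout described in the device tree and map
 * it write-combined; the caller memunmap()s the region when done. */
void *map_zap_carveout(struct device *dev, phys_addr_t *phys, size_t *size)
{
    struct device_node *np;
    struct resource r;

    np = of_get_child_by_name(dev->of_node, "zap-shader");
    if (!np)
        return NULL;

    np = of_parse_phandle(np, "memory-region", 0);
    if (!np)
        return NULL;

    if (of_address_to_resource(np, 0, &r))
        return NULL;

    *phys = r.start;
    *size = resource_size(&r);
    return memremap(*phys, *size, MEMREMAP_WC);
}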
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6638bc85645d..1137092241d5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -36,8 +36,6 @@ struct a5xx_gpu {
36 uint32_t gpmu_dwords; 36 uint32_t gpmu_dwords;
37 37
38 uint32_t lm_leakage; 38 uint32_t lm_leakage;
39
40 struct device zap_dev;
41}; 39};
42 40
43#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) 41#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
59} 57}
60 58
61bool a5xx_idle(struct msm_gpu *gpu); 59bool a5xx_idle(struct msm_gpu *gpu);
60void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
62 61
63#endif /* __A5XX_GPU_H__ */ 62#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1ab2703674a..7414c6bbd582 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
48 *value = adreno_gpu->base.fast_rate; 48 *value = adreno_gpu->base.fast_rate;
49 return 0; 49 return 0;
50 case MSM_PARAM_TIMESTAMP: 50 case MSM_PARAM_TIMESTAMP:
51 if (adreno_gpu->funcs->get_timestamp) 51 if (adreno_gpu->funcs->get_timestamp) {
52 return adreno_gpu->funcs->get_timestamp(gpu, value); 52 int ret;
53
54 pm_runtime_get_sync(&gpu->pdev->dev);
55 ret = adreno_gpu->funcs->get_timestamp(gpu, value);
56 pm_runtime_put_autosuspend(&gpu->pdev->dev);
57
58 return ret;
59 }
53 return -EINVAL; 60 return -EINVAL;
54 default: 61 default:
55 DBG("%s: invalid param: %u", gpu->name, param); 62 DBG("%s: invalid param: %u", gpu->name, param);
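
The adreno_gpu.c hunk brackets the timestamp read with a runtime-PM reference so the GPU is guaranteed to be powered while its counter is sampled. The same bracket, reduced to its shape (kernel-style sketch; read_hw_counter() is a hypothetical callback):

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>

int read_counter_powered(struct device *dev, u64 *value,
                         int (*read_hw_counter)(u64 *))
{
    int ret;

    pm_runtime_get_sync(dev);        /* wake the device, or keep it awake */
    ret = read_hw_counter(value);
    pm_runtime_put_autosuspend(dev); /* drop the ref, rearm the autosuspend timer */

    return ret;
}

pm_runtime_put_autosuspend() drops the reference without forcing an immediate suspend, so back-to-back queries do not thrash the power state.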
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9e9c5696bc03..c7b612c3d771 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
2137 struct msm_dsi_phy_clk_request *clk_req) 2137 struct msm_dsi_phy_clk_request *clk_req)
2138{ 2138{
2139 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2139 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2140 int ret;
2141
2142 ret = dsi_calc_clk_rate(msm_host);
2143 if (ret) {
2144 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2145 return;
2146 }
2140 2147
2141 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2148 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
2142 clk_req->escclk_rate = msm_host->esc_clk_rate; 2149 clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2280 struct drm_display_mode *mode) 2287 struct drm_display_mode *mode)
2281{ 2288{
2282 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2289 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2283 int ret;
2284 2290
2285 if (msm_host->mode) { 2291 if (msm_host->mode) {
2286 drm_mode_destroy(msm_host->dev, msm_host->mode); 2292 drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2293 return -ENOMEM; 2299 return -ENOMEM;
2294 } 2300 }
2295 2301
2296 ret = dsi_calc_clk_rate(msm_host);
2297 if (ret) {
2298 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2299 return ret;
2300 }
2301
2302 return 0; 2302 return 0;
2303} 2303}
2304 2304
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index cb5415d6c04b..735a87a699fa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
221 struct mdp5_ctl *ctl = mdp5_cstate->ctl; 221 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
222 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; 222 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
223 unsigned long flags; 223 unsigned long flags;
224 enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; 224 enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
225 enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; 225 enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
226 int i, plane_cnt = 0; 226 int i, plane_cnt = 0;
227 bool bg_alpha_enabled = false; 227 bool bg_alpha_enabled = false;
228 u32 mixer_op_mode = 0; 228 u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
753 if (!handle) { 753 if (!handle) {
754 DBG("Cursor off"); 754 DBG("Cursor off");
755 cursor_enable = false; 755 cursor_enable = false;
756 mdp5_enable(mdp5_kms);
756 goto set_cursor; 757 goto set_cursor;
757 } 758 }
758 759
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
776 777
777 get_roi(crtc, &roi_w, &roi_h); 778 get_roi(crtc, &roi_w, &roi_h);
778 779
780 mdp5_enable(mdp5_kms);
781
779 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); 782 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
780 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), 783 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
781 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); 784 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
804 crtc_flush(crtc, flush_mask); 807 crtc_flush(crtc, flush_mask);
805 808
806end: 809end:
810 mdp5_disable(mdp5_kms);
807 if (old_bo) { 811 if (old_bo) {
808 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); 812 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
809 /* enable vblank to complete cursor work: */ 813 /* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
836 840
837 get_roi(crtc, &roi_w, &roi_h); 841 get_roi(crtc, &roi_w, &roi_h);
838 842
843 mdp5_enable(mdp5_kms);
844
839 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 845 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
840 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), 846 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
841 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | 847 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
847 853
848 crtc_flush(crtc, flush_mask); 854 crtc_flush(crtc, flush_mask);
849 855
856 mdp5_disable(mdp5_kms);
857
850 return 0; 858 return 0;
851} 859}
852 860
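
The mdp5_crtc.c changes make every path through the cursor ioctls hold an mdp5_enable() reference while the LM cursor registers are written, including the early cursor-off branch, and drop it once at the shared exit label. The invariant in miniature (hw_enable()/hw_disable() are hypothetical stand-ins for the MDP5 clock-gating calls):

#include <assert.h>
#include <stdbool.h>

static int hw_refs;  /* models the mdp5_enable()/mdp5_disable() pair */

static void hw_enable(void)  { hw_refs++; }
static void hw_disable(void) { assert(hw_refs > 0); hw_refs--; }

static void write_cursor_reg(void) { assert(hw_refs > 0); /* clocks must be on */ }

static void cursor_set(bool off)
{
    hw_enable();            /* taken on every path that reaches the writes */
    if (off)
        goto set_cursor;

    write_cursor_reg();     /* format, stride, base address, ROI ... */

set_cursor:
    write_cursor_reg();     /* arm or disarm the cursor, then flush */
    hw_disable();           /* single release at the shared exit */
}

int main(void)
{
    cursor_set(false);
    cursor_set(true);
    assert(hw_refs == 0);   /* balanced on both paths */
    return 0;
}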
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 97f3294fbfc6..70bef51245af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
299 struct mdp5_interface *intf = mdp5_encoder->intf; 299 struct mdp5_interface *intf = mdp5_encoder->intf;
300 300
301 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) 301 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
302 mdp5_cmd_encoder_disable(encoder); 302 mdp5_cmd_encoder_enable(encoder);
303 else 303 else
304 mdp5_vid_encoder_enable(encoder); 304 mdp5_vid_encoder_enable(encoder);
305} 305}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5d13fa5381ee..1c603aef3c59 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
502 const char *name, bool mandatory) 502 const char *name, bool mandatory)
503{ 503{
504 struct device *dev = &pdev->dev; 504 struct device *dev = &pdev->dev;
505 struct clk *clk = devm_clk_get(dev, name); 505 struct clk *clk = msm_clk_get(pdev, name);
506 if (IS_ERR(clk) && mandatory) { 506 if (IS_ERR(clk) && mandatory) {
507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); 507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
508 return PTR_ERR(clk); 508 return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
887 } 887 }
888 888
889 /* mandatory clocks: */ 889 /* mandatory clocks: */
890 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); 890 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
891 if (ret) 891 if (ret)
892 goto fail; 892 goto fail;
893 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); 893 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
894 if (ret) 894 if (ret)
895 goto fail; 895 goto fail;
896 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); 896 ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
897 if (ret) 897 if (ret)
898 goto fail; 898 goto fail;
899 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); 899 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
900 if (ret) 900 if (ret)
901 goto fail; 901 goto fail;
902 902
903 /* optional clocks: */ 903 /* optional clocks: */
904 get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); 904 get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
905 905
906 /* we need to set a default rate before enabling. Set a safe 906 /* we need to set a default rate before enabling. Set a safe
907 * rate first, then figure out hw revision, and then set a 907 * rate first, then figure out hw revision, and then set a
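
The mdp5_kms.c hunk drops the legacy "_clk" suffix from the clock names and fetches them through msm_clk_get(). Its exact behaviour is not shown in this diff; the sketch below assumes a plausible bare-name-then-legacy-suffix lookup built on devm_clk_get(), so that both new and old device trees keep probing:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

/* Try the bare name first, then the legacy "name_clk" spelling. */
struct clk *clk_get_with_legacy_fallback(struct platform_device *pdev,
                                         const char *name)
{
    struct clk *clk = devm_clk_get(&pdev->dev, name);
    char legacy[32];

    if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
        return clk;

    snprintf(legacy, sizeof(legacy), "%s_clk", name);
    return devm_clk_get(&pdev->dev, legacy);
}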
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index fe3a4de1a433..61f39c86dd09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
890 struct mdp5_hw_pipe *right_hwpipe; 890 struct mdp5_hw_pipe *right_hwpipe;
891 const struct mdp_format *format; 891 const struct mdp_format *format;
892 uint32_t nplanes, config = 0; 892 uint32_t nplanes, config = 0;
893 struct phase_step step = { 0 }; 893 struct phase_step step = { { 0 } };
894 struct pixel_ext pe = { 0 }; 894 struct pixel_ext pe = { { 0 } };
895 uint32_t hdecm = 0, vdecm = 0; 895 uint32_t hdecm = 0, vdecm = 0;
896 uint32_t pix_format; 896 uint32_t pix_format;
897 unsigned int rotation; 897 unsigned int rotation;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f35544c1ec..a0c60e738db8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
383 struct page **pages; 383 struct page **pages;
384 384
385 vma = add_vma(obj, aspace); 385 vma = add_vma(obj, aspace);
386 if (IS_ERR(vma)) 386 if (IS_ERR(vma)) {
387 return PTR_ERR(vma); 387 ret = PTR_ERR(vma);
388 goto unlock;
389 }
388 390
389 pages = get_pages(obj); 391 pages = get_pages(obj);
390 if (IS_ERR(pages)) { 392 if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
405 407
406fail: 408fail:
407 del_vma(vma); 409 del_vma(vma);
408 410unlock:
409 mutex_unlock(&msm_obj->lock); 411 mutex_unlock(&msm_obj->lock);
410 return ret; 412 return ret;
411} 413}
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
928 if (use_vram) { 930 if (use_vram) {
929 struct msm_gem_vma *vma; 931 struct msm_gem_vma *vma;
930 struct page **pages; 932 struct page **pages;
933 struct msm_gem_object *msm_obj = to_msm_bo(obj);
934
935 mutex_lock(&msm_obj->lock);
931 936
932 vma = add_vma(obj, NULL); 937 vma = add_vma(obj, NULL);
938 mutex_unlock(&msm_obj->lock);
933 if (IS_ERR(vma)) { 939 if (IS_ERR(vma)) {
934 ret = PTR_ERR(vma); 940 ret = PTR_ERR(vma);
935 goto fail; 941 goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6bfca7470141..8a75c0bd8a78 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) 34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
35{ 35{
36 struct msm_gem_submit *submit; 36 struct msm_gem_submit *submit;
37 uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + 37 uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
38 (nr_cmds * sizeof(submit->cmd[0])); 38 ((u64)nr_cmds * sizeof(submit->cmd[0]));
39 39
40 if (sz > SIZE_MAX) 40 if (sz > SIZE_MAX)
41 return NULL; 41 return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
451 if (ret) 451 if (ret)
452 goto out; 452 goto out;
453 453
454 if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { 454 if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
455 ret = submit_fence_sync(submit); 455 ret = submit_fence_sync(submit);
456 if (ret) 456 if (ret)
457 goto out; 457 goto out;
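
The submit_create() fix widens nr_bos and nr_cmds to u64 before the multiplications, so a hostile count can no longer wrap the 32-bit product to a tiny value before the sz > SIZE_MAX check. A user-space demonstration of the difference (the count and entry size are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t nr_bos = 0x20000000;  /* hostile, oversized count */
    uint32_t entry  = 32;          /* stand-in for sizeof(submit->bos[0]) */

    uint64_t wrapped = (uint64_t)(nr_bos * entry);  /* 32-bit multiply wraps to 0 */
    uint64_t widened = (uint64_t)nr_bos * entry;    /* widened first: 2^34 */

    printf("wrapped=%llu widened=%llu\n",
           (unsigned long long)wrapped, (unsigned long long)widened);
    return 0;
}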
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c36321bc8714..d34e331554f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -42,7 +42,7 @@ void
42msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 42msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
43 struct msm_gem_vma *vma, struct sg_table *sgt) 43 struct msm_gem_vma *vma, struct sg_table *sgt)
44{ 44{
45 if (!vma->iova) 45 if (!aspace || !vma->iova)
46 return; 46 return;
47 47
48 if (aspace->mmu) { 48 if (aspace->mmu) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index c7c84d34d97e..88582af8bd89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
267 /* Create output path objects for each VBIOS display path. */ 267 /* Create output path objects for each VBIOS display path. */
268 i = -1; 268 i = -1;
269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { 269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
270 if (ver < 0x40) /* No support for chipsets prior to NV50. */
271 break;
270 if (dcbE.type == DCB_OUTPUT_UNUSED) 272 if (dcbE.type == DCB_OUTPUT_UNUSED)
271 continue; 273 continue;
272 if (dcbE.type == DCB_OUTPUT_EOL) 274 if (dcbE.type == DCB_OUTPUT_EOL)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d450332c2fd..2900f1410d95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
500static int vop_enable(struct drm_crtc *crtc) 500static int vop_enable(struct drm_crtc *crtc)
501{ 501{
502 struct vop *vop = to_vop(crtc); 502 struct vop *vop = to_vop(crtc);
503 int ret; 503 int ret, i;
504 504
505 ret = pm_runtime_get_sync(vop->dev); 505 ret = pm_runtime_get_sync(vop->dev);
506 if (ret < 0) { 506 if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
533 } 533 }
534 534
535 memcpy(vop->regs, vop->regsbak, vop->len); 535 memcpy(vop->regs, vop->regsbak, vop->len);
536 /*
537 * We need to make sure that all windows are disabled before we
538 * enable the crtc. Otherwise we might try to scan from a destroyed
539 * buffer later.
540 */
541 for (i = 0; i < vop->data->win_size; i++) {
542 struct vop_win *vop_win = &vop->win[i];
543 const struct vop_win_data *win = vop_win->data;
544
545 spin_lock(&vop->reg_lock);
546 VOP_WIN_SET(vop, win, enable, 0);
547 spin_unlock(&vop->reg_lock);
548 }
549
536 vop_cfg_done(vop); 550 vop_cfg_done(vop);
537 551
538 /* 552 /*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
566static void vop_crtc_disable(struct drm_crtc *crtc) 580static void vop_crtc_disable(struct drm_crtc *crtc)
567{ 581{
568 struct vop *vop = to_vop(crtc); 582 struct vop *vop = to_vop(crtc);
569 int i;
570 583
571 WARN_ON(vop->event); 584 WARN_ON(vop->event);
572 585
573 rockchip_drm_psr_deactivate(&vop->crtc); 586 rockchip_drm_psr_deactivate(&vop->crtc);
574 587
575 /*
576 * We need to make sure that all windows are disabled before we
577 * disable that crtc. Otherwise we might try to scan from a destroyed
578 * buffer later.
579 */
580 for (i = 0; i < vop->data->win_size; i++) {
581 struct vop_win *vop_win = &vop->win[i];
582 const struct vop_win_data *win = vop_win->data;
583
584 spin_lock(&vop->reg_lock);
585 VOP_WIN_SET(vop, win, enable, 0);
586 spin_unlock(&vop->reg_lock);
587 }
588
589 vop_cfg_done(vop);
590
591 drm_crtc_vblank_off(crtc); 588 drm_crtc_vblank_off(crtc);
592 589
593 /* 590 /*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
682 * Src.x1 can be odd when do clip, but yuv plane start point 679 * Src.x1 can be odd when do clip, but yuv plane start point
683 * need align with 2 pixel. 680 * need align with 2 pixel.
684 */ 681 */
685 if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) 682 if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
683 DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
686 return -EINVAL; 684 return -EINVAL;
685 }
687 686
688 return 0; 687 return 0;
689} 688}
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
764 spin_lock(&vop->reg_lock); 763 spin_lock(&vop->reg_lock);
765 764
766 VOP_WIN_SET(vop, win, format, format); 765 VOP_WIN_SET(vop, win, format, format);
767 VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2); 766 VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
768 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); 767 VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
769 if (is_yuv_support(fb->format->format)) { 768 if (is_yuv_support(fb->format->format)) {
770 int hsub = drm_format_horz_chroma_subsampling(fb->format->format); 769 int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
778 offset += (src->y1 >> 16) * fb->pitches[1] / vsub; 777 offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
779 778
780 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; 779 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
781 VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2); 780 VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
782 VOP_WIN_SET(vop, win, uv_mst, dma_addr); 781 VOP_WIN_SET(vop, win, uv_mst, dma_addr);
783 } 782 }
784 783
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 9979fd0c2282..27eefbfcf3d0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
282 282
283 act_height = (src_h + vskiplines - 1) / vskiplines; 283 act_height = (src_h + vskiplines - 1) / vskiplines;
284 284
285 if (act_height == dst_h)
286 return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
287
285 return GET_SCL_FT_BILI_DN(act_height, dst_h); 288 return GET_SCL_FT_BILI_DN(act_height, dst_h);
286} 289}
287 290
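
The scl_get_bili_dn_vskip() fix covers the corner where line skipping alone already reaches the target height. With src_h = 1920, vskiplines = 4 and dst_h = 480, act_height = DIV_ROUND_UP(1920, 4) = 480 equals dst_h, and the old code derived the bilinear factor from a degenerate 480:480 ratio; the new branch appears to derive it from the full src:dst ratio divided by the skip count instead. A tiny check of the branch predicate (GET_SCL_FT_BILI_DN is left out, since its definition lives elsewhere in the header):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    int src_h = 1920, dst_h = 480, vskiplines = 4;
    int act_height = DIV_ROUND_UP(src_h, vskiplines);

    /* act_height == dst_h: skipping alone hits the target height, so the
     * bilinear factor must come from the full src:dst ratio scaled down
     * by the skip count, not from the degenerate 480:480 ratio. */
    printf("act_height=%d, special case: %s\n", act_height,
           act_height == dst_h ? "yes" : "no");
    return 0;
}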
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 2c4817fb0890..8fe5b184b4e8 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -7,7 +7,6 @@ config DRM_STM
7 select DRM_PANEL 7 select DRM_PANEL
8 select VIDEOMODE_HELPERS 8 select VIDEOMODE_HELPERS
9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA 9 select FB_PROVIDE_GET_FB_UNMAPPED_AREA
10 default y
11 10
12 help 11 help
13 Enable support for the on-chip display controller on 12 Enable support for the on-chip display controller on
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1006b230b236..65fa29591d21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
983 983
984config I2C_VERSATILE 984config I2C_VERSATILE
985 tristate "ARM Versatile/Realview I2C bus support" 985 tristate "ARM Versatile/Realview I2C bus support"
986 depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST 986 depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
987 select I2C_ALGOBIT 987 select I2C_ALGOBIT
988 help 988 help
989 Say yes if you want to support the I2C serial bus on ARMs Versatile 989 Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2ea6d0d25a01..143a8fd582b4 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -298,6 +298,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
298 } 298 }
299 299
300 acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); 300 acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
301 /* Some broken DSTDs use 1MiHz instead of 1MHz */
302 if (acpi_speed == 1048576)
303 acpi_speed = 1000000;
301 /* 304 /*
302 * Find bus speed from the "clock-frequency" device property, ACPI 305 * Find bus speed from the "clock-frequency" device property, ACPI
303 * or by using fast mode if neither is set. 306 * or by using fast mode if neither is set.
@@ -319,7 +322,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
319 if (dev->clk_freq != 100000 && dev->clk_freq != 400000 322 if (dev->clk_freq != 100000 && dev->clk_freq != 400000
320 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { 323 && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
321 dev_err(&pdev->dev, 324 dev_err(&pdev->dev,
322 "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); 325 "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
326 dev->clk_freq);
323 ret = -EINVAL; 327 ret = -EINVAL;
324 goto exit_reset; 328 goto exit_reset;
325 } 329 }
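
The i2c-designware-platdrv.c hunk normalizes firmware tables that report the bus speed as a binary mega: 1 MiHz = 2^20 Hz = 1048576 Hz, rather than the decimal 1000000 Hz the driver's whitelist expects. The rewrite is a plain equality check:

#include <assert.h>

int main(void)
{
    unsigned int acpi_speed = 1 << 20;  /* 1048576 Hz: value from a broken table */

    if (acpi_speed == 1048576)          /* 1MiHz reported instead of 1MHz */
        acpi_speed = 1000000;           /* normalize before validation */

    assert(acpi_speed == 1000000);
    return 0;
}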
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4842ec3a5451..a9126b3cda61 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
230 dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); 230 dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
231} 231}
232 232
233const struct acpi_device_id *
234i2c_acpi_match_device(const struct acpi_device_id *matches,
235 struct i2c_client *client)
236{
237 if (!(client && matches))
238 return NULL;
239
240 return acpi_match_device(matches, &client->dev);
241}
242
233static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, 243static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
234 void *data, void **return_value) 244 void *data, void **return_value)
235{ 245{
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
289} 299}
290EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); 300EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
291 301
292static int i2c_acpi_match_adapter(struct device *dev, void *data) 302static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
293{ 303{
294 struct i2c_adapter *adapter = i2c_verify_adapter(dev); 304 struct i2c_adapter *adapter = i2c_verify_adapter(dev);
295 305
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
299 return ACPI_HANDLE(dev) == (acpi_handle)data; 309 return ACPI_HANDLE(dev) == (acpi_handle)data;
300} 310}
301 311
302static int i2c_acpi_match_device(struct device *dev, void *data) 312static int i2c_acpi_find_match_device(struct device *dev, void *data)
303{ 313{
304 return ACPI_COMPANION(dev) == data; 314 return ACPI_COMPANION(dev) == data;
305} 315}
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
309 struct device *dev; 319 struct device *dev;
310 320
311 dev = bus_find_device(&i2c_bus_type, NULL, handle, 321 dev = bus_find_device(&i2c_bus_type, NULL, handle,
312 i2c_acpi_match_adapter); 322 i2c_acpi_find_match_adapter);
313 return dev ? i2c_verify_adapter(dev) : NULL; 323 return dev ? i2c_verify_adapter(dev) : NULL;
314} 324}
315 325
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
317{ 327{
318 struct device *dev; 328 struct device *dev;
319 329
320 dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device); 330 dev = bus_find_device(&i2c_bus_type, NULL, adev,
331 i2c_acpi_find_match_device);
321 return dev ? i2c_verify_client(dev) : NULL; 332 return dev ? i2c_verify_client(dev) : NULL;
322} 333}
323 334
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index c89dac7fd2e7..12822a4b8f8f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -357,6 +357,7 @@ static int i2c_device_probe(struct device *dev)
357 * Tree match table entry is supplied for the probing device. 357 * Tree match table entry is supplied for the probing device.
358 */ 358 */
359 if (!driver->id_table && 359 if (!driver->id_table &&
360 !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
360 !i2c_of_match_device(dev->driver->of_match_table, client)) 361 !i2c_of_match_device(dev->driver->of_match_table, client))
361 return -ENODEV; 362 return -ENODEV;
362 363
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 3b63f5e5b89c..3d3d9bf02101 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
31int i2c_check_7bit_addr_validity_strict(unsigned short addr); 31int i2c_check_7bit_addr_validity_strict(unsigned short addr);
32 32
33#ifdef CONFIG_ACPI 33#ifdef CONFIG_ACPI
34const struct acpi_device_id *
35i2c_acpi_match_device(const struct acpi_device_id *matches,
36 struct i2c_client *client);
34void i2c_acpi_register_devices(struct i2c_adapter *adap); 37void i2c_acpi_register_devices(struct i2c_adapter *adap);
35#else /* CONFIG_ACPI */ 38#else /* CONFIG_ACPI */
36static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } 39static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
40static inline const struct acpi_device_id *
41i2c_acpi_match_device(const struct acpi_device_id *matches,
42 struct i2c_client *client)
43{
44 return NULL;
45}
37#endif /* CONFIG_ACPI */ 46#endif /* CONFIG_ACPI */
38extern struct notifier_block i2c_acpi_notifier; 47extern struct notifier_block i2c_acpi_notifier;
39 48
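
Taken together, the i2c-core-base.c and i2c-core.h hunks let a driver that carries only an ACPI match table bind: the probe gate now passes when any of the id table, the ACPI table, or the OF table matches. The resulting predicate in isolation (match sources reduced to booleans for clarity):

#include <stdbool.h>
#include <stdio.h>

/* Probing may proceed if at least one match source applies. */
static bool may_probe(bool has_id_table, bool acpi_match, bool of_match)
{
    return has_id_table || acpi_match || of_match;
}

int main(void)
{
    /* ACPI-only driver: rejected before the fix, accepted after. */
    printf("%d\n", may_probe(false, true, false));  /* prints 1 */
    return 0;
}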
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 2c64d0e0740f..17121329bb79 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
83 different sets of pins at run-time. 83 different sets of pins at run-time.
84 84
85 This driver can also be built as a module. If so, the module will be 85 This driver can also be built as a module. If so, the module will be
86 called pinctrl-i2cmux. 86 called i2c-mux-pinctrl.
87 87
88config I2C_MUX_REG 88config I2C_MUX_REG
89 tristate "Register-based I2C multiplexer" 89 tristate "Register-based I2C multiplexer"
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 6b5d3be283c4..807299dd45eb 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
193 struct regmap *regmap; 193 struct regmap *regmap;
194 int irq; 194 int irq;
195 struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; 195 struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
196 atomic_t active_intr;
197 struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; 196 struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
198 struct mutex mutex; 197 struct mutex mutex;
199 u8 fifo_mode, watermark; 198 u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
493 goto out_fix_power_state; 492 goto out_fix_power_state;
494 } 493 }
495 494
496 if (state)
497 atomic_inc(&data->active_intr);
498 else
499 atomic_dec(&data->active_intr);
500
501 return 0; 495 return 0;
502 496
503out_fix_power_state: 497out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
1710 struct bmc150_accel_data *data = iio_priv(indio_dev); 1704 struct bmc150_accel_data *data = iio_priv(indio_dev);
1711 1705
1712 mutex_lock(&data->mutex); 1706 mutex_lock(&data->mutex);
1713 if (atomic_read(&data->active_intr)) 1707 bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1714 bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
1715 bmc150_accel_fifo_set_mode(data); 1708 bmc150_accel_fifo_set_mode(data);
1716 mutex_unlock(&data->mutex); 1709 mutex_unlock(&data->mutex);
1717 1710
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 07d1489cd457..e44f62bf9caa 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
166 .mask_ihl = 0x02, 166 .mask_ihl = 0x02,
167 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 167 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
168 }, 168 },
169 .sim = {
170 .addr = 0x23,
171 .value = BIT(0),
172 },
169 .multi_read_bit = true, 173 .multi_read_bit = true,
170 .bootime = 2, 174 .bootime = 2,
171 }, 175 },
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
234 .mask_od = 0x40, 238 .mask_od = 0x40,
235 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 239 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
236 }, 240 },
241 .sim = {
242 .addr = 0x23,
243 .value = BIT(0),
244 },
237 .multi_read_bit = true, 245 .multi_read_bit = true,
238 .bootime = 2, 246 .bootime = 2,
239 }, 247 },
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
316 .en_mask = 0x08, 324 .en_mask = 0x08,
317 }, 325 },
318 }, 326 },
327 .sim = {
328 .addr = 0x24,
329 .value = BIT(0),
330 },
319 .multi_read_bit = false, 331 .multi_read_bit = false,
320 .bootime = 2, 332 .bootime = 2,
321 }, 333 },
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
379 .mask_int1 = 0x04, 391 .mask_int1 = 0x04,
380 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 392 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
381 }, 393 },
394 .sim = {
395 .addr = 0x21,
396 .value = BIT(1),
397 },
382 .multi_read_bit = true, 398 .multi_read_bit = true,
383 .bootime = 2, /* guess */ 399 .bootime = 2, /* guess */
384 }, 400 },
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
437 .mask_od = 0x40, 453 .mask_od = 0x40,
438 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 454 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
439 }, 455 },
456 .sim = {
457 .addr = 0x21,
458 .value = BIT(7),
459 },
440 .multi_read_bit = false, 460 .multi_read_bit = false,
441 .bootime = 2, /* guess */ 461 .bootime = 2, /* guess */
442 }, 462 },
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
499 .addr_ihl = 0x22, 519 .addr_ihl = 0x22,
500 .mask_ihl = 0x80, 520 .mask_ihl = 0x80,
501 }, 521 },
522 .sim = {
523 .addr = 0x23,
524 .value = BIT(0),
525 },
502 .multi_read_bit = true, 526 .multi_read_bit = true,
503 .bootime = 2, 527 .bootime = 2,
504 }, 528 },
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
547 .mask_int1 = 0x04, 571 .mask_int1 = 0x04,
548 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 572 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
549 }, 573 },
574 .sim = {
575 .addr = 0x21,
576 .value = BIT(1),
577 },
550 .multi_read_bit = false, 578 .multi_read_bit = false,
551 .bootime = 2, 579 .bootime = 2,
552 }, 580 },
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
614 .mask_ihl = 0x02, 642 .mask_ihl = 0x02,
615 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 643 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
616 }, 644 },
645 .sim = {
646 .addr = 0x23,
647 .value = BIT(0),
648 },
617 .multi_read_bit = true, 649 .multi_read_bit = true,
618 .bootime = 2, 650 .bootime = 2,
619 }, 651 },
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index e0ea411a0b2d..c02b23d675cb 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/iio/iio.h> 23#include <linux/iio/iio.h>
24#include <linux/iio/driver.h> 24#include <linux/iio/driver.h>
25#include <linux/iopoll.h>
25 26
26#define ASPEED_RESOLUTION_BITS 10 27#define ASPEED_RESOLUTION_BITS 10
27#define ASPEED_CLOCKS_PER_SAMPLE 12 28#define ASPEED_CLOCKS_PER_SAMPLE 12
@@ -38,11 +39,17 @@
38 39
39#define ASPEED_ENGINE_ENABLE BIT(0) 40#define ASPEED_ENGINE_ENABLE BIT(0)
40 41
42#define ASPEED_ADC_CTRL_INIT_RDY BIT(8)
43
44#define ASPEED_ADC_INIT_POLLING_TIME 500
45#define ASPEED_ADC_INIT_TIMEOUT 500000
46
41struct aspeed_adc_model_data { 47struct aspeed_adc_model_data {
42 const char *model_name; 48 const char *model_name;
43 unsigned int min_sampling_rate; // Hz 49 unsigned int min_sampling_rate; // Hz
44 unsigned int max_sampling_rate; // Hz 50 unsigned int max_sampling_rate; // Hz
45 unsigned int vref_voltage; // mV 51 unsigned int vref_voltage; // mV
52 bool wait_init_sequence;
46}; 53};
47 54
48struct aspeed_adc_data { 55struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
211 goto scaler_error; 218 goto scaler_error;
212 } 219 }
213 220
221 model_data = of_device_get_match_data(&pdev->dev);
222
223 if (model_data->wait_init_sequence) {
224 /* Enable engine in normal mode. */
225 writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
226 data->base + ASPEED_REG_ENGINE_CONTROL);
227
228 /* Wait for initial sequence complete. */
229 ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
230 adc_engine_control_reg_val,
231 adc_engine_control_reg_val &
232 ASPEED_ADC_CTRL_INIT_RDY,
233 ASPEED_ADC_INIT_POLLING_TIME,
234 ASPEED_ADC_INIT_TIMEOUT);
235 if (ret)
236 goto scaler_error;
237 }
238
214 /* Start all channels in normal mode. */ 239 /* Start all channels in normal mode. */
215 ret = clk_prepare_enable(data->clk_scaler->clk); 240 ret = clk_prepare_enable(data->clk_scaler->clk);
216 if (ret) 241 if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
274 .vref_voltage = 1800, // mV 299 .vref_voltage = 1800, // mV
275 .min_sampling_rate = 1, 300 .min_sampling_rate = 1,
276 .max_sampling_rate = 1000000, 301 .max_sampling_rate = 1000000,
302 .wait_init_sequence = true,
277}; 303};
278 304
279static const struct of_device_id aspeed_adc_matches[] = { 305static const struct of_device_id aspeed_adc_matches[] = {
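
The aspeed_adc.c addition waits for the controller's init-done bit with readl_poll_timeout(), which resamples a register until a condition holds or a timeout lapses. Its behaviour boils down to the loop below (a user-space model; reg_read() is a hypothetical accessor standing in for readl(), and the sleep is modeled as elapsed time):

#include <errno.h>
#include <stdint.h>

#define INIT_RDY (1u << 8)  /* ASPEED_ADC_CTRL_INIT_RDY */

/* Model of readl_poll_timeout(addr, val, cond, sleep_us, timeout_us):
 * keep sampling until the condition holds or the budget is exhausted. */
static int poll_init_ready(uint32_t (*reg_read)(void),
                           unsigned long sleep_us, unsigned long timeout_us)
{
    unsigned long waited = 0;

    for (;;) {
        if (reg_read() & INIT_RDY)
            return 0;
        if (waited >= timeout_us)
            return -ETIMEDOUT;
        waited += sleep_us;  /* stands in for usleep_range() */
    }
}

static uint32_t fake_reads;
static uint32_t fake_reg(void) { return ++fake_reads >= 3 ? INIT_RDY : 0; }

int main(void) { return poll_init_ready(fake_reg, 500, 500000); }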
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 64799ad7ebad..462a99c13e7a 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,6 +28,8 @@
28#include <linux/iio/driver.h> 28#include <linux/iio/driver.h>
29 29
30#define AXP288_ADC_EN_MASK 0xF1 30#define AXP288_ADC_EN_MASK 0xF1
31#define AXP288_ADC_TS_PIN_GPADC 0xF2
32#define AXP288_ADC_TS_PIN_ON 0xF3
31 33
32enum axp288_adc_id { 34enum axp288_adc_id {
33 AXP288_ADC_TS, 35 AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
121 return IIO_VAL_INT; 123 return IIO_VAL_INT;
122} 124}
123 125
126static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
127 unsigned long address)
128{
129 int ret;
130
131 /* channels other than GPADC do not need to switch TS pin */
132 if (address != AXP288_GP_ADC_H)
133 return 0;
134
135 ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
136 if (ret)
137 return ret;
138
139 /* When switching to the GPADC pin give things some time to settle */
140 if (mode == AXP288_ADC_TS_PIN_GPADC)
141 usleep_range(6000, 10000);
142
143 return 0;
144}
145
124static int axp288_adc_read_raw(struct iio_dev *indio_dev, 146static int axp288_adc_read_raw(struct iio_dev *indio_dev,
125 struct iio_chan_spec const *chan, 147 struct iio_chan_spec const *chan,
126 int *val, int *val2, long mask) 148 int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
131 mutex_lock(&indio_dev->mlock); 153 mutex_lock(&indio_dev->mlock);
132 switch (mask) { 154 switch (mask) {
133 case IIO_CHAN_INFO_RAW: 155 case IIO_CHAN_INFO_RAW:
156 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
157 chan->address)) {
158 dev_err(&indio_dev->dev, "GPADC mode\n");
159 ret = -EINVAL;
160 break;
161 }
134 ret = axp288_adc_read_channel(val, chan->address, info->regmap); 162 ret = axp288_adc_read_channel(val, chan->address, info->regmap);
163 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
164 chan->address))
165 dev_err(&indio_dev->dev, "TS pin restore\n");
135 break; 166 break;
136 default: 167 default:
137 ret = -EINVAL; 168 ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
141 return ret; 172 return ret;
142} 173}
143 174
175static int axp288_adc_set_state(struct regmap *regmap)
176{
177 /* ADC should be always enabled for internal FG to function */
178 if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
179 return -EIO;
180
181 return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
182}
183
144static const struct iio_info axp288_adc_iio_info = { 184static const struct iio_info axp288_adc_iio_info = {
145 .read_raw = &axp288_adc_read_raw, 185 .read_raw = &axp288_adc_read_raw,
146 .driver_module = THIS_MODULE, 186 .driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
169 * Set ADC to enabled state at all time, including system suspend. 209 * Set ADC to enabled state at all time, including system suspend.
170 * otherwise internal fuel gauge functionality may be affected. 210 * otherwise internal fuel gauge functionality may be affected.
171 */ 211 */
172 ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); 212 ret = axp288_adc_set_state(axp20x->regmap);
173 if (ret) { 213 if (ret) {
174 dev_err(&pdev->dev, "unable to enable ADC device\n"); 214 dev_err(&pdev->dev, "unable to enable ADC device\n");
175 return ret; 215 return ret;
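
The axp288_adc.c change routes the TS pin into GPADC mode around each GPADC sample and hands it back to the fuel gauge afterwards; the settle delay applies only when switching toward GPADC. A reduced model of that bracket (register writes replaced by a mode variable, the sample by a constant):

#include <stdio.h>

enum ts_mode { TS_PIN_ON, TS_PIN_GPADC };

static enum ts_mode ts_mode = TS_PIN_ON;

static void set_ts(enum ts_mode mode)
{
    /* regmap_write(AXP288_ADC_TS_PIN_CTRL, mode) in the real driver */
    ts_mode = mode;
    if (mode == TS_PIN_GPADC) {
        /* usleep_range(6000, 10000): give the pin time to settle */
    }
}

static int read_gpadc(void)
{
    int val;

    set_ts(TS_PIN_GPADC);  /* borrow the pin for the GPADC channel */
    val = 42;              /* stand-in for axp288_adc_read_channel() */
    set_ts(TS_PIN_ON);     /* hand it back to the fuel gauge */
    return val;
}

int main(void) { printf("%d\n", read_gpadc()); return 0; }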
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 81d4c39e414a..137f577d9432 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
256 256
257err: 257err:
258 pm_runtime_put_autosuspend(indio_dev->dev.parent); 258 pm_runtime_put_autosuspend(indio_dev->dev.parent);
259 disable_irq(irq);
259 mutex_unlock(&info->mutex); 260 mutex_unlock(&info->mutex);
260 261
261 return ret; 262 return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
365 complete(&info->completion); 366 complete(&info->completion);
366 367
367out: 368out:
368 disable_irq_nosync(info->temp_data_irq);
369 return IRQ_HANDLED; 369 return IRQ_HANDLED;
370} 370}
371 371
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
380 complete(&info->completion); 380 complete(&info->completion);
381 381
382out: 382out:
383 disable_irq_nosync(info->fifo_data_irq);
384 return IRQ_HANDLED; 383 return IRQ_HANDLED;
385} 384}
386 385
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 01fc76f7d660..c168e0db329a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
77#define VF610_ADC_ADSTS_MASK 0x300 77#define VF610_ADC_ADSTS_MASK 0x300
78#define VF610_ADC_ADLPC_EN 0x80 78#define VF610_ADC_ADLPC_EN 0x80
79#define VF610_ADC_ADHSC_EN 0x400 79#define VF610_ADC_ADHSC_EN 0x400
80#define VF610_ADC_REFSEL_VALT 0x100 80#define VF610_ADC_REFSEL_VALT 0x800
81#define VF610_ADC_REFSEL_VBG 0x1000 81#define VF610_ADC_REFSEL_VBG 0x1000
82#define VF610_ADC_ADTRG_HARD 0x2000 82#define VF610_ADC_ADTRG_HARD 0x2000
83#define VF610_ADC_AVGS_8 0x4000 83#define VF610_ADC_AVGS_8 0x4000
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 79c8c7cd70d5..6e6a1ecc99dd 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -550,6 +550,31 @@ out:
550} 550}
551EXPORT_SYMBOL(st_sensors_read_info_raw); 551EXPORT_SYMBOL(st_sensors_read_info_raw);
552 552
553static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
554 const struct st_sensor_settings *sensor_settings)
555{
556 struct st_sensor_data *sdata = iio_priv(indio_dev);
557 struct device_node *np = sdata->dev->of_node;
558 struct st_sensors_platform_data *pdata;
559
560 pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
561 if (((np && of_property_read_bool(np, "spi-3wire")) ||
562 (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
563 int err;
564
565 err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
566 sensor_settings->sim.addr,
567 sensor_settings->sim.value);
568 if (err < 0) {
569 dev_err(&indio_dev->dev,
570 "failed to init interface mode\n");
571 return err;
572 }
573 }
574
575 return 0;
576}
577
553int st_sensors_check_device_support(struct iio_dev *indio_dev, 578int st_sensors_check_device_support(struct iio_dev *indio_dev,
554 int num_sensors_list, 579 int num_sensors_list,
555 const struct st_sensor_settings *sensor_settings) 580 const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
574 return -ENODEV; 599 return -ENODEV;
575 } 600 }
576 601
602 err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
603 if (err < 0)
604 return err;
605
577 if (sensor_settings[i].wai_addr) { 606 if (sensor_settings[i].wai_addr) {
578 err = sdata->tf->read_byte(&sdata->tb, sdata->dev, 607 err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
579 sensor_settings[i].wai_addr, &wai); 608 sensor_settings[i].wai_addr, &wai);
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index e7d4ea75e007..7599693f7fe9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
626 struct tsl2563_chip *chip = iio_priv(dev_info); 626 struct tsl2563_chip *chip = iio_priv(dev_info);
627 627
628 iio_push_event(dev_info, 628 iio_push_event(dev_info,
629 IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 629 IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
630 0, 630 0,
631 IIO_EV_TYPE_THRESH, 631 IIO_EV_TYPE_THRESH,
632 IIO_EV_DIR_EITHER), 632 IIO_EV_DIR_EITHER),
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index aa61ec15c139..f1bce05ffa13 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
456 .mask_od = 0x40, 456 .mask_od = 0x40,
457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, 457 .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
458 }, 458 },
459 .multi_read_bit = true, 459 .multi_read_bit = false,
460 .bootime = 2, 460 .bootime = 2,
461 }, 461 },
462}; 462};
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 01236cef7bfb..437522ca97b4 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,6 +61,7 @@ struct addr_req {
61 void (*callback)(int status, struct sockaddr *src_addr, 61 void (*callback)(int status, struct sockaddr *src_addr,
62 struct rdma_dev_addr *addr, void *context); 62 struct rdma_dev_addr *addr, void *context);
63 unsigned long timeout; 63 unsigned long timeout;
64 struct delayed_work work;
64 int status; 65 int status;
65 u32 seq; 66 u32 seq;
66}; 67};
@@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
295} 296}
296EXPORT_SYMBOL(rdma_translate_ip); 297EXPORT_SYMBOL(rdma_translate_ip);
297 298
298static void set_timeout(unsigned long time) 299static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
299{ 300{
300 unsigned long delay; 301 unsigned long delay;
301 302
@@ -303,7 +304,7 @@ static void set_timeout(unsigned long time)
303 if ((long)delay < 0) 304 if ((long)delay < 0)
304 delay = 0; 305 delay = 0;
305 306
306 mod_delayed_work(addr_wq, &work, delay); 307 mod_delayed_work(addr_wq, delayed_work, delay);
307} 308}
308 309
309static void queue_req(struct addr_req *req) 310static void queue_req(struct addr_req *req)
@@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req)
318 319
319 list_add(&req->list, &temp_req->list); 320 list_add(&req->list, &temp_req->list);
320 321
321 if (req_list.next == &req->list) 322 set_timeout(&req->work, req->timeout);
322 set_timeout(req->timeout);
323 mutex_unlock(&lock); 323 mutex_unlock(&lock);
324} 324}
325 325
@@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in,
574 return ret; 574 return ret;
575} 575}
576 576
577static void process_one_req(struct work_struct *_work)
578{
579 struct addr_req *req;
580 struct sockaddr *src_in, *dst_in;
581
582 mutex_lock(&lock);
583 req = container_of(_work, struct addr_req, work.work);
584
585 if (req->status == -ENODATA) {
586 src_in = (struct sockaddr *)&req->src_addr;
587 dst_in = (struct sockaddr *)&req->dst_addr;
588 req->status = addr_resolve(src_in, dst_in, req->addr,
589 true, req->seq);
590 if (req->status && time_after_eq(jiffies, req->timeout)) {
591 req->status = -ETIMEDOUT;
592 } else if (req->status == -ENODATA) {
593			/* requeue the work to retry later */
594 set_timeout(&req->work, req->timeout);
595 mutex_unlock(&lock);
596 return;
597 }
598 }
599 list_del(&req->list);
600 mutex_unlock(&lock);
601
602 req->callback(req->status, (struct sockaddr *)&req->src_addr,
603 req->addr, req->context);
604 put_client(req->client);
605 kfree(req);
606}
607
577static void process_req(struct work_struct *work) 608static void process_req(struct work_struct *work)
578{ 609{
579 struct addr_req *req, *temp_req; 610 struct addr_req *req, *temp_req;
@@ -591,20 +622,23 @@ static void process_req(struct work_struct *work)
591 true, req->seq); 622 true, req->seq);
592 if (req->status && time_after_eq(jiffies, req->timeout)) 623 if (req->status && time_after_eq(jiffies, req->timeout))
593 req->status = -ETIMEDOUT; 624 req->status = -ETIMEDOUT;
594 else if (req->status == -ENODATA) 625 else if (req->status == -ENODATA) {
626 set_timeout(&req->work, req->timeout);
595 continue; 627 continue;
628 }
596 } 629 }
597 list_move_tail(&req->list, &done_list); 630 list_move_tail(&req->list, &done_list);
598 } 631 }
599 632
600 if (!list_empty(&req_list)) {
601 req = list_entry(req_list.next, struct addr_req, list);
602 set_timeout(req->timeout);
603 }
604 mutex_unlock(&lock); 633 mutex_unlock(&lock);
605 634
606 list_for_each_entry_safe(req, temp_req, &done_list, list) { 635 list_for_each_entry_safe(req, temp_req, &done_list, list) {
607 list_del(&req->list); 636 list_del(&req->list);
637		/* It is safe to cancel other work items from this work item
638		 * because only one work item at a time can run on this
639		 * single-threaded workqueue.
640		 */
641 cancel_delayed_work(&req->work);
608 req->callback(req->status, (struct sockaddr *) &req->src_addr, 642 req->callback(req->status, (struct sockaddr *) &req->src_addr,
609 req->addr, req->context); 643 req->addr, req->context);
610 put_client(req->client); 644 put_client(req->client);
@@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
647 req->context = context; 681 req->context = context;
648 req->client = client; 682 req->client = client;
649 atomic_inc(&client->refcount); 683 atomic_inc(&client->refcount);
684 INIT_DELAYED_WORK(&req->work, process_one_req);
650 req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); 685 req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
651 686
652 req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); 687 req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
701 req->status = -ECANCELED; 736 req->status = -ECANCELED;
702 req->timeout = jiffies; 737 req->timeout = jiffies;
703 list_move(&req->list, &req_list); 738 list_move(&req->list, &req_list);
704 set_timeout(req->timeout); 739 set_timeout(&req->work, req->timeout);
705 break; 740 break;
706 } 741 }
707 } 742 }
@@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
807 if (event == NETEVENT_NEIGH_UPDATE) { 842 if (event == NETEVENT_NEIGH_UPDATE) {
808 struct neighbour *neigh = ctx; 843 struct neighbour *neigh = ctx;
809 844
810 if (neigh->nud_state & NUD_VALID) { 845 if (neigh->nud_state & NUD_VALID)
811 set_timeout(jiffies); 846 set_timeout(&work, jiffies);
812 }
813 } 847 }
814 return 0; 848 return 0;
815} 849}
@@ -820,7 +854,7 @@ static struct notifier_block nb = {
820 854
821int addr_init(void) 855int addr_init(void)
822{ 856{
823 addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0); 857 addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
824 if (!addr_wq) 858 if (!addr_wq)
825 return -ENOMEM; 859 return -ENOMEM;
826 860
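
The core of the rework above: each address request carries its own delayed_work, all queued on an ordered workqueue, so at most one request is processed at a time and a running item may cancel others without racing their execution. A hedged sketch of the pattern, with illustrative names:

static struct workqueue_struct *example_wq;

struct example_req {
	struct delayed_work work;
	/* ... per-request state ... */
};

static void example_process_one(struct work_struct *_work)
{
	struct example_req *req =
		container_of(_work, struct example_req, work.work);

	/* Ordered workqueue: only one item runs at a time, so this
	 * handler may call cancel_delayed_work() on other requests
	 * safely.  A retry just re-arms our own work item:
	 *	mod_delayed_work(example_wq, &req->work, delay);
	 */
	(void)req;
}

static void example_submit(struct example_req *req, unsigned long delay)
{
	INIT_DELAYED_WORK(&req->work, example_process_one);
	queue_delayed_work(example_wq, &req->work, delay);
}

static int example_init(void)
{
	/* One item at a time; WQ_MEM_RECLAIM guarantees forward progress. */
	example_wq = alloc_ordered_workqueue("example_addr", WQ_MEM_RECLAIM);
	return example_wq ? 0 : -ENOMEM;
}
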
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 2c98533a0203..c551d2b275fd 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1153 int out_len) 1153 int out_len)
1154{ 1154{
1155 struct ib_uverbs_resize_cq cmd; 1155 struct ib_uverbs_resize_cq cmd;
1156 struct ib_uverbs_resize_cq_resp resp; 1156 struct ib_uverbs_resize_cq_resp resp = {};
1157 struct ib_udata udata; 1157 struct ib_udata udata;
1158 struct ib_cq *cq; 1158 struct ib_cq *cq;
1159 int ret = -EINVAL; 1159 int ret = -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3d2609608f58..c023e2c81b8f 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
250 if (atomic_dec_and_test(&file->device->refcount)) 250 if (atomic_dec_and_test(&file->device->refcount))
251 ib_uverbs_comp_dev(file->device); 251 ib_uverbs_comp_dev(file->device);
252 252
253 kobject_put(&file->device->kobj);
253 kfree(file); 254 kfree(file);
254} 255}
255 256
@@ -917,7 +918,6 @@ err:
917static int ib_uverbs_close(struct inode *inode, struct file *filp) 918static int ib_uverbs_close(struct inode *inode, struct file *filp)
918{ 919{
919 struct ib_uverbs_file *file = filp->private_data; 920 struct ib_uverbs_file *file = filp->private_data;
920 struct ib_uverbs_device *dev = file->device;
921 921
922 mutex_lock(&file->cleanup_mutex); 922 mutex_lock(&file->cleanup_mutex);
923 if (file->ucontext) { 923 if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
939 ib_uverbs_release_async_event_file); 939 ib_uverbs_release_async_event_file);
940 940
941 kref_put(&file->ref, ib_uverbs_release_file); 941 kref_put(&file->ref, ib_uverbs_release_file);
942 kobject_put(&dev->kobj);
943 942
944 return 0; 943 return 0;
945} 944}
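
The uverbs_main.c change moves the device kobject_put() from close() into the kref release function, so the device kobject is dropped only when the last reference to the file goes away (async event files may outlive the fd). The general idiom, sketched with illustrative names:

struct example_file {
	struct kref ref;
	struct kobject *dev_kobj;	/* pinned for the file's lifetime */
};

static void example_release_file(struct kref *ref)
{
	struct example_file *file =
		container_of(ref, struct example_file, ref);

	/* Drop the device reference here, not in close(): other users
	 * may still hold a kref after the fd is gone, and the device
	 * must outlive all of them.
	 */
	kobject_put(file->dev_kobj);
	kfree(file);
}

/* users: kref_get(&file->ref); ... kref_put(&file->ref, example_release_file); */
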
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index fb98ed67d5bc..7f8fe443df46 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -895,7 +895,6 @@ static const struct {
895} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 895} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
896 [IB_QPS_RESET] = { 896 [IB_QPS_RESET] = {
897 [IB_QPS_RESET] = { .valid = 1 }, 897 [IB_QPS_RESET] = { .valid = 1 },
898 [IB_QPS_ERR] = { .valid = 1 },
899 [IB_QPS_INIT] = { 898 [IB_QPS_INIT] = {
900 .valid = 1, 899 .valid = 1,
901 .req_param = { 900 .req_param = {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 23fad6d96944..2540b65e242c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
733 continue; 733 continue;
734 734
735 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); 735 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
736 if (IS_ERR(free_mr->mr_free_qp[i])) { 736 if (!free_mr->mr_free_qp[i]) {
737 dev_err(dev, "Create loop qp failed!\n"); 737 dev_err(dev, "Create loop qp failed!\n");
738 goto create_lp_qp_failed; 738 goto create_lp_qp_failed;
739 } 739 }
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index ae0746754008..3d701c7a4c91 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
939 939
940 if (qp->ibqp.qp_type != IB_QPT_RC) { 940 if (qp->ibqp.qp_type != IB_QPT_RC) {
941 av = *wqe; 941 av = *wqe;
942 if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) 942 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
943 *wqe += sizeof(struct mlx5_av); 943 *wqe += sizeof(struct mlx5_av);
944 else 944 else
945 *wqe += sizeof(struct mlx5_base_av); 945 *wqe += sizeof(struct mlx5_base_av);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ff50a7bd66d8..7ac25059c40f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
336 unsigned long flags; 336 unsigned long flags;
337 337
338 struct rw_semaphore vlan_rwsem; 338 struct rw_semaphore vlan_rwsem;
339 struct mutex mcast_mutex;
339 340
340 struct rb_root path_tree; 341 struct rb_root path_tree;
341 struct list_head path_list; 342 struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index f87d104837dc..d69410c2ed97 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
511 case IB_CM_REQ_RECEIVED: 511 case IB_CM_REQ_RECEIVED:
512 return ipoib_cm_req_handler(cm_id, event); 512 return ipoib_cm_req_handler(cm_id, event);
513 case IB_CM_DREQ_RECEIVED: 513 case IB_CM_DREQ_RECEIVED:
514 p = cm_id->context;
515 ib_send_cm_drep(cm_id, NULL, 0); 514 ib_send_cm_drep(cm_id, NULL, 0);
516 /* Fall through */ 515 /* Fall through */
517 case IB_CM_REJ_RECEIVED: 516 case IB_CM_REJ_RECEIVED:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 7871379342f4..184a22f48027 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
52 IPOIB_NETDEV_STAT(tx_bytes), 52 IPOIB_NETDEV_STAT(tx_bytes),
53 IPOIB_NETDEV_STAT(tx_errors), 53 IPOIB_NETDEV_STAT(tx_errors),
54 IPOIB_NETDEV_STAT(rx_dropped), 54 IPOIB_NETDEV_STAT(rx_dropped),
55 IPOIB_NETDEV_STAT(tx_dropped) 55 IPOIB_NETDEV_STAT(tx_dropped),
56 IPOIB_NETDEV_STAT(multicast),
56}; 57};
57 58
58#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) 59#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 57a9655e844d..2e075377242e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
256 256
257 ++dev->stats.rx_packets; 257 ++dev->stats.rx_packets;
258 dev->stats.rx_bytes += skb->len; 258 dev->stats.rx_bytes += skb->len;
259 if (skb->pkt_type == PACKET_MULTICAST)
260 dev->stats.multicast++;
259 261
260 skb->dev = dev; 262 skb->dev = dev;
261 if ((dev->features & NETIF_F_RXCSUM) && 263 if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
709 return pending; 711 return pending;
710} 712}
711 713
714static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
715 struct ib_qp *qp,
716 enum ib_qp_state new_state)
717{
718 struct ib_qp_attr qp_attr;
719 struct ib_qp_init_attr query_init_attr;
720 int ret;
721
722 ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
723 if (ret) {
724 ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
725 return;
726 }
727	/* print according to the new state and the previous state. */
728 if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
729		ipoib_dbg(priv, "Failed to modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
730 else
731 ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
732 new_state, qp_attr.qp_state);
733}
734
712int ipoib_ib_dev_stop_default(struct net_device *dev) 735int ipoib_ib_dev_stop_default(struct net_device *dev)
713{ 736{
714 struct ipoib_dev_priv *priv = ipoib_priv(dev); 737 struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
728 */ 751 */
729 qp_attr.qp_state = IB_QPS_ERR; 752 qp_attr.qp_state = IB_QPS_ERR;
730 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) 753 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
731 ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); 754 check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
732 755
733 /* Wait for all sends and receives to complete */ 756 /* Wait for all sends and receives to complete */
734 begin = jiffies; 757 begin = jiffies;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 4ce315c92b48..6c77df34869d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1560 int i, wait_flushed = 0; 1560 int i, wait_flushed = 0;
1561 1561
1562 init_completion(&priv->ntbl.flushed); 1562 init_completion(&priv->ntbl.flushed);
1563 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1563 1564
1564 spin_lock_irqsave(&priv->lock, flags); 1565 spin_lock_irqsave(&priv->lock, flags);
1565 1566
@@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
1604 1605
1605 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); 1606 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
1606 init_completion(&priv->ntbl.deleted); 1607 init_completion(&priv->ntbl.deleted);
1607 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1608 1608
1609 /* Stop GC if called at init fail need to cancel work */ 1609 /* Stop GC if called at init fail need to cancel work */
1610 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1610 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
1847 .ndo_tx_timeout = ipoib_timeout, 1847 .ndo_tx_timeout = ipoib_timeout,
1848 .ndo_set_rx_mode = ipoib_set_mcast_list, 1848 .ndo_set_rx_mode = ipoib_set_mcast_list,
1849 .ndo_get_iflink = ipoib_get_iflink, 1849 .ndo_get_iflink = ipoib_get_iflink,
1850 .ndo_get_stats64 = ipoib_get_stats,
1850}; 1851};
1851 1852
1852void ipoib_setup_common(struct net_device *dev) 1853void ipoib_setup_common(struct net_device *dev)
@@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
1877 priv->dev = dev; 1878 priv->dev = dev;
1878 spin_lock_init(&priv->lock); 1879 spin_lock_init(&priv->lock);
1879 init_rwsem(&priv->vlan_rwsem); 1880 init_rwsem(&priv->vlan_rwsem);
1881 mutex_init(&priv->mcast_mutex);
1880 1882
1881 INIT_LIST_HEAD(&priv->path_list); 1883 INIT_LIST_HEAD(&priv->path_list);
1882 INIT_LIST_HEAD(&priv->child_intfs); 1884 INIT_LIST_HEAD(&priv->child_intfs);
@@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
2173 priv->dev->dev_id = port - 1; 2175 priv->dev->dev_id = port - 1;
2174 2176
2175 result = ib_query_port(hca, port, &attr); 2177 result = ib_query_port(hca, port, &attr);
2176 if (!result) 2178 if (result) {
2177 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
2178 else {
2179 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 2179 printk(KERN_WARNING "%s: ib_query_port %d failed\n",
2180 hca->name, port); 2180 hca->name, port);
2181 goto device_init_failed; 2181 goto device_init_failed;
2182 } 2182 }
2183 2183
2184 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
2185
2184 /* MTU will be reset when mcast join happens */ 2186 /* MTU will be reset when mcast join happens */
2185 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 2187 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
2186 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 2188 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
@@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
2211 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 2213 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
2212 hca->name, port, result); 2214 hca->name, port, result);
2213 goto device_init_failed; 2215 goto device_init_failed;
2214 } else 2216 }
2215 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 2217
2218 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
2219 sizeof(union ib_gid));
2216 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); 2220 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2217 2221
2218 result = ipoib_dev_init(priv->dev, hca, port); 2222 result = ipoib_dev_init(priv->dev, hca, port);
2219 if (result < 0) { 2223 if (result) {
2220 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 2224 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
2221 hca->name, port, result); 2225 hca->name, port, result);
2222 goto device_init_failed; 2226 goto device_init_failed;
@@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void)
2365 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); 2369 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
2366#ifdef CONFIG_INFINIBAND_IPOIB_CM 2370#ifdef CONFIG_INFINIBAND_IPOIB_CM
2367 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 2371 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
2372 ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
2368#endif 2373#endif
2369 2374
2370 /* 2375 /*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 057f58e6afca..93e149efc1f5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
684int ipoib_mcast_stop_thread(struct net_device *dev) 684int ipoib_mcast_stop_thread(struct net_device *dev)
685{ 685{
686 struct ipoib_dev_priv *priv = ipoib_priv(dev); 686 struct ipoib_dev_priv *priv = ipoib_priv(dev);
687 unsigned long flags;
688 687
689 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 688 ipoib_dbg_mcast(priv, "stopping multicast thread\n");
690 689
691 spin_lock_irqsave(&priv->lock, flags); 690 cancel_delayed_work_sync(&priv->mcast_task);
692 cancel_delayed_work(&priv->mcast_task);
693 spin_unlock_irqrestore(&priv->lock, flags);
694
695 flush_workqueue(priv->wq);
696 691
697 return 0; 692 return 0;
698} 693}
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
748{ 743{
749 struct ipoib_mcast *mcast, *tmcast; 744 struct ipoib_mcast *mcast, *tmcast;
750 745
746 /*
747 * make sure the in-flight joins have finished before we attempt
748 * to leave
749 */
750 list_for_each_entry_safe(mcast, tmcast, remove_list, list)
751 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
752 wait_for_completion(&mcast->done);
753
751 list_for_each_entry_safe(mcast, tmcast, remove_list, list) { 754 list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
752 ipoib_mcast_leave(mcast->dev, mcast); 755 ipoib_mcast_leave(mcast->dev, mcast);
753 ipoib_mcast_free(mcast); 756 ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
838 struct ipoib_mcast *mcast, *tmcast; 841 struct ipoib_mcast *mcast, *tmcast;
839 unsigned long flags; 842 unsigned long flags;
840 843
844 mutex_lock(&priv->mcast_mutex);
841 ipoib_dbg_mcast(priv, "flushing multicast list\n"); 845 ipoib_dbg_mcast(priv, "flushing multicast list\n");
842 846
843 spin_lock_irqsave(&priv->lock, flags); 847 spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
856 860
857 spin_unlock_irqrestore(&priv->lock, flags); 861 spin_unlock_irqrestore(&priv->lock, flags);
858 862
859 /*
860 * make sure the in-flight joins have finished before we attempt
861 * to leave
862 */
863 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
864 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
865 wait_for_completion(&mcast->done);
866
867 ipoib_mcast_remove_list(&remove_list); 863 ipoib_mcast_remove_list(&remove_list);
864 mutex_unlock(&priv->mcast_mutex);
868} 865}
869 866
870static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) 867static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
982 netif_addr_unlock(dev); 979 netif_addr_unlock(dev);
983 local_irq_restore(flags); 980 local_irq_restore(flags);
984 981
985 /*
986 * make sure the in-flight joins have finished before we attempt
987 * to leave
988 */
989 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
990 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
991 wait_for_completion(&mcast->done);
992
993 ipoib_mcast_remove_list(&remove_list); 982 ipoib_mcast_remove_list(&remove_list);
994 983
995 /* 984 /*
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b97188acc4f1..2d80fa8a0634 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1519,6 +1519,13 @@ static int arm_smmu_add_device(struct device *dev)
1519 1519
1520 if (using_legacy_binding) { 1520 if (using_legacy_binding) {
1521 ret = arm_smmu_register_legacy_master(dev, &smmu); 1521 ret = arm_smmu_register_legacy_master(dev, &smmu);
1522
1523 /*
1524	 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1525 * will allocate/initialise a new one. Thus we need to update fwspec for
1526 * later use.
1527 */
1528 fwspec = dev->iommu_fwspec;
1522 if (ret) 1529 if (ret)
1523 goto out_free; 1530 goto out_free;
1524 } else if (fwspec && fwspec->ops == &arm_smmu_ops) { 1531 } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 7b5fd8fb1761..aaca0b3d662e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -44,7 +44,6 @@ struct procdata {
44 char log_name[15]; /* log filename */ 44 char log_name[15]; /* log filename */
45 struct log_data *log_head, *log_tail; /* head and tail for queue */ 45 struct log_data *log_head, *log_tail; /* head and tail for queue */
46 int if_used; /* open count for interface */ 46 int if_used; /* open count for interface */
47 int volatile del_lock; /* lock for delete operations */
48 unsigned char logtmp[LOG_MAX_LINELEN]; 47 unsigned char logtmp[LOG_MAX_LINELEN];
49 wait_queue_head_t rd_queue; 48 wait_queue_head_t rd_queue;
50}; 49};
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
102{ 101{
103 struct log_data *ib; 102 struct log_data *ib;
104 struct procdata *pd = card->proclog; 103 struct procdata *pd = card->proclog;
105 int i;
106 unsigned long flags; 104 unsigned long flags;
107 105
108 if (!pd) 106 if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
126 else 124 else
127 pd->log_tail->next = ib; /* follows existing messages */ 125 pd->log_tail->next = ib; /* follows existing messages */
128 pd->log_tail = ib; /* new tail */ 126 pd->log_tail = ib; /* new tail */
129 i = pd->del_lock++; /* get lock state */
130 spin_unlock_irqrestore(&card->hysdn_lock, flags);
131 127
132	/* delete old entries */	128	/* delete old entries */
133 if (!i) 129 while (pd->log_head->next) {
134 while (pd->log_head->next) { 130 if ((pd->log_head->usage_cnt <= 0) &&
135 if ((pd->log_head->usage_cnt <= 0) && 131 (pd->log_head->next->usage_cnt <= 0)) {
136 (pd->log_head->next->usage_cnt <= 0)) { 132 ib = pd->log_head;
137 ib = pd->log_head; 133 pd->log_head = pd->log_head->next;
138 pd->log_head = pd->log_head->next; 134 kfree(ib);
139 kfree(ib); 135 } else {
140 } else 136 break;
141 break; 137 }
142 } /* pd->log_head->next */ 138 } /* pd->log_head->next */
143 pd->del_lock--; /* release lock level */ 139
140 spin_unlock_irqrestore(&card->hysdn_lock, flags);
141
144 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ 142 wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */
145} /* put_log_buffer */ 143} /* put_log_buffer */
146 144
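
For context on why del_lock had to go: a plain (even volatile) counter provides no atomicity, so the old open-coded lock could be "taken" by two CPUs at once. A hypothetical interleaving, as a comment-only sketch:

/* Hypothetical race with the old 'int volatile del_lock':
 *
 *	CPU0: i = pd->del_lock++;	// reads 0, believes it owns the list
 *	CPU1: i = pd->del_lock++;	// also reads 0 before CPU0's store
 *
 * Both CPUs then walk and kfree() the log list concurrently.
 * 'volatile' only prevents compiler caching; it is not a lock.  The
 * rewrite simply prunes the list while card->hysdn_lock is still held.
 */
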
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8621a198a2ce..bac33311f55a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
216 pci_set_drvdata(pdev, dev); 216 pci_set_drvdata(pdev, dev);
217 217
218 /* 218 /*
219	 * MEI requires resuming from runtime suspend mode
220	 * in order to perform the link reset flow upon system suspend.
221 */
222 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
223
224 /*
219 * For not wake-able HW runtime pm framework 225 * For not wake-able HW runtime pm framework
220 * can't be used on pci device level. 226 * can't be used on pci device level.
221 * Use domain runtime pm callbacks instead. 227 * Use domain runtime pm callbacks instead.
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index f811cd524468..e38a5f144373 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
138 pci_set_drvdata(pdev, dev); 138 pci_set_drvdata(pdev, dev);
139 139
140 /* 140 /*
141	 * MEI requires resuming from runtime suspend mode
142	 * in order to perform the link reset flow upon system suspend.
143 */
144 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
145
146 /*
141 * For not wake-able HW runtime pm framework 147 * For not wake-able HW runtime pm framework
142 * can't be used on pci device level. 148 * can't be used on pci device level.
143 * Use domain runtime pm callbacks instead. 149 * Use domain runtime pm callbacks instead.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index e5938c791330..f1bbfd389367 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2170,7 +2170,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
2170 * from being accepted. 2170 * from being accepted.
2171 */ 2171 */
2172 card = md->queue.card; 2172 card = md->queue.card;
2173 spin_lock_irq(md->queue.queue->queue_lock);
2173 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); 2174 queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
2175 spin_unlock_irq(md->queue.queue->queue_lock);
2174 blk_set_queue_dying(md->queue.queue); 2176 blk_set_queue_dying(md->queue.queue);
2175 mmc_cleanup_queue(&md->queue); 2177 mmc_cleanup_queue(&md->queue);
2176 if (md->disk->flags & GENHD_FL_UP) { 2178 if (md->disk->flags & GENHD_FL_UP) {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4ffea14b7eb6..2bae69e39544 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1289,7 +1289,7 @@ out_err:
1289static int mmc_select_hs400es(struct mmc_card *card) 1289static int mmc_select_hs400es(struct mmc_card *card)
1290{ 1290{
1291 struct mmc_host *host = card->host; 1291 struct mmc_host *host = card->host;
1292 int err = 0; 1292 int err = -EINVAL;
1293 u8 val; 1293 u8 val;
1294 1294
1295 if (!(host->caps & MMC_CAP_8_BIT_DATA)) { 1295 if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 04ff3c97a535..2ab4788d021f 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2086,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
2086 mmc->max_seg_size = mmc->max_req_size; 2086 mmc->max_seg_size = mmc->max_req_size;
2087 2087
2088 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 2088 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
2089 MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; 2089 MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
2090 2090
2091 mmc->caps |= mmc_pdata(host)->caps; 2091 mmc->caps |= mmc_pdata(host)->caps;
2092 if (mmc->caps & MMC_CAP_8_BIT_DATA) 2092 if (mmc->caps & MMC_CAP_8_BIT_DATA)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f336a9b85576..9ec8f033ac5f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
113 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 113 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
114 if (tr->writesect(dev, block, buf)) 114 if (tr->writesect(dev, block, buf))
115 return BLK_STS_IOERR; 115 return BLK_STS_IOERR;
116 return BLK_STS_OK;
116 default: 117 default:
117 return BLK_STS_IOERR; 118 return BLK_STS_IOERR;
118 } 119 }
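
The one-line fix above matters more than it looks: without the return, a fully successful write fell through into the default arm. Reconstructed shape of the buggy switch (illustrative excerpt, not the full function):

	case REQ_OP_WRITE:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return BLK_STS_IOERR;
		/* missing "return BLK_STS_OK;" fell through... */
	default:
		return BLK_STS_IOERR;	/* ...so good writes errored too */
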
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index d922a88e407f..2c8baa0c2c4e 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
1201 * tRC < 30ns implies EDO mode. This controller does not support this 1201 * tRC < 30ns implies EDO mode. This controller does not support this
1202 * mode. 1202 * mode.
1203 */ 1203 */
1204 if (conf->timings.sdr.tRC_min < 30) 1204 if (conf->timings.sdr.tRC_min < 30000)
1205 return -ENOTSUPP; 1205 return -ENOTSUPP;
1206 1206
1207 atmel_smc_cs_conf_init(smcconf); 1207 atmel_smc_cs_conf_init(smcconf);
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 55a8ee5306ea..8c210a5776bc 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
945 */ 945 */
946 struct platform_device *pdev = to_platform_device(userdev); 946 struct platform_device *pdev = to_platform_device(userdev);
947 const struct atmel_pmecc_caps *caps; 947 const struct atmel_pmecc_caps *caps;
948 const struct of_device_id *match;
948 949
949 /* No PMECC engine available. */ 950 /* No PMECC engine available. */
950 if (!of_property_read_bool(userdev->of_node, 951 if (!of_property_read_bool(userdev->of_node,
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
953 954
954 caps = &at91sam9g45_caps; 955 caps = &at91sam9g45_caps;
955 956
956	/*	957	/* Find the caps associated with the NAND dev node. */
957 * Try to find the NFC subnode and extract the associated caps 958 match = of_match_node(atmel_pmecc_legacy_match,
958 * from there. 959 userdev->of_node);
959 */ 960 if (match && match->data)
960 np = of_find_compatible_node(userdev->of_node, NULL, 961 caps = match->data;
961 "atmel,sama5d3-nfc");
962 if (np) {
963 const struct of_device_id *match;
964
965 match = of_match_node(atmel_pmecc_legacy_match, np);
966 if (match && match->data)
967 caps = match->data;
968
969 of_node_put(np);
970 }
971 962
972 pmecc = atmel_pmecc_create(pdev, caps, 1, 2); 963 pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
973 } 964 }
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5fa5ddc94834..c6c18b82f8f4 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
65 65
66 if (!section) { 66 if (!section) {
67 oobregion->offset = 0; 67 oobregion->offset = 0;
68 oobregion->length = 4; 68 if (mtd->oobsize == 16)
69 oobregion->length = 4;
70 else
71 oobregion->length = 3;
69 } else { 72 } else {
73 if (mtd->oobsize == 8)
74 return -ERANGE;
75
70 oobregion->offset = 6; 76 oobregion->offset = 6;
71 oobregion->length = ecc->total - 4; 77 oobregion->length = ecc->total - 4;
72 } 78 }
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1125 * Ensure the timing mode has been changed on the chip side 1131 * Ensure the timing mode has been changed on the chip side
1126 * before changing timings on the controller side. 1132 * before changing timings on the controller side.
1127 */ 1133 */
1128 if (chip->onfi_version) { 1134 if (chip->onfi_version &&
1135 (le16_to_cpu(chip->onfi_params.opt_cmd) &
1136 ONFI_OPT_CMD_SET_GET_FEATURES)) {
1129 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { 1137 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1130 chip->onfi_timing_mode_default, 1138 chip->onfi_timing_mode_default,
1131 }; 1139 };
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
2741 * @buf: the data to write 2749 * @buf: the data to write
2742 * @oob_required: must write chip->oob_poi to OOB 2750 * @oob_required: must write chip->oob_poi to OOB
2743 * @page: page number to write 2751 * @page: page number to write
2744 * @cached: cached programming
2745 * @raw: use _raw version of write_page 2752 * @raw: use _raw version of write_page
2746 */ 2753 */
2747static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2754static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index f06312df3669..7e36d7d13c26 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
311 struct nand_sdr_timings *timings = &iface->timings.sdr; 311 struct nand_sdr_timings *timings = &iface->timings.sdr;
312 312
313 /* microseconds -> picoseconds */ 313 /* microseconds -> picoseconds */
314 timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog); 314 timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
315 timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers); 315 timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
316 timings->tR_max = 1000000UL * le16_to_cpu(params->t_r); 316 timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
317 317
318 /* nanoseconds -> picoseconds */ 318 /* nanoseconds -> picoseconds */
319 timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); 319 timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
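
The UL-to-ULL suffix change is a 32-bit overflow fix: the ONFI t_prog/t_bers/t_r fields are 16-bit, and 1000000 * 65535 = 65,535,000,000 ps, far above the 32-bit unsigned long ceiling of 4,294,967,295. A small standalone demonstration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned short t_prog = 65535;	/* worst-case 16-bit ONFI value */

	/* On a 32-bit target, unsigned long is 32 bits, so the first
	 * product wraps before the widening assignment; ULL forces the
	 * multiplication itself to happen in 64 bits.
	 */
	unsigned long long wrapped = 1000000UL * t_prog;  /* wraps on 32-bit */
	unsigned long long correct = 1000000ULL * t_prog; /* always 64-bit */

	printf("%llu vs %llu\n", wrapped, correct);
	return 0;
}
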
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index d0b6f8f9f297..6abd142b1324 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
1728 */ 1728 */
1729 chip->clk_rate = NSEC_PER_SEC / min_clk_period; 1729 chip->clk_rate = NSEC_PER_SEC / min_clk_period;
1730 real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); 1730 real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
1731 if (real_clk_rate <= 0) {
1732 dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
1733 return -EINVAL;
1734 }
1731 1735
1732 /* 1736 /*
1733 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data 1737 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1e46418a3b74..264b281eb86b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
625 * all finished. 625 * all finished.
626 */ 626 */
627 mt7623_pad_clk_setup(ds); 627 mt7623_pad_clk_setup(ds);
628 } else {
629 u16 lcl_adv = 0, rmt_adv = 0;
630 u8 flowctrl;
631 u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
632
633 switch (phydev->speed) {
634 case SPEED_1000:
635 mcr |= PMCR_FORCE_SPEED_1000;
636 break;
637 case SPEED_100:
638 mcr |= PMCR_FORCE_SPEED_100;
639 break;
640		}
641
642 if (phydev->link)
643 mcr |= PMCR_FORCE_LNK;
644
645 if (phydev->duplex) {
646 mcr |= PMCR_FORCE_FDX;
647
648 if (phydev->pause)
649 rmt_adv = LPA_PAUSE_CAP;
650 if (phydev->asym_pause)
651 rmt_adv |= LPA_PAUSE_ASYM;
652
653 if (phydev->advertising & ADVERTISED_Pause)
654 lcl_adv |= ADVERTISE_PAUSE_CAP;
655 if (phydev->advertising & ADVERTISED_Asym_Pause)
656 lcl_adv |= ADVERTISE_PAUSE_ASYM;
657
658 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
659
660 if (flowctrl & FLOW_CTRL_TX)
661 mcr |= PMCR_TX_FC_EN;
662 if (flowctrl & FLOW_CTRL_RX)
663 mcr |= PMCR_RX_FC_EN;
664 }
665 mt7530_write(priv, MT7530_PMCR_P(port), mcr);
628 } 666 }
629} 667}
630 668
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index b83d76b99802..74db9822eb40 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
151#define PMCR_TX_FC_EN BIT(5) 151#define PMCR_TX_FC_EN BIT(5)
152#define PMCR_RX_FC_EN BIT(4) 152#define PMCR_RX_FC_EN BIT(4)
153#define PMCR_FORCE_SPEED_1000 BIT(3) 153#define PMCR_FORCE_SPEED_1000 BIT(3)
154#define PMCR_FORCE_SPEED_100 BIT(2)
154#define PMCR_FORCE_FDX BIT(1) 155#define PMCR_FORCE_FDX BIT(1)
155#define PMCR_FORCE_LNK BIT(0) 156#define PMCR_FORCE_LNK BIT(0)
156#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ 157#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
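
The new else-branch in mt7530_adjust_link() above resolves pause frames with mii_resolve_flowctrl_fdx(), which implements the IEEE 802.3 flow-control resolution from the two advertisement masks. A worked instance of the common symmetric case, as a comment-only sketch:

/* Both link partners advertise symmetric pause:
 *
 *	lcl_adv = ADVERTISE_PAUSE_CAP;	// from phydev->advertising
 *	rmt_adv = LPA_PAUSE_CAP;	// from phydev->pause
 *
 * mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv) then returns
 * FLOW_CTRL_TX | FLOW_CTRL_RX, so the hunk above sets both
 * PMCR_TX_FC_EN and PMCR_RX_FC_EN in the port MAC control register.
 * Pause is only resolved for full duplex, which is why the whole
 * block sits under the phydev->duplex check.
 */
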
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 86058a9f3417..1d307f2def2d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,9 +1785,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1785 1785
1786 xgene_enet_gpiod_get(pdata); 1786 xgene_enet_gpiod_get(pdata);
1787 1787
1788 if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { 1788 pdata->clk = devm_clk_get(&pdev->dev, NULL);
1789 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1789 if (IS_ERR(pdata->clk)) {
1790 if (IS_ERR(pdata->clk)) { 1790 if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
1791 /* Abort if the clock is defined but couldn't be 1791 /* Abort if the clock is defined but couldn't be
1792 * retrived. Always abort if the clock is missing on 1792 * retrived. Always abort if the clock is missing on
1793 * DT system as the driver can't cope with this case. 1793 * DT system as the driver can't cope with this case.
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f411936b744c..a1125d10c825 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2369 2369
2370 spin_lock_init(&bp->lock); 2370 spin_lock_init(&bp->lock);
2371 u64_stats_init(&bp->hw_stats.syncp);
2371 2372
2372 bp->rx_pending = B44_DEF_RX_RING_PENDING; 2373 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2373 bp->tx_pending = B44_DEF_TX_RING_PENDING; 2374 bp->tx_pending = B44_DEF_TX_RING_PENDING;
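
u64_stats_init() initialises the seqcount that guards 64-bit counters on 32-bit SMP kernels (it compiles away on 64-bit); the same one-liner is applied in the i40e and ixgbevf hunks below. A minimal sketch of the writer/reader pairing the syncp protects, with an illustrative struct:

struct example_stats {
	u64 rx_bytes;
	struct u64_stats_sync syncp;	/* u64_stats_init() once at setup */
};

static void example_update(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);	/* writer side */
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

static u64 example_read(struct example_stats *s)
{
	unsigned int start;
	u64 val;

	do {	/* retry if a writer raced with us mid-read */
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->rx_bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}
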
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5333601f855f..dc3052751bc1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
449 p = (char *)&dev->stats; 449 p = (char *)&dev->stats;
450 else 450 else
451 p = (char *)priv; 451 p = (char *)priv;
452
453 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
454 continue;
455
452 p += s->stat_offset; 456 p += s->stat_offset;
453 data[j] = *(unsigned long *)p; 457 data[j] = *(unsigned long *)p;
454 j++; 458 j++;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a3e694679635..c45e8e3b82d3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111static void send_request_unmap(struct ibmvnic_adapter *, u8); 111static void send_request_unmap(struct ibmvnic_adapter *, u8);
112static void send_login(struct ibmvnic_adapter *adapter); 112static void send_login(struct ibmvnic_adapter *adapter);
113static void send_cap_queries(struct ibmvnic_adapter *adapter); 113static void send_cap_queries(struct ibmvnic_adapter *adapter);
114static int init_sub_crqs(struct ibmvnic_adapter *);
114static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 115static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
115static int ibmvnic_init(struct ibmvnic_adapter *); 116static int ibmvnic_init(struct ibmvnic_adapter *);
116static void release_crq_queue(struct ibmvnic_adapter *); 117static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
651 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 652 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
652 unsigned long timeout = msecs_to_jiffies(30000); 653 unsigned long timeout = msecs_to_jiffies(30000);
653 struct device *dev = &adapter->vdev->dev; 654 struct device *dev = &adapter->vdev->dev;
655 int rc;
654 656
655 do { 657 do {
656 if (adapter->renegotiate) { 658 if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
664 dev_err(dev, "Capabilities query timeout\n"); 666 dev_err(dev, "Capabilities query timeout\n");
665 return -1; 667 return -1;
666 } 668 }
669 rc = init_sub_crqs(adapter);
670 if (rc) {
671 dev_err(dev,
672 "Initialization of SCRQ's failed\n");
673 return -1;
674 }
675 rc = init_sub_crq_irqs(adapter);
676 if (rc) {
677 dev_err(dev,
678 "Initialization of SCRQ's irqs failed\n");
679 return -1;
680 }
667 } 681 }
668 682
669 reinit_completion(&adapter->init_done); 683 reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3004 *req_value, 3018 *req_value,
3005 (long int)be64_to_cpu(crq->request_capability_rsp. 3019 (long int)be64_to_cpu(crq->request_capability_rsp.
3006 number), name); 3020 number), name);
3007 release_sub_crqs(adapter);
3008 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3021 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
3009 ibmvnic_send_req_caps(adapter, 1); 3022 ibmvnic_send_req_caps(adapter, 1);
3010 return; 3023 return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b936febc315a..2194960d5855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1113 if (!tx_ring->tx_bi) 1113 if (!tx_ring->tx_bi)
1114 goto err; 1114 goto err;
1115 1115
1116 u64_stats_init(&tx_ring->syncp);
1117
1116 /* round up to nearest 4K */ 1118 /* round up to nearest 4K */
1117 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1119 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1118 /* add u32 for head writeback, align after this takes care of 1120 /* add u32 for head writeback, align after this takes care of
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 084c53582793..032f8ac06357 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2988 if (!tx_ring->tx_buffer_info) 2988 if (!tx_ring->tx_buffer_info)
2989 goto err; 2989 goto err;
2990 2990
2991 u64_stats_init(&tx_ring->syncp);
2992
2991 /* round up to nearest 4K */ 2993 /* round up to nearest 4K */
2992 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2994 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2993 tx_ring->size = ALIGN(tx_ring->size, 4096); 2995 tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3046 if (!rx_ring->rx_buffer_info) 3048 if (!rx_ring->rx_buffer_info)
3047 goto err; 3049 goto err;
3048 3050
3051 u64_stats_init(&rx_ring->syncp);
3052
3049 /* Round up to nearest 4K */ 3053 /* Round up to nearest 4K */
3050 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 3054 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3051 rx_ring->size = ALIGN(rx_ring->size, 4096); 3055 rx_ring->size = ALIGN(rx_ring->size, 4096);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c751a1d434ad..3d4e4a5d00d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
223 struct ethtool_wolinfo *wol) 223 struct ethtool_wolinfo *wol)
224{ 224{
225 struct mlx4_en_priv *priv = netdev_priv(netdev); 225 struct mlx4_en_priv *priv = netdev_priv(netdev);
226 struct mlx4_caps *caps = &priv->mdev->dev->caps;
226 int err = 0; 227 int err = 0;
227 u64 config = 0; 228 u64 config = 0;
228 u64 mask; 229 u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
235 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : 236 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
236 MLX4_DEV_CAP_FLAG_WOL_PORT2; 237 MLX4_DEV_CAP_FLAG_WOL_PORT2;
237 238
238 if (!(priv->mdev->dev->caps.flags & mask)) { 239 if (!(caps->flags & mask)) {
239 wol->supported = 0; 240 wol->supported = 0;
240 wol->wolopts = 0; 241 wol->wolopts = 0;
241 return; 242 return;
242 } 243 }
243 244
245 if (caps->wol_port[priv->port])
246 wol->supported = WAKE_MAGIC;
247 else
248 wol->supported = 0;
249
244 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 250 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
245 if (err) { 251 if (err) {
246 en_err(priv, "Failed to get WoL information\n"); 252 en_err(priv, "Failed to get WoL information\n");
247 return; 253 return;
248 } 254 }
249 255
250 if (config & MLX4_EN_WOL_MAGIC) 256 if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
251 wol->supported = WAKE_MAGIC;
252 else
253 wol->supported = 0;
254
255 if (config & MLX4_EN_WOL_ENABLED)
256 wol->wolopts = WAKE_MAGIC; 257 wol->wolopts = WAKE_MAGIC;
257 else 258 else
258 wol->wolopts = 0; 259 wol->wolopts = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 436f7689a032..bf1638044a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
574 * header, the HW adds it. To address that, we are subtracting the pseudo 574 * header, the HW adds it. To address that, we are subtracting the pseudo
575 * header checksum from the checksum value provided by the HW. 575 * header checksum from the checksum value provided by the HW.
576 */ 576 */
577static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, 577static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
578 struct iphdr *iph) 578 struct iphdr *iph)
579{ 579{
580 __u16 length_for_csum = 0; 580 __u16 length_for_csum = 0;
581 __wsum csum_pseudo_header = 0; 581 __wsum csum_pseudo_header = 0;
582 __u8 ipproto = iph->protocol;
583
584 if (unlikely(ipproto == IPPROTO_SCTP))
585 return -1;
582 586
583 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); 587 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
584 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, 588 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
585 length_for_csum, iph->protocol, 0); 589 length_for_csum, ipproto, 0);
586 skb->csum = csum_sub(hw_checksum, csum_pseudo_header); 590 skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
591 return 0;
587} 592}
588 593
589#if IS_ENABLED(CONFIG_IPV6) 594#if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
594static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, 599static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
595 struct ipv6hdr *ipv6h) 600 struct ipv6hdr *ipv6h)
596{ 601{
602 __u8 nexthdr = ipv6h->nexthdr;
597 __wsum csum_pseudo_hdr = 0; 603 __wsum csum_pseudo_hdr = 0;
598 604
599 if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || 605 if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
600 ipv6h->nexthdr == IPPROTO_HOPOPTS)) 606 nexthdr == IPPROTO_HOPOPTS ||
607 nexthdr == IPPROTO_SCTP))
601 return -1; 608 return -1;
602 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); 609 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
603 610
604 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 611 csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
605 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 612 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
606 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); 613 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
607 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); 614 csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
615 (__force __wsum)htons(nexthdr));
608 616
609 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); 617 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
610 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); 618 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
627 } 635 }
628 636
629 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) 637 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
630 get_fixed_ipv4_csum(hw_checksum, skb, hdr); 638 return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
631#if IS_ENABLED(CONFIG_IPV6) 639#if IS_ENABLED(CONFIG_IPV6)
632 else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) 640 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
633 if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) 641 return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
634 return -1;
635#endif 642#endif
636 return 0; 643 return 0;
637} 644}
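
Background for the hunk above: with CHECKSUM_COMPLETE the NIC reports a ones'-complement sum over the packet that includes pseudo-header fields, so the driver subtracts a recomputed pseudo-header sum before setting skb->csum. SCTP's checksum is CRC32c, which this arithmetic cannot fix up, hence the new early -1 return. A hedged sketch of the IPv4 math (illustrative helper name, kernel csum helpers):

static __wsum example_strip_pseudo_hdr(__wsum hw_checksum,
				       const struct iphdr *iph)
{
	/* L4 length = total length minus the IP header (ihl is in words) */
	__u16 len = be16_to_cpu(iph->tot_len) - (iph->ihl << 2);
	__wsum pseudo = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					   len, iph->protocol, 0);

	/* ones'-complement subtraction: leaves the sum over L4 only */
	return csum_sub(hw_checksum, pseudo);
}
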
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 37e84a59e751..041c0ed65929 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
159 [32] = "Loopback source checks support", 159 [32] = "Loopback source checks support",
160 [33] = "RoCEv2 support", 160 [33] = "RoCEv2 support",
161 [34] = "DMFS Sniffer support (UC & MC)", 161 [34] = "DMFS Sniffer support (UC & MC)",
162 [35] = "QinQ VST mode support", 162 [35] = "Diag counters per port",
163 [36] = "sl to vl mapping table change event support" 163 [36] = "QinQ VST mode support",
164 [37] = "sl to vl mapping table change event support",
164 }; 165 };
165 int i; 166 int i;
166 167
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
 #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
+#define QUERY_DEV_CAP_WOL_OFFSET		0x43
 #define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
 #define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
 	dev_cap->flags = flags | (u64)ext_flags << 32;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
+	dev_cap->wol_port[1] = !!(field & 0x20);
+	dev_cap->wol_port[2] = !!(field & 0x40);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
 	dev_cap->reserved_uars = field >> 4;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 5343a0599253..b52ba01aa486 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
 	u32 dmfs_high_rate_qpn_range;
 	struct mlx4_rate_limit_caps rl_caps;
 	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
+	bool wol_port[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a27c9c13a36e..09b9bc17bce9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -424,6 +424,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
 	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+	dev->caps.wol_port[1] = dev_cap->wol_port[1];
+	dev->caps.wol_port[2] = dev_cap->wol_port[2];
 
 	/* Save uar page shift */
 	if (!mlx4_is_slave(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 656b2d3f1bee..5eb1606765c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
 						orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
 						   MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
 						orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	if (!bridge_port->bridge_device->multicast_enabled)
 		return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
 		return 0;
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	bridge_device = bridge_port->bridge_device;
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
 							       bridge_device,
 							       mdb->vid);
-	if (WARN_ON(!mlxsw_sp_port_vlan))
-		return -EINVAL;
+	if (!mlxsw_sp_port_vlan)
+		return 0;
 
 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
 	int err = 0;
 
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
-	if (WARN_ON(!bridge_port))
-		return -EINVAL;
+	if (!bridge_port)
+		return 0;
 
 	bridge_device = bridge_port->bridge_device;
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
 							       bridge_device,
 							       mdb->vid);
-	if (WARN_ON(!mlxsw_sp_port_vlan))
-		return -EINVAL;
+	if (!mlxsw_sp_port_vlan)
+		return 0;
 
 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
 
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
 
 }
 
+static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	struct mlxsw_sp_mid *mid, *tmp;
+
+	list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
+		list_del(&mid->list);
+		clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
+		kfree(mid);
+	}
+}
+
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
 {
 	mlxsw_sp_fdb_fini(mlxsw_sp);
-	WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
+	mlxsw_sp_mids_fini(mlxsw_sp);
 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
 	kfree(mlxsw_sp->bridge);
 }
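The new mlxsw_sp_mids_fini() walks a list it is simultaneously emptying, so it uses the _safe iterator, which caches the successor before the current node is freed. The same idiom in plain C, with hypothetical types, just to show why the extra cursor exists:

/* Illustrative sketch, not driver code. */
#include <stdlib.h>

struct mid_entry {
	struct mid_entry *next;
	int mid;
};

static void mids_fini_sketch(struct mid_entry **head)
{
	struct mid_entry *mid = *head, *tmp;

	while (mid) {
		tmp = mid->next;	/* grab successor before freeing */
		free(mid);		/* mid is dangling after this */
		mid = tmp;
	}
	*head = NULL;
}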
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18750ff0ede6..4631ca8b8eb2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
 	tx_ring->idx = idx;
 	tx_ring->r_vec = r_vec;
 	tx_ring->is_xdp = is_xdp;
+	u64_stats_init(&tx_ring->r_vec->tx_sync);
 
 	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
 	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
 
 	rx_ring->idx = idx;
 	rx_ring->r_vec = r_vec;
+	u64_stats_init(&rx_ring->r_vec->rx_sync);
 
 	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
 	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
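These two hunks seed the per-vector u64_stats seqcount before first use, so lockdep and 32-bit readers see an initialised syncp. A hedged sketch of the surrounding writer/reader pattern, using the real u64_stats helpers but hypothetical struct and field names:

/* Sketch only; ring_stats and its fields are illustrative. */
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct ring_stats {
	u64 pkts;
	u64 bytes;
	struct u64_stats_sync syncp;	/* must be u64_stats_init()ed first */
};

static void ring_stats_update(struct ring_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);	/* writer side */
	s->pkts++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

static void ring_stats_read(struct ring_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {					/* lockless reader side */
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->pkts;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}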
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9da91045d167..3eb241657368 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
 	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
 	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
-	if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
+	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
 		goto err;
 
 	return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 32279d21c836..c2121d214f08 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,9 +31,18 @@
 
 #include "cpts.h"
 
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+
+struct cpts_skb_cb_data {
+	unsigned long tmo;
+};
+
 #define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
 #define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)
 
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+		      u16 ts_seqid, u8 ts_msgtype);
+
 static int event_expired(struct cpts_event *event)
 {
 	return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
 	return removed ? 0 : -1;
 }
 
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+	struct sk_buff *skb, *tmp;
+	u16 seqid;
+	u8 mtype;
+	bool found = false;
+
+	mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+	seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+
+	/* no need to grab txq.lock as access is always done under cpts->lock */
+	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+		struct skb_shared_hwtstamps ssh;
+		unsigned int class = ptp_classify_raw(skb);
+		struct cpts_skb_cb_data *skb_cb =
+					(struct cpts_skb_cb_data *)skb->cb;
+
+		if (cpts_match(skb, class, seqid, mtype)) {
+			u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
+
+			memset(&ssh, 0, sizeof(ssh));
+			ssh.hwtstamp = ns_to_ktime(ns);
+			skb_tstamp_tx(skb, &ssh);
+			found = true;
+			__skb_unlink(skb, &cpts->txq);
+			dev_consume_skb_any(skb);
+			dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
+				mtype, seqid);
+		} else if (time_after(jiffies, skb_cb->tmo)) {
+			/* timeout any expired skbs over 1s */
+			dev_dbg(cpts->dev,
+				"expiring tx timestamp mtype %u seqid %04x\n",
+				mtype, seqid);
+			__skb_unlink(skb, &cpts->txq);
+			dev_consume_skb_any(skb);
+		}
+	}
+
+	return found;
+}
+
 /*
  * Returns zero if matching event type was found.
  */
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
 		event->low = lo;
 		type = event_type(event);
 		switch (type) {
+		case CPTS_EV_TX:
+			if (cpts_match_tx_ts(cpts, event)) {
+				/* if the new event matches an existing skb,
+				 * then don't queue it
+				 */
+				break;
+			}
 		case CPTS_EV_PUSH:
 		case CPTS_EV_RX:
-		case CPTS_EV_TX:
 			list_del_init(&event->list);
 			list_add_tail(&event->list, &cpts->events);
 			break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
 	return -EOPNOTSUPP;
 }
 
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+	struct cpts *cpts = container_of(ptp, struct cpts, info);
+	unsigned long delay = cpts->ov_check_period;
+	struct timespec64 ts;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpts->lock, flags);
+	ts = ns_to_timespec64(timecounter_read(&cpts->tc));
+
+	if (!skb_queue_empty(&cpts->txq))
+		delay = CPTS_SKB_TX_WORK_TIMEOUT;
+	spin_unlock_irqrestore(&cpts->lock, flags);
+
+	pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+	return (long)delay;
+}
+
 static struct ptp_clock_info cpts_info = {
 	.owner		= THIS_MODULE,
 	.name		= "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
 	.gettime64	= cpts_ptp_gettime,
 	.settime64	= cpts_ptp_settime,
 	.enable		= cpts_ptp_enable,
+	.do_aux_work	= cpts_overflow_check,
 };
 
-static void cpts_overflow_check(struct work_struct *work)
-{
-	struct timespec64 ts;
-	struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
-
-	cpts_ptp_gettime(&cpts->info, &ts);
-	pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
-	schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
-}
-
 static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
 		      u16 ts_seqid, u8 ts_msgtype)
 {
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
 		return 0;
 
 	spin_lock_irqsave(&cpts->lock, flags);
-	cpts_fifo_read(cpts, CPTS_EV_PUSH);
+	cpts_fifo_read(cpts, -1);
 	list_for_each_safe(this, next, &cpts->events) {
 		event = list_entry(this, struct cpts_event, list);
 		if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
 			break;
 		}
 	}
+
+	if (ev_type == CPTS_EV_TX && !ns) {
+		struct cpts_skb_cb_data *skb_cb =
+				(struct cpts_skb_cb_data *)skb->cb;
+		/* Not found, add frame to queue for processing later.
+		 * The periodic FIFO check will handle this.
+		 */
+		skb_get(skb);
+		/* get the timestamp for timeouts */
+		skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+		__skb_queue_tail(&cpts->txq, skb);
+		ptp_schedule_worker(cpts->clock, 0);
+	}
 	spin_unlock_irqrestore(&cpts->lock, flags);
 
 	return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
 {
 	int err, i;
 
+	skb_queue_head_init(&cpts->txq);
 	INIT_LIST_HEAD(&cpts->events);
 	INIT_LIST_HEAD(&cpts->pool);
 	for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
 	}
 	cpts->phc_index = ptp_clock_index(cpts->clock);
 
-	schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
+	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
 	return 0;
 
 err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
 	if (WARN_ON(!cpts->clock))
 		return;
 
-	cancel_delayed_work_sync(&cpts->overflow_work);
-
 	ptp_clock_unregister(cpts->clock);
 	cpts->clock = NULL;
 
 	cpts_write32(cpts, 0, int_enable);
 	cpts_write32(cpts, 0, control);
 
+	/* Drop all packet */
+	skb_queue_purge(&cpts->txq);
+
 	clk_disable(cpts->refclk);
 }
 EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
 	cpts->dev = dev;
 	cpts->reg = (struct cpsw_cpts __iomem *)regs;
 	spin_lock_init(&cpts->lock);
-	INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
 
 	ret = cpts_of_parse(cpts, node);
 	if (ret)
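The cpts conversion above drops the driver's private delayed work in favour of the PTP core's aux worker: the .do_aux_work callback returns the delay, in jiffies, until its next run, and ptp_schedule_worker() kicks or reschedules it. A minimal sketch of that callback shape; everything outside the ptp_clock_kernel.h API here is illustrative:

/* Sketch of a .do_aux_work user, not the cpts driver itself. */
#include <linux/ptp_clock_kernel.h>

struct my_clock {
	struct ptp_clock_info info;
	struct ptp_clock *clock;
};

static long my_aux_work(struct ptp_clock_info *info)
{
	struct my_clock *mc = container_of(info, struct my_clock, info);

	/* ... poll hardware, flush pending timestamps ... */
	(void)mc;
	return HZ;		/* run again in one second */
}

/* at init time (error handling elided):
 *	mc->info.do_aux_work = my_aux_work;
 *	mc->clock = ptp_clock_register(&mc->info, dev);
 *	ptp_schedule_worker(mc->clock, 0);	// kick the first run now
 */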
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 01ea82ba9cdc..73d73faf0f38 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -119,13 +119,13 @@ struct cpts {
 	u32 cc_mult; /* for the nominal frequency */
 	struct cyclecounter cc;
 	struct timecounter tc;
-	struct delayed_work overflow_work;
 	int phc_index;
 	struct clk *refclk;
 	struct list_head events;
 	struct list_head pool;
 	struct cpts_event pool_data[CPTS_MAX_EVENTS];
 	unsigned long ov_check_period;
+	struct sk_buff_head txq;
 };
 
 void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de8156c6b292..2bbda71818ad 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
 	if (data[IFLA_GENEVE_ID]) {
 		__u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
 
-		if (vni >= GENEVE_VID_MASK)
+		if (vni >= GENEVE_N_VID)
 			return -ERANGE;
 	}
 
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 1542e837fdfa..f38e32a7ec9c 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
 
 	gtp->dev = dev;
 
-	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d6c25580f8dd..12cc64bfcff8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -765,7 +765,8 @@ struct netvsc_device {
 	u32 max_chn;
 	u32 num_chn;
 
-	refcount_t sc_offered;
+	atomic_t open_chn;
+	wait_queue_head_t subchan_open;
 
 	struct rndis_device *extension;
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0a9167dd72fb..d18c3326a1f7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 	init_completion(&net_device->channel_init_wait);
+	init_waitqueue_head(&net_device->subchan_open);
 
 	return net_device;
 }
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
 		struct netvsc_channel *nvchan = &net_device->chan_table[i];
 
 		nvchan->channel = device->channel;
+		u64_stats_init(&nvchan->tx_stats.syncp);
+		u64_stats_init(&nvchan->rx_stats.syncp);
 	}
 
 	/* Enable NAPI handler before init callbacks */
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 85c00e1c52b6..d6308ffda53e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 	else
 		netif_napi_del(&nvchan->napi);
 
-	if (refcount_dec_and_test(&nvscdev->sc_offered))
-		complete(&nvscdev->channel_init_wait);
+	atomic_inc(&nvscdev->open_chn);
+	wake_up(&nvscdev->subchan_open);
 }
 
 int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
 	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
-	refcount_set(&net_device->sc_offered, 0);
-
 	net_device->extension = rndis_device;
 	rndis_device->ndev = net;
 
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
 		rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
 							net_device->num_chn);
 
+	atomic_set(&net_device->open_chn, 1);
 	num_rss_qs = net_device->num_chn - 1;
 	if (num_rss_qs == 0)
 		return 0;
 
-	refcount_set(&net_device->sc_offered, num_rss_qs);
 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
 	init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
 	if (ret)
 		goto out;
 
+	wait_for_completion(&net_device->channel_init_wait);
 	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
 		ret = -ENODEV;
 		goto out;
 	}
-	wait_for_completion(&net_device->channel_init_wait);
 
 	net_device->num_chn = 1 +
 		init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
+	/* wait for all sub channels to open */
+	wait_event(net_device->subchan_open,
+		   atomic_read(&net_device->open_chn) == net_device->num_chn);
+
 	/* ignore failues from setting rss parameters, still have channels */
 	rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
 				   net_device->num_chn);
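The rndis/netvsc hunks above swap a refcount-plus-completion for a counter and a waitqueue, so device add can block until every subchannel-open callback has fired, however many there turn out to be. The pattern in isolation, with a hypothetical structure around the real atomic and waitqueue APIs:

/* Sketch of the counter + waitqueue rendezvous, not the driver itself. */
#include <linux/atomic.h>
#include <linux/wait.h>

struct chan_state {
	atomic_t open_chn;		/* channels opened so far */
	wait_queue_head_t subchan_open;
	int num_chn;			/* target channel count */
};

/* runs in each subchannel-open callback */
static void subchan_opened(struct chan_state *cs)
{
	atomic_inc(&cs->open_chn);
	wake_up(&cs->subchan_open);
}

/* runs in device-add context: sleep until every channel is up */
static void wait_for_subchannels(struct chan_state *cs)
{
	wait_event(cs->subchan_open,
		   atomic_read(&cs->open_chn) == cs->num_chn);
}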
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f37e3c1fd4e7..8dab74a81303 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
 
 	netdev_lockdep_set_classes(dev);
 
-	ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
+	ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
 	if (!ipvlan->pcpu_stats)
 		return -ENOMEM;
 
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index bd4303944e44..a404552555d4 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
 	spin_unlock(&pch->downl);
 	/* see if there is anything from the attached unit to be sent */
 	if (skb_queue_empty(&pch->file.xq)) {
-		read_lock(&pch->upl);
 		ppp = pch->ppp;
 		if (ppp)
-			ppp_xmit_process(ppp);
-		read_unlock(&pch->upl);
+			__ppp_xmit_process(ppp);
 	}
 }
 
 static void ppp_channel_push(struct channel *pch)
 {
-	local_bh_disable();
-
-	__ppp_channel_push(pch);
-
-	local_bh_enable();
+	read_lock_bh(&pch->upl);
+	if (pch->ppp) {
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+		__ppp_channel_push(pch);
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+	} else {
+		__ppp_channel_push(pch);
+	}
+	read_unlock_bh(&pch->upl);
 }
 
 /*
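ppp_channel_push() now takes pch->upl itself and bumps the per-cpu xmit_recursion counter around the push, the same guard the transmit path checks to refuse re-entry. A condensed sketch of that per-cpu recursion guard (illustrative names; this only works because the caller holds a _bh lock, which pins the task to one CPU while the counter is nonzero):

/* Sketch of a per-cpu recursion guard, not the ppp driver itself. */
#include <linux/errno.h>
#include <linux/percpu.h>

/* 'recursion' would come from alloc_percpu(int) at setup time */
static int xmit_sketch(int __percpu *recursion)
{
	if (*this_cpu_ptr(recursion))	/* already transmitting on this CPU? */
		return -EBUSY;		/* bail out instead of recursing */

	(*this_cpu_ptr(recursion))++;
	/* ... actual transmit work; a nested call now sees the flag ... */
	(*this_cpu_ptr(recursion))--;
	return 0;
}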
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index d1092421aaa7..9a4171b90947 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 			   struct asix_rx_fixup_info *rx);
 int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
+void asix_rx_fixup_common_free(struct asix_common_private *dp);
 
 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 			      gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7847436c441e..522d2900cd1d 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
 			   value, index, data, size);
 }
 
+static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
+{
+	/* Reset the variables that have a lifetime outside of
+	 * asix_rx_fixup_internal() so that future processing starts from a
+	 * known set of initial conditions.
+	 */
+
+	if (rx->ax_skb) {
+		/* Discard any incomplete Ethernet frame in the netdev buffer */
+		kfree_skb(rx->ax_skb);
+		rx->ax_skb = NULL;
+	}
+
+	/* Assume the Data header 32-bit word is at the start of the current
+	 * or next URB socket buffer so reset all the state variables.
+	 */
+	rx->remaining = 0;
+	rx->split_head = false;
+	rx->header = 0;
+}
+
 int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 			   struct asix_rx_fixup_info *rx)
 {
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 		if (size != ((~rx->header >> 16) & 0x7ff)) {
 			netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
 				   rx->remaining);
-			if (rx->ax_skb) {
-				kfree_skb(rx->ax_skb);
-				rx->ax_skb = NULL;
-				/* Discard the incomplete netdev Ethernet frame
-				 * and assume the Data header is at the start of
-				 * the current URB socket buffer.
-				 */
-			}
-			rx->remaining = 0;
+			reset_asix_rx_fixup_info(rx);
 		}
 	}
 
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 		if (size != ((~rx->header >> 16) & 0x7ff)) {
 			netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
 				   rx->header, offset);
+			reset_asix_rx_fixup_info(rx);
 			return 0;
 		}
 		if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
 			netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
 				   size);
+			reset_asix_rx_fixup_info(rx);
 			return 0;
 		}
 
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 		if (rx->ax_skb) {
 			skb_put_data(rx->ax_skb, skb->data + offset,
 				     copy_length);
-			if (!rx->remaining)
+			if (!rx->remaining) {
 				usbnet_skb_return(dev, rx->ax_skb);
+				rx->ax_skb = NULL;
+			}
 		}
 
 		offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 	if (skb->len != offset) {
 		netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
 			   skb->len, offset);
+		reset_asix_rx_fixup_info(rx);
 		return 0;
 	}
 
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
 	return asix_rx_fixup_internal(dev, skb, rx);
 }
 
+void asix_rx_fixup_common_free(struct asix_common_private *dp)
+{
+	struct asix_rx_fixup_info *rx;
+
+	if (!dp)
+		return;
+
+	rx = &dp->rx_fixup_info;
+
+	if (rx->ax_skb) {
+		kfree_skb(rx->ax_skb);
+		rx->ax_skb = NULL;
+	}
+}
+
 struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 			      gfp_t flags)
 {
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index a3aa0a27dfe5..b2ff88e69a81 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
 
 static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
+	asix_rx_fixup_common_free(dev->driver_priv);
 	kfree(dev->driver_priv);
 }
 
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5833f7e2a127..b99a7fb09f8e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
 	/* Init LTM */
 	lan78xx_init_ltm(dev);
 
-	dev->net->hard_header_len += TX_OVERHEAD;
-	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-
 	if (dev->udev->speed == USB_SPEED_SUPER) {
 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
 		return ret;
 	}
 
+	dev->net->hard_header_len += TX_OVERHEAD;
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
 	/* Init all registers */
 	ret = lan78xx_reset(dev);
 
-	lan78xx_mdio_init(dev);
+	ret = lan78xx_mdio_init(dev);
 
 	dev->net->flags |= IFF_MULTICAST;
 
 	pdata->wol = WAKE_MAGIC;
 
-	return 0;
+	return ret;
 }
 
 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
 	udev = interface_to_usbdev(intf);
 	udev = usb_get_dev(udev);
 
-	ret = -ENOMEM;
 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
 	if (!netdev) {
 		dev_err(&intf->dev, "Error: OOM\n");
-		goto out1;
+		ret = -ENOMEM;
+		goto out1;
 	}
 
 	/* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
 	ret = register_netdev(netdev);
 	if (ret != 0) {
 		netif_err(dev, probe, netdev, "couldn't register the device\n");
-		goto out2;
+		goto out3;
 	}
 
 	usb_set_intfdata(intf, dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5894e3c9468f..8c3733608271 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},	/* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},	/* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 static void qmi_wwan_disconnect(struct usb_interface *intf)
 {
 	struct usbnet *dev = usb_get_intfdata(intf);
-	struct qmi_wwan_state *info = (void *)&dev->data;
+	struct qmi_wwan_state *info;
 	struct list_head *iter;
 	struct net_device *ldev;
 
+	/* called twice if separate control and data intf */
+	if (!dev)
+		return;
+	info = (void *)&dev->data;
 	if (info->flags & QMI_WWAN_FLAG_MUX) {
 		if (!rtnl_trylock()) {
 			restart_syscall();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 96aa7e6cf214..e17baac70f43 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
+	skb->remcsum_offload = 0;
 	NAPI_GRO_CB(skb)->flush |= flush;
 
 	return pp;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c49f1f8b2e57..37046ac2c441 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 
 	c.directive.opcode = nvme_admin_directive_recv;
 	c.directive.nsid = cpu_to_le32(nsid);
-	c.directive.numd = cpu_to_le32(sizeof(*s));
+	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
 	c.directive.dtype = NVME_DIR_STREAMS;
 
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 	blk_queue_write_cache(q, vwc, vwc);
 }
 
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 {
 	/*
 	 * APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 	 * then don't do anything.
 	 */
 	if (!ctrl->apsta)
-		return;
+		return 0;
 
 	if (ctrl->npss > 31) {
 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
-		return;
+		return 0;
 	}
 
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
-		return;
+		return 0;
 
 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
 		/* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
 
 	kfree(table);
+	return ret;
 }
 
 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
1634static void nvme_set_latency_tolerance(struct device *dev, s32 val) 1635static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
1835 * In fabrics we need to verify the cntlid matches the 1836 * In fabrics we need to verify the cntlid matches the
1836 * admin connect 1837 * admin connect
1837 */ 1838 */
1838 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) 1839 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
1839 ret = -EINVAL; 1840 ret = -EINVAL;
1841 goto out_free;
1842 }
1840 1843
1841 if (!ctrl->opts->discovery_nqn && !ctrl->kas) { 1844 if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
1842 dev_err(ctrl->device, 1845 dev_err(ctrl->device,
1843 "keep-alive support is mandatory for fabrics\n"); 1846 "keep-alive support is mandatory for fabrics\n");
1844 ret = -EINVAL; 1847 ret = -EINVAL;
1848 goto out_free;
1845 } 1849 }
1846 } else { 1850 } else {
1847 ctrl->cntlid = le16_to_cpu(id->cntlid); 1851 ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	else if (!ctrl->apst_enabled && prev_apst_enabled)
 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
 
-	nvme_configure_apst(ctrl);
-	nvme_configure_directives(ctrl);
+	ret = nvme_configure_apst(ctrl);
+	if (ret < 0)
+		return ret;
+
+	ret = nvme_configure_directives(ctrl);
+	if (ret < 0)
+		return ret;
 
 	ctrl->identified = true;
 
+	return 0;
+
+out_free:
+	kfree(id);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -2004,9 +2017,11 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
 		return sprintf(buf, "eui.%8phN\n", ns->eui);
 
-	while (ctrl->serial[serial_len - 1] == ' ')
+	while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+				  ctrl->serial[serial_len - 1] == '\0'))
 		serial_len--;
-	while (ctrl->model[model_len - 1] == ' ')
+	while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+				 ctrl->model[model_len - 1] == '\0'))
 		model_len--;
 
 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
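The wwid_show() fix above bounds both trim loops: a serial or model field that is entirely spaces (or NULs, which these fixed-width identify fields may legally contain) previously walked the index below zero. The corrected idiom as a standalone helper, for reference:

/* Sketch of a bounded right-trim for fixed-width, possibly
 * unterminated fields; not the driver's code. */
#include <stddef.h>

static size_t trim_fixed_field(const char *field, size_t len)
{
	/* the len > 0 guard is what prevents the underflow */
	while (len > 0 &&
	       (field[len - 1] == ' ' || field[len - 1] == '\0'))
		len--;
	return len;	/* count of significant bytes */
}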
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cd888a47d0fc..74a124a06264 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1558,11 +1558,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 	if (dev->cmb) {
 		iounmap(dev->cmb);
 		dev->cmb = NULL;
-		if (dev->cmbsz) {
-			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-						     &dev_attr_cmb.attr, NULL);
-			dev->cmbsz = 0;
-		}
+		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+					     &dev_attr_cmb.attr, NULL);
+		dev->cmbsz = 0;
 	}
 }
 
@@ -1953,16 +1951,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 	/*
 	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
-	 * populate sysfs if a CMB is implemented. Note that we add the
-	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
-	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
-	 * NULL as final argument to sysfs_add_file_to_group.
+	 * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+	 * has no name we can pass NULL as final argument to
+	 * sysfs_add_file_to_group.
 	 */
 
 	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
 		dev->cmb = nvme_map_cmb(dev);
-
-		if (dev->cmbsz) {
+		if (dev->cmb) {
 			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
 						    &dev_attr_cmb.attr, NULL))
 				dev_warn(dev->ctrl.device,
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 31ca55dfcb1d..1b7f2520a20d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
 	struct kref ref;
 };
 
+struct nvmet_fc_defer_fcp_req {
+	struct list_head req_list;
+	struct nvmefc_tgt_fcp_req *fcp_req;
+};
+
 struct nvmet_fc_tgt_queue {
 	bool ninetypercent;
 	u16 qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
 	struct nvmet_fc_tgt_assoc *assoc;
 	struct nvmet_fc_fcp_iod *fod;		/* array of fcp_iods */
 	struct list_head fod_list;
+	struct list_head pending_cmd_list;
+	struct list_head avail_defer_list;
 	struct workqueue_struct *work_q;
 	struct kref ref;
 } __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
223static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 230static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
224static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 231static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
225static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 232static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
233static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
234 struct nvmet_fc_fcp_iod *fod);
226 235
227 236
228/* *********************** FC-NVME DMA Handling **************************** */ 237/* *********************** FC-NVME DMA Handling **************************** */
@@ -463,9 +472,9 @@ static struct nvmet_fc_fcp_iod *
 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 {
 	static struct nvmet_fc_fcp_iod *fod;
-	unsigned long flags;
 
-	spin_lock_irqsave(&queue->qlock, flags);
+	lockdep_assert_held(&queue->qlock);
+
 	fod = list_first_entry_or_null(&queue->fod_list,
 					struct nvmet_fc_fcp_iod, fcp_list);
 	if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
 		 * will "inherit" that reference.
 		 */
 	}
-	spin_unlock_irqrestore(&queue->qlock, flags);
 	return fod;
 }
 
 
 static void
+nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
+		       struct nvmet_fc_tgt_queue *queue,
+		       struct nvmefc_tgt_fcp_req *fcpreq)
+{
+	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
+
+	/*
+	 * put all admin cmds on hw queue id 0. All io commands go to
+	 * the respective hw queue based on a modulo basis
+	 */
+	fcpreq->hwqid = queue->qid ?
+			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
+
+	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
+		queue_work_on(queue->cpu, queue->work_q, &fod->work);
+	else
+		nvmet_fc_handle_fcp_rqst(tgtport, fod);
+}
+
+static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 			struct nvmet_fc_fcp_iod *fod)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
 	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
 	unsigned long flags;
 
 	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 
 	fcpreq->nvmet_fc_private = NULL;
 
-	spin_lock_irqsave(&queue->qlock, flags);
-	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
 	fod->active = false;
 	fod->abort = false;
 	fod->aborted = false;
 	fod->writedataactive = false;
 	fod->fcpreq = NULL;
+
+	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+
+	spin_lock_irqsave(&queue->qlock, flags);
+	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+				struct nvmet_fc_defer_fcp_req, req_list);
+	if (!deferfcp) {
+		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		/* Release reference taken at queue lookup and fod allocation */
+		nvmet_fc_tgt_q_put(queue);
+		return;
+	}
+
+	/* Re-use the fod for the next pending cmd that was deferred */
+	list_del(&deferfcp->req_list);
+
+	fcpreq = deferfcp->fcp_req;
+
+	/* deferfcp can be reused for another IO at a later date */
+	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
+
 	spin_unlock_irqrestore(&queue->qlock, flags);
 
+	/* Save NVME CMD IO in fod */
+	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
+
+	/* Setup new fcpreq to be processed */
+	fcpreq->rspaddr = NULL;
+	fcpreq->rsplen = 0;
+	fcpreq->nvmet_fc_private = fod;
+	fod->fcpreq = fcpreq;
+	fod->active = true;
+
+	/* inform LLDD IO is now being processed */
+	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
+
+	/* Submit deferred IO for processing */
+	nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
 	/*
-	 * release the reference taken at queue lookup and fod allocation
+	 * Leave the queue lookup get reference taken when
+	 * fod was originally allocated.
 	 */
-	nvmet_fc_tgt_q_put(queue);
-
-	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 }
 
 static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
 	queue->port = assoc->tgtport->port;
 	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
 	INIT_LIST_HEAD(&queue->fod_list);
+	INIT_LIST_HEAD(&queue->avail_defer_list);
+	INIT_LIST_HEAD(&queue->pending_cmd_list);
 	atomic_set(&queue->connected, 0);
 	atomic_set(&queue->sqtail, 0);
 	atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 {
 	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
 	struct nvmet_fc_fcp_iod *fod = queue->fod;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
 	unsigned long flags;
 	int i, writedataactive;
 	bool disconnect;
@@ -666,6 +733,35 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 			}
 		}
 	}
+
+	/* Cleanup defer'ed IOs in queue */
+	list_for_each_entry(deferfcp, &queue->avail_defer_list, req_list) {
+		list_del(&deferfcp->req_list);
+		kfree(deferfcp);
+	}
+
+	for (;;) {
+		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
+				struct nvmet_fc_defer_fcp_req, req_list);
+		if (!deferfcp)
+			break;
+
+		list_del(&deferfcp->req_list);
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
+				deferfcp->fcp_req);
+
+		kfree(deferfcp);
+
+		spin_lock_irqsave(&queue->qlock, flags);
+	}
 	spin_unlock_irqrestore(&queue->qlock, flags);
 
 	flush_workqueue(queue->work_q);
@@ -2172,11 +2268,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
  * layer for processing.
  *
- * The nvmet-fc layer will copy cmd payload to an internal structure for
- * processing. As such, upon completion of the routine, the LLDD may
- * immediately free/reuse the CMD IU buffer passed in the call.
+ * The nvmet_fc layer allocates a local job structure (struct
+ * nvmet_fc_fcp_iod) from the queue for the io and copies the
+ * CMD IU buffer to the job structure. As such, on a successful
+ * completion (returns 0), the LLDD may immediately free/reuse
+ * the CMD IU buffer passed in the call.
+ *
+ * However, in some circumstances, due to the packetized nature of FC
+ * and the api of the FC LLDD which may issue a hw command to send the
+ * response, but the LLDD may not get the hw completion for that command
+ * and upcall the nvmet_fc layer before a new command may be
+ * asynchronously received - its possible for a command to be received
+ * before the LLDD and nvmet_fc have recycled the job structure. It gives
+ * the appearance of more commands received than fits in the sq.
+ * To alleviate this scenario, a temporary queue is maintained in the
+ * transport for pending LLDD requests waiting for a queue job structure.
+ * In these "overrun" cases, a temporary queue element is allocated
+ * the LLDD request and CMD iu buffer information remembered, and the
+ * routine returns a -EOVERFLOW status. Subsequently, when a queue job
+ * structure is freed, it is immediately reallocated for anything on the
+ * pending request list. The LLDDs defer_rcv() callback is called,
+ * informing the LLDD that it may reuse the CMD IU buffer, and the io
+ * is then started normally with the transport.
  *
- * If this routine returns error, the lldd should abort the exchange.
+ * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
+ * the completion as successful but must not reuse the CMD IU buffer
+ * until the LLDD's defer_rcv() callback has been called for the
+ * corresponding struct nvmefc_tgt_fcp_req pointer.
+ *
+ * If there is any other condition in which an error occurs, the
+ * transport will return a non-zero status indicating the error.
+ * In all cases other than -EOVERFLOW, the transport has not accepted the
+ * request and the LLDD should abort the exchange.
  *
  * @target_port: pointer to the (registered) target port the FCP CMD IU
  *		was received on.
@@ -2194,6 +2317,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
 	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
 	struct nvmet_fc_tgt_queue *queue;
 	struct nvmet_fc_fcp_iod *fod;
+	struct nvmet_fc_defer_fcp_req *deferfcp;
+	unsigned long flags;
 
 	/* validate iu, so the connection id can be used to find the queue */
 	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2339,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
 	 * when the fod is freed.
 	 */
 
+	spin_lock_irqsave(&queue->qlock, flags);
+
 	fod = nvmet_fc_alloc_fcp_iod(queue);
-	if (!fod) {
+	if (fod) {
+		spin_unlock_irqrestore(&queue->qlock, flags);
+
+		fcpreq->nvmet_fc_private = fod;
+		fod->fcpreq = fcpreq;
+
+		memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+
+		nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
+
+		return 0;
+	}
+
+	if (!tgtport->ops->defer_rcv) {
+		spin_unlock_irqrestore(&queue->qlock, flags);
 		/* release the queue lookup reference */
 		nvmet_fc_tgt_q_put(queue);
 		return -ENOENT;
 	}
 
-	fcpreq->nvmet_fc_private = fod;
-	fod->fcpreq = fcpreq;
-	/*
-	 * put all admin cmds on hw queue id 0. All io commands go to
-	 * the respective hw queue based on a modulo basis
-	 */
-	fcpreq->hwqid = queue->qid ?
-		((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
-	memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
+	deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
+			struct nvmet_fc_defer_fcp_req, req_list);
+	if (deferfcp) {
+		/* Just re-use one that was previously allocated */
+		list_del(&deferfcp->req_list);
+	} else {
+		spin_unlock_irqrestore(&queue->qlock, flags);
 
-	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
-		queue_work_on(queue->cpu, queue->work_q, &fod->work);
-	else
-		nvmet_fc_handle_fcp_rqst(tgtport, fod);
+		/* Now we need to dynamically allocate one */
+		deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
+		if (!deferfcp) {
+			/* release the queue lookup reference */
+			nvmet_fc_tgt_q_put(queue);
+			return -ENOMEM;
+		}
+		spin_lock_irqsave(&queue->qlock, flags);
+	}
 
-	return 0;
+	/* For now, use rspaddr / rsplen to save payload information */
+	fcpreq->rspaddr = cmdiubuf;
+	fcpreq->rsplen  = cmdiubuf_len;
+	deferfcp->fcp_req = fcpreq;
+
+	/* defer processing till a fod becomes available */
+	list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
+
+	/* NOTE: the queue lookup reference is still valid */
+
+	spin_unlock_irqrestore(&queue->qlock, flags);
+
+	return -EOVERFLOW;
 }
 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
 
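Seen from the LLDD, the contract described in the comment block above is: treat -EOVERFLOW as acceptance, park the CMD IU buffer, and only recycle it once defer_rcv() fires. The sketch below illustrates that contract under stated assumptions: lldd_abort_exchange() and lldd_repost_cmd_buffer() are hypothetical driver helpers, while nvmet_fc_rcv_fcp_req() and the defer_rcv() template entry are the transport interfaces from this patch.

#include <linux/nvme-fc-driver.h>

static void lldd_handle_new_fcp_cmd(struct nvmet_fc_target_port *tgtport,
				    struct nvmefc_tgt_fcp_req *fcpreq,
				    void *cmdiu, u32 cmdiu_len)
{
	int rc = nvmet_fc_rcv_fcp_req(tgtport, fcpreq, cmdiu, cmdiu_len);

	if (rc == 0)
		return;			/* accepted; transport copied the CMD IU */
	if (rc == -EOVERFLOW)
		return;			/* accepted but deferred: do NOT touch the
					 * CMD IU buffer until defer_rcv() runs */

	/* any other status: the exchange was not accepted - abort it */
	lldd_abort_exchange(fcpreq);	/* hypothetical helper */
}

/* wired into struct nvmet_fc_target_template as .defer_rcv */
static void lldd_defer_rcv(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* transport is done with the saved CMD IU; buffer may be reused */
	lldd_repost_cmd_buffer(fcpreq);	/* hypothetical helper */
}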
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index af0cc3456dc1..b4b7eab29400 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4260,6 +4260,41 @@ int pci_reset_function(struct pci_dev *dev)
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
 /**
+ * pci_reset_function_locked - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device.  The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device.  This function differs
+ * from __pci_reset_function() in that it saves and restores device state
+ * over the reset.  It also differs from pci_reset_function() in that it
+ * requires the PCI device lock to be held.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function_locked(struct pci_dev *dev)
+{
+	int rc;
+
+	rc = pci_probe_reset_function(dev);
+	if (rc)
+		return rc;
+
+	pci_dev_save_and_disable(dev);
+
+	rc = __pci_reset_function_locked(dev);
+
+	pci_dev_restore(dev);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function_locked);
+
+/**
  * pci_try_reset_function - quiesce and reset a PCI device function
  * @dev: PCI device to reset
  *
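For callers, the difference from pci_reset_function() is purely the locking contract: the device lock must already be held. A minimal usage sketch follows; my_reset_when_locked() is hypothetical, and treating device_lock() as the relevant piece of "the PCI device lock" is an assumption that depends on the caller's context (in probe/remove paths the driver core typically holds it already).

#include <linux/device.h>
#include <linux/pci.h>

static int my_reset_when_locked(struct pci_dev *pdev)	/* hypothetical */
{
	int rc;

	device_lock(&pdev->dev);	/* assumption: satisfies the contract here */
	rc = pci_reset_function_locked(pdev);
	device_unlock(&pdev->dev);

	return rc;
}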
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 20f1b4493994..04e929fd0ffe 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1548,6 +1548,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		},
 	},
 	{
+		.ident = "HP Chromebook 11 G5 (Setzer)",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
+		},
+	},
+	{
 		.ident = "Acer Chromebook R11 (Cyan)",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 4d4ef42a39b5..86c4b3fab7b0 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
 
 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
 static const unsigned int mrfld_pwm0_pins[] = { 144 };
 static const unsigned int mrfld_pwm1_pins[] = { 145 };
 static const unsigned int mrfld_pwm2_pins[] = { 132 };
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f024e25787fc..0c6d7812d6fd 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -37,7 +37,7 @@
 #define IRQ_STATUS	0x10
 #define IRQ_WKUP	0x18
 
-#define NB_FUNCS 2
+#define NB_FUNCS 3
 #define GPIO_PER_REG	32
 
 /**
@@ -126,6 +126,16 @@ struct armada_37xx_pinctrl {
 		.funcs = {_func1, "gpio"}	\
 	}
 
+#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \
+	{					\
+		.name = _name,			\
+		.start_pin = _start,		\
+		.npins = _nr,			\
+		.reg_mask = _mask,		\
+		.val = {_v1, _v2, _v3},		\
+		.funcs = {_f1, _f2, "gpio"}	\
+	}
+
 #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \
 		      _f1, _f2)			\
 	{					\
@@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
 	PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
 	PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
 	PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
-	PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
+	PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
 	PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
 	PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
 	PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
 	PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
-	PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"),
+	PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
+		       "mii", "mii_err"),
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_nb = {
@@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
 };
 
 const struct armada_37xx_pin_data armada_37xx_pin_sb = {
-	.nr_pins = 29,
+	.nr_pins = 30,
 	.name = "GPIO2",
 	.groups = armada_37xx_sb_groups,
 	.ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
@@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
 {
 	int f;
 
-	for (f = 0; f < NB_FUNCS; f++)
+	for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
 		if (!strcmp(grp->funcs[f], func))
 			return f;
 
@@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
 	for (j = 0; j < grp->extra_npins; j++)
 		grp->pins[i+j] = grp->extra_pin + j;
 
-	for (f = 0; f < NB_FUNCS; f++) {
+	for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) {
 		int ret;
 		/* check for unique functions and count groups */
 		ret = armada_37xx_add_function(info->funcs, &funcsize,
@@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
 		struct armada_37xx_pin_group *gp = &info->groups[g];
 		int f;
 
-		for (f = 0; f < NB_FUNCS; f++) {
+		for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
 			if (strcmp(gp->funcs[f], name) == 0) {
 				*groups = gp->name;
 				groups++;
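The reason every function loop above gains the extra grp->funcs[f] test: with NB_FUNCS raised to 3, groups that define only two functions leave their last funcs[] slot NULL, and strcmp() on that slot would oops. The guard in miniature, with find_func() as an illustrative stand-in for armada_37xx_get_func_reg():

#include <linux/errno.h>
#include <linux/string.h>

#define NB_FUNCS 3

static int find_func(const char * const funcs[NB_FUNCS], const char *name)
{
	int f;

	/* stop at the first NULL slot so strcmp() never dereferences it */
	for (f = 0; (f < NB_FUNCS) && funcs[f]; f++)
		if (!strcmp(funcs[f], name))
			return f;

	return -ENOTSUPP;
}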
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 159580c04b14..47a392bc73c8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
 		  SUNXI_FUNCTION_VARIANT(0x3, "emac",	/* ETXD1 */
 					 PINCTRL_SUN7I_A20),
 		  SUNXI_FUNCTION(0x4, "keypad"),	/* IN6 */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* DET */
 		  SUNXI_FUNCTION_IRQ(0x6, 16),		/* EINT16 */
 		  SUNXI_FUNCTION(0x7, "csi1")),		/* D16 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index a433a306a2d0..c75e094b2d90 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183};
 static const int usb1_muxvals[] = {0, 0};
 static const unsigned usb2_pins[] = {184, 185};
 static const int usb2_muxvals[] = {0, 0};
-static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_pins[] = {187, 188};
 static const int usb3_muxvals[] = {0, 0};
 static const unsigned port_range0_pins[] = {
 	300, 301, 302, 303, 304, 305, 306, 307,		/* PORT0x */
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 787e3967bd5c..f828ee340a98 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
 	struct zx_pinctrl_soc_info *info = zpctl->info;
 	const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
 	struct zx_pin_data *data = pindesc->drv_data;
-	struct zx_mux_desc *mux = data->muxes;
-	u32 mask = (1 << data->width) - 1;
-	u32 offset = data->offset;
-	u32 bitpos = data->bitpos;
+	struct zx_mux_desc *mux;
+	u32 mask, offset, bitpos;
 	struct function_desc *func;
 	unsigned long flags;
 	u32 val, mval;
@@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
 	if (!data)
 		return -EINVAL;
 
+	mux = data->muxes;
+	mask = (1 << data->width) - 1;
+	offset = data->offset;
+	bitpos = data->bitpos;
+
 	func = pinmux_generic_get_function(pctldev, func_selector);
 	if (!func)
 		return -EINVAL;
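The zx_set_mux() change is the classic "initializer dereferences before the NULL check" fix: declaration initializers run immediately, so the later if (!data) test guarded nothing. In miniature (struct desc and both helpers are illustrative only):

#include <linux/errno.h>
#include <linux/types.h>

struct desc { u32 width; };

/* BROKEN: d is dereferenced in the initializer, before the check */
static int mask_of_broken(const struct desc *d)
{
	u32 mask = (1 << d->width) - 1;	/* potential NULL deref here */

	if (!d)
		return -EINVAL;		/* dead code by now */
	return mask;
}

/* FIXED: validate first, compute after */
static int mask_of_fixed(const struct desc *d)
{
	u32 mask;

	if (!d)
		return -EINVAL;
	mask = (1 << d->width) - 1;	/* safe: d known non-NULL */
	return mask;
}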
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index b77435783ef3..7eacc1c4b3b1 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <uapi/linux/sched/types.h>
 
 #include "ptp_private.h"
 
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
 	kfree(ptp);
 }
 
+static void ptp_aux_kworker(struct kthread_work *work)
+{
+	struct ptp_clock *ptp = container_of(work, struct ptp_clock,
+					     aux_work.work);
+	struct ptp_clock_info *info = ptp->info;
+	long delay;
+
+	delay = info->do_aux_work(info);
+
+	if (delay >= 0)
+		kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+
 /* public interface */
 
 struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
 	mutex_init(&ptp->pincfg_mux);
 	init_waitqueue_head(&ptp->tsev_wq);
 
+	if (ptp->info->do_aux_work) {
+		char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
+
+		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
+		ptp->kworker = kthread_create_worker(0, worker_name ?
+						     worker_name : info->name);
+		kfree(worker_name);
+		if (IS_ERR(ptp->kworker)) {
+			err = PTR_ERR(ptp->kworker);
+			pr_err("failed to create ptp aux_worker %d\n", err);
+			goto kworker_err;
+		}
+	}
+
 	err = ptp_populate_pin_groups(ptp);
 	if (err)
 		goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
 no_device:
 	ptp_cleanup_pin_groups(ptp);
 no_pin_groups:
+	if (ptp->kworker)
+		kthread_destroy_worker(ptp->kworker);
+kworker_err:
 	mutex_destroy(&ptp->tsevq_mux);
 	mutex_destroy(&ptp->pincfg_mux);
 	ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
 	ptp->defunct = 1;
 	wake_up_interruptible(&ptp->tsev_wq);
 
+	if (ptp->kworker) {
+		kthread_cancel_delayed_work_sync(&ptp->aux_work);
+		kthread_destroy_worker(ptp->kworker);
+	}
+
 	/* Release the clock's resources. */
 	if (ptp->pps_source)
 		pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
 }
 EXPORT_SYMBOL(ptp_find_pin);
 
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
+{
+	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
+}
+EXPORT_SYMBOL(ptp_schedule_worker);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index d95888974d0c..b86f1bfecd6f 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
 
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/posix-clock.h>
 #include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
 	struct attribute_group pin_attr_group;
 	/* 1st entry is a pointer to the real group, 2nd is NULL terminator */
 	const struct attribute_group *pin_attr_groups[2];
+	struct kthread_worker *kworker;
+	struct kthread_delayed_work aux_work;
 };
 
 /*
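The consumer side of this new mechanism is a PHC driver that supplies do_aux_work in its ptp_clock_info and then pokes the worker with ptp_schedule_worker(). A minimal sketch, where my_phc and my_poll_fifo() are hypothetical driver pieces and only do_aux_work/ptp_schedule_worker() come from the patch above:

#include <linux/ptp_clock_kernel.h>

struct my_phc {
	struct ptp_clock_info caps;
	struct ptp_clock *clock;
};

static long my_phc_do_aux_work(struct ptp_clock_info *info)
{
	struct my_phc *phc = container_of(info, struct my_phc, caps);

	my_poll_fifo(phc);		/* hypothetical periodic chore */

	return msecs_to_jiffies(100);	/* re-run in ~100 ms; negative stops */
}

/*
 * At probe time the driver would set phc->caps.do_aux_work =
 * my_phc_do_aux_work before ptp_clock_register(); later, e.g. from IRQ
 * context, it can (re)arm the worker to run immediately:
 *
 *	ptp_schedule_worker(phc->clock, 0);
 */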
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8975cd321390..d42e758518ed 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		struct rtable *rt = (struct rtable *) dst;
 		__be32 *pkey = &ip_hdr(skb)->daddr;
 
-		if (rt->rt_gateway)
+		if (rt && rt->rt_gateway)
 			pkey = &rt->rt_gateway;
 
 		/* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		struct rt6_info *rt = (struct rt6_info *) dst;
 		struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
 
-		if (!ipv6_addr_any(&rt->rt6i_gateway))
+		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
 			pkey = &rt->rt6i_gateway;
 
 		/* IPv6 */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 707ee2f5954d..4591113c49de 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3198,10 +3198,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
 		return -EBUSY;
 	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
 		return -EFAULT;
-	if (qd.cnum == -1)
+	if (qd.cnum == -1) {
+		if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+			return -EINVAL;
 		qd.cnum = qd.id;
-	else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
-	{
+	} else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
 		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
 			return -EINVAL;
 		qd.instance = dev->scsi_host_ptr->host_no;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7dfe709a7138..6844ba361616 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = {
 };
 
 /**
- * bnx2fc_percpu_thread_create - Create a receive thread for an
- * online CPU
+ * bnx2fc_cpu_online - Create a receive thread for an online CPU
  *
  * @cpu: cpu index for the online cpu
  */
-static void bnx2fc_percpu_thread_create(unsigned int cpu)
+static int bnx2fc_cpu_online(unsigned int cpu)
 {
 	struct bnx2fc_percpu_s *p;
 	struct task_struct *thread;
@@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
 	thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
 					(void *)p, cpu_to_node(cpu),
 					"bnx2fc_thread/%d", cpu);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
 	/* bind thread to the cpu */
-	if (likely(!IS_ERR(thread))) {
-		kthread_bind(thread, cpu);
-		p->iothread = thread;
-		wake_up_process(thread);
-	}
+	kthread_bind(thread, cpu);
+	p->iothread = thread;
+	wake_up_process(thread);
+	return 0;
 }
 
-static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+static int bnx2fc_cpu_offline(unsigned int cpu)
 {
 	struct bnx2fc_percpu_s *p;
 	struct task_struct *thread;
@@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
 	thread = p->iothread;
 	p->iothread = NULL;
 
-
 	/* Free all work in the list */
 	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
 		list_del_init(&work->list);
@@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
 
 	if (thread)
 		kthread_stop(thread);
-}
-
-
-static int bnx2fc_cpu_online(unsigned int cpu)
-{
-	printk(PFX "CPU %x online: Create Rx thread\n", cpu);
-	bnx2fc_percpu_thread_create(cpu);
-	return 0;
-}
-
-static int bnx2fc_cpu_dead(unsigned int cpu)
-{
-	printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
-	bnx2fc_percpu_thread_destroy(cpu);
 	return 0;
 }
 
@@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void)
 		spin_lock_init(&p->fp_work_lock);
 	}
 
-	get_online_cpus();
-
-	for_each_online_cpu(cpu)
-		bnx2fc_percpu_thread_create(cpu);
-
-	rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-				       "scsi/bnx2fc:online",
-				       bnx2fc_cpu_online, NULL);
+	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
+			       bnx2fc_cpu_online, bnx2fc_cpu_offline);
 	if (rc < 0)
-		goto stop_threads;
+		goto stop_thread;
 	bnx2fc_online_state = rc;
 
-	cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
-				  NULL, bnx2fc_cpu_dead);
-	put_online_cpus();
-
 	cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
-
 	return 0;
 
-stop_threads:
-	for_each_online_cpu(cpu)
-		bnx2fc_percpu_thread_destroy(cpu);
-	put_online_cpus();
+stop_thread:
 	kthread_stop(l2_thread);
 free_wq:
 	destroy_workqueue(bnx2fc_wq);
@@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void)
 	struct fcoe_percpu_s *bg;
 	struct task_struct *l2_thread;
 	struct sk_buff *skb;
-	unsigned int cpu = 0;
 
 	/*
 	 * NOTE: Since cnic calls register_driver routine rtnl_lock,
@@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void)
 	if (l2_thread)
 		kthread_stop(l2_thread);
 
-	get_online_cpus();
-	/* Destroy per cpu threads */
-	for_each_online_cpu(cpu) {
-		bnx2fc_percpu_thread_destroy(cpu);
-	}
-
-	cpuhp_remove_state_nocalls(bnx2fc_online_state);
-	cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
-
-	put_online_cpus();
+	cpuhp_remove_state(bnx2fc_online_state);
 
 	destroy_workqueue(bnx2fc_wq);
 	/*
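Both this conversion and the matching bnx2i one below collapse the manual per-CPU thread loops and the separate _DEAD state into a single dynamic hotplug state whose callbacks the core invokes for CPUs that are already online too. The pattern in isolation, with all "mydrv"/"my_" names hypothetical:

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int my_cpu_online(unsigned int cpu)	/* hypothetical callback */
{
	return 0;	/* e.g. spawn and bind a per-CPU kthread */
}

static int my_cpu_offline(unsigned int cpu)	/* hypothetical callback */
{
	return 0;	/* e.g. stop the thread, drain its work list */
}

static enum cpuhp_state my_online_state;

static int __init my_init(void)
{
	int rc;

	/* also runs my_cpu_online() on every CPU already online */
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
			       my_cpu_online, my_cpu_offline);
	if (rc < 0)
		return rc;
	my_online_state = rc;	/* dynamically allocated state id */
	return 0;
}

static void __exit my_exit(void)
{
	/* runs my_cpu_offline() on every online CPU, then frees the state */
	cpuhp_remove_state(my_online_state);
}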
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 913c750205ce..26de61d65a4d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 	return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+	unsigned int cpu = wqe % num_possible_cpus();
+	struct bnx2fc_percpu_s *fps;
+	struct bnx2fc_work *work;
+
+	fps = &per_cpu(bnx2fc_percpu, cpu);
+	spin_lock_bh(&fps->fp_work_lock);
+	if (fps->iothread) {
+		work = bnx2fc_alloc_work(tgt, wqe);
+		if (work) {
+			list_add_tail(&work->list, &fps->work_list);
+			wake_up_process(fps->iothread);
+			spin_unlock_bh(&fps->fp_work_lock);
+			return;
+		}
+	}
+	spin_unlock_bh(&fps->fp_work_lock);
+	bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
 	struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 			/* Unsolicited event notification */
 			bnx2fc_process_unsol_compl(tgt, wqe);
 		} else {
-			/* Pending work request completion */
-			struct bnx2fc_work *work = NULL;
-			struct bnx2fc_percpu_s *fps = NULL;
-			unsigned int cpu = wqe % num_possible_cpus();
-
-			fps = &per_cpu(bnx2fc_percpu, cpu);
-			spin_lock_bh(&fps->fp_work_lock);
-			if (unlikely(!fps->iothread))
-				goto unlock;
-
-			work = bnx2fc_alloc_work(tgt, wqe);
-			if (work)
-				list_add_tail(&work->list,
-					      &fps->work_list);
-unlock:
-			spin_unlock_bh(&fps->fp_work_lock);
-
-			/* Pending work request completion */
-			if (fps->iothread && work)
-				wake_up_process(fps->iothread);
-			else
-				bnx2fc_process_cq_compl(tgt, wqe);
+			bnx2fc_pending_work(tgt, wqe);
 			num_free_sqes++;
 		}
 		cqe++;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 86afc002814c..4ebcda8d9500 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle)
 
 
 /**
- * bnx2i_percpu_thread_create - Create a receive thread for an
- * online CPU
+ * bnx2i_cpu_online - Create a receive thread for an online CPU
  *
  * @cpu: cpu index for the online cpu
  */
-static void bnx2i_percpu_thread_create(unsigned int cpu)
+static int bnx2i_cpu_online(unsigned int cpu)
 {
 	struct bnx2i_percpu_s *p;
 	struct task_struct *thread;
@@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
 	thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
 					cpu_to_node(cpu),
 					"bnx2i_thread/%d", cpu);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
 	/* bind thread to the cpu */
-	if (likely(!IS_ERR(thread))) {
-		kthread_bind(thread, cpu);
-		p->iothread = thread;
-		wake_up_process(thread);
-	}
+	kthread_bind(thread, cpu);
+	p->iothread = thread;
+	wake_up_process(thread);
+	return 0;
 }
 
-
-static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+static int bnx2i_cpu_offline(unsigned int cpu)
 {
 	struct bnx2i_percpu_s *p;
 	struct task_struct *thread;
@@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
 	spin_unlock_bh(&p->p_work_lock);
 	if (thread)
 		kthread_stop(thread);
-}
-
-static int bnx2i_cpu_online(unsigned int cpu)
-{
-	pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
-	bnx2i_percpu_thread_create(cpu);
-	return 0;
-}
-
-static int bnx2i_cpu_dead(unsigned int cpu)
-{
-	pr_info("CPU %x offline: Remove Rx thread\n", cpu);
-	bnx2i_percpu_thread_destroy(cpu);
 	return 0;
 }
 
@@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void)
 		p->iothread = NULL;
 	}
 
-	get_online_cpus();
-
-	for_each_online_cpu(cpu)
-		bnx2i_percpu_thread_create(cpu);
-
-	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					"scsi/bnx2i:online",
-					bnx2i_cpu_online, NULL);
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
+				bnx2i_cpu_online, bnx2i_cpu_offline);
 	if (err < 0)
-		goto remove_threads;
+		goto unreg_driver;
 	bnx2i_online_state = err;
-
-	cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
-				  NULL, bnx2i_cpu_dead);
-	put_online_cpus();
 	return 0;
 
-remove_threads:
-	for_each_online_cpu(cpu)
-		bnx2i_percpu_thread_destroy(cpu);
-	put_online_cpus();
+unreg_driver:
 	cnic_unregister_driver(CNIC_ULP_ISCSI);
 unreg_xport:
 	iscsi_unregister_transport(&bnx2i_iscsi_transport);
@@ -551,7 +525,6 @@ out:
 static void __exit bnx2i_mod_exit(void)
 {
 	struct bnx2i_hba *hba;
-	unsigned cpu = 0;
 
 	mutex_lock(&bnx2i_dev_lock);
 	while (!list_empty(&adapter_list)) {
@@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void)
 	}
 	mutex_unlock(&bnx2i_dev_lock);
 
-	get_online_cpus();
-
-	for_each_online_cpu(cpu)
-		bnx2i_percpu_thread_destroy(cpu);
-
-	cpuhp_remove_state_nocalls(bnx2i_online_state);
-	cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
-	put_online_cpus();
+	cpuhp_remove_state(bnx2i_online_state);
 
 	iscsi_unregister_transport(&bnx2i_iscsi_transport);
 	cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ed48ed38e79..7ee1a94c0b33 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			atomic_read(&tgtp->xmt_ls_rsp_error));
 
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			"FCP: Rcv %08x Release %08x Drop %08x\n",
+			"FCP: Rcv %08x Defer %08x Release %08x "
+			"Drop %08x\n",
 			atomic_read(&tgtp->rcv_fcp_cmd_in),
+			atomic_read(&tgtp->rcv_fcp_cmd_defer),
 			atomic_read(&tgtp->xmt_fcp_release),
 			atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5cc8b0f7d885..744f3f395b64 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			atomic_read(&tgtp->xmt_ls_rsp_error));
 
 	len += snprintf(buf + len, size - len,
-			"FCP: Rcv %08x Drop %08x\n",
+			"FCP: Rcv %08x Defer %08x Release %08x "
+			"Drop %08x\n",
 			atomic_read(&tgtp->rcv_fcp_cmd_in),
+			atomic_read(&tgtp->rcv_fcp_cmd_defer),
+			atomic_read(&tgtp->xmt_fcp_release),
 			atomic_read(&tgtp->rcv_fcp_cmd_drop));
 
 	if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fbeec344c6cc..bbbd0f84160d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 }
 
+static void
+lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
+		     struct nvmefc_tgt_fcp_req *rsp)
+{
+	struct lpfc_nvmet_tgtport *tgtp;
+	struct lpfc_nvmet_rcv_ctx *ctxp =
+		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct lpfc_hba *phba = ctxp->phba;
+
+	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
+			 ctxp->oxid, ctxp->size, smp_processor_id());
+
+	tgtp = phba->targetport->private;
+	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
+	lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+}
+
 static struct nvmet_fc_target_template lpfc_tgttemplate = {
 	.targetport_delete = lpfc_nvmet_targetport_delete,
 	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
 	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
+	.defer_rcv	= lpfc_nvmet_defer_rcv,
 
 	.max_hw_queues  = 1,
 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 		return;
 	}
 
+	/* Processing of FCP command is deferred */
+	if (rc == -EOVERFLOW) {
+		lpfc_nvmeio_data(phba,
+				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
+				 oxid, size, sid);
+		/* defer reposting rcv buffer till .defer_rcv callback */
+		ctxp->rqb_buffer = nvmebuf;
+		atomic_inc(&tgtp->rcv_fcp_cmd_out);
+		return;
+	}
+
 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index e675ef17be08..48a76788b003 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
 	atomic_t rcv_fcp_cmd_in;
 	atomic_t rcv_fcp_cmd_out;
 	atomic_t rcv_fcp_cmd_drop;
+	atomic_t rcv_fcp_cmd_defer;
 	atomic_t xmt_fcp_release;
 
 	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 4d038926a455..351f06dfc5a0 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -528,7 +528,8 @@ struct fip_vlan {
 #define QEDF_WRITE                    (1 << 0)
 #define MAX_FIBRE_LUNS			0xffffffff
 
-#define QEDF_MAX_NUM_CQS		8
+#define MIN_NUM_CPUS_MSIX(x)	min_t(u32, x->dev_info.num_cqs, \
+					num_online_cpus())
 
 /*
  * PCI function probe defines
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7786c97e033f..1d13c9ca517d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
 	 * we allocation is the minimum off:
 	 *
 	 * Number of CPUs
-	 * Number of MSI-X vectors
-	 * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+	 * Number allocated by qed for our PCI function
 	 */
-	qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
-	    num_online_cpus());
+	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
 		   qedf->num_queues);
@@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 		goto err1;
 	}
 
+	/* Learn information crucial for qedf to progress */
+	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+	if (rc) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
+		goto err1;
+	}
+
 	/* queue allocation code should come here
 	 * order should be
 	 * 	slowpath_start
@@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 	}
 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
 
-	/* Learn information crucial for qedf to progress */
-	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
-	if (rc) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
-		goto err1;
-	}
-
 	/* Record BDQ producer doorbell addresses */
 	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
 	qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index b20da0d27ad7..3f82ea1b72dc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-	unsigned long flags;
 
 	/*
 	 * Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 	 */
 	cmd->cmd_in_wq = 0;
 
-	spin_lock_irqsave(&cmd->cmd_lock, flags);
-	cmd->data_work = 1;
-	if (cmd->aborted) {
-		cmd->data_work_free = 1;
-		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
-		tcm_qla2xxx_free_cmd(cmd);
-		return;
-	}
-	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
 	cmd->qpair->tgt_counters.qla_core_ret_ctio++;
 	if (!cmd->write_data_transferred) {
 		/*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
 	qlt_xmit_tm_rsp(mcmd);
 }
 
-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
-	unsigned long flags;
 
 	if (qlt_abort_cmd(cmd))
 		return;
-
-	spin_lock_irqsave(&cmd->cmd_lock, flags);
-	if ((cmd->state == QLA_TGT_STATE_NEW)||
-	    ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
-	     DATA_WORK_NOT_FREE(cmd))) {
-		cmd->data_work_free = 1;
-		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-		/*
-		 * cmd has not reached fw, Use this trigger to free it.
-		 */
-		tcm_qla2xxx_free_cmd(cmd);
-		return;
-	}
-	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-	return;
-
 }
 
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 4fe606b000b4..d7ff71e0c85c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -751,35 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
 	return count;
 }
 
-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
-{
-	switch (hp->dxfer_direction) {
-	case SG_DXFER_NONE:
-		if (hp->dxferp || hp->dxfer_len > 0)
-			return false;
-		return true;
-	case SG_DXFER_FROM_DEV:
-		/*
-		 * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
-		 * can either be NULL or != NULL so there's no point in checking
-		 * it either. So just return true.
-		 */
-		return true;
-	case SG_DXFER_TO_DEV:
-	case SG_DXFER_TO_FROM_DEV:
-		if (!hp->dxferp || hp->dxfer_len == 0)
-			return false;
-		return true;
-	case SG_DXFER_UNKNOWN:
-		if ((!hp->dxferp && hp->dxfer_len) ||
-		    (hp->dxferp && hp->dxfer_len == 0))
-			return false;
-		return true;
-	default:
-		return false;
-	}
-}
-
 static int
 sg_common_write(Sg_fd * sfp, Sg_request * srp,
 		unsigned char *cmnd, int timeout, int blocking)
@@ -800,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
 	       "sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
 	       (int) cmnd[0], (int) hp->cmd_len));
 
-	if (!sg_is_valid_dxfer(hp))
+	if (hp->dxfer_len >= SZ_256M)
 		return -EINVAL;
 
 	k = sg_start_req(srp, cmnd);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index ca11be21f64b..34ca7823255d 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
 			continue;
 		}
 
+		set_current_state(TASK_RUNNING);
 		wp = async->buf_write_ptr;
 		n1 = min(n, async->prealloc_bufsz - wp);
 		n2 = n - n1;
@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
 			}
 			continue;
 		}
+
+		set_current_state(TASK_RUNNING);
 		rp = async->buf_read_ptr;
 		n1 = min(n, async->prealloc_bufsz - rp);
 		n2 = n - n1;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index a6a8393d6664..3e00df74b18c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
 			       long m)
 {
 	struct ad2s1210_state *st = iio_priv(indio_dev);
-	bool negative;
+	u16 negative;
 	int ret = 0;
 	u16 pos;
 	s16 vel;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index e583dd8a418b..d4fa41be80f9 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
 	if (!cnp) {
 		pr_info("%s stid %d lookup failure\n", __func__, stid);
-		return;
+		goto rel_skb;
 	}
 
 	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
 	cxgbit_put_cnp(cnp);
+rel_skb:
+	__kfree_skb(skb);
 }
 
 static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 
 	if (!cnp) {
 		pr_info("%s stid %d lookup failure\n", __func__, stid);
-		return;
+		goto rel_skb;
 	}
 
 	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
 	cxgbit_put_cnp(cnp);
+rel_skb:
+	__kfree_skb(skb);
 }
 
 static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 	struct tid_info *t = lldi->tids;
 
 	csk = lookup_tid(t, tid);
-	if (unlikely(!csk))
+	if (unlikely(!csk)) {
 		pr_err("can't find connection for tid %u.\n", tid);
-	else
+		goto rel_skb;
+	} else {
 		cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+	}
 
 	cxgbit_put_csk(csk);
+rel_skb:
+	__kfree_skb(skb);
 }
 
 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index dda13f1af38e..514986b57c2d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 
 static void
 cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
-		      unsigned int nents)
+		      unsigned int nents, u32 skip)
 {
 	struct skb_seq_state st;
 	const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
 		}
 
 		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
-						 buf_len, consumed);
+						 buf_len, skip + consumed);
 	}
 }
 
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
 		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
 
-		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
 	}
 
 	cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
 			cmd->se_cmd.data_length);
 
 	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+		u32 skip = data_offset % PAGE_SIZE;
+
 		sg_off = data_offset / PAGE_SIZE;
 		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-		sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+		sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
 
-		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
 	}
 
 check_payload:
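The fix hinges on the skip argument that sg_pcopy_from_buffer() already supports: when the DataOut offset is not page aligned, copying must start partway into the first page of the S/G list, not at its top. Illustrative sketch (copy_at_offset() and its calling convention are hypothetical; only sg_pcopy_from_buffer() is the real helper):

#include <linux/scatterlist.h>

/*
 * sg is assumed to already point at the S/G entry whose page contains
 * data_offset; the intra-page residue is passed as 'skip' so the
 * payload lands at the right byte instead of the start of the page.
 */
static void copy_at_offset(struct scatterlist *sg, unsigned int nents,
			   void *buf, size_t len, loff_t data_offset)
{
	u32 skip = data_offset % PAGE_SIZE;	/* intra-page residue */

	sg_pcopy_from_buffer(sg, nents, buf, len, skip);
}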
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e4975dd1b1..5001261f5d69 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
 		return 0;
 	}
 	np->np_thread_state = ISCSI_NP_THREAD_RESET;
+	atomic_inc(&np->np_reset_count);
 
 	if (np->np_thread) {
 		spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
 	cmd->data_direction	= DMA_NONE;
+	kfree(cmd->text_in_ptr);
 	cmd->text_in_ptr	= NULL;
 
 	return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 		return text_length;
 
 	if (completed) {
-		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	} else {
-		hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+		hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
 		cmd->read_data_done += text_length;
 		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
 			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e9bdc8b86e7d..dc13afbd4c88 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	flush_signals(current);
 
 	spin_lock_bh(&np->np_thread_lock);
-	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+	if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+		spin_unlock_bh(&np->np_thread_lock);
 		complete(&np->np_restart_comp);
+		return 1;
 	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
 		spin_unlock_bh(&np->np_thread_lock);
 		goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 		goto exit;
 	} else if (rc < 0) {
 		spin_lock_bh(&np->np_thread_lock);
-		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+		if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+			np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 			spin_unlock_bh(&np->np_thread_lock);
 			complete(&np->np_restart_comp);
 			iscsit_put_transport(conn->conn_transport);
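What atomic_dec_if_positive() buys over the old one-shot state test: every reset request is counted, so a second reset that races in while the login thread is busy is not lost. The shape of the handshake in isolation (illustrative only; the real code keeps np_reset_count in struct iscsi_np and pairs it with np_thread_lock):

#include <linux/atomic.h>

static atomic_t reset_count = ATOMIC_INIT(0);

static void request_reset(void)
{
	atomic_inc(&reset_count);	/* a counter: racing requests accumulate */
}

static bool consume_one_reset(void)
{
	/*
	 * atomic_dec_if_positive() decrements only when the counter is
	 * above zero and returns the new value (-1 when it was zero),
	 * so each call consumes at most one pending reset request.
	 */
	return atomic_dec_if_positive(&reset_count) >= 0;
}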
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 36913734c6bc..02e8a5d86658 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 	mutex_lock(&tpg->acl_node_mutex);
 	if (acl->dynamic_node_acl)
 		acl->dynamic_node_acl = 0;
-	list_del(&acl->acl_list);
+	list_del_init(&acl->acl_list);
 	mutex_unlock(&tpg->acl_node_mutex);
 
 	target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 	 * in transport_deregister_session().
 	 */
 	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
-		list_del(&nacl->acl_list);
+		list_del_init(&nacl->acl_list);
 
 		core_tpg_wait_for_nacl_pr_ref(nacl);
 		core_free_device_list_for_node(nacl, se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 97fed9a298bd..836d552b0385 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
 	}
 
 	mutex_lock(&se_tpg->acl_node_mutex);
-	list_del(&nacl->acl_list);
+	list_del_init(&nacl->acl_list);
 	mutex_unlock(&se_tpg->acl_node_mutex);
 
 	core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
 		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 
 		if (se_nacl->dynamic_stop)
-			list_del(&se_nacl->acl_list);
+			list_del_init(&se_nacl->acl_list);
 	}
 	mutex_unlock(&se_tpg->acl_node_mutex);
 
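Why all these call sites switch to list_del_init(): the nacl can be unlinked from more than one path, and a plain list_del() leaves the node's pointers poisoned, so a second unlink would write through poison and crash. list_del_init() re-initializes the node as a valid empty list, making a repeat unlink harmless. In miniature (struct item/detach() are illustrative; the real paths serialize on acl_node_mutex):

#include <linux/list.h>

struct item {
	struct list_head link;
};

static void detach(struct item *it)
{
	/*
	 * Safe to call twice: after the first call the node is a valid
	 * self-pointing list, so the second call is effectively a no-op.
	 */
	list_del_init(&it->link);
}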
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 80ee130f8253..942d094269fb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
 						block_remaining);
 			to_offset = get_block_offset_user(udev, dbi,
 					block_remaining);
-			offset = DATA_BLOCK_SIZE - block_remaining;
-			to += offset;
 
 			if (*iov_cnt != 0 &&
 			    to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
 				(*iov)->iov_len = copy_bytes;
 			}
 			if (copy_data) {
-				memcpy(to, from + sg->length - sg_remaining,
-					copy_bytes);
+				offset = DATA_BLOCK_SIZE - block_remaining;
+				memcpy(to + offset,
+				       from + sg->length - sg_remaining,
+				       copy_bytes);
 				tcmu_flush_dcache_range(to, copy_bytes);
 			}
 			sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
637 copy_bytes = min_t(size_t, sg_remaining, 637 copy_bytes = min_t(size_t, sg_remaining,
638 block_remaining); 638 block_remaining);
639 offset = DATA_BLOCK_SIZE - block_remaining; 639 offset = DATA_BLOCK_SIZE - block_remaining;
640 from += offset;
641 tcmu_flush_dcache_range(from, copy_bytes); 640 tcmu_flush_dcache_range(from, copy_bytes);
642 memcpy(to + sg->length - sg_remaining, from, 641 memcpy(to + sg->length - sg_remaining, from + offset,
643 copy_bytes); 642 copy_bytes);
644 643
645 sg_remaining -= copy_bytes; 644 sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
1433 if (udev->dev_config[0]) 1432 if (udev->dev_config[0])
1434 snprintf(str + used, size - used, "/%s", udev->dev_config); 1433 snprintf(str + used, size - used, "/%s", udev->dev_config);
1435 1434
1435 /* If the old string exists, free it */
1436 kfree(info->name);
1436 info->name = str; 1437 info->name = str;
1437 1438
1438 return 0; 1439 return 0;
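
The scatter/gather fix above stops advancing the block base pointer by the intra-block offset; the offset is now folded in only at memcpy() time, so the pointer used for iov/offset bookkeeping always stays the block base. A userspace model of the patched copy loop (DATA_BLOCK_SIZE and the segment contents are illustrative):

#include <stdio.h>
#include <string.h>

#define DATA_BLOCK_SIZE 8

int main(void)
{
	const char *segs[] = { "01234", "5678", "9AB" };  /* scatterlist stand-in */
	char to[2][DATA_BLOCK_SIZE] = { { 0 } };          /* fixed-size data blocks */
	int dbi = 0;
	size_t block_remaining = DATA_BLOCK_SIZE;

	for (size_t i = 0; i < 3; i++) {
		const char *from = segs[i];
		size_t sg_remaining = strlen(from);

		while (sg_remaining) {
			if (block_remaining == 0) {
				block_remaining = DATA_BLOCK_SIZE;
				dbi++;
			}
			size_t copy_bytes = sg_remaining < block_remaining ?
					    sg_remaining : block_remaining;
			size_t offset = DATA_BLOCK_SIZE - block_remaining;

			/* the block base to[dbi] is never advanced; the offset
			 * is applied at copy time only, as in the patched
			 * scatter_data_area() */
			memcpy(to[dbi] + offset,
			       from + strlen(from) - sg_remaining, copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
	}
	printf("%.8s|%.4s\n", to[0], to[1]);	/* 01234567|89AB */
	return 0;
}
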
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 308b6e17c88a..fe2f00ceafc5 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
333 int res; 333 int res;
334 enum tb_port_type type; 334 enum tb_port_type type;
335 335
336 /*
337 * Some DROMs list more ports than the controller actually has
338 * so we skip those but allow the parser to continue.
339 */
340 if (header->index > sw->config.max_port_number) {
341 dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
342 return 0;
343 }
344
336 port = &sw->ports[header->index]; 345 port = &sw->ports[header->index];
337 port->disabled = header->port_disabled; 346 port->disabled = header->port_disabled;
338 if (port->disabled) 347 if (port->disabled)
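
The new DROM guard simply bounds-checks a device-provided index and keeps parsing instead of failing the whole EEPROM read. The same tolerant-parser pattern, sketched in userspace with illustrative sizes:

#include <stdio.h>

#define MAX_PORTS 4

int main(void)
{
	int ports[MAX_PORTS + 1] = { 0 };	/* index 0 unused, like sw->ports */
	int drom_index[] = { 1, 2, 7, 3 };	/* 7 is bogus device data */

	for (size_t i = 0; i < sizeof(drom_index) / sizeof(drom_index[0]); i++) {
		if (drom_index[i] > MAX_PORTS) {
			fprintf(stderr, "ignoring extra DROM entry %d\n",
				drom_index[i]);
			continue;		/* skip the entry, keep parsing */
		}
		ports[drom_index[i]] = 1;
	}
	for (int p = 1; p <= MAX_PORTS; p++)
		printf("port %d %s\n", p, ports[p] ? "present" : "absent");
	return 0;
}
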
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b5def356af63..1aab3010fbfa 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
1043 if (up->dl_write) 1043 if (up->dl_write)
1044 uart->dl_write = up->dl_write; 1044 uart->dl_write = up->dl_write;
1045 1045
1046 if (serial8250_isa_config != NULL) 1046 if (uart->port.type != PORT_8250_CIR) {
1047 serial8250_isa_config(0, &uart->port, 1047 if (serial8250_isa_config != NULL)
1048 &uart->capabilities); 1048 serial8250_isa_config(0, &uart->port,
1049 &uart->capabilities);
1050
1051 ret = uart_add_one_port(&serial8250_reg,
1052 &uart->port);
1053 if (ret == 0)
1054 ret = uart->port.line;
1055 } else {
1056 dev_info(uart->port.dev,
1057 "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
1058 uart->port.iobase,
1059 (unsigned long long)uart->port.mapbase,
1060 uart->port.irq);
1049 1061
1050 ret = uart_add_one_port(&serial8250_reg, &uart->port); 1062 ret = 0;
1051 if (ret == 0) 1063 }
1052 ret = uart->port.line;
1053 } 1064 }
1054 mutex_unlock(&serial_mutex); 1065 mutex_unlock(&serial_mutex);
1055 1066
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8a857bb34fbb..1888d168a41c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
142 .fixed_options = true, 142 .fixed_options = true,
143}; 143};
144 144
145/* 145#ifdef CONFIG_ACPI_SPCR_TABLE
146 * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
147 * occasionally getting stuck as 1. To avoid the potential for a hang, check
148 * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
149 * implementations, so only do so if an affected platform is detected in
150 * parse_spcr().
151 */
152static bool qdf2400_e44_present = false;
153
154static struct vendor_data vendor_qdt_qdf2400_e44 = { 146static struct vendor_data vendor_qdt_qdf2400_e44 = {
155 .reg_offset = pl011_std_offsets, 147 .reg_offset = pl011_std_offsets,
156 .fr_busy = UART011_FR_TXFE, 148 .fr_busy = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
165 .always_enabled = true, 157 .always_enabled = true,
166 .fixed_options = true, 158 .fixed_options = true,
167}; 159};
160#endif
168 161
169static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { 162static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
170 [REG_DR] = UART01x_DR, 163 [REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
2375 resource_size_t addr; 2368 resource_size_t addr;
2376 int i; 2369 int i;
2377 2370
2378 if (strcmp(name, "qdf2400_e44") == 0) { 2371 /*
2379 pr_info_once("UART: Working around QDF2400 SoC erratum 44"); 2372 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
2380 qdf2400_e44_present = true; 2373 * have a distinct console name, so make sure we check for that.
2381 } else if (strcmp(name, "pl011") != 0) { 2374 * The actual implementation of the erratum occurs in the probe
2375 * function.
2376 */
2377 if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
2382 return -ENODEV; 2378 return -ENODEV;
2383 }
2384 2379
2385 if (uart_parse_earlycon(options, &iotype, &addr, &options)) 2380 if (uart_parse_earlycon(options, &iotype, &addr, &options))
2386 return -ENODEV; 2381 return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
2734 } 2729 }
2735 uap->port.irq = ret; 2730 uap->port.irq = ret;
2736 2731
2737 uap->reg_offset = vendor_sbsa.reg_offset; 2732#ifdef CONFIG_ACPI_SPCR_TABLE
2738 uap->vendor = qdf2400_e44_present ? 2733 if (qdf2400_e44_present) {
2739 &vendor_qdt_qdf2400_e44 : &vendor_sbsa; 2734 dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
2735 uap->vendor = &vendor_qdt_qdf2400_e44;
2736 } else
2737#endif
2738 uap->vendor = &vendor_sbsa;
2739
2740 uap->reg_offset = uap->vendor->reg_offset;
2740 uap->fifosize = 32; 2741 uap->fifosize = 32;
2741 uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM; 2742 uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
2742 uap->port.ops = &sbsa_uart_pops; 2743 uap->port.ops = &sbsa_uart_pops;
2743 uap->fixed_baud = baudrate; 2744 uap->fixed_baud = baudrate;
2744 2745
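
After this change the erratum flag is owned by the SPCR parser (see the include/linux/acpi.h hunk below) and the quirky vendor data compiles out entirely when CONFIG_ACPI_SPCR_TABLE is off. A compilable sketch of that probe-time selection; the struct layout and printed strings are illustrative, not the driver's real definitions:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_ACPI_SPCR_TABLE 1	/* assumption: feature enabled */

struct vendor_data { const char *name; bool fr_busy_is_txfe; };

static const struct vendor_data vendor_sbsa = { "sbsa", false };

#ifdef CONFIG_ACPI_SPCR_TABLE
bool qdf2400_e44_present;		/* set by parse_spcr() in the real code */
static const struct vendor_data vendor_qdf2400_e44 = { "qdf2400-e44", true };
#endif

int main(void)
{
	const struct vendor_data *vendor;

#ifdef CONFIG_ACPI_SPCR_TABLE
	qdf2400_e44_present = true;	/* pretend SPCR matched the erratum */
	if (qdf2400_e44_present) {
		printf("working around QDF2400 SoC erratum 44\n");
		vendor = &vendor_qdf2400_e44;
	} else
#endif
		vendor = &vendor_sbsa;

	printf("vendor=%s, poll TXFE instead of BUSY: %d\n",
	       vendor->name, vendor->fr_busy_is_txfe);
	return 0;
}
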
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ab1bb3b538ac..7f277b092b5b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
1888 /* No more submits can occur */ 1888 /* No more submits can occur */
1889 spin_lock_irq(&hcd_urb_list_lock); 1889 spin_lock_irq(&hcd_urb_list_lock);
1890rescan: 1890rescan:
1891 list_for_each_entry (urb, &ep->urb_list, urb_list) { 1891 list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
1892 int is_in; 1892 int is_in;
1893 1893
1894 if (urb->unlinked) 1894 if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
2485 } 2485 }
2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { 2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
2487 hcd = hcd->shared_hcd; 2487 hcd = hcd->shared_hcd;
2488 clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2489 set_bit(HCD_FLAG_DEAD, &hcd->flags);
2488 if (hcd->rh_registered) { 2490 if (hcd->rh_registered) {
2489 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2491 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2490 2492
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6e6797d145dd..822f8c50e423 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
4725static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, 4725static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4726 u16 portchange) 4726 u16 portchange)
4727{ 4727{
4728 int status, i; 4728 int status = -ENODEV;
4729 int i;
4729 unsigned unit_load; 4730 unsigned unit_load;
4730 struct usb_device *hdev = hub->hdev; 4731 struct usb_device *hdev = hub->hdev;
4731 struct usb_hcd *hcd = bus_to_hcd(hdev->bus); 4732 struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
4929 4930
4930done: 4931done:
4931 hub_port_disable(hub, port1, 1); 4932 hub_port_disable(hub, port1, 1);
4932 if (hcd->driver->relinquish_port && !hub->hdev->parent) 4933 if (hcd->driver->relinquish_port && !hub->hdev->parent) {
4933 hcd->driver->relinquish_port(hcd, port1); 4934 if (status != -ENOTCONN && status != -ENODEV)
4934 4935 hcd->driver->relinquish_port(hcd, port1);
4936 }
4935} 4937}
4936 4938
4937/* Handle physical or logical connection change events. 4939/* Handle physical or logical connection change events.
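
Initialising status to -ENODEV makes every early "goto done" look like a disconnect, so the root port is handed to a companion controller only on a genuine enumeration failure. A small model of that control flow (error numbers are defined locally for portability):

#include <stdio.h>

#define ENODEV   19
#define ENOTCONN 107
#define EIO      5

static void relinquish_port(int port)
{
	printf("relinquish port %d to companion controller\n", port);
}

static void hub_port_connect(int port, int device_present)
{
	int status = -ENODEV;		/* default: treat early exits as disconnect */

	if (!device_present)
		goto done;		/* one of the many early bail-outs */

	status = -EIO;			/* pretend enumeration itself failed */
done:
	if (status != -ENOTCONN && status != -ENODEV)
		relinquish_port(port);	/* only for real failures */
	else
		printf("port %d: nothing connected, keep ownership\n", port);
}

int main(void)
{
	hub_port_connect(1, 0);		/* disconnect path: port is kept */
	hub_port_connect(2, 1);		/* failure path: port is handed over */
	return 0;
}
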
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3116edfcdc18..574da2b4529c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
150 /* appletouch */ 150 /* appletouch */
151 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 151 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
152 152
153 /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
154 { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
155
153 /* Avision AV600U */ 156 /* Avision AV600U */
154 { USB_DEVICE(0x0638, 0x0a13), .driver_info = 157 { USB_DEVICE(0x0638, 0x0a13), .driver_info =
155 USB_QUIRK_STRING_FETCH_255 }, 158 USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
249 { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, 252 { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
250 { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, 253 { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
251 { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, 254 { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
255 { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
252 256
253 /* Logitech Optical Mouse M90/M100 */ 257 /* Logitech Optical Mouse M90/M100 */
254 { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, 258 { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6b299c7b7656..f064f1549333 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
896 if (!node) { 896 if (!node) {
897 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 897 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
898 898
899 /*
900 * USB Specification 2.0 Section 5.9.2 states that: "If
901 * there is only a single transaction in the microframe,
902 * only a DATA0 data packet PID is used. If there are
903 * two transactions per microframe, DATA1 is used for
904 * the first transaction data packet and DATA0 is used
905 * for the second transaction data packet. If there are
906 * three transactions per microframe, DATA2 is used for
907 * the first transaction data packet, DATA1 is used for
908 * the second, and DATA0 is used for the third."
909 *
910 * IOW, we should satisfy the following cases:
911 *
912 * 1) length <= maxpacket
913 * - DATA0
914 *
915 * 2) maxpacket < length <= (2 * maxpacket)
916 * - DATA1, DATA0
917 *
918 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
919 * - DATA2, DATA1, DATA0
920 */
899 if (speed == USB_SPEED_HIGH) { 921 if (speed == USB_SPEED_HIGH) {
900 struct usb_ep *ep = &dep->endpoint; 922 struct usb_ep *ep = &dep->endpoint;
901 trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); 923 unsigned int mult = ep->mult - 1;
924 unsigned int maxp = usb_endpoint_maxp(ep->desc);
925
926 if (length <= (2 * maxp))
927 mult--;
928
929 if (length <= maxp)
930 mult--;
931
932 trb->size |= DWC3_TRB_SIZE_PCM1(mult);
902 } 933 }
903 } else { 934 } else {
904 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 935 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
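
The comment above pins down the high-speed isochronous fix: the PCM1 field must reflect how many transactions the current request actually needs, not the endpoint's programmed maximum. A worked model of the computation, assuming maxp = 1024 and an endpoint programmed for three transactions per microframe (callers guarantee length <= ep_mult * maxp):

#include <stdio.h>

static unsigned int pcm1(unsigned int ep_mult, unsigned int maxp,
			 unsigned int length)
{
	unsigned int mult = ep_mult - 1;

	if (length <= 2 * maxp)		/* two transactions suffice */
		mult--;
	if (length <= maxp)		/* one transaction suffices */
		mult--;
	return mult;
}

int main(void)
{
	const unsigned int maxp = 1024;

	/* case 1: one transaction/uframe -> DATA0 only */
	printf("len=%4u -> PCM1=%u\n",  512, pcm1(3, maxp,  512));	/* 0 */
	/* case 2: two transactions/uframe -> DATA1, DATA0 */
	printf("len=%4u -> PCM1=%u\n", 1500, pcm1(3, maxp, 1500));	/* 1 */
	/* case 3: three transactions/uframe -> DATA2, DATA1, DATA0 */
	printf("len=%4u -> PCM1=%u\n", 2500, pcm1(3, maxp, 2500));	/* 2 */
	return 0;
}
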
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 62dc9c7798e7..e1de8fe599a3 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
838 return usb3_req; 838 return usb3_req;
839} 839}
840 840
841static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, 841static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
842 struct renesas_usb3_request *usb3_req, int status) 842 struct renesas_usb3_request *usb3_req,
843 int status)
843{ 844{
844 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); 845 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
845 unsigned long flags;
846 846
847 dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", 847 dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
848 usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, 848 usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
849 status); 849 status);
850 usb3_req->req.status = status; 850 usb3_req->req.status = status;
851 spin_lock_irqsave(&usb3->lock, flags);
852 usb3_ep->started = false; 851 usb3_ep->started = false;
853 list_del_init(&usb3_req->queue); 852 list_del_init(&usb3_req->queue);
854 spin_unlock_irqrestore(&usb3->lock, flags); 853 spin_unlock(&usb3->lock);
855 usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); 854 usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
855 spin_lock(&usb3->lock);
856}
857
858static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
859 struct renesas_usb3_request *usb3_req, int status)
860{
861 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
862 unsigned long flags;
863
864 spin_lock_irqsave(&usb3->lock, flags);
865 __usb3_request_done(usb3_ep, usb3_req, status);
866 spin_unlock_irqrestore(&usb3->lock, flags);
856} 867}
857 868
858static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) 869static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
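
Splitting out __usb3_request_done() lets the driver hold its lock for the list manipulation but drop it around usb_gadget_giveback_request(), since the gadget callback may immediately re-enter the driver and queue another request. The same drop-around-callback pattern, modelled with a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void giveback(void)
{
	/* may call back into the driver and take 'lock' again */
	pthread_mutex_lock(&lock);
	puts("callback re-entered driver safely");
	pthread_mutex_unlock(&lock);
}

static void __request_done(void)	/* caller holds 'lock' */
{
	puts("unlink request, mark endpoint idle");
	pthread_mutex_unlock(&lock);	/* drop the lock around the callback */
	giveback();
	pthread_mutex_lock(&lock);	/* re-acquire before returning */
}

static void request_done(void)
{
	pthread_mutex_lock(&lock);
	__request_done();
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	request_done();
	return 0;
}
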
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index c8989c62a262..c8f38649f749 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -98,6 +98,7 @@ enum amd_chipset_gen {
98 AMD_CHIPSET_HUDSON2, 98 AMD_CHIPSET_HUDSON2,
99 AMD_CHIPSET_BOLTON, 99 AMD_CHIPSET_BOLTON,
100 AMD_CHIPSET_YANGTZE, 100 AMD_CHIPSET_YANGTZE,
101 AMD_CHIPSET_TAISHAN,
101 AMD_CHIPSET_UNKNOWN, 102 AMD_CHIPSET_UNKNOWN,
102}; 103};
103 104
@@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
141 pinfo->sb_type.gen = AMD_CHIPSET_SB700; 142 pinfo->sb_type.gen = AMD_CHIPSET_SB700;
142 else if (rev >= 0x40 && rev <= 0x4f) 143 else if (rev >= 0x40 && rev <= 0x4f)
143 pinfo->sb_type.gen = AMD_CHIPSET_SB800; 144 pinfo->sb_type.gen = AMD_CHIPSET_SB800;
145 }
146 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
147 0x145c, NULL);
148 if (pinfo->smbus_dev) {
149 pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
144 } else { 150 } else {
145 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 151 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
146 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); 152 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
260{ 266{
261 /* Make sure amd chipset type has already been initialized */ 267 /* Make sure amd chipset type has already been initialized */
262 usb_amd_find_chipset_info(); 268 usb_amd_find_chipset_info();
263 if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) 269 if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
264 return 0; 270 amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
265 271 dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
266 dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); 272 return 1;
267 return 1; 273 }
274 return 0;
268} 275}
269EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); 276EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
270 277
@@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
1150} 1157}
1151DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, 1158DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1152 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); 1159 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
1160
1161bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
1162{
1163 /*
1164 * Our dear uPD72020{1,2} friend only partially resets when
1165 * asked to via the XHCI interface, and may end up doing DMA
1166 * at the wrong addresses, as it keeps the top 32 bits of some
1167 * addresses from its previous programming under obscure
1168 * circumstances.
1169 * Give it a good whack at probe time. Unfortunately, this
1170 * needs to happen before we've had a chance to discover any
1171 * quirk, or the system will be in a rather bad state.
1172 */
1173 if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1174 (pdev->device == 0x0014 || pdev->device == 0x0015))
1175 return true;
1176
1177 return false;
1178}
1179EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 655994480198..5582cbafecd4 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
15void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); 15void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
16void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); 16void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
17void sb800_prefetch(struct device *dev, int on); 17void sb800_prefetch(struct device *dev, int on);
18bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
18#else 19#else
19struct pci_dev; 20struct pci_dev;
20static inline void usb_amd_quirk_pll_disable(void) {} 21static inline void usb_amd_quirk_pll_disable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5b0fa553c8bc..8071c8fdd15e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
284 284
285 driver = (struct hc_driver *)id->driver_data; 285 driver = (struct hc_driver *)id->driver_data;
286 286
287 /* For some HW implementations, an xHCI reset is just not enough... */
288 if (usb_xhci_needs_pci_reset(dev)) {
289 dev_info(&dev->dev, "Resetting\n");
290 if (pci_reset_function_locked(dev))
291 dev_warn(&dev->dev, "Reset failed");
292 }
293
287 /* Prevent runtime suspending between USB-2 and USB-3 initialization */ 294 /* Prevent runtime suspending between USB-2 and USB-3 initialization */
288 pm_runtime_get_noresume(&dev->dev); 295 pm_runtime_get_noresume(&dev->dev);
289 296
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 76decb8011eb..3344ffd5bb13 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
139 "Could not flush host TX%d fifo: csr: %04x\n", 139 "Could not flush host TX%d fifo: csr: %04x\n",
140 ep->epnum, csr)) 140 ep->epnum, csr))
141 return; 141 return;
142 mdelay(1);
142 } 143 }
143} 144}
144 145
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8fb86a5f458e..3d0dd2f97415 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -197,6 +197,7 @@ struct msm_otg {
197 struct regulator *v3p3; 197 struct regulator *v3p3;
198 struct regulator *v1p8; 198 struct regulator *v1p8;
199 struct regulator *vddcx; 199 struct regulator *vddcx;
200 struct regulator_bulk_data supplies[3];
200 201
201 struct reset_control *phy_rst; 202 struct reset_control *phy_rst;
202 struct reset_control *link_rst; 203 struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
1731 1732
1732static int msm_otg_probe(struct platform_device *pdev) 1733static int msm_otg_probe(struct platform_device *pdev)
1733{ 1734{
1734 struct regulator_bulk_data regs[3];
1735 int ret = 0; 1735 int ret = 0;
1736 struct device_node *np = pdev->dev.of_node; 1736 struct device_node *np = pdev->dev.of_node;
1737 struct msm_otg_platform_data *pdata; 1737 struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
1817 return motg->irq; 1817 return motg->irq;
1818 } 1818 }
1819 1819
1820 regs[0].supply = "vddcx"; 1820 motg->supplies[0].supply = "vddcx";
1821 regs[1].supply = "v3p3"; 1821 motg->supplies[1].supply = "v3p3";
1822 regs[2].supply = "v1p8"; 1822 motg->supplies[2].supply = "v1p8";
1823 1823
1824 ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); 1824 ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
1825 motg->supplies);
1825 if (ret) 1826 if (ret)
1826 return ret; 1827 return ret;
1827 1828
1828 motg->vddcx = regs[0].consumer; 1829 motg->vddcx = motg->supplies[0].consumer;
1829 motg->v3p3 = regs[1].consumer; 1830 motg->v3p3 = motg->supplies[1].consumer;
1830 motg->v1p8 = regs[2].consumer; 1831 motg->v1p8 = motg->supplies[2].consumer;
1831 1832
1832 clk_set_rate(motg->clk, 60000000); 1833 clk_set_rate(motg->clk, 60000000);
1833 1834
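
The msm_otg fix moves the regulator_bulk_data array from the probe stack into the device structure: devm_regulator_bulk_get() leaves consumer pointers inside that array, and the old stack copy died when probe() returned. A userspace sketch of the lifetime issue, with a toy bulk_get() standing in for the regulator API:

#include <stdio.h>

struct bulk_data { const char *supply; char consumer[16]; };

struct msm_otg { struct bulk_data supplies[3]; char *vddcx; };

static void bulk_get(struct bulk_data *regs, int n)
{
	/* like devm_regulator_bulk_get(): fills in regs[i].consumer,
	 * which callers then keep pointers to */
	for (int i = 0; i < n; i++)
		snprintf(regs[i].consumer, sizeof(regs[i].consumer),
			 "reg-%s", regs[i].supply);
}

static void probe(struct msm_otg *motg)
{
	/* BAD (old code): a local 'struct bulk_data regs[3]' here would
	 * vanish on return, leaving motg->vddcx dangling */
	motg->supplies[0].supply = "vddcx";
	motg->supplies[1].supply = "v3p3";
	motg->supplies[2].supply = "v1p8";
	bulk_get(motg->supplies, 3);
	motg->vddcx = motg->supplies[0].consumer;	/* stays valid */
}

int main(void)
{
	static struct msm_otg motg;	/* device-lifetime object */

	probe(&motg);
	printf("vddcx consumer: %s\n", motg.vddcx);
	return 0;
}
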
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 93fba9033b00..2c8161bcf5b5 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
639 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); 639 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
640 struct usbhs_pipe *pipe; 640 struct usbhs_pipe *pipe;
641 unsigned long flags; 641 unsigned long flags;
642 int ret = 0;
643 642
644 spin_lock_irqsave(&uep->lock, flags); 643 spin_lock_irqsave(&uep->lock, flags);
645 pipe = usbhsg_uep_to_pipe(uep); 644 pipe = usbhsg_uep_to_pipe(uep);
646 if (!pipe) { 645 if (!pipe)
647 ret = -EINVAL;
648 goto out; 646 goto out;
649 }
650 647
651 usbhsg_pipe_disable(uep); 648 usbhsg_pipe_disable(uep);
652 usbhs_pipe_free(pipe); 649 usbhs_pipe_free(pipe);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d544b331c9f2..02b67abfc2a1 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -20,9 +20,13 @@
20/* Low Power Status register (LPSTS) */ 20/* Low Power Status register (LPSTS) */
21#define LPSTS_SUSPM 0x4000 21#define LPSTS_SUSPM 0x4000
22 22
23/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */ 23/*
24 * USB General control register 2 (UGCTRL2)
25 * Remarks: bit[31:11] and bit[9:6] should be 0
26 */
24#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ 27#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
25#define UGCTRL2_USB0SEL_OTG 0x00000030 28#define UGCTRL2_USB0SEL_OTG 0x00000030
29#define UGCTRL2_VBUSSEL 0x00000400
26 30
27static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) 31static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
28{ 32{
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
34{ 38{
35 struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); 39 struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
36 40
37 usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); 41 usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
42 UGCTRL2_VBUSSEL);
38 43
39 if (enable) { 44 if (enable) {
40 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); 45 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f64e914a8985..2d945c9f975c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
142 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 142 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
143 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ 143 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
144 { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ 144 { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
145 { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
145 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 146 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
146 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 147 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
147 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 148 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ebe51f11105d..fe123153b1a5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
2025 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ 2025 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
2026 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ 2026 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
2027 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 2027 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2028 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
2029 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2028 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 2030 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2029 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 2031 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2030 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ 2032 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c9ebefd8f35f..a585b477415d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
52 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 52 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
53 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), 53 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
54 .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, 54 .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
55 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
56 .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
55 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, 57 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
56 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, 58 { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
57 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, 59 { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be88209e..3b5a15d1dc0d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
27#define ATEN_VENDOR_ID 0x0557 27#define ATEN_VENDOR_ID 0x0557
28#define ATEN_VENDOR_ID2 0x0547 28#define ATEN_VENDOR_ID2 0x0547
29#define ATEN_PRODUCT_ID 0x2008 29#define ATEN_PRODUCT_ID 0x2008
30#define ATEN_PRODUCT_UC485 0x2021
30#define ATEN_PRODUCT_ID2 0x2118 31#define ATEN_PRODUCT_ID2 0x2118
31 32
32#define IODATA_VENDOR_ID 0x04bb 33#define IODATA_VENDOR_ID 0x04bb
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cbea9f329e71..cde115359793 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
124/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ 124/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
125UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, 125UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
126 "Initio Corporation", 126 "Initio Corporation",
127 "", 127 "INIC-3069",
128 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 128 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
129 US_FL_NO_ATA_1X), 129 US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
130 130
131/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ 131/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
132UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, 132UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 06615934fed1..0dceb9fa3a06 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
315{ 315{
316 struct us_data *us = (struct us_data *)__us; 316 struct us_data *us = (struct us_data *)__us;
317 struct Scsi_Host *host = us_to_host(us); 317 struct Scsi_Host *host = us_to_host(us);
318 struct scsi_cmnd *srb;
318 319
319 for (;;) { 320 for (;;) {
320 usb_stor_dbg(us, "*** thread sleeping\n"); 321 usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
330 scsi_lock(host); 331 scsi_lock(host);
331 332
332 /* When we are called with no command pending, we're done */ 333 /* When we are called with no command pending, we're done */
334 srb = us->srb;
333 if (us->srb == NULL) { 335 if (us->srb == NULL) {
334 scsi_unlock(host); 336 scsi_unlock(host);
335 mutex_unlock(&us->dev_mutex); 337 mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
398 /* lock access to the state */ 400 /* lock access to the state */
399 scsi_lock(host); 401 scsi_lock(host);
400 402
401 /* indicate that the command is done */ 403 /* was the command aborted? */
402 if (us->srb->result != DID_ABORT << 16) { 404 if (us->srb->result == DID_ABORT << 16) {
403 usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
404 us->srb->result);
405 us->srb->scsi_done(us->srb);
406 } else {
407SkipForAbort: 405SkipForAbort:
408 usb_stor_dbg(us, "scsi command aborted\n"); 406 usb_stor_dbg(us, "scsi command aborted\n");
407 srb = NULL; /* Don't call srb->scsi_done() */
409 } 408 }
410 409
411 /* 410 /*
@@ -429,6 +428,13 @@ SkipForAbort:
429 428
430 /* unlock the device pointers */ 429 /* unlock the device pointers */
431 mutex_unlock(&us->dev_mutex); 430 mutex_unlock(&us->dev_mutex);
431
432 /* now that the locks are released, notify the SCSI core */
433 if (srb) {
434 usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
435 srb->result);
436 srb->scsi_done(srb);
437 }
432 } /* for (;;) */ 438 } /* for (;;) */
433 439
434 /* Wait until we are told to stop */ 440 /* Wait until we are told to stop */
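
The usb-storage rework snapshots us->srb under the host lock, NULLs it on the abort path, and calls scsi_done() only after both the host lock and dev_mutex have been released. A compact model of that defer-the-callback pattern:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

struct scsi_cmnd { int result; };

static void scsi_done(struct scsi_cmnd *srb)
{
	printf("scsi cmd done, result=0x%x\n", srb->result);
}

static void handle(struct scsi_cmnd *cmd, int aborted)
{
	struct scsi_cmnd *srb;

	pthread_mutex_lock(&host_lock);
	srb = cmd;
	if (aborted)
		srb = NULL;		/* don't call srb->scsi_done() */
	pthread_mutex_unlock(&host_lock);

	if (srb)			/* locks released: safe to notify */
		scsi_done(srb);
}

int main(void)
{
	struct scsi_cmnd cmd = { .result = 0 };

	handle(&cmd, 0);
	handle(&cmd, 1);		/* aborted: no completion callback */
	return 0;
}
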
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ff01bed7112f..1e784adb89b1 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -17,6 +17,7 @@
17#include <asm/efi.h> 17#include <asm/efi.h>
18 18
19static bool request_mem_succeeded = false; 19static bool request_mem_succeeded = false;
20static bool nowc = false;
20 21
21static struct fb_var_screeninfo efifb_defined = { 22static struct fb_var_screeninfo efifb_defined = {
22 .activate = FB_ACTIVATE_NOW, 23 .activate = FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
99 screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); 100 screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
100 else if (!strncmp(this_opt, "width:", 6)) 101 else if (!strncmp(this_opt, "width:", 6))
101 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); 102 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
103 else if (!strcmp(this_opt, "nowc"))
104 nowc = true;
102 } 105 }
103 } 106 }
104 107
@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
255 info->apertures->ranges[0].base = efifb_fix.smem_start; 258 info->apertures->ranges[0].base = efifb_fix.smem_start;
256 info->apertures->ranges[0].size = size_remap; 259 info->apertures->ranges[0].size = size_remap;
257 260
258 info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); 261 if (nowc)
262 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
263 else
264 info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
259 if (!info->screen_base) { 265 if (!info->screen_base) {
260 pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", 266 pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
261 efifb_fix.smem_len, efifb_fix.smem_start); 267 efifb_fix.smem_len, efifb_fix.smem_start);
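
The new "nowc" option gives users an escape hatch on platforms where a write-combined framebuffer mapping misbehaves, falling back to a plain uncached ioremap(). A sketch of the option walk (the mapping calls themselves are kernel-only, so the resulting choice is just printed here):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool nowc;

static void efifb_setup(char *options)
{
	char *opt;

	for (opt = strtok(options, ","); opt; opt = strtok(NULL, ","))
		if (!strcmp(opt, "nowc"))
			nowc = true;
}

int main(void)
{
	char options[] = "width:1024,nowc";	/* e.g. video=efifb:... */

	efifb_setup(options);
	printf("map framebuffer with %s\n",
	       nowc ? "ioremap() (uncached)" : "ioremap_wc() (write-combined)");
	return 0;
}
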
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index c166e0725be5..ba82f97fb42b 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
1073 imxfb_disable_controller(fbi); 1073 imxfb_disable_controller(fbi);
1074 1074
1075 unregister_framebuffer(info); 1075 unregister_framebuffer(info);
1076 1076 fb_dealloc_cmap(&info->cmap);
1077 pdata = dev_get_platdata(&pdev->dev); 1077 pdata = dev_get_platdata(&pdev->dev);
1078 if (pdata && pdata->exit) 1078 if (pdata && pdata->exit)
1079 pdata->exit(fbi->pdev); 1079 pdata->exit(fbi->pdev);
1080
1081 fb_dealloc_cmap(&info->cmap);
1082 kfree(info->pseudo_palette);
1083 framebuffer_release(info);
1084
1085 dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base, 1080 dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
1086 fbi->map_dma); 1081 fbi->map_dma);
1087
1088 iounmap(fbi->regs); 1082 iounmap(fbi->regs);
1089 release_mem_region(res->start, resource_size(res)); 1083 release_mem_region(res->start, resource_size(res));
1084 kfree(info->pseudo_palette);
1085 framebuffer_release(info);
1090 1086
1091 return 0; 1087 return 0;
1092} 1088}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index eecf695c16f4..09e5bb013d28 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {
193 193
194static int __init omap_dss_probe(struct platform_device *pdev) 194static int __init omap_dss_probe(struct platform_device *pdev)
195{ 195{
196 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
197 int r; 196 int r;
198 197
199 core.pdev = pdev; 198 core.pdev = pdev;
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index bae1f5d36c26..2d43118077e4 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -574,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)
574 574
575static void enable_pirq(struct irq_data *data) 575static void enable_pirq(struct irq_data *data)
576{ 576{
577 startup_pirq(data); 577 enable_dynirq(data);
578} 578}
579 579
580static void disable_pirq(struct irq_data *data) 580static void disable_pirq(struct irq_data *data)
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e46080214955..3e59590c7254 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
857 struct list_head *ent; 857 struct list_head *ent;
858 struct xs_watch_event *event; 858 struct xs_watch_event *event;
859 859
860 xenwatch_pid = current->pid;
861
860 for (;;) { 862 for (;;) {
861 wait_event_interruptible(watch_events_waitq, 863 wait_event_interruptible(watch_events_waitq,
862 !list_empty(&watch_events)); 864 !list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
925 task = kthread_run(xenwatch_thread, NULL, "xenwatch"); 927 task = kthread_run(xenwatch_thread, NULL, "xenwatch");
926 if (IS_ERR(task)) 928 if (IS_ERR(task))
927 return PTR_ERR(task); 929 return PTR_ERR(task);
928 xenwatch_pid = task->pid;
929 930
930 /* shutdown watches for kexec boot */ 931 /* shutdown watches for kexec boot */
931 xs_reset_watches(); 932 xs_reset_watches();
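
Recording xenwatch_pid from inside the thread closes the window where the freshly created thread could already be running and comparing against the pid before xs_init() had stored it. The same move-the-assignment idea, modelled with pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_t xenwatch_tid;

static void *xenwatch_thread(void *unused)
{
	(void)unused;
	xenwatch_tid = pthread_self();	/* set before any event handling */
	printf("xenwatch thread running\n");
	return NULL;
}

int main(void)
{
	pthread_t task;

	if (pthread_create(&task, NULL, xenwatch_thread, NULL))
		return 1;
	/* old code did the equivalent of 'xenwatch_tid = task;' here --
	 * too late if the thread has already checked xenwatch_tid */
	pthread_join(task, NULL);
	return 0;
}
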
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 3ee4fdc3da9e..ab60051be6e5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
46{ 46{
47 struct fuse_file *ff; 47 struct fuse_file *ff;
48 48
49 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); 49 ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
50 if (unlikely(!ff)) 50 if (unlikely(!ff))
51 return NULL; 51 return NULL;
52 52
@@ -609,7 +609,7 @@ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
609 struct fuse_io_priv *io = req->io; 609 struct fuse_io_priv *io = req->io;
610 ssize_t pos = -1; 610 ssize_t pos = -1;
611 611
612 fuse_release_user_pages(req, !io->write); 612 fuse_release_user_pages(req, io->should_dirty);
613 613
614 if (io->write) { 614 if (io->write) {
615 if (req->misc.write.in.size != req->misc.write.out.size) 615 if (req->misc.write.in.size != req->misc.write.out.size)
@@ -1316,7 +1316,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1316 loff_t *ppos, int flags) 1316 loff_t *ppos, int flags)
1317{ 1317{
1318 int write = flags & FUSE_DIO_WRITE; 1318 int write = flags & FUSE_DIO_WRITE;
1319 bool should_dirty = !write && iter_is_iovec(iter);
1320 int cuse = flags & FUSE_DIO_CUSE; 1319 int cuse = flags & FUSE_DIO_CUSE;
1321 struct file *file = io->file; 1320 struct file *file = io->file;
1322 struct inode *inode = file->f_mapping->host; 1321 struct inode *inode = file->f_mapping->host;
@@ -1346,6 +1345,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1346 inode_unlock(inode); 1345 inode_unlock(inode);
1347 } 1346 }
1348 1347
1348 io->should_dirty = !write && iter_is_iovec(iter);
1349 while (count) { 1349 while (count) {
1350 size_t nres; 1350 size_t nres;
1351 fl_owner_t owner = current->files; 1351 fl_owner_t owner = current->files;
@@ -1360,7 +1360,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
1360 nres = fuse_send_read(req, io, pos, nbytes, owner); 1360 nres = fuse_send_read(req, io, pos, nbytes, owner);
1361 1361
1362 if (!io->async) 1362 if (!io->async)
1363 fuse_release_user_pages(req, should_dirty); 1363 fuse_release_user_pages(req, io->should_dirty);
1364 if (req->out.h.error) { 1364 if (req->out.h.error) {
1365 err = req->out.h.error; 1365 err = req->out.h.error;
1366 break; 1366 break;
@@ -1669,6 +1669,7 @@ err_nofile:
1669err_free: 1669err_free:
1670 fuse_request_free(req); 1670 fuse_request_free(req);
1671err: 1671err:
1672 mapping_set_error(page->mapping, error);
1672 end_page_writeback(page); 1673 end_page_writeback(page);
1673 return error; 1674 return error;
1674} 1675}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1bd7ffdad593..bd4d2a3e1ec1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -249,6 +249,7 @@ struct fuse_io_priv {
249 size_t size; 249 size_t size;
250 __u64 offset; 250 __u64 offset;
251 bool write; 251 bool write;
252 bool should_dirty;
252 int err; 253 int err;
253 struct kiocb *iocb; 254 struct kiocb *iocb;
254 struct file *file; 255 struct file *file;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 69d02cf8cf37..5f93cfacb3d1 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT
121config PNFS_BLOCK 121config PNFS_BLOCK
122 tristate 122 tristate
123 depends on NFS_V4_1 && BLK_DEV_DM 123 depends on NFS_V4_1 && BLK_DEV_DM
124 depends on 64BIT || LBDAF
124 default NFS_V4 125 default NFS_V4
125 126
126config PNFS_FLEXFILE_LAYOUT 127config PNFS_FLEXFILE_LAYOUT
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 6df7a0cf5660..f32c58bbe556 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -32,6 +32,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
32{ 32{
33 nfs4_print_deviceid(&mirror_ds->id_node.deviceid); 33 nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
34 nfs4_pnfs_ds_put(mirror_ds->ds); 34 nfs4_pnfs_ds_put(mirror_ds->ds);
35 kfree(mirror_ds->ds_versions);
35 kfree_rcu(mirror_ds, id_node.rcu); 36 kfree_rcu(mirror_ds, id_node.rcu);
36} 37}
37 38
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ffd2e712595d..d90132642340 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2553,9 +2553,8 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
2553 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2553 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2554 clear_bit(NFS_OPEN_STATE, &state->flags); 2554 clear_bit(NFS_OPEN_STATE, &state->flags);
2555 stateid->type = NFS4_INVALID_STATEID_TYPE; 2555 stateid->type = NFS4_INVALID_STATEID_TYPE;
2556 }
2557 if (status != NFS_OK)
2558 return status; 2556 return status;
2557 }
2559 if (nfs_open_stateid_recover_openmode(state)) 2558 if (nfs_open_stateid_recover_openmode(state))
2560 return -NFS4ERR_OPENMODE; 2559 return -NFS4ERR_OPENMODE;
2561 return NFS_OK; 2560 return NFS_OK;
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8a428498d6b2..509a61668d90 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -106,13 +106,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
106 global_node_page_state(NR_FILE_MAPPED)); 106 global_node_page_state(NR_FILE_MAPPED));
107 show_val_kb(m, "Shmem: ", i.sharedram); 107 show_val_kb(m, "Shmem: ", i.sharedram);
108 show_val_kb(m, "Slab: ", 108 show_val_kb(m, "Slab: ",
109 global_page_state(NR_SLAB_RECLAIMABLE) + 109 global_node_page_state(NR_SLAB_RECLAIMABLE) +
110 global_page_state(NR_SLAB_UNRECLAIMABLE)); 110 global_node_page_state(NR_SLAB_UNRECLAIMABLE));
111 111
112 show_val_kb(m, "SReclaimable: ", 112 show_val_kb(m, "SReclaimable: ",
113 global_page_state(NR_SLAB_RECLAIMABLE)); 113 global_node_page_state(NR_SLAB_RECLAIMABLE));
114 show_val_kb(m, "SUnreclaim: ", 114 show_val_kb(m, "SUnreclaim: ",
115 global_page_state(NR_SLAB_UNRECLAIMABLE)); 115 global_node_page_state(NR_SLAB_UNRECLAIMABLE));
116 seq_printf(m, "KernelStack: %8lu kB\n", 116 seq_printf(m, "KernelStack: %8lu kB\n",
117 global_page_state(NR_KERNEL_STACK_KB)); 117 global_page_state(NR_KERNEL_STACK_KB));
118 show_val_kb(m, "PageTables: ", 118 show_val_kb(m, "PageTables: ",
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index b836fd61ed87..fe8f3265e877 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -16,9 +16,10 @@
16#include <linux/mmu_notifier.h> 16#include <linux/mmu_notifier.h>
17#include <linux/page_idle.h> 17#include <linux/page_idle.h>
18#include <linux/shmem_fs.h> 18#include <linux/shmem_fs.h>
19#include <linux/uaccess.h>
19 20
20#include <asm/elf.h> 21#include <asm/elf.h>
21#include <linux/uaccess.h> 22#include <asm/tlb.h>
22#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
23#include "internal.h" 24#include "internal.h"
24 25
@@ -1008,6 +1009,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1008 struct mm_struct *mm; 1009 struct mm_struct *mm;
1009 struct vm_area_struct *vma; 1010 struct vm_area_struct *vma;
1010 enum clear_refs_types type; 1011 enum clear_refs_types type;
1012 struct mmu_gather tlb;
1011 int itype; 1013 int itype;
1012 int rv; 1014 int rv;
1013 1015
@@ -1054,6 +1056,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1054 } 1056 }
1055 1057
1056 down_read(&mm->mmap_sem); 1058 down_read(&mm->mmap_sem);
1059 tlb_gather_mmu(&tlb, mm, 0, -1);
1057 if (type == CLEAR_REFS_SOFT_DIRTY) { 1060 if (type == CLEAR_REFS_SOFT_DIRTY) {
1058 for (vma = mm->mmap; vma; vma = vma->vm_next) { 1061 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1059 if (!(vma->vm_flags & VM_SOFTDIRTY)) 1062 if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -1075,7 +1078,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1075 walk_page_range(0, mm->highest_vm_end, &clear_refs_walk); 1078 walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
1076 if (type == CLEAR_REFS_SOFT_DIRTY) 1079 if (type == CLEAR_REFS_SOFT_DIRTY)
1077 mmu_notifier_invalidate_range_end(mm, 0, -1); 1080 mmu_notifier_invalidate_range_end(mm, 0, -1);
1078 flush_tlb_mm(mm); 1081 tlb_finish_mmu(&tlb, 0, -1);
1079 up_read(&mm->mmap_sem); 1082 up_read(&mm->mmap_sem);
1080out_mm: 1083out_mm:
1081 mmput(mm); 1084 mmput(mm);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 06ea26b8c996..b0d5897bc4e6 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1600,7 +1600,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1600 uffdio_copy.len); 1600 uffdio_copy.len);
1601 mmput(ctx->mm); 1601 mmput(ctx->mm);
1602 } else { 1602 } else {
1603 return -ENOSPC; 1603 return -ESRCH;
1604 } 1604 }
1605 if (unlikely(put_user(ret, &user_uffdio_copy->copy))) 1605 if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1606 return -EFAULT; 1606 return -EFAULT;
@@ -1647,7 +1647,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1647 uffdio_zeropage.range.len); 1647 uffdio_zeropage.range.len);
1648 mmput(ctx->mm); 1648 mmput(ctx->mm);
1649 } else { 1649 } else {
1650 return -ENOSPC; 1650 return -ESRCH;
1651 } 1651 }
1652 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) 1652 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1653 return -EFAULT; 1653 return -EFAULT;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ceef77c0416a..ff48f0096810 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -874,7 +874,6 @@ xfs_ialloc(
874 case S_IFREG: 874 case S_IFREG:
875 case S_IFDIR: 875 case S_IFDIR:
876 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 876 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
877 uint64_t di_flags2 = 0;
878 uint di_flags = 0; 877 uint di_flags = 0;
879 878
880 if (S_ISDIR(mode)) { 879 if (S_ISDIR(mode)) {
@@ -911,20 +910,23 @@ xfs_ialloc(
911 di_flags |= XFS_DIFLAG_NODEFRAG; 910 di_flags |= XFS_DIFLAG_NODEFRAG;
912 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) 911 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
913 di_flags |= XFS_DIFLAG_FILESTREAM; 912 di_flags |= XFS_DIFLAG_FILESTREAM;
914 if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
915 di_flags2 |= XFS_DIFLAG2_DAX;
916 913
917 ip->i_d.di_flags |= di_flags; 914 ip->i_d.di_flags |= di_flags;
918 ip->i_d.di_flags2 |= di_flags2;
919 } 915 }
920 if (pip && 916 if (pip &&
921 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) && 917 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
922 pip->i_d.di_version == 3 && 918 pip->i_d.di_version == 3 &&
923 ip->i_d.di_version == 3) { 919 ip->i_d.di_version == 3) {
920 uint64_t di_flags2 = 0;
921
924 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) { 922 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
925 ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE; 923 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
926 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize; 924 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
927 } 925 }
926 if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
927 di_flags2 |= XFS_DIFLAG2_DAX;
928
929 ip->i_d.di_flags2 |= di_flags2;
928 } 930 }
929 /* FALLTHROUGH */ 931 /* FALLTHROUGH */
930 case S_IFLNK: 932 case S_IFLNK:
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index fbe72b134bef..43aa42a3a5d3 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -539,6 +539,7 @@ xlog_discard_endio(
539 539
540 INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); 540 INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
541 queue_work(xfs_discard_wq, &ctx->discard_endio_work); 541 queue_work(xfs_discard_wq, &ctx->discard_endio_work);
542 bio_put(bio);
542} 543}
543 544
544static void 545static void
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 8afa4335e5b2..faddde44de8c 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,10 +112,11 @@ struct mmu_gather {
112 112
113#define HAVE_GENERIC_MMU_GATHER 113#define HAVE_GENERIC_MMU_GATHER
114 114
115void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); 115void arch_tlb_gather_mmu(struct mmu_gather *tlb,
116 struct mm_struct *mm, unsigned long start, unsigned long end);
116void tlb_flush_mmu(struct mmu_gather *tlb); 117void tlb_flush_mmu(struct mmu_gather *tlb);
117void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, 118void arch_tlb_finish_mmu(struct mmu_gather *tlb,
118 unsigned long end); 119 unsigned long start, unsigned long end, bool force);
119extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, 120extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
120 int page_size); 121 int page_size);
121 122
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c749eef1daa1..27b4b6615263 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1209,6 +1209,7 @@ static inline bool acpi_has_watchdog(void) { return false; }
1209#endif 1209#endif
1210 1210
1211#ifdef CONFIG_ACPI_SPCR_TABLE 1211#ifdef CONFIG_ACPI_SPCR_TABLE
1212extern bool qdf2400_e44_present;
1212int parse_spcr(bool earlycon); 1213int parse_spcr(bool earlycon);
1213#else 1214#else
1214static inline int parse_spcr(bool earlycon) { return 0; } 1215static inline int parse_spcr(bool earlycon) { return 0; }
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b56573bf440d..82b30e638430 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -39,8 +39,6 @@ enum cpuhp_state {
39 CPUHP_PCI_XGENE_DEAD, 39 CPUHP_PCI_XGENE_DEAD,
40 CPUHP_IOMMU_INTEL_DEAD, 40 CPUHP_IOMMU_INTEL_DEAD,
41 CPUHP_LUSTRE_CFS_DEAD, 41 CPUHP_LUSTRE_CFS_DEAD,
42 CPUHP_SCSI_BNX2FC_DEAD,
43 CPUHP_SCSI_BNX2I_DEAD,
44 CPUHP_WORKQUEUE_PREP, 42 CPUHP_WORKQUEUE_PREP,
45 CPUHP_POWER_NUMA_PREPARE, 43 CPUHP_POWER_NUMA_PREPARE,
46 CPUHP_HRTIMERS_PREPARE, 44 CPUHP_HRTIMERS_PREPARE,
diff --git a/include/linux/device.h b/include/linux/device.h
index 723cd54b94da..beabdbc08420 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -843,7 +843,7 @@ struct dev_links_info {
843 * hibernation, system resume and during runtime PM transitions 843 * hibernation, system resume and during runtime PM transitions
844 * along with subsystem-level and driver-level callbacks. 844 * along with subsystem-level and driver-level callbacks.
845 * @pins: For device pin management. 845 * @pins: For device pin management.
846 * See Documentation/pinctrl.txt for details. 846 * See Documentation/driver-api/pinctl.rst for details.
847 * @msi_list: Hosts MSI descriptors 847 * @msi_list: Hosts MSI descriptors
848 * @msi_domain: The generic MSI domain this device is using. 848 * @msi_domain: The generic MSI domain this device is using.
849 * @numa_node: NUMA node this device is close to. 849 * @numa_node: NUMA node this device is close to.
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 00ca5b86a753..d501d3956f13 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
689#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ 689#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
690#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ 690#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
691#define I2C_CLASS_SPD (1<<7) /* Memory modules */ 691#define I2C_CLASS_SPD (1<<7) /* Memory modules */
692#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */ 692/* Warn users that the adapter doesn't support classes anymore */
693#define I2C_CLASS_DEPRECATED (1<<8)
693 694
694/* Internal numbers to terminate lists */ 695/* Internal numbers to terminate lists */
695#define I2C_CLIENT_END 0xfffeU 696#define I2C_CLIENT_END 0xfffeU
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 497f2b3a5a62..97f1b465d04f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
105 struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; 105 struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
106}; 106};
107 107
108struct st_sensor_sim {
109 u8 addr;
110 u8 value;
111};
112
108/** 113/**
109 * struct st_sensor_bdu - ST sensor device block data update 114 * struct st_sensor_bdu - ST sensor device block data update
110 * @addr: address of the register. 115 * @addr: address of the register.
@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
197 * @bdu: Block data update register. 202 * @bdu: Block data update register.
198 * @das: Data Alignment Selection register. 203 * @das: Data Alignment Selection register.
199 * @drdy_irq: Data ready register of the sensor. 204 * @drdy_irq: Data ready register of the sensor.
205 * @sim: SPI serial interface mode register of the sensor.
200 * @multi_read_bit: Use or not a particular bit for [I2C/SPI] multi-read. 206 * @multi_read_bit: Use or not a particular bit for [I2C/SPI] multi-read.
201 * @bootime: samples to discard when the sensor passes from power-down to power-up. 207 * @bootime: samples to discard when the sensor passes from power-down to power-up.
202 */ 208 */
@@ -213,6 +219,7 @@ struct st_sensor_settings {
213 struct st_sensor_bdu bdu; 219 struct st_sensor_bdu bdu;
214 struct st_sensor_das das; 220 struct st_sensor_das das;
215 struct st_sensor_data_ready_irq drdy_irq; 221 struct st_sensor_data_ready_irq drdy_irq;
222 struct st_sensor_sim sim;
216 bool multi_read_bit; 223 bool multi_read_bit;
217 unsigned int bootime; 224 unsigned int bootime;
218}; 225};
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index aad5d81dfb44..b54517c05e9a 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -620,6 +620,7 @@ struct mlx4_caps {
620 u32 dmfs_high_rate_qpn_base; 620 u32 dmfs_high_rate_qpn_base;
621 u32 dmfs_high_rate_qpn_range; 621 u32 dmfs_high_rate_qpn_range;
622 u32 vf_caps; 622 u32 vf_caps;
623 bool wol_port[MLX4_MAX_PORTS + 1];
623 struct mlx4_rate_limit_caps rl_caps; 624 struct mlx4_rate_limit_caps rl_caps;
624}; 625};
625 626
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 6f41270d80c0..f378dc0e7eaf 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
212#define MLX5_WQE_CTRL_OPCODE_MASK 0xff 212#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
213#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00 213#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
214#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8 214#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
215#define MLX5_WQE_AV_EXT 0x80000000
216 215
217enum { 216enum {
218 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4, 217 MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f384bb62d8e..3cadee0a3508 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -487,14 +487,12 @@ struct mm_struct {
487 /* numa_scan_seq prevents two threads setting pte_numa */ 487 /* numa_scan_seq prevents two threads setting pte_numa */
488 int numa_scan_seq; 488 int numa_scan_seq;
489#endif 489#endif
490#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
491 /* 490 /*
492 * An operation with batched TLB flushing is going on. Anything that 491 * An operation with batched TLB flushing is going on. Anything that
493 * can move process memory needs to flush the TLB when moving a 492 * can move process memory needs to flush the TLB when moving a
494 * PROT_NONE or PROT_NUMA mapped page. 493 * PROT_NONE or PROT_NUMA mapped page.
495 */ 494 */
496 bool tlb_flush_pending; 495 atomic_t tlb_flush_pending;
497#endif
498#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 496#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
499 /* See flush_tlb_batched_pending() */ 497 /* See flush_tlb_batched_pending() */
500 bool tlb_flush_batched; 498 bool tlb_flush_batched;
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
522 return mm->cpu_vm_mask_var; 520 return mm->cpu_vm_mask_var;
523} 521}
524 522
525#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) 523struct mmu_gather;
524extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
525 unsigned long start, unsigned long end);
526extern void tlb_finish_mmu(struct mmu_gather *tlb,
527 unsigned long start, unsigned long end);
528
526/* 529/*
527 * Memory barriers to keep this state in sync are graciously provided by 530 * Memory barriers to keep this state in sync are graciously provided by
528 * the page table locks, outside of which no page table modifications happen. 531 * the page table locks, outside of which no page table modifications happen.
 529 * The barriers below prevent the compiler from re-ordering the instructions 532 * The barriers are used to ensure that tlb_flush_pending updates, which
 530 * around the memory barriers that are already present in the code. 533 * happen while the lock is not taken, are serialized against the PTE
 534 * updates, which happen while the lock is taken.
531 */ 535 */
532static inline bool mm_tlb_flush_pending(struct mm_struct *mm) 536static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
533{ 537{
534 barrier(); 538 return atomic_read(&mm->tlb_flush_pending) > 0;
535 return mm->tlb_flush_pending; 539}
540
541/*
 542 * Returns true if at least two of the above TLB batching threads are running in parallel.
543 */
544static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
545{
546 return atomic_read(&mm->tlb_flush_pending) > 1;
547}
548
549static inline void init_tlb_flush_pending(struct mm_struct *mm)
550{
551 atomic_set(&mm->tlb_flush_pending, 0);
536} 552}
537static inline void set_tlb_flush_pending(struct mm_struct *mm) 553
554static inline void inc_tlb_flush_pending(struct mm_struct *mm)
538{ 555{
539 mm->tlb_flush_pending = true; 556 atomic_inc(&mm->tlb_flush_pending);
540 557
541 /* 558 /*
542 * Guarantee that the tlb_flush_pending store does not leak into the 559 * Guarantee that the tlb_flush_pending increase does not leak into the
543 * critical section updating the page tables 560 * critical section updating the page tables
544 */ 561 */
545 smp_mb__before_spinlock(); 562 smp_mb__before_spinlock();
546} 563}
564
547/* Clearing is done after a TLB flush, which also provides a barrier. */ 565/* Clearing is done after a TLB flush, which also provides a barrier. */
548static inline void clear_tlb_flush_pending(struct mm_struct *mm) 566static inline void dec_tlb_flush_pending(struct mm_struct *mm)
549{
550 barrier();
551 mm->tlb_flush_pending = false;
552}
553#else
554static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
555{
556 return false;
557}
558static inline void set_tlb_flush_pending(struct mm_struct *mm)
559{
560}
561static inline void clear_tlb_flush_pending(struct mm_struct *mm)
562{ 567{
568 /*
 569 * Guarantee that the tlb_flush_pending decrease does not leak into the
570 * critical section, since we must order the PTE change and changes to
571 * the pending TLB flush indication. We could have relied on TLB flush
572 * as a memory barrier, but this behavior is not clearly documented.
573 */
574 smp_mb__before_atomic();
575 atomic_dec(&mm->tlb_flush_pending);
563} 576}
564#endif
565 577
566struct vm_fault; 578struct vm_fault;
567 579
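To make the new atomic contract concrete, here is a minimal sketch (an illustration, not part of the patch) of the intended usage pattern; the locking and flushing details are assumptions standing in for real callers such as change_protection_range() in mm/mprotect.c further below.

#include <linux/mm_types.h>

/* Illustrative writer: brackets a batched PTE update with the counter. */
static void example_batched_pte_update(struct mm_struct *mm)
{
	inc_tlb_flush_pending(mm);	/* ordered before any PTE change */

	/* ... modify PTEs under the page table lock ... */
	/* ... flush_tlb_range() once for the whole batch ... */

	dec_tlb_flush_pending(mm);	/* only after the TLB flush */
}

/* Illustrative reader, e.g. a NUMA hinting fault racing with the batch. */
static bool example_sees_pending_flush(struct mm_struct *mm)
{
	return mm_tlb_flush_pending(mm);	/* true while any batch is live */
}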
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 892148c448cc..5216d2eb2289 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -681,10 +681,10 @@ struct nand_buffers {
681 * @tWW_min: WP# transition to WE# low 681 * @tWW_min: WP# transition to WE# low
682 */ 682 */
683struct nand_sdr_timings { 683struct nand_sdr_timings {
684 u32 tBERS_max; 684 u64 tBERS_max;
685 u32 tCCS_min; 685 u32 tCCS_min;
686 u32 tPROG_max; 686 u64 tPROG_max;
687 u32 tR_max; 687 u64 tR_max;
688 u32 tALH_min; 688 u32 tALH_min;
689 u32 tADL_min; 689 u32 tADL_min;
690 u32 tALS_min; 690 u32 tALS_min;
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6c8c5d8041b7..2591878c1d48 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
346 * indicating an FC transport Aborted status. 346 * indicating an FC transport Aborted status.
347 * Entrypoint is Mandatory. 347 * Entrypoint is Mandatory.
348 * 348 *
 349 * @defer_rcv: Called by the transport to signal the LLDD that it has
350 * begun processing of a previously received NVME CMD IU. The LLDD
351 * is now free to re-use the rcv buffer associated with the
352 * nvmefc_tgt_fcp_req.
353 *
349 * @max_hw_queues: indicates the maximum number of hw queues the LLDD 354 * @max_hw_queues: indicates the maximum number of hw queues the LLDD
350 * supports for cpu affinitization. 355 * supports for cpu affinitization.
351 * Value is Mandatory. Must be at least 1. 356 * Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
846 struct nvmefc_tgt_fcp_req *fcpreq); 851 struct nvmefc_tgt_fcp_req *fcpreq);
847 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport, 852 void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
848 struct nvmefc_tgt_fcp_req *fcpreq); 853 struct nvmefc_tgt_fcp_req *fcpreq);
854 void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
855 struct nvmefc_tgt_fcp_req *fcpreq);
849 856
850 u32 max_hw_queues; 857 u32 max_hw_queues;
851 u16 max_sgl_segments; 858 u16 max_sgl_segments;
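A hedged sketch of how a low-level driver might wire up the new optional entrypoint; my_defer_rcv and my_tgt_template are hypothetical names, and the mandatory entrypoints are elided.

#include <linux/nvme-fc-driver.h>

static void my_defer_rcv(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* The transport has begun processing the CMD IU, so the receive
	 * buffer backing fcpreq may now be reposted to the hardware. */
}

static struct nvmet_fc_target_template my_tgt_template = {
	/* ... mandatory entrypoints elided ... */
	.defer_rcv	= my_defer_rcv,
};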
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4869e66dd659..a75c13673852 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1067,6 +1067,7 @@ void pcie_flr(struct pci_dev *dev);
1067int __pci_reset_function(struct pci_dev *dev); 1067int __pci_reset_function(struct pci_dev *dev);
1068int __pci_reset_function_locked(struct pci_dev *dev); 1068int __pci_reset_function_locked(struct pci_dev *dev);
1069int pci_reset_function(struct pci_dev *dev); 1069int pci_reset_function(struct pci_dev *dev);
1070int pci_reset_function_locked(struct pci_dev *dev);
1070int pci_try_reset_function(struct pci_dev *dev); 1071int pci_try_reset_function(struct pci_dev *dev);
1071int pci_probe_reset_slot(struct pci_slot *slot); 1072int pci_probe_reset_slot(struct pci_slot *slot);
1072int pci_reset_slot(struct pci_slot *slot); 1073int pci_reset_slot(struct pci_slot *slot);
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 231d3075815a..e91d1b6a260d 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -81,8 +81,8 @@
81 * it. 81 * it.
82 * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a 82 * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
83 * value on the line. Use argument 1 to indicate high level, argument 0 to 83 * value on the line. Use argument 1 to indicate high level, argument 0 to
84 * indicate low level. (Please see Documentation/pinctrl.txt, section 84 * indicate low level. (Please see Documentation/driver-api/pinctl.rst,
85 * "GPIO mode pitfalls" for a discussion around this parameter.) 85 * section "GPIO mode pitfalls" for a discussion around this parameter.)
86 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power 86 * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
87 * supplies, the argument to this parameter (on a custom format) tells 87 * supplies, the argument to this parameter (on a custom format) tells
88 * the driver which alternative power source to use. 88 * the driver which alternative power source to use.
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 79b0e4cdb814..f8274b0c6888 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -17,10 +17,12 @@
17 * Available only for accelerometer and pressure sensors. 17 * Available only for accelerometer and pressure sensors.
18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). 18 * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
19 * @open_drain: set the interrupt line to be open drain if possible. 19 * @open_drain: set the interrupt line to be open drain if possible.
20 * @spi_3wire: enable spi-3wire mode.
20 */ 21 */
21struct st_sensors_platform_data { 22struct st_sensors_platform_data {
22 u8 drdy_int_pin; 23 u8 drdy_int_pin;
23 bool open_drain; 24 bool open_drain;
25 bool spi_3wire;
24}; 26};
25 27
26#endif /* ST_SENSORS_PDATA_H */ 28#endif /* ST_SENSORS_PDATA_H */
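For illustration, a hedged sketch of board code filling in the extended platform data; the pin routing and the decision to use 3-wire mode are assumptions.

#include <linux/platform_data/st_sensors_pdata.h>

static struct st_sensors_platform_data example_st_pdata = {
	.drdy_int_pin	= 1,		/* route DRDY to INT1 */
	.open_drain	= false,
	.spi_3wire	= true,		/* sensor wired for 3-wire SPI */
};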
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index a026bfd089db..51349d124ee5 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
99 * parameter func: the desired function to use. 99 * parameter func: the desired function to use.
100 * parameter chan: the function channel index to use. 100 * parameter chan: the function channel index to use.
101 * 101 *
 102 * @do_aux_work: Request driver to perform auxiliary (periodic) operations
103 * Driver should return delay of the next auxiliary work scheduling
104 * time (>=0) or negative value in case further scheduling
105 * is not required.
106 *
102 * Drivers should embed their ptp_clock_info within a private 107 * Drivers should embed their ptp_clock_info within a private
103 * structure, obtaining a reference to it using container_of(). 108 * structure, obtaining a reference to it using container_of().
104 * 109 *
@@ -126,6 +131,7 @@ struct ptp_clock_info {
126 struct ptp_clock_request *request, int on); 131 struct ptp_clock_request *request, int on);
127 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, 132 int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
128 enum ptp_pin_function func, unsigned int chan); 133 enum ptp_pin_function func, unsigned int chan);
134 long (*do_aux_work)(struct ptp_clock_info *ptp);
129}; 135};
130 136
131struct ptp_clock; 137struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
211int ptp_find_pin(struct ptp_clock *ptp, 217int ptp_find_pin(struct ptp_clock *ptp,
212 enum ptp_pin_function func, unsigned int chan); 218 enum ptp_pin_function func, unsigned int chan);
213 219
220/**
221 * ptp_schedule_worker() - schedule ptp auxiliary work
222 *
223 * @ptp: The clock obtained from ptp_clock_register().
224 * @delay: number of jiffies to wait before queuing
225 * See kthread_queue_delayed_work() for more info.
226 */
227
228int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
229
214#else 230#else
215static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, 231static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
216 struct device *parent) 232 struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
225static inline int ptp_find_pin(struct ptp_clock *ptp, 241static inline int ptp_find_pin(struct ptp_clock *ptp,
226 enum ptp_pin_function func, unsigned int chan) 242 enum ptp_pin_function func, unsigned int chan)
227{ return -1; } 243{ return -1; }
244static inline int ptp_schedule_worker(struct ptp_clock *ptp,
245 unsigned long delay)
246{ return -EOPNOTSUPP; }
247
228#endif 248#endif
229 249
230#endif 250#endif
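A hedged sketch of a driver adopting the new auxiliary worker; the poll body and the one-second period are illustrative assumptions, and the return value is treated as a jiffies delay to match ptp_schedule_worker().

#include <linux/ptp_clock_kernel.h>

static long example_do_aux_work(struct ptp_clock_info *info)
{
	/* ... poll hardware, drain timestamp FIFOs, etc. ... */
	return HZ;	/* re-run in about one second */
}

static struct ptp_clock_info example_ptp_info = {
	/* ... .adjfreq/.adjtime/.gettime64 and friends elided ... */
	.do_aux_work	= example_do_aux_work,
};

/* Kick off the first run, e.g. from probe:
 *	ptp_schedule_worker(clock, 0);
 */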
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 5726107963b2..0ad87c434ae6 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -43,12 +43,13 @@ struct sync_file {
43#endif 43#endif
44 44
45 wait_queue_head_t wq; 45 wait_queue_head_t wq;
46 unsigned long flags;
46 47
47 struct dma_fence *fence; 48 struct dma_fence *fence;
48 struct dma_fence_cb cb; 49 struct dma_fence_cb cb;
49}; 50};
50 51
51#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS 52#define POLL_ENABLED 0
52 53
53struct sync_file *sync_file_create(struct dma_fence *fence); 54struct sync_file *sync_file_create(struct dma_fence *fence);
54struct dma_fence *sync_file_get_fence(int fd); 55struct dma_fence *sync_file_get_fence(int fd);
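Since POLL_ENABLED is now a bit in the sync_file's own flags word rather than in the shared fence's flags, each sync_file arms its poll callback independently. A hedged reduction of the poll-side test (the real logic lives in drivers/dma-buf/sync_file.c):

#include <linux/sync_file.h>

static void example_arm_poll(struct sync_file *sync_file)
{
	/* first poller on this file wins and installs the fence callback */
	if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
		/* ... dma_fence_add_callback() on sync_file->fence ... */
	}
}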
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70483296157f..ada65e767b28 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1916,6 +1916,16 @@ extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
1916 u64 xmit_time); 1916 u64 xmit_time);
1917extern void tcp_rack_reo_timeout(struct sock *sk); 1917extern void tcp_rack_reo_timeout(struct sock *sk);
1918 1918
1919/* At how many usecs into the future should the RTO fire? */
1920static inline s64 tcp_rto_delta_us(const struct sock *sk)
1921{
1922 const struct sk_buff *skb = tcp_write_queue_head(sk);
1923 u32 rto = inet_csk(sk)->icsk_rto;
1924 u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
1925
1926 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
1927}
1928
1919/* 1929/*
1920 * Save and compile IPv4 options, return a pointer to it 1930 * Save and compile IPv4 options, return a pointer to it
1921 */ 1931 */
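A worked example for tcp_rto_delta_us() with illustrative numbers: if the head skb was sent at skb_mstamp = 1,000,000 us, icsk_rto is 200 ms (200,000 us), and tp->tcp_mstamp now reads 1,150,000 us, the helper returns (1,000,000 + 200,000) - 1,150,000 = 50,000 us, i.e. the RTO should fire 50 ms from now. A non-positive result means the timeout is already overdue, which the caller in tcp_rearm_rto() clamps to a minimal delay.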
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 0ca1fb08805b..fb87d32f5e51 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -786,6 +786,7 @@ struct iscsi_np {
786 int np_sock_type; 786 int np_sock_type;
787 enum np_thread_state_table np_thread_state; 787 enum np_thread_state_table np_thread_state;
788 bool enabled; 788 bool enabled;
789 atomic_t np_reset_count;
789 enum iscsi_timer_flags_table np_login_timer_flags; 790 enum iscsi_timer_flags_table np_login_timer_flags;
790 u32 np_exports; 791 u32 np_exports;
791 enum np_flags_table np_flags; 792 enum np_flags_table np_flags;
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 26c54f6d595d..ad4eb2863e70 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
171 __u32 size; /* in, cmdstream size */ 171 __u32 size; /* in, cmdstream size */
172 __u32 pad; 172 __u32 pad;
173 __u32 nr_relocs; /* in, number of submit_reloc's */ 173 __u32 nr_relocs; /* in, number of submit_reloc's */
174 __u64 __user relocs; /* in, ptr to array of submit_reloc's */ 174 __u64 relocs; /* in, ptr to array of submit_reloc's */
175}; 175};
176 176
177/* Each buffer referenced elsewhere in the cmdstream submit (ie. the 177/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
215 __u32 fence; /* out */ 215 __u32 fence; /* out */
216 __u32 nr_bos; /* in, number of submit_bo's */ 216 __u32 nr_bos; /* in, number of submit_bo's */
217 __u32 nr_cmds; /* in, number of submit_cmd's */ 217 __u32 nr_cmds; /* in, number of submit_cmd's */
218 __u64 __user bos; /* in, ptr to array of submit_bo's */ 218 __u64 bos; /* in, ptr to array of submit_bo's */
219 __u64 __user cmds; /* in, ptr to array of submit_cmd's */ 219 __u64 cmds; /* in, ptr to array of submit_cmd's */
220 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ 220 __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
221}; 221};
222 222
diff --git a/kernel/fork.c b/kernel/fork.c
index 17921b0390b4..e075b7780421 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -807,7 +807,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
807 mm_init_aio(mm); 807 mm_init_aio(mm);
808 mm_init_owner(mm, p); 808 mm_init_owner(mm, p);
809 mmu_notifier_mm_init(mm); 809 mmu_notifier_mm_init(mm);
810 clear_tlb_flush_pending(mm); 810 init_tlb_flush_pending(mm);
811#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS 811#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
812 mm->pmd_huge_pte = NULL; 812 mm->pmd_huge_pte = NULL;
813#endif 813#endif
diff --git a/kernel/futex.c b/kernel/futex.c
index 16dbe4c93895..f50b434756c1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -670,13 +670,14 @@ again:
670 * this reference was taken by ihold under the page lock 670 * this reference was taken by ihold under the page lock
671 * pinning the inode in place so i_lock was unnecessary. The 671 * pinning the inode in place so i_lock was unnecessary. The
672 * only way for this check to fail is if the inode was 672 * only way for this check to fail is if the inode was
673 * truncated in parallel so warn for now if this happens. 673 * truncated in parallel which is almost certainly an
674 * application bug. In such a case, just retry.
674 * 675 *
675 * We are not calling into get_futex_key_refs() in file-backed 676 * We are not calling into get_futex_key_refs() in file-backed
676 * cases, therefore a successful atomic_inc return below will 677 * cases, therefore a successful atomic_inc return below will
677 * guarantee that get_futex_key() will still imply smp_mb(); (B). 678 * guarantee that get_futex_key() will still imply smp_mb(); (B).
678 */ 679 */
679 if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { 680 if (!atomic_inc_not_zero(&inode->i_count)) {
680 rcu_read_unlock(); 681 rcu_read_unlock();
681 put_page(page); 682 put_page(page);
682 683
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 222317721c5a..0972a8e09d08 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1650,7 +1650,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
1650{ 1650{
1651 unsigned long size; 1651 unsigned long size;
1652 1652
1653 size = global_page_state(NR_SLAB_RECLAIMABLE) 1653 size = global_node_page_state(NR_SLAB_RECLAIMABLE)
1654 + global_node_page_state(NR_ACTIVE_ANON) 1654 + global_node_page_state(NR_ACTIVE_ANON)
1655 + global_node_page_state(NR_INACTIVE_ANON) 1655 + global_node_page_state(NR_INACTIVE_ANON)
1656 + global_node_page_state(NR_ACTIVE_FILE) 1656 + global_node_page_state(NR_ACTIVE_FILE)
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 7d315fdb9f13..cf7b129b0b2b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -110,10 +110,12 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
110 if (in_task()) { 110 if (in_task()) {
111 unsigned int fail_nth = READ_ONCE(current->fail_nth); 111 unsigned int fail_nth = READ_ONCE(current->fail_nth);
112 112
113 if (fail_nth && !WRITE_ONCE(current->fail_nth, fail_nth - 1)) 113 if (fail_nth) {
114 goto fail; 114 if (!WRITE_ONCE(current->fail_nth, fail_nth - 1))
115 goto fail;
115 116
116 return false; 117 return false;
118 }
117 } 119 }
118 120
119 /* No need to check any other properties if the probability is 0 */ 121 /* No need to check any other properties if the probability is 0 */
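To show the corrected control flow in isolation, a hedged userspace model of the fail_nth semantics (current->fail_nth is modeled as a plain counter; the kernel uses READ_ONCE/WRITE_ONCE):

#include <stdbool.h>

static unsigned int fail_nth;	/* models current->fail_nth */

static bool should_fail_model(void)
{
	if (fail_nth) {
		if (--fail_nth == 0)
			return true;	/* the Nth call: inject the fault */
		return false;		/* armed but not yet the Nth call:
					 * the probabilistic checks below
					 * are bypassed too */
	}
	/* fail_nth unset: fall through to probability/interval checks */
	return false;
}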
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 6c1d678bcf8b..ff9148969b92 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -485,7 +485,7 @@ static ssize_t config_show(struct device *dev,
485 config->test_driver); 485 config->test_driver);
486 else 486 else
487 len += snprintf(buf+len, PAGE_SIZE - len, 487 len += snprintf(buf+len, PAGE_SIZE - len,
488 "driver:\tEMTPY\n"); 488 "driver:\tEMPTY\n");
489 489
490 if (config->test_fs) 490 if (config->test_fs)
491 len += snprintf(buf+len, PAGE_SIZE - len, 491 len += snprintf(buf+len, PAGE_SIZE - len,
@@ -493,7 +493,7 @@ static ssize_t config_show(struct device *dev,
493 config->test_fs); 493 config->test_fs);
494 else 494 else
495 len += snprintf(buf+len, PAGE_SIZE - len, 495 len += snprintf(buf+len, PAGE_SIZE - len,
496 "fs:\tEMTPY\n"); 496 "fs:\tEMPTY\n");
497 497
498 mutex_unlock(&test_dev->config_mutex); 498 mutex_unlock(&test_dev->config_mutex);
499 499
@@ -746,11 +746,11 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev,
746 strlen(test_str)); 746 strlen(test_str));
747 break; 747 break;
748 case TEST_KMOD_FS_TYPE: 748 case TEST_KMOD_FS_TYPE:
749 break;
750 kfree_const(config->test_fs); 749 kfree_const(config->test_fs);
751 config->test_driver = NULL; 750 config->test_driver = NULL;
752 copied = config_copy_test_fs(config, test_str, 751 copied = config_copy_test_fs(config, test_str,
753 strlen(test_str)); 752 strlen(test_str));
753 break;
754 default: 754 default:
755 mutex_unlock(&test_dev->config_mutex); 755 mutex_unlock(&test_dev->config_mutex);
756 return -EINVAL; 756 return -EINVAL;
@@ -880,10 +880,10 @@ static int test_dev_config_update_uint_sync(struct kmod_test_device *test_dev,
880 int (*test_sync)(struct kmod_test_device *test_dev)) 880 int (*test_sync)(struct kmod_test_device *test_dev))
881{ 881{
882 int ret; 882 int ret;
883 long new; 883 unsigned long new;
884 unsigned int old_val; 884 unsigned int old_val;
885 885
886 ret = kstrtol(buf, 10, &new); 886 ret = kstrtoul(buf, 10, &new);
887 if (ret) 887 if (ret)
888 return ret; 888 return ret;
889 889
@@ -918,9 +918,9 @@ static int test_dev_config_update_uint_range(struct kmod_test_device *test_dev,
918 unsigned int max) 918 unsigned int max)
919{ 919{
920 int ret; 920 int ret;
921 long new; 921 unsigned long new;
922 922
923 ret = kstrtol(buf, 10, &new); 923 ret = kstrtoul(buf, 10, &new);
924 if (ret) 924 if (ret)
925 return ret; 925 return ret;
926 926
@@ -1146,7 +1146,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
1146 struct kmod_test_device *test_dev = NULL; 1146 struct kmod_test_device *test_dev = NULL;
1147 int ret; 1147 int ret;
1148 1148
1149 mutex_unlock(&reg_dev_mutex); 1149 mutex_lock(&reg_dev_mutex);
1150 1150
1151 /* int should suffice for number of devices, test for wrap */ 1151 /* int should suffice for number of devices, test for wrap */
1152 if (unlikely(num_test_devs + 1) < 0) { 1152 if (unlikely(num_test_devs + 1) < 0) {
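The kstrtol() to kstrtoul() switch above closes a sign hole: "-1" used to parse into a negative long and wrap when stored into an unsigned int. A hedged helper mirroring the fixed range update:

#include <linux/kernel.h>

static int example_parse_uint_range(const char *buf, unsigned int min,
				    unsigned int max, unsigned int *out)
{
	unsigned long new;
	int ret;

	ret = kstrtoul(buf, 10, &new);	/* rejects "-1" outright */
	if (ret)
		return ret;

	if (new < min || new > max)
		return -ERANGE;		/* explicit bounds check */

	*out = new;
	return 0;
}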
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 9075aa54e955..b06d9fe23a28 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -24,7 +24,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
24{ 24{
25 unsigned long flags; 25 unsigned long flags;
26 struct page *page = alloc_page(balloon_mapping_gfp_mask() | 26 struct page *page = alloc_page(balloon_mapping_gfp_mask() |
27 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO); 27 __GFP_NOMEMALLOC | __GFP_NORETRY);
28 if (!page) 28 if (!page)
29 return NULL; 29 return NULL;
30 30
diff --git a/mm/debug.c b/mm/debug.c
index db1cd26d8752..5715448ab0b5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -124,9 +124,7 @@ void dump_mm(const struct mm_struct *mm)
124#ifdef CONFIG_NUMA_BALANCING 124#ifdef CONFIG_NUMA_BALANCING
125 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" 125 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
126#endif 126#endif
127#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
128 "tlb_flush_pending %d\n" 127 "tlb_flush_pending %d\n"
129#endif
130 "def_flags: %#lx(%pGv)\n", 128 "def_flags: %#lx(%pGv)\n",
131 129
132 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, 130 mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
@@ -158,9 +156,7 @@ void dump_mm(const struct mm_struct *mm)
158#ifdef CONFIG_NUMA_BALANCING 156#ifdef CONFIG_NUMA_BALANCING
159 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, 157 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
160#endif 158#endif
161#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) 159 atomic_read(&mm->tlb_flush_pending),
162 mm->tlb_flush_pending,
163#endif
164 mm->def_flags, &mm->def_flags 160 mm->def_flags, &mm->def_flags
165 ); 161 );
166} 162}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86975dec0ba1..216114f6ef0b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1496,6 +1496,13 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
1496 } 1496 }
1497 1497
1498 /* 1498 /*
1499 * The page_table_lock above provides a memory barrier
1500 * with change_protection_range.
1501 */
1502 if (mm_tlb_flush_pending(vma->vm_mm))
1503 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
1504
1505 /*
1499 * Migrate the THP to the requested node, returns with page unlocked 1506 * Migrate the THP to the requested node, returns with page unlocked
1500 * and access rights restored. 1507 * and access rights restored.
1501 */ 1508 */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1a0ac0ad6f6..31e207cb399b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4062,9 +4062,9 @@ out:
4062 return ret; 4062 return ret;
4063out_release_unlock: 4063out_release_unlock:
4064 spin_unlock(ptl); 4064 spin_unlock(ptl);
4065out_release_nounlock:
4066 if (vm_shared) 4065 if (vm_shared)
4067 unlock_page(page); 4066 unlock_page(page);
4067out_release_nounlock:
4068 put_page(page); 4068 put_page(page);
4069 goto out; 4069 goto out;
4070} 4070}
diff --git a/mm/ksm.c b/mm/ksm.c
index 4dc92f138786..db20f8436bc3 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1038,7 +1038,8 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1038 goto out_unlock; 1038 goto out_unlock;
1039 1039
1040 if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || 1040 if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
1041 (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) { 1041 (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
1042 mm_tlb_flush_pending(mm)) {
1042 pte_t entry; 1043 pte_t entry;
1043 1044
1044 swapped = PageSwapCache(page); 1045 swapped = PageSwapCache(page);
diff --git a/mm/memory.c b/mm/memory.c
index f65beaad319b..e158f7ac6730 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -215,12 +215,8 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
215 return true; 215 return true;
216} 216}
217 217
218/* tlb_gather_mmu 218void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
219 * Called to initialize an (on-stack) mmu_gather structure for page-table 219 unsigned long start, unsigned long end)
220 * tear-down from @mm. The @fullmm argument is used when @mm is without
221 * users and we're going to destroy the full address space (exit/execve).
222 */
223void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
224{ 220{
225 tlb->mm = mm; 221 tlb->mm = mm;
226 222
@@ -275,10 +271,14 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
275 * Called at the end of the shootdown operation to free up any resources 271 * Called at the end of the shootdown operation to free up any resources
276 * that were required. 272 * that were required.
277 */ 273 */
278void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) 274void arch_tlb_finish_mmu(struct mmu_gather *tlb,
275 unsigned long start, unsigned long end, bool force)
279{ 276{
280 struct mmu_gather_batch *batch, *next; 277 struct mmu_gather_batch *batch, *next;
281 278
279 if (force)
280 __tlb_adjust_range(tlb, start, end - start);
281
282 tlb_flush_mmu(tlb); 282 tlb_flush_mmu(tlb);
283 283
284 /* keep the page table cache within bounds */ 284 /* keep the page table cache within bounds */
@@ -398,6 +398,34 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
398 398
399#endif /* CONFIG_HAVE_RCU_TABLE_FREE */ 399#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
400 400
401/* tlb_gather_mmu
402 * Called to initialize an (on-stack) mmu_gather structure for page-table
403 * tear-down from @mm. The @fullmm argument is used when @mm is without
404 * users and we're going to destroy the full address space (exit/execve).
405 */
406void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
407 unsigned long start, unsigned long end)
408{
409 arch_tlb_gather_mmu(tlb, mm, start, end);
410 inc_tlb_flush_pending(tlb->mm);
411}
412
413void tlb_finish_mmu(struct mmu_gather *tlb,
414 unsigned long start, unsigned long end)
415{
416 /*
 417 * If parallel threads are doing PTE changes on the same range under a
 418 * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB flush
 419 * by batching, a thread observing pte_none or !pte_dirty may skip the
 420 * flush even though stale TLB entries remain, so flush the TLB
 421 * forcefully if we detect parallel PTE batching threads.
422 */
423 bool force = mm_tlb_flush_nested(tlb->mm);
424
425 arch_tlb_finish_mmu(tlb, start, end, force);
426 dec_tlb_flush_pending(tlb->mm);
427}
428
401/* 429/*
402 * Note: this doesn't free the actual pages themselves. That 430 * Note: this doesn't free the actual pages themselves. That
403 * has been handled earlier when unmapping all the memory regions. 431 * has been handled earlier when unmapping all the memory regions.
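Seen from a caller, the new wrappers keep the pending count elevated across the whole gather/flush window, which is what lets concurrent batches detect each other. A hedged sketch of the pattern (the actual unmap work is elided):

#include <asm/tlb.h>

static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* inc_tlb_flush_pending() */

	/* ... zap the page range, queueing pages into the gather ... */

	tlb_finish_mmu(&tlb, start, end);	/* forces a flush if another
						 * batch is pending, then
						 * dec_tlb_flush_pending() */
}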
diff --git a/mm/migrate.c b/mm/migrate.c
index 627671551873..d68a41da6abb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1937,12 +1937,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1937 put_page(new_page); 1937 put_page(new_page);
1938 goto out_fail; 1938 goto out_fail;
1939 } 1939 }
1940 /*
1941 * We are not sure a pending tlb flush here is for a huge page
1942 * mapping or not. Hence use the tlb range variant
1943 */
1944 if (mm_tlb_flush_pending(mm))
1945 flush_tlb_range(vma, mmun_start, mmun_end);
1946 1940
1947 /* Prepare a page as a migration target */ 1941 /* Prepare a page as a migration target */
1948 __SetPageLocked(new_page); 1942 __SetPageLocked(new_page);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4180ad8cc9c5..bd0f409922cb 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -244,7 +244,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
244 BUG_ON(addr >= end); 244 BUG_ON(addr >= end);
245 pgd = pgd_offset(mm, addr); 245 pgd = pgd_offset(mm, addr);
246 flush_cache_range(vma, addr, end); 246 flush_cache_range(vma, addr, end);
247 set_tlb_flush_pending(mm); 247 inc_tlb_flush_pending(mm);
248 do { 248 do {
249 next = pgd_addr_end(addr, end); 249 next = pgd_addr_end(addr, end);
250 if (pgd_none_or_clear_bad(pgd)) 250 if (pgd_none_or_clear_bad(pgd))
@@ -256,7 +256,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
256 /* Only flush the TLB if we actually modified any entries: */ 256 /* Only flush the TLB if we actually modified any entries: */
257 if (pages) 257 if (pages)
258 flush_tlb_range(vma, start, end); 258 flush_tlb_range(vma, start, end);
259 clear_tlb_flush_pending(mm); 259 dec_tlb_flush_pending(mm);
260 260
261 return pages; 261 return pages;
262} 262}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc32aa81f359..6d00f746c2fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4458,8 +4458,9 @@ long si_mem_available(void)
4458 * Part of the reclaimable slab consists of items that are in use, 4458 * Part of the reclaimable slab consists of items that are in use,
4459 * and cannot be freed. Cap this estimate at the low watermark. 4459 * and cannot be freed. Cap this estimate at the low watermark.
4460 */ 4460 */
4461 available += global_page_state(NR_SLAB_RECLAIMABLE) - 4461 available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
4462 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low); 4462 min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
4463 wmark_low);
4463 4464
4464 if (available < 0) 4465 if (available < 0)
4465 available = 0; 4466 available = 0;
@@ -4602,8 +4603,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
4602 global_node_page_state(NR_FILE_DIRTY), 4603 global_node_page_state(NR_FILE_DIRTY),
4603 global_node_page_state(NR_WRITEBACK), 4604 global_node_page_state(NR_WRITEBACK),
4604 global_node_page_state(NR_UNSTABLE_NFS), 4605 global_node_page_state(NR_UNSTABLE_NFS),
4605 global_page_state(NR_SLAB_RECLAIMABLE), 4606 global_node_page_state(NR_SLAB_RECLAIMABLE),
4606 global_page_state(NR_SLAB_UNRECLAIMABLE), 4607 global_node_page_state(NR_SLAB_UNRECLAIMABLE),
4607 global_node_page_state(NR_FILE_MAPPED), 4608 global_node_page_state(NR_FILE_MAPPED),
4608 global_node_page_state(NR_SHMEM), 4609 global_node_page_state(NR_SHMEM),
4609 global_page_state(NR_PAGETABLE), 4610 global_page_state(NR_PAGETABLE),
@@ -7668,7 +7669,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
7668 7669
7669 /* Make sure the range is really isolated. */ 7670 /* Make sure the range is really isolated. */
7670 if (test_pages_isolated(outer_start, end, false)) { 7671 if (test_pages_isolated(outer_start, end, false)) {
7671 pr_info("%s: [%lx, %lx) PFNs busy\n", 7672 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
7672 __func__, outer_start, end); 7673 __func__, outer_start, end);
7673 ret = -EBUSY; 7674 ret = -EBUSY;
7674 goto done; 7675 goto done;
diff --git a/mm/rmap.c b/mm/rmap.c
index c8993c63eb25..c1286d47aa1f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -888,10 +888,10 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
888 .flags = PVMW_SYNC, 888 .flags = PVMW_SYNC,
889 }; 889 };
890 int *cleaned = arg; 890 int *cleaned = arg;
891 bool invalidation_needed = false;
891 892
892 while (page_vma_mapped_walk(&pvmw)) { 893 while (page_vma_mapped_walk(&pvmw)) {
893 int ret = 0; 894 int ret = 0;
894 address = pvmw.address;
895 if (pvmw.pte) { 895 if (pvmw.pte) {
896 pte_t entry; 896 pte_t entry;
897 pte_t *pte = pvmw.pte; 897 pte_t *pte = pvmw.pte;
@@ -899,11 +899,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
899 if (!pte_dirty(*pte) && !pte_write(*pte)) 899 if (!pte_dirty(*pte) && !pte_write(*pte))
900 continue; 900 continue;
901 901
902 flush_cache_page(vma, address, pte_pfn(*pte)); 902 flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
903 entry = ptep_clear_flush(vma, address, pte); 903 entry = ptep_clear_flush(vma, pvmw.address, pte);
904 entry = pte_wrprotect(entry); 904 entry = pte_wrprotect(entry);
905 entry = pte_mkclean(entry); 905 entry = pte_mkclean(entry);
906 set_pte_at(vma->vm_mm, address, pte, entry); 906 set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
907 ret = 1; 907 ret = 1;
908 } else { 908 } else {
909#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 909#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
@@ -913,11 +913,11 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) 913 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
914 continue; 914 continue;
915 915
916 flush_cache_page(vma, address, page_to_pfn(page)); 916 flush_cache_page(vma, pvmw.address, page_to_pfn(page));
917 entry = pmdp_huge_clear_flush(vma, address, pmd); 917 entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
918 entry = pmd_wrprotect(entry); 918 entry = pmd_wrprotect(entry);
919 entry = pmd_mkclean(entry); 919 entry = pmd_mkclean(entry);
920 set_pmd_at(vma->vm_mm, address, pmd, entry); 920 set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
921 ret = 1; 921 ret = 1;
922#else 922#else
923 /* unexpected pmd-mapped page? */ 923 /* unexpected pmd-mapped page? */
@@ -926,11 +926,16 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
926 } 926 }
927 927
928 if (ret) { 928 if (ret) {
929 mmu_notifier_invalidate_page(vma->vm_mm, address);
930 (*cleaned)++; 929 (*cleaned)++;
930 invalidation_needed = true;
931 } 931 }
932 } 932 }
933 933
934 if (invalidation_needed) {
935 mmu_notifier_invalidate_range(vma->vm_mm, address,
936 address + (1UL << compound_order(page)));
937 }
938
934 return true; 939 return true;
935} 940}
936 941
@@ -1323,7 +1328,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1323 }; 1328 };
1324 pte_t pteval; 1329 pte_t pteval;
1325 struct page *subpage; 1330 struct page *subpage;
1326 bool ret = true; 1331 bool ret = true, invalidation_needed = false;
1327 enum ttu_flags flags = (enum ttu_flags)arg; 1332 enum ttu_flags flags = (enum ttu_flags)arg;
1328 1333
1329 /* munlock has nothing to gain from examining un-locked vmas */ 1334 /* munlock has nothing to gain from examining un-locked vmas */
@@ -1363,11 +1368,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1363 VM_BUG_ON_PAGE(!pvmw.pte, page); 1368 VM_BUG_ON_PAGE(!pvmw.pte, page);
1364 1369
1365 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); 1370 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1366 address = pvmw.address;
1367
1368 1371
1369 if (!(flags & TTU_IGNORE_ACCESS)) { 1372 if (!(flags & TTU_IGNORE_ACCESS)) {
1370 if (ptep_clear_flush_young_notify(vma, address, 1373 if (ptep_clear_flush_young_notify(vma, pvmw.address,
1371 pvmw.pte)) { 1374 pvmw.pte)) {
1372 ret = false; 1375 ret = false;
1373 page_vma_mapped_walk_done(&pvmw); 1376 page_vma_mapped_walk_done(&pvmw);
@@ -1376,7 +1379,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1376 } 1379 }
1377 1380
1378 /* Nuke the page table entry. */ 1381 /* Nuke the page table entry. */
1379 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1382 flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
1380 if (should_defer_flush(mm, flags)) { 1383 if (should_defer_flush(mm, flags)) {
1381 /* 1384 /*
1382 * We clear the PTE but do not flush so potentially 1385 * We clear the PTE but do not flush so potentially
@@ -1386,11 +1389,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1386 * transition on a cached TLB entry is written through 1389 * transition on a cached TLB entry is written through
1387 * and traps if the PTE is unmapped. 1390 * and traps if the PTE is unmapped.
1388 */ 1391 */
1389 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1392 pteval = ptep_get_and_clear(mm, pvmw.address,
1393 pvmw.pte);
1390 1394
1391 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1395 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1392 } else { 1396 } else {
1393 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1397 pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1394 } 1398 }
1395 1399
1396 /* Move the dirty bit to the page. Now the pte is gone. */ 1400 /* Move the dirty bit to the page. Now the pte is gone. */
@@ -1405,12 +1409,12 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1405 if (PageHuge(page)) { 1409 if (PageHuge(page)) {
1406 int nr = 1 << compound_order(page); 1410 int nr = 1 << compound_order(page);
1407 hugetlb_count_sub(nr, mm); 1411 hugetlb_count_sub(nr, mm);
1408 set_huge_swap_pte_at(mm, address, 1412 set_huge_swap_pte_at(mm, pvmw.address,
1409 pvmw.pte, pteval, 1413 pvmw.pte, pteval,
1410 vma_mmu_pagesize(vma)); 1414 vma_mmu_pagesize(vma));
1411 } else { 1415 } else {
1412 dec_mm_counter(mm, mm_counter(page)); 1416 dec_mm_counter(mm, mm_counter(page));
1413 set_pte_at(mm, address, pvmw.pte, pteval); 1417 set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
1414 } 1418 }
1415 1419
1416 } else if (pte_unused(pteval)) { 1420 } else if (pte_unused(pteval)) {
@@ -1434,7 +1438,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1434 swp_pte = swp_entry_to_pte(entry); 1438 swp_pte = swp_entry_to_pte(entry);
1435 if (pte_soft_dirty(pteval)) 1439 if (pte_soft_dirty(pteval))
1436 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1440 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1437 set_pte_at(mm, address, pvmw.pte, swp_pte); 1441 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
1438 } else if (PageAnon(page)) { 1442 } else if (PageAnon(page)) {
1439 swp_entry_t entry = { .val = page_private(subpage) }; 1443 swp_entry_t entry = { .val = page_private(subpage) };
1440 pte_t swp_pte; 1444 pte_t swp_pte;
@@ -1460,7 +1464,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1460 * If the page was redirtied, it cannot be 1464 * If the page was redirtied, it cannot be
1461 * discarded. Remap the page to page table. 1465 * discarded. Remap the page to page table.
1462 */ 1466 */
1463 set_pte_at(mm, address, pvmw.pte, pteval); 1467 set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
1464 SetPageSwapBacked(page); 1468 SetPageSwapBacked(page);
1465 ret = false; 1469 ret = false;
1466 page_vma_mapped_walk_done(&pvmw); 1470 page_vma_mapped_walk_done(&pvmw);
@@ -1468,7 +1472,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1468 } 1472 }
1469 1473
1470 if (swap_duplicate(entry) < 0) { 1474 if (swap_duplicate(entry) < 0) {
1471 set_pte_at(mm, address, pvmw.pte, pteval); 1475 set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
1472 ret = false; 1476 ret = false;
1473 page_vma_mapped_walk_done(&pvmw); 1477 page_vma_mapped_walk_done(&pvmw);
1474 break; 1478 break;
@@ -1484,14 +1488,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1484 swp_pte = swp_entry_to_pte(entry); 1488 swp_pte = swp_entry_to_pte(entry);
1485 if (pte_soft_dirty(pteval)) 1489 if (pte_soft_dirty(pteval))
1486 swp_pte = pte_swp_mksoft_dirty(swp_pte); 1490 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1487 set_pte_at(mm, address, pvmw.pte, swp_pte); 1491 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
1488 } else 1492 } else
1489 dec_mm_counter(mm, mm_counter_file(page)); 1493 dec_mm_counter(mm, mm_counter_file(page));
1490discard: 1494discard:
1491 page_remove_rmap(subpage, PageHuge(page)); 1495 page_remove_rmap(subpage, PageHuge(page));
1492 put_page(page); 1496 put_page(page);
1493 mmu_notifier_invalidate_page(mm, address); 1497 invalidation_needed = true;
1494 } 1498 }
1499
1500 if (invalidation_needed)
1501 mmu_notifier_invalidate_range(mm, address,
1502 address + (1UL << compound_order(page)));
1495 return ret; 1503 return ret;
1496} 1504}
1497 1505
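The shape of the rmap change, reduced to a hedged sketch: rather than one mmu_notifier_invalidate_page() per cleared mapping, the walk records that something changed and issues a single ranged call at the end (the start/end computation over the compound page is as in the hunks above):

#include <linux/mmu_notifier.h>

static void example_deferred_invalidate(struct mm_struct *mm,
					unsigned long start,
					unsigned long end,
					bool any_mapping_changed)
{
	/* called once, after the page_vma_mapped_walk() loop finishes */
	if (any_mapping_changed)
		mmu_notifier_invalidate_range(mm, start, end);
}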
diff --git a/mm/shmem.c b/mm/shmem.c
index b0aa6075d164..6540e5982444 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1022,7 +1022,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
1022 */ 1022 */
1023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 1023 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
1024 spin_lock(&sbinfo->shrinklist_lock); 1024 spin_lock(&sbinfo->shrinklist_lock);
1025 if (list_empty(&info->shrinklist)) { 1025 /*
1026 * _careful to defend against unlocked access to
1027 * ->shrink_list in shmem_unused_huge_shrink()
1028 */
1029 if (list_empty_careful(&info->shrinklist)) {
1026 list_add_tail(&info->shrinklist, 1030 list_add_tail(&info->shrinklist,
1027 &sbinfo->shrinklist); 1031 &sbinfo->shrinklist);
1028 sbinfo->shrinklist_len++; 1032 sbinfo->shrinklist_len++;
@@ -1817,7 +1821,11 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
1817 * to shrink under memory pressure. 1821 * to shrink under memory pressure.
1818 */ 1822 */
1819 spin_lock(&sbinfo->shrinklist_lock); 1823 spin_lock(&sbinfo->shrinklist_lock);
1820 if (list_empty(&info->shrinklist)) { 1824 /*
1825 * _careful to defend against unlocked access to
1826 * ->shrink_list in shmem_unused_huge_shrink()
1827 */
1828 if (list_empty_careful(&info->shrinklist)) {
1821 list_add_tail(&info->shrinklist, 1829 list_add_tail(&info->shrinklist,
1822 &sbinfo->shrinklist); 1830 &sbinfo->shrinklist);
1823 sbinfo->shrinklist_len++; 1831 sbinfo->shrinklist_len++;
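The _careful variant matters because the shrinker side may list_del_init() an entry without taking shrinklist_lock; list_empty_careful() checks both ->next and ->prev and is documented safe against exactly that single-deleter pattern. A hedged reduction of the add side:

#include <linux/list.h>

static void example_maybe_enqueue(struct list_head *item,
				  struct list_head *shrinklist,
				  unsigned long *len)
{
	/* caller holds shrinklist_lock */
	if (list_empty_careful(item)) {	/* safe vs. concurrent list_del_init */
		list_add_tail(item, shrinklist);
		(*len)++;
	}
}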
diff --git a/mm/util.c b/mm/util.c
index 7b07ec852e01..9ecddf568fe3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -633,7 +633,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
633 * which are reclaimable, under pressure. The dentry 633 * which are reclaimable, under pressure. The dentry
634 * cache and most inode caches should fall into this 634 * cache and most inode caches should fall into this
635 */ 635 */
636 free += global_page_state(NR_SLAB_RECLAIMABLE); 636 free += global_node_page_state(NR_SLAB_RECLAIMABLE);
637 637
638 /* 638 /*
639 * Leave reserved pages. The pages are not for anonymous pages. 639 * Leave reserved pages. The pages are not for anonymous pages.
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index e1133bc634b5..8a3ce79b1307 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1549,9 +1549,41 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
1549 return found; 1549 return found;
1550} 1550}
1551 1551
1552/**
1553 * batadv_tt_global_sync_flags - update TT sync flags
1554 * @tt_global: the TT global entry to update sync flags in
1555 *
1556 * Updates the sync flag bits in the tt_global flag attribute with a logical
1557 * OR of all sync flags from any of its TT orig entries.
1558 */
1559static void
1560batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
1561{
1562 struct batadv_tt_orig_list_entry *orig_entry;
1563 const struct hlist_head *head;
1564 u16 flags = BATADV_NO_FLAGS;
1565
1566 rcu_read_lock();
1567 head = &tt_global->orig_list;
1568 hlist_for_each_entry_rcu(orig_entry, head, list)
1569 flags |= orig_entry->flags;
1570 rcu_read_unlock();
1571
1572 flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
1573 tt_global->common.flags = flags;
1574}
1575
1576/**
1577 * batadv_tt_global_orig_entry_add - add or update a TT orig entry
1578 * @tt_global: the TT global entry to add an orig entry in
1579 * @orig_node: the originator to add an orig entry for
1580 * @ttvn: translation table version number of this changeset
1581 * @flags: TT sync flags
1582 */
1552static void 1583static void
1553batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, 1584batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1554 struct batadv_orig_node *orig_node, int ttvn) 1585 struct batadv_orig_node *orig_node, int ttvn,
1586 u8 flags)
1555{ 1587{
1556 struct batadv_tt_orig_list_entry *orig_entry; 1588 struct batadv_tt_orig_list_entry *orig_entry;
1557 1589
@@ -1561,7 +1593,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1561 * was added during a "temporary client detection" 1593 * was added during a "temporary client detection"
1562 */ 1594 */
1563 orig_entry->ttvn = ttvn; 1595 orig_entry->ttvn = ttvn;
1564 goto out; 1596 orig_entry->flags = flags;
1597 goto sync_flags;
1565 } 1598 }
1566 1599
1567 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC); 1600 orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
@@ -1573,6 +1606,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1573 batadv_tt_global_size_inc(orig_node, tt_global->common.vid); 1606 batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
1574 orig_entry->orig_node = orig_node; 1607 orig_entry->orig_node = orig_node;
1575 orig_entry->ttvn = ttvn; 1608 orig_entry->ttvn = ttvn;
1609 orig_entry->flags = flags;
1576 kref_init(&orig_entry->refcount); 1610 kref_init(&orig_entry->refcount);
1577 1611
1578 spin_lock_bh(&tt_global->list_lock); 1612 spin_lock_bh(&tt_global->list_lock);
@@ -1582,6 +1616,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
1582 spin_unlock_bh(&tt_global->list_lock); 1616 spin_unlock_bh(&tt_global->list_lock);
1583 atomic_inc(&tt_global->orig_list_count); 1617 atomic_inc(&tt_global->orig_list_count);
1584 1618
1619sync_flags:
1620 batadv_tt_global_sync_flags(tt_global);
1585out: 1621out:
1586 if (orig_entry) 1622 if (orig_entry)
1587 batadv_tt_orig_list_entry_put(orig_entry); 1623 batadv_tt_orig_list_entry_put(orig_entry);
@@ -1703,10 +1739,10 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1703 } 1739 }
1704 1740
1705 /* the change can carry possible "attribute" flags like the 1741 /* the change can carry possible "attribute" flags like the
1706 * TT_CLIENT_WIFI, therefore they have to be copied in the 1742 * TT_CLIENT_TEMP, therefore they have to be copied in the
1707 * client entry 1743 * client entry
1708 */ 1744 */
1709 common->flags |= flags; 1745 common->flags |= flags & (~BATADV_TT_SYNC_MASK);
1710 1746
1711 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only 1747 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
1712 * one originator left in the list and we previously received a 1748 * one originator left in the list and we previously received a
@@ -1723,7 +1759,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
1723 } 1759 }
1724add_orig_entry: 1760add_orig_entry:
1725 /* add the new orig_entry (if needed) or update it */ 1761 /* add the new orig_entry (if needed) or update it */
1726 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); 1762 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
1763 flags & BATADV_TT_SYNC_MASK);
1727 1764
1728 batadv_dbg(BATADV_DBG_TT, bat_priv, 1765 batadv_dbg(BATADV_DBG_TT, bat_priv,
1729 "Creating new global tt entry: %pM (vid: %d, via %pM)\n", 1766 "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
@@ -1946,6 +1983,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
1946 struct batadv_tt_orig_list_entry *orig, 1983 struct batadv_tt_orig_list_entry *orig,
1947 bool best) 1984 bool best)
1948{ 1985{
1986 u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
1949 void *hdr; 1987 void *hdr;
1950 struct batadv_orig_node_vlan *vlan; 1988 struct batadv_orig_node_vlan *vlan;
1951 u8 last_ttvn; 1989 u8 last_ttvn;
@@ -1975,7 +2013,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
1975 nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) || 2013 nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
1976 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) || 2014 nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
1977 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) || 2015 nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
1978 nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags)) 2016 nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
1979 goto nla_put_failure; 2017 goto nla_put_failure;
1980 2018
1981 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST)) 2019 if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
@@ -2589,6 +2627,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
2589 unsigned short vid) 2627 unsigned short vid)
2590{ 2628{
2591 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 2629 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
2630 struct batadv_tt_orig_list_entry *tt_orig;
2592 struct batadv_tt_common_entry *tt_common; 2631 struct batadv_tt_common_entry *tt_common;
2593 struct batadv_tt_global_entry *tt_global; 2632 struct batadv_tt_global_entry *tt_global;
2594 struct hlist_head *head; 2633 struct hlist_head *head;
@@ -2627,8 +2666,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
2627 /* find out if this global entry is announced by this 2666 /* find out if this global entry is announced by this
2628 * originator 2667 * originator
2629 */ 2668 */
2630 if (!batadv_tt_global_entry_has_orig(tt_global, 2669 tt_orig = batadv_tt_global_orig_entry_find(tt_global,
2631 orig_node)) 2670 orig_node);
2671 if (!tt_orig)
2632 continue; 2672 continue;
2633 2673
2634 /* use network order to read the VID: this ensures that 2674 /* use network order to read the VID: this ensures that
@@ -2640,10 +2680,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
2640 /* compute the CRC on flags that have to be kept in sync 2680 /* compute the CRC on flags that have to be kept in sync
2641 * among nodes 2681 * among nodes
2642 */ 2682 */
2643 flags = tt_common->flags & BATADV_TT_SYNC_MASK; 2683 flags = tt_orig->flags;
2644 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags)); 2684 crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
2645 2685
2646 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN); 2686 crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
2687
2688 batadv_tt_orig_list_entry_put(tt_orig);
2647 } 2689 }
2648 rcu_read_unlock(); 2690 rcu_read_unlock();
2649 } 2691 }
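Put together, the sync-relevant bits now live per originator while everything else stays on the global entry, and a netlink dump recombines the two. A hedged restatement of the computation used in batadv_tt_global_dump_subentry():

#include "packet.h"
#include "types.h"

static u16 example_dump_flags(const struct batadv_tt_common_entry *common,
			      const struct batadv_tt_orig_list_entry *orig)
{
	/* non-sync bits from the global entry, sync bits from this orig */
	return (common->flags & ~BATADV_TT_SYNC_MASK) | orig->flags;
}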
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ea43a6449247..a62795868794 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1260,6 +1260,7 @@ struct batadv_tt_global_entry {
1260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client 1260 * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
1261 * @orig_node: pointer to orig node announcing this non-mesh client 1261 * @orig_node: pointer to orig node announcing this non-mesh client
1262 * @ttvn: translation table version number which added the non-mesh client 1262 * @ttvn: translation table version number which added the non-mesh client
1263 * @flags: per orig entry TT sync flags
1263 * @list: list node for batadv_tt_global_entry::orig_list 1264 * @list: list node for batadv_tt_global_entry::orig_list
1264 * @refcount: number of contexts the object is used 1265 * @refcount: number of contexts the object is used
1265 * @rcu: struct used for freeing in an RCU-safe manner 1266 * @rcu: struct used for freeing in an RCU-safe manner
@@ -1267,6 +1268,7 @@ struct batadv_tt_global_entry {
1267struct batadv_tt_orig_list_entry { 1268struct batadv_tt_orig_list_entry {
1268 struct batadv_orig_node *orig_node; 1269 struct batadv_orig_node *orig_node;
1269 u8 ttvn; 1270 u8 ttvn;
1271 u8 flags;
1270 struct hlist_node list; 1272 struct hlist_node list;
1271 struct kref refcount; 1273 struct kref refcount;
1272 struct rcu_head rcu; 1274 struct rcu_head rcu;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8515f8fe0460..ce15a06d5558 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2739,7 +2739,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2739{ 2739{
2740 if (tx_path) 2740 if (tx_path)
2741 return skb->ip_summed != CHECKSUM_PARTIAL && 2741 return skb->ip_summed != CHECKSUM_PARTIAL &&
2742 skb->ip_summed != CHECKSUM_NONE; 2742 skb->ip_summed != CHECKSUM_UNNECESSARY;
2743 2743
2744 return skb->ip_summed == CHECKSUM_NONE; 2744 return skb->ip_summed == CHECKSUM_NONE;
2745} 2745}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 76c2077c3f5b..2e548eca3489 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net)
1731 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK; 1731 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
1732#endif 1732#endif
1733 1733
1734 /* Some igmp sysctl, whose values are always used */
1735 net->ipv4.sysctl_igmp_max_memberships = 20;
1736 net->ipv4.sysctl_igmp_max_msf = 10;
1737 /* IGMP reports for link-local multicast groups are enabled by default */
1738 net->ipv4.sysctl_igmp_llm_reports = 1;
1739 net->ipv4.sysctl_igmp_qrv = 2;
1740
1734 return 0; 1741 return 0;
1735} 1742}
1736 1743
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index c4c6e1969ed0..2ae8f54cb321 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1523,9 +1523,17 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
1523 int taglen; 1523 int taglen;
1524 1524
1525 for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) { 1525 for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
1526 if (optptr[0] == IPOPT_CIPSO) 1526 switch (optptr[0]) {
1527 case IPOPT_CIPSO:
1527 return optptr; 1528 return optptr;
1528 taglen = optptr[1]; 1529 case IPOPT_END:
1530 return NULL;
1531 case IPOPT_NOOP:
1532 taglen = 1;
1533 break;
1534 default:
1535 taglen = optptr[1];
1536 }
1529 optlen -= taglen; 1537 optlen -= taglen;
1530 optptr += taglen; 1538 optptr += taglen;
1531 } 1539 }
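IPOPT_END and IPOPT_NOOP are single-byte options with no length octet, so blindly reading optptr[1] for them mis-parses the rest of the option space; that is what the switch above fixes. A hedged standalone walker in the same shape (the extra taglen sanity check is an added assumption, not in the kernel loop):

#include <linux/ip.h>

static unsigned char *example_find_opt(unsigned char *optptr, int optlen,
				       unsigned char wanted)
{
	while (optlen > 0) {
		int taglen;

		if (optptr[0] == wanted)
			return optptr;

		switch (optptr[0]) {
		case IPOPT_END:			/* end of option list */
			return NULL;
		case IPOPT_NOOP:		/* 1-byte padding, no length */
			taglen = 1;
			break;
		default:
			taglen = optptr[1];	/* TLV-style option */
		}

		if (taglen <= 0 || taglen > optlen)
			return NULL;		/* malformed, bail out */

		optlen -= taglen;
		optptr += taglen;
	}
	return NULL;
}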
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8e0257d01200..1540db65241a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -450,6 +450,7 @@ out_unlock:
450out: 450out:
451 NAPI_GRO_CB(skb)->flush |= flush; 451 NAPI_GRO_CB(skb)->flush |= flush;
452 skb_gro_remcsum_cleanup(skb, &grc); 452 skb_gro_remcsum_cleanup(skb, &grc);
453 skb->remcsum_offload = 0;
453 454
454 return pp; 455 return pp;
455} 456}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 28f14afd0dd3..498706b072fb 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2974,12 +2974,6 @@ static int __net_init igmp_net_init(struct net *net)
2974 goto out_sock; 2974 goto out_sock;
2975 } 2975 }
2976 2976
2977 /* Sysctl initialization */
2978 net->ipv4.sysctl_igmp_max_memberships = 20;
2979 net->ipv4.sysctl_igmp_max_msf = 10;
2980 /* IGMP reports for link-local multicast groups are enabled by default */
2981 net->ipv4.sysctl_igmp_llm_reports = 1;
2982 net->ipv4.sysctl_igmp_qrv = 2;
2983 return 0; 2977 return 0;
2984 2978
2985out_sock: 2979out_sock:
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 50c74cd890bc..e153c40c2436 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -965,11 +965,12 @@ static int __ip_append_data(struct sock *sk,
965 csummode = CHECKSUM_PARTIAL; 965 csummode = CHECKSUM_PARTIAL;
966 966
967 cork->length += length; 967 cork->length += length;
968 if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || 968 if ((skb && skb_is_gso(skb)) ||
969 (skb && skb_is_gso(skb))) && 969 (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
970 (skb_queue_len(queue) <= 1) &&
970 (sk->sk_protocol == IPPROTO_UDP) && 971 (sk->sk_protocol == IPPROTO_UDP) &&
971 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && 972 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
972 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { 973 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
973 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 974 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
974 hh_len, fragheaderlen, transhdrlen, 975 hh_len, fragheaderlen, transhdrlen,
975 maxfraglen, flags); 976 maxfraglen, flags);
@@ -1288,6 +1289,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		return -EINVAL;
 
 	if ((size + skb->len > mtu) &&
+	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2920e0cb09f8..53de1424c13c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -107,6 +107,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked */
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
+#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
 #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 #define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 #define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack() */
@@ -2520,8 +2521,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 		return;
 
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_jiffies32;
 	}
@@ -3004,10 +3005,7 @@ void tcp_rearm_rto(struct sock *sk)
 	/* Offset the time elapsed after installing regular RTO */
 	if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
-		struct sk_buff *skb = tcp_write_queue_head(sk);
-		u64 rto_time_stamp = skb->skb_mstamp +
-				     jiffies_to_usecs(rto);
-		s64 delta_us = rto_time_stamp - tp->tcp_mstamp;
+		s64 delta_us = tcp_rto_delta_us(sk);
 		/* delta_us may not be positive if the socket is locked
 		 * when the retrans timer fires and is rescheduled.
 		 */
@@ -3019,6 +3017,13 @@ void tcp_rearm_rto(struct sock *sk)
 	}
 }
 
+/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
+static void tcp_set_xmit_timer(struct sock *sk)
+{
+	if (!tcp_schedule_loss_probe(sk))
+		tcp_rearm_rto(sk);
+}
+
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3180,7 +3185,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 					ca_rtt_us, sack->rate);
 
 	if (flag & FLAG_ACKED) {
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3208,7 +3213,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 		 * after when the head was last (re)transmitted. Otherwise the
 		 * timeout may continue to extend in loss recovery.
 		 */
-		tcp_rearm_rto(sk);
+		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
 	}
 
 	if (icsk->icsk_ca_ops->pkts_acked) {
@@ -3580,9 +3585,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	if (after(ack, tp->snd_nxt))
 		goto invalid_ack;
 
-	if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
-		tcp_rearm_rto(sk);
-
 	if (after(ack, prior_snd_una)) {
 		flag |= FLAG_SND_UNA_ADVANCED;
 		icsk->icsk_retransmits = 0;
@@ -3647,18 +3649,20 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
 				    &sack_state);
 
+	if (tp->tlp_high_seq)
+		tcp_process_tlp_ack(sk, ack, flag);
+	/* If needed, reset TLP/RTO timer; RACK may later override this. */
+	if (flag & FLAG_SET_XMIT_TIMER)
+		tcp_set_xmit_timer(sk);
+
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 		tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
 	}
-	if (tp->tlp_high_seq)
-		tcp_process_tlp_ack(sk, ack, flag);
 
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
 		sk_dst_confirm(sk);
 
-	if (icsk->icsk_pending == ICSK_TIME_RETRANS)
-		tcp_schedule_loss_probe(sk);
 	delivered = tp->delivered - delivered;	/* freshly ACKed or SACKed */
 	lost = tp->lost - lost;			/* freshly marked lost */
 	tcp_rate_gen(sk, delivered, lost, sack_state.rate);
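tcp_rearm_rto() above now calls tcp_rto_delta_us(), a helper introduced elsewhere in this series (its definition, in include/net/tcp.h, is not part of this diff). Reconstructed from the open-coded lines it replaces, it computes how many microseconds remain until the RTO for the head of the write queue should fire; a negative result means the timer is already overdue, which the retained comment about a locked socket covers:

    /* Sketch, assembled from the removed open-coded computation. */
    static inline s64 tcp_rto_delta_us(const struct sock *sk)
    {
        const struct sk_buff *skb = tcp_write_queue_head(sk);
        u32 rto = inet_csk(sk)->icsk_rto;
        u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);

        return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
    }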
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2f1588bf73da..b7661a68d498 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2377,24 +2377,15 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 timeout, tlp_time_stamp, rto_time_stamp;
 	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
+	u32 timeout, rto_delta_us;
 
-	/* No consecutive loss probes. */
-	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
-		tcp_rearm_rto(sk);
-		return false;
-	}
 	/* Don't do any loss probe on a Fast Open connection before 3WHS
 	 * finishes.
 	 */
 	if (tp->fastopen_rsk)
 		return false;
 
-	/* TLP is only scheduled when next timer event is RTO. */
-	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
-		return false;
-
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
 	 * in Open state, that are either limited by cwnd or application.
 	 */
@@ -2417,14 +2408,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 		     (rtt + (rtt >> 1) + TCP_DELACK_MAX));
 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
 
-	/* If RTO is shorter, just schedule TLP in its place. */
-	tlp_time_stamp = tcp_jiffies32 + timeout;
-	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
-	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-		s32 delta = rto_time_stamp - tcp_jiffies32;
-		if (delta > 0)
-			timeout = delta;
-	}
+	/* If the RTO formula yields an earlier time, then use that time. */
+	rto_delta_us = tcp_rto_delta_us(sk);  /* How far in future is RTO? */
+	if (rto_delta_us > 0)
+		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
 
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
 				  TCP_RTO_MAX);
@@ -3449,6 +3436,10 @@ int tcp_connect(struct sock *sk)
 	int err;
 
 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB);
+
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
+
 	tcp_connect_init(sk);
 
 	if (unlikely(tp->repair)) {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index c0feeeef962a..e906014890b6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -652,7 +652,8 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
 
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 		goto out;
 
 	elapsed = keepalive_time_when(tp);
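The rewritten test leans on the TCPF_* masks being one-hot encodings of the TCP_* state numbers (TCPF_CLOSE == 1 << TCP_CLOSE, and so on, per include/net/tcp_states.h), which turns membership in an arbitrary set of states into a single AND. A userspace illustration, with the state values copied from tcp_states.h:

    #include <stdio.h>

    enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_CLOSE = 7 };

    #define TCPF_SYN_SENT (1 << TCP_SYN_SENT)
    #define TCPF_CLOSE    (1 << TCP_CLOSE)

    /* Nonzero when the keepalive timer has nothing to do in this state. */
    static int keepalive_idle_state(int state)
    {
        return (1 << state) & (TCPF_CLOSE | TCPF_SYN_SENT);
    }

    int main(void)
    {
        printf("%d %d %d\n",
               !!keepalive_idle_state(TCP_CLOSE),        /* 1 */
               !!keepalive_idle_state(TCP_SYN_SENT),     /* 1 */
               !!keepalive_idle_state(TCP_ESTABLISHED)); /* 0 */
        return 0;
    }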
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e6276fa3750b..a7c804f73990 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
 	if (is_udplite)				/*     UDP-Lite      */
 		csum = udplite_csum(skb);
 
-	else if (sk->sk_no_check_tx) {		/* UDP csum disabled */
+	else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {	/* UDP csum off */
 
 		skb->ip_summed = CHECKSUM_NONE;
 		goto send;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 781250151d40..0932c85b42af 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	if (uh->check == 0)
 		uh->check = CSUM_MANGLED_0;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	/* If there is no outer header we can fake a checksum offload
 	 * due to the fact that we have already done the checksum in
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 162efba0d0cd..2dfe50d8d609 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1381,11 +1381,12 @@ emsgsize:
 	 */
 
 	cork->length += length;
-	if ((((length + (skb ? skb->len : headersize)) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + (skb ? skb->len : headersize)) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
-	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4d30c96a819d..a640fbcba15d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2351,6 +2351,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 	if (on_link)
 		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
+	nrt->rt6i_protocol = RTPROT_REDIRECT;
 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
 
 	if (ip6_ins_rt(nrt))
@@ -2461,6 +2462,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
 		.fc_dst_len	= prefixlen,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
 				  RTF_UP | RTF_PREF(pref),
+		.fc_protocol = RTPROT_RA,
 		.fc_nlinfo.portid = 0,
 		.fc_nlinfo.nlh = NULL,
 		.fc_nlinfo.nl_net = net,
@@ -2513,6 +2515,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
 		.fc_ifindex = dev->ifindex,
 		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
 			    RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
+		.fc_protocol = RTPROT_RA,
 		.fc_nlinfo.portid = 0,
 		.fc_nlinfo.nlh = NULL,
 		.fc_nlinfo.nl_net = dev_net(dev),
@@ -3424,14 +3427,6 @@ static int rt6_fill_node(struct net *net,
 	rtm->rtm_flags = 0;
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = rt->rt6i_protocol;
-	if (rt->rt6i_flags & RTF_DYNAMIC)
-		rtm->rtm_protocol = RTPROT_REDIRECT;
-	else if (rt->rt6i_flags & RTF_ADDRCONF) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
-			rtm->rtm_protocol = RTPROT_RA;
-		else
-			rtm->rtm_protocol = RTPROT_KERNEL;
-	}
 
 	if (rt->rt6i_flags & RTF_CACHE)
 		rtm->rtm_flags |= RTM_F_CLONED;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a2267f80febb..e7d378c032cb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 	if (uh->check == 0)
 		uh->check = CSUM_MANGLED_0;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	/* If there is no outer header we can fake a checksum offload
 	 * due to the fact that we have already done the checksum in
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0615c2a950fa..008a45ca3112 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3700,14 +3700,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		if (val > INT_MAX)
 			return -EINVAL;
-		po->tp_reserve = val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_reserve = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_LOSS:
 	{
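The PACKET_RESERVE change converts an unlocked check-then-store into one done under the socket lock: without it, a concurrent PACKET_RX_RING setup could map a ring between the pg_vec test and the tp_reserve assignment. The shape of the fix, pulled out as a hypothetical helper for clarity (the real code stays inline in packet_setsockopt(), and the names here are placeholders):

    static int packet_set_reserve(struct sock *sk, struct packet_sock *po,
                                  unsigned int val)
    {
        int ret;

        lock_sock(sk);        /* serializes against ring setup/teardown */
        if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
            ret = -EBUSY;     /* a ring is already mapped: too late */
        } else {
            po->tp_reserve = val;
            ret = 0;
        }
        release_sock(sk);
        return ret;
    }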
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index e10624aa6959..9722bf839d9d 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1015,8 +1015,10 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
 	if (rds_ib_ring_empty(&ic->i_recv_ring))
 		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
-	if (rds_ib_ring_low(&ic->i_recv_ring))
+	if (rds_ib_ring_low(&ic->i_recv_ring)) {
 		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
+		rds_ib_stats_inc(s_ib_rx_refill_from_cq);
+	}
 }
 
 int rds_ib_recv_path(struct rds_conn_path *cp)
@@ -1029,6 +1031,7 @@ int rds_ib_recv_path(struct rds_conn_path *cp)
 	if (rds_conn_up(conn)) {
 		rds_ib_attempt_ack(ic);
 		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+		rds_ib_stats_inc(s_ib_rx_refill_from_thread);
 	}
 
 	return ret;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 36f0ced9e60c..d516ba8178b8 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -36,8 +36,8 @@ static struct tc_action_ops act_ipt_ops;
 static unsigned int xt_net_id;
 static struct tc_action_ops act_xt_ops;
 
-static int ipt_init_target(struct xt_entry_target *t, char *table,
-			   unsigned int hook)
+static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+			   char *table, unsigned int hook)
 {
 	struct xt_tgchk_param par;
 	struct xt_target *target;
@@ -49,8 +49,9 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
 		return PTR_ERR(target);
 
 	t->u.kernel.target = target;
+	memset(&par, 0, sizeof(par));
+	par.net       = net;
 	par.table     = table;
-	par.entryinfo = NULL;
 	par.target    = target;
 	par.targinfo  = t->data;
 	par.hook_mask = hook;
@@ -91,10 +92,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
 	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) },
 };
 
-static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
+static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
 			  struct nlattr *est, struct tc_action **a,
 			  const struct tc_action_ops *ops, int ovr, int bind)
 {
+	struct tc_action_net *tn = net_generic(net, id);
 	struct nlattr *tb[TCA_IPT_MAX + 1];
 	struct tcf_ipt *ipt;
 	struct xt_entry_target *td, *t;
@@ -159,7 +161,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
 	if (unlikely(!t))
 		goto err2;
 
-	err = ipt_init_target(t, tname, hook);
+	err = ipt_init_target(net, t, tname, hook);
 	if (err < 0)
 		goto err3;
 
@@ -193,18 +195,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla,
 			struct nlattr *est, struct tc_action **a, int ovr,
 			int bind)
 {
-	struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-	return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind);
+	return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
+			      bind);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
 		       struct nlattr *est, struct tc_action **a, int ovr,
 		       int bind)
 {
-	struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-	return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind);
+	return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
+			      bind);
 }
 
 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
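__tcf_ipt_init() now takes the namespace plus the per-net id rather than a precomputed tc_action_net pointer, so it can both resolve its own per-net data and hand the namespace to ipt_init_target() for par.net. The lookup it performs is the standard net_generic() idiom; a sketch, where my_net_id stands in for ipt_net_id or xt_net_id:

    #include <net/netns/generic.h>

    static unsigned int my_net_id;  /* placeholder for ipt_net_id/xt_net_id */

    static struct tc_action_net *my_tc_net(struct net *net)
    {
        /* Returns the per-namespace area registered for this id. */
        return net_generic(net, my_net_id);
    }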
diff --git a/net/tipc/node.c b/net/tipc/node.c
index aeef8011ac7d..9b4dcb6a16b5 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1455,10 +1455,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
 	/* Initiate synch mode if applicable */
 	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
 		syncpt = iseqno + exp_pkts - 1;
-		if (!tipc_link_is_up(l)) {
-			tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+		if (!tipc_link_is_up(l))
 			__tipc_node_link_up(n, bearer_id, xmitq);
-		}
 		if (n->state == SELF_UP_PEER_UP) {
 			n->sync_point = syncpt;
 			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 3bd5f4f30235..bc443201d3ef 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -18,6 +18,7 @@ my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 use Cwd;
+use File::Find;
 
 my $cur_path = fastgetcwd() . '/';
 my $lk_path = "./";
@@ -58,6 +59,7 @@ my $from_filename = 0;
 my $pattern_depth = 0;
 my $version = 0;
 my $help = 0;
+my $find_maintainer_files = 0;
 
 my $vcs_used = 0;
 
@@ -249,6 +251,7 @@ if (!GetOptions(
 	'sections!' => \$sections,
 	'fe|file-emails!' => \$file_emails,
 	'f|file' => \$from_filename,
+	'find-maintainer-files' => \$find_maintainer_files,
 	'v|version' => \$version,
 	'h|help|usage' => \$help,
 	)) {
@@ -307,36 +310,74 @@ if (!top_of_kernel_tree($lk_path)) {
 
 my @typevalue = ();
 my %keyword_hash;
+my @mfiles = ();
 
-open (my $maint, '<', "${lk_path}MAINTAINERS")
-    or die "$P: Can't open MAINTAINERS: $!\n";
-while (<$maint>) {
-    my $line = $_;
-
-    if ($line =~ m/^([A-Z]):\s*(.*)/) {
-        my $type = $1;
-        my $value = $2;
-
-        ##Filename pattern matching
-        if ($type eq "F" || $type eq "X") {
-            $value =~ s@\.@\\\.@g;       ##Convert . to \.
-            $value =~ s/\*/\.\*/g;       ##Convert * to .*
-            $value =~ s/\?/\./g;         ##Convert ? to .
-            ##if pattern is a directory and it lacks a trailing slash, add one
-            if ((-d $value)) {
-                $value =~ s@([^/])$@$1/@;
+sub read_maintainer_file {
+    my ($file) = @_;
+
+    open (my $maint, '<', "$file")
+        or die "$P: Can't open MAINTAINERS file '$file': $!\n";
+    while (<$maint>) {
+        my $line = $_;
+
+        if ($line =~ m/^([A-Z]):\s*(.*)/) {
+            my $type = $1;
+            my $value = $2;
+
+            ##Filename pattern matching
+            if ($type eq "F" || $type eq "X") {
+                $value =~ s@\.@\\\.@g;       ##Convert . to \.
+                $value =~ s/\*/\.\*/g;       ##Convert * to .*
+                $value =~ s/\?/\./g;         ##Convert ? to .
+                ##if pattern is a directory and it lacks a trailing slash, add one
+                if ((-d $value)) {
+                    $value =~ s@([^/])$@$1/@;
+                }
+            } elsif ($type eq "K") {
+                $keyword_hash{@typevalue} = $value;
             }
-        } elsif ($type eq "K") {
-            $keyword_hash{@typevalue} = $value;
+            push(@typevalue, "$type:$value");
+        } elsif (!(/^\s*$/ || /^\s*\#/)) {
+            $line =~ s/\n$//g;
+            push(@typevalue, $line);
         }
-        push(@typevalue, "$type:$value");
-    } elsif (!/^(\s)*$/) {
-        $line =~ s/\n$//g;
-        push(@typevalue, $line);
     }
+    close($maint);
+}
+
+sub find_is_maintainer_file {
+    my ($file) = $_;
+    return if ($file !~ m@/MAINTAINERS$@);
+    $file = $File::Find::name;
+    return if (! -f $file);
+    push(@mfiles, $file);
 }
-close($maint);
 
+sub find_ignore_git {
+    return grep { $_ !~ /^\.git$/; } @_;
+}
+
+if (-d "${lk_path}MAINTAINERS") {
+    opendir(DIR, "${lk_path}MAINTAINERS") or die $!;
+    my @files = readdir(DIR);
+    closedir(DIR);
+    foreach my $file (@files) {
+        push(@mfiles, "${lk_path}MAINTAINERS/$file") if ($file !~ /^\./);
+    }
+}
+
+if ($find_maintainer_files) {
+    find( { wanted => \&find_is_maintainer_file,
+            preprocess => \&find_ignore_git,
+            no_chdir => 1,
+          }, "${lk_path}");
+} else {
+    push(@mfiles, "${lk_path}MAINTAINERS") if -f "${lk_path}MAINTAINERS";
+}
+
+foreach my $file (@mfiles) {
+    read_maintainer_file("$file");
+}
 
 #
 # Read mail address map
@@ -873,7 +914,7 @@ sub top_of_kernel_tree {
873 if ( (-f "${lk_path}COPYING") 914 if ( (-f "${lk_path}COPYING")
874 && (-f "${lk_path}CREDITS") 915 && (-f "${lk_path}CREDITS")
875 && (-f "${lk_path}Kbuild") 916 && (-f "${lk_path}Kbuild")
876 && (-f "${lk_path}MAINTAINERS") 917 && (-e "${lk_path}MAINTAINERS")
877 && (-f "${lk_path}Makefile") 918 && (-f "${lk_path}Makefile")
878 && (-f "${lk_path}README") 919 && (-f "${lk_path}README")
879 && (-d "${lk_path}Documentation") 920 && (-d "${lk_path}Documentation")
diff --git a/scripts/parse-maintainers.pl b/scripts/parse-maintainers.pl
index a0fe34349b24..e40b53db7f9f 100644
--- a/scripts/parse-maintainers.pl
+++ b/scripts/parse-maintainers.pl
@@ -2,9 +2,9 @@
 
 use strict;
 
-my %map;
+my $P = $0;
 
-# sort comparison function
+# sort comparison functions
 sub by_category($$) {
     my ($a, $b) = @_;
 
@@ -15,20 +15,33 @@ sub by_category($$) {
     $a =~ s/THE REST/ZZZZZZ/g;
     $b =~ s/THE REST/ZZZZZZ/g;
 
-    $a cmp $b;
+    return $a cmp $b;
 }
 
-sub alpha_output {
-    my $key;
-    my $sort_method = \&by_category;
-    my $sep = "";
-
-    foreach $key (sort $sort_method keys %map) {
-        if ($key ne " ") {
-            print $sep . $key . "\n";
-            $sep = "\n";
-        }
-        print $map{$key};
+sub by_pattern($$) {
+    my ($a, $b) = @_;
+    my $preferred_order = 'MRPLSWTQBCFXNK';
+
+    my $a1 = uc(substr($a, 0, 1));
+    my $b1 = uc(substr($b, 0, 1));
+
+    my $a_index = index($preferred_order, $a1);
+    my $b_index = index($preferred_order, $b1);
+
+    $a_index = 1000 if ($a_index == -1);
+    $b_index = 1000 if ($b_index == -1);
+
+    if (($a1 =~ /^F$/ && $b1 =~ /^F$/) ||
+        ($a1 =~ /^X$/ && $b1 =~ /^X$/)) {
+        return $a cmp $b;
+    }
+
+    if ($a_index < $b_index) {
+        return -1;
+    } elsif ($a_index == $b_index) {
+        return 0;
+    } else {
+        return 1;
     }
 }
 
@@ -39,39 +52,77 @@ sub trim {
     return $s;
 }
 
+sub alpha_output {
+    my ($hashref, $filename) = (@_);
+
+    open(my $file, '>', "$filename") or die "$P: $filename: open failed - $!\n";
+    foreach my $key (sort by_category keys %$hashref) {
+        if ($key eq " ") {
+            chomp $$hashref{$key};
+            print $file $$hashref{$key};
+        } else {
+            print $file "\n" . $key . "\n";
+            foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
+                print $file ($pattern . "\n");
+            }
+        }
+    }
+    close($file);
+}
+
 sub file_input {
+    my ($hashref, $filename) = (@_);
+
     my $lastline = "";
     my $case = " ";
-    $map{$case} = "";
+    $$hashref{$case} = "";
+
+    open(my $file, '<', "$filename") or die "$P: $filename: open failed - $!\n";
 
-    while (<>) {
+    while (<$file>) {
 	my $line = $_;
 
 	# Pattern line?
 	if ($line =~ m/^([A-Z]):\s*(.*)/) {
 	    $line = $1 . ":\t" . trim($2) . "\n";
 	    if ($lastline eq "") {
-		$map{$case} = $map{$case} . $line;
+		$$hashref{$case} = $$hashref{$case} . $line;
 		next;
 	    }
 	    $case = trim($lastline);
-	    exists $map{$case} and die "Header '$case' already exists";
-	    $map{$case} = $line;
+	    exists $$hashref{$case} and die "Header '$case' already exists";
+	    $$hashref{$case} = $line;
 	    $lastline = "";
 	    next;
 	}
 
 	if ($case eq " ") {
-	    $map{$case} = $map{$case} . $lastline;
+	    $$hashref{$case} = $$hashref{$case} . $lastline;
 	    $lastline = $line;
 	    next;
 	}
 	trim($lastline) eq "" or die ("Odd non-pattern line '$lastline' for '$case'");
 	$lastline = $line;
     }
-    $map{$case} = $map{$case} . $lastline;
+    $$hashref{$case} = $$hashref{$case} . $lastline;
+    close($file);
 }
 
-&file_input;
-&alpha_output;
+my %hash;
+my %new_hash;
+
+file_input(\%hash, "MAINTAINERS");
+
+foreach my $type (@ARGV) {
+    foreach my $key (keys %hash) {
+        if ($key =~ /$type/ || $hash{$key} =~ /$type/) {
+            $new_hash{$key} = $hash{$key};
+            delete $hash{$key};
+        }
+    }
+}
+
+alpha_output(\%hash, "MAINTAINERS.new");
+alpha_output(\%new_hash, "SECTION.new");
+
 exit(0);
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index 7598361ef1f1..da2172ff9662 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -11,6 +11,8 @@
 # define __NR_bpf 280
 # elif defined(__sparc__)
 # define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
 # else
 # error __NR_bpf not defined. libbpf does not support your arch.
 # endif
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 256f571f2ab5..e5bbb090bf88 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -39,6 +39,8 @@
 # define __NR_bpf 280
 # elif defined(__sparc__)
 # define __NR_bpf 349
+# elif defined(__s390__)
+# define __NR_bpf 351
 # else
 # error __NR_bpf not defined. libbpf does not support your arch.
 # endif
diff --git a/tools/testing/selftests/bpf/test_pkt_md_access.c b/tools/testing/selftests/bpf/test_pkt_md_access.c
index 71729d47eb85..7956302ecdf2 100644
--- a/tools/testing/selftests/bpf/test_pkt_md_access.c
+++ b/tools/testing/selftests/bpf/test_pkt_md_access.c
@@ -12,12 +12,23 @@
 
 int _version SEC("version") = 1;
 
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define TEST_FIELD(TYPE, FIELD, MASK)					\
 	{								\
 		TYPE tmp = *(volatile TYPE *)&skb->FIELD;		\
 		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
 			return TC_ACT_SHOT;				\
 	}
+#else
+#define TEST_FIELD_OFFSET(a, b)	((sizeof(a) - sizeof(b)) / sizeof(b))
+#define TEST_FIELD(TYPE, FIELD, MASK)					\
+	{								\
+		TYPE tmp = *((volatile TYPE *)&skb->FIELD +		\
+			      TEST_FIELD_OFFSET(skb->FIELD, TYPE));	\
+		if (tmp != ((*(volatile __u32 *)&skb->FIELD) & MASK))	\
+			return TC_ACT_SHOT;				\
+	}
+#endif
 
 SEC("test1")
 int process(struct __sk_buff *skb)
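On big-endian targets a narrow load from a __u32 field has to be offset to the field's low-order end, which is what TEST_FIELD_OFFSET computes. The arithmetic, worked through as a standalone userspace program:

    #include <stdio.h>
    #include <stdint.h>

    #define TEST_FIELD_OFFSET(a, b) ((sizeof(a) - sizeof(b)) / sizeof(b))

    int main(void)
    {
        uint32_t field = 0x11223344;

        /* (4 - 1) / 1 = 3: byte index of the LSB on big-endian */
        printf("byte offset: %zu\n", TEST_FIELD_OFFSET(field, uint8_t));
        /* (4 - 2) / 2 = 1: index of the low-order 16-bit half */
        printf("half offset: %zu\n", TEST_FIELD_OFFSET(field, uint16_t));
        /* On big-endian, ((uint8_t *)&field)[3] == 0x44. */
        return 0;
    }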
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index addea82f76c9..d3ed7324105e 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8,6 +8,7 @@
  * License as published by the Free Software Foundation.
  */
 
+#include <endian.h>
 #include <asm/types.h>
 #include <linux/types.h>
 #include <stdint.h>
@@ -1098,7 +1099,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash)),
 #else
@@ -1135,7 +1136,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash byte load not permitted 3",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 3),
 #else
@@ -1244,7 +1245,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash)),
 #else
@@ -1259,7 +1260,7 @@ static struct bpf_test tests[] = {
 		"check skb->hash half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, hash) + 2),
 #else
@@ -5422,7 +5423,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period byte load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5438,7 +5439,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period half load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5454,7 +5455,7 @@ static struct bpf_test tests[] = {
 		"check bpf_perf_event_data->sample_period word load permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct bpf_perf_event_data, sample_period)),
 #else
@@ -5481,7 +5482,7 @@ static struct bpf_test tests[] = {
 		"check skb->data half load not permitted",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, data)),
 #else
@@ -5497,7 +5498,7 @@ static struct bpf_test tests[] = {
 		"check skb->tc_classid half load not permitted for lwt prog",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 			BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
 				    offsetof(struct __sk_buff, tc_classid)),
 #else