author    Ingo Molnar <mingo@kernel.org>    2017-06-13 02:47:22 -0400
committer Ingo Molnar <mingo@kernel.org>    2017-06-13 02:47:22 -0400
commit    3f365cf304ba3d316b3df2474af8d7df6edd2455 (patch)
tree      bb52465b35606bec268e9c1b88c746f8caf0f919
parent    bbf79d21bd4627a01ca8721c9373752819f8e4cc (diff)
parent    252d2a4117bc181b287eeddf848863788da733ae (diff)

Merge branch 'sched/urgent' into x86/mm, to pick up dependent fix

Andy will need the following scheduler fix for the PCID series:

  252d2a4117bc: sched/core: Idle_task_exit() shouldn't use switch_mm_irqs_off()

So do a cross-merge.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt9
-rw-r--r--Documentation/devicetree/bindings/net/dsa/marvell.txt4
-rw-r--r--Documentation/networking/dpaa.txt194
-rw-r--r--Documentation/networking/tcp.txt31
-rw-r--r--MAINTAINERS26
-rw-r--r--arch/arm/boot/compressed/efi-header.S4
-rw-r--r--arch/arm/boot/compressed/head.S17
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi5
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dts6
-rw-r--r--arch/arm/boot/dts/keystone-k2l-netcp.dtsi4
-rw-r--r--arch/arm/boot/dts/keystone-k2l.dtsi8
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts2
-rw-r--r--arch/arm/common/mcpm_entry.c6
-rw-r--r--arch/arm/include/asm/device.h3
-rw-r--r--arch/arm/include/asm/pgtable-nommu.h1
-rw-r--r--arch/arm/mach-at91/Kconfig1
-rw-r--r--arch/arm/mach-davinci/pm.c7
-rw-r--r--arch/arm/mm/dma-mapping.c29
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi3
-rw-r--r--arch/arm64/configs/defconfig10
-rw-r--r--arch/powerpc/Kconfig16
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h2
-rw-r--r--arch/powerpc/include/asm/cputable.h3
-rw-r--r--arch/powerpc/include/asm/processor.h25
-rw-r--r--arch/powerpc/include/asm/topology.h14
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c58
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c2
-rw-r--r--arch/powerpc/perf/power9-pmu.c4
-rw-r--r--arch/powerpc/platforms/Kconfig11
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c8
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c2
-rw-r--r--arch/powerpc/sysdev/simple_gpio.c3
-rw-r--r--arch/sparc/Kconfig12
-rw-r--r--arch/sparc/include/asm/mmu_64.h2
-rw-r--r--arch/sparc/include/asm/mmu_context_64.h32
-rw-r--r--arch/sparc/include/asm/pil.h1
-rw-r--r--arch/sparc/include/asm/vio.h1
-rw-r--r--arch/sparc/kernel/ds.c2
-rw-r--r--arch/sparc/kernel/irq_64.c17
-rw-r--r--arch/sparc/kernel/kernel.h1
-rw-r--r--arch/sparc/kernel/smp_64.c31
-rw-r--r--arch/sparc/kernel/tsb.S11
-rw-r--r--arch/sparc/kernel/ttable_64.S2
-rw-r--r--arch/sparc/kernel/vio.c68
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/multi3.S35
-rw-r--r--arch/sparc/mm/init_64.c89
-rw-r--r--arch/sparc/mm/tsb.c7
-rw-r--r--arch/sparc/mm/ultra.S5
-rw-r--r--arch/x86/kernel/cpu/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c3
-rw-r--r--block/bfq-cgroup.c116
-rw-r--r--block/bfq-iosched.c2
-rw-r--r--block/bfq-iosched.h23
-rw-r--r--block/bio-integrity.c3
-rw-r--r--block/blk-mq.c25
-rw-r--r--block/blk-throttle.c22
-rw-r--r--crypto/asymmetric_keys/public_key.c2
-rw-r--r--crypto/drbg.c5
-rw-r--r--crypto/gcm.c6
-rw-r--r--drivers/acpi/arm64/iort.c22
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/button.c5
-rw-r--r--drivers/acpi/device_pm.c3
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/acpi/sleep.c28
-rw-r--r--drivers/ata/ahci.c38
-rw-r--r--drivers/ata/libahci_platform.c5
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/sata_mv.c13
-rw-r--r--drivers/ata/sata_rcar.c15
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/base/power/wakeup.c18
-rw-r--r--drivers/block/loop.c3
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/firmware/efi/efi-bgrt.c26
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c11
-rw-r--r--drivers/gpu/drm/drm_drv.c7
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c9
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c106
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c20
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c19
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c5
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c21
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c7
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c12
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c115
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c23
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c15
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c13
-rw-r--r--drivers/input/mouse/elantech.c16
-rw-r--r--drivers/input/rmi4/rmi_f03.c2
-rw-r--r--drivers/iommu/of_iommu.c7
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/md/md.c16
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5.c3
-rw-r--r--drivers/media/Kconfig6
-rw-r--r--drivers/media/Makefile4
-rw-r--r--drivers/media/cec/Kconfig14
-rw-r--r--drivers/media/cec/Makefile2
-rw-r--r--drivers/media/cec/cec-adap.c2
-rw-r--r--drivers/media/cec/cec-core.c8
-rw-r--r--drivers/media/i2c/Kconfig9
-rw-r--r--drivers/media/platform/Kconfig10
-rw-r--r--drivers/media/platform/vivid/Kconfig3
-rw-r--r--drivers/media/rc/rc-ir-raw.c13
-rw-r--r--drivers/media/usb/pulse8-cec/Kconfig3
-rw-r--r--drivers/media/usb/rainshadow-cec/Kconfig3
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c2
-rw-r--r--drivers/memory/atmel-ebi.c2
-rw-r--r--drivers/misc/cxl/file.c7
-rw-r--r--drivers/misc/cxl/native.c14
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h6
-rw-r--r--drivers/net/ethernet/ethoc.c3
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h26
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c75
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c22
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/mdio_bus.c13
-rw-r--r--drivers/net/phy/micrel.c42
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/virtio_net.c5
-rw-r--r--drivers/net/vxlan.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c9
-rw-r--r--drivers/nvme/host/core.c21
-rw-r--r--drivers/nvme/host/fc.c20
-rw-r--r--drivers/nvme/host/pci.c13
-rw-r--r--drivers/nvme/host/rdma.c44
-rw-r--r--drivers/of/device.c4
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/Makefile2
-rw-r--r--drivers/xen/privcmd.c4
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/dir-item.c2
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/extent-tree.c7
-rw-r--r--fs/btrfs/extent_io.c126
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/stat.c1
-rw-r--r--fs/ufs/balloc.c26
-rw-r--r--fs/ufs/inode.c28
-rw-r--r--fs/ufs/super.c18
-rw-r--r--fs/ufs/util.h10
-rw-r--r--include/linux/cgroup-defs.h1
-rw-r--r--include/linux/cgroup.h20
-rw-r--r--include/linux/compiler-clang.h7
-rw-r--r--include/linux/dma-iommu.h1
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/srcu.h2
-rw-r--r--include/linux/suspend.h7
-rw-r--r--include/media/cec-notifier.h2
-rw-r--r--include/media/cec.h4
-rw-r--r--include/net/ipv6.h1
-rw-r--r--include/net/tcp.h2
-rw-r--r--kernel/cgroup/cgroup.c5
-rw-r--r--kernel/cgroup/cpuset.c4
-rw-r--r--kernel/cpu.c4
-rw-r--r--kernel/events/core.c21
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/suspend.c29
-rw-r--r--kernel/printk/printk.c46
-rw-r--r--kernel/rcu/srcu.c5
-rw-r--r--kernel/rcu/srcutiny.c7
-rw-r--r--kernel/rcu/srcutree.c5
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_stp_if.c3
-rw-r--r--net/core/devlink.c8
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/dsa/dsa.c47
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/legacy.c47
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/ipv4/tcp_cong.c1
-rw-r--r--net/ipv6/calipso.c6
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_tunnel.c3
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/xfrm6_mode_ro.c2
-rw-r--r--net/ipv6/xfrm6_mode_transport.c2
-rw-r--r--net/mac80211/agg-tx.c128
-rw-r--r--net/mac80211/ht.c16
-rw-r--r--net/mac80211/ieee80211_i.h14
-rw-r--r--net/mac80211/iface.c11
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c7
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c9
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c22
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--sound/core/timer.c7
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/soc/atmel/atmel-classd.c9
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/rt286.c7
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c2
-rw-r--r--sound/soc/intel/skylake/skl.c162
-rw-r--r--sound/soc/intel/skylake/skl.h4
-rw-r--r--sound/soc/sh/rcar/adg.c6
-rw-r--r--sound/soc/sh/rcar/cmd.c1
-rw-r--r--sound/soc/sh/rcar/core.c51
-rw-r--r--sound/soc/sh/rcar/gen.c1
-rw-r--r--sound/soc/sh/rcar/rsnd.h2
-rw-r--r--sound/soc/sh/rcar/src.c12
-rw-r--r--sound/soc/sh/rcar/ssi.c18
-rw-r--r--sound/soc/sh/rcar/ssiu.c3
-rw-r--r--sound/soc/soc-core.c5
-rw-r--r--tools/perf/Documentation/perf-probe.txt8
-rw-r--r--tools/perf/Documentation/perf-script-perl.txt2
-rw-r--r--tools/perf/Documentation/perf-script-python.txt23
-rw-r--r--tools/perf/arch/common.c1
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/builtin-trace.c4
-rw-r--r--tools/perf/tests/bp_signal.c14
-rw-r--r--tools/perf/tests/builtin-test.c7
-rw-r--r--tools/perf/tests/code-reading.c20
-rw-r--r--tools/perf/tests/tests.h3
-rw-r--r--tools/perf/util/annotate.c72
-rw-r--r--tools/perf/util/build-id.c45
-rw-r--r--tools/perf/util/build-id.h1
-rw-r--r--tools/perf/util/dso.c100
-rw-r--r--tools/perf/util/dso.h9
-rw-r--r--tools/perf/util/header.c12
-rw-r--r--tools/perf/util/machine.c11
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/symbol-elf.c41
-rw-r--r--tools/perf/util/symbol.c4
-rw-r--r--tools/perf/util/unwind-libdw.c10
318 files changed, 2705 insertions(+), 1551 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 15f79c27748d..0f5c3b4347c6 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -866,6 +866,15 @@
866 866
867 dscc4.setup= [NET] 867 dscc4.setup= [NET]
868 868
869 dt_cpu_ftrs= [PPC]
870 Format: {"off" | "known"}
871 Control how the dt_cpu_ftrs device-tree binding is
872 used for CPU feature discovery and setup (if it
873 exists).
874 off: Do not use it, fall back to legacy cpu table.
875 known: Do not pass through unknown features to guests
876 or userspace, only those that the kernel is aware of.
877
869 dump_apple_properties [X86] 878 dump_apple_properties [X86]
870 Dump name and content of EFI device properties on 879 Dump name and content of EFI device properties on
871 x86 Macs. Useful for driver authors to determine 880 x86 Macs. Useful for driver authors to determine
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 7ef9dbb08957..1d4d0f49c9d0 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -26,6 +26,10 @@ Optional properties:
26- interrupt-controller : Indicates the switch is itself an interrupt 26- interrupt-controller : Indicates the switch is itself an interrupt
27 controller. This is used for the PHY interrupts. 27 controller. This is used for the PHY interrupts.
28#interrupt-cells = <2> : Controller uses two cells, number and flag 28#interrupt-cells = <2> : Controller uses two cells, number and flag
29- eeprom-length : Set to the length of an EEPROM connected to the
30 switch. Must be set if the switch can not detect
31 the presence and/or size of a connected EEPROM,
32 otherwise optional.
29- mdio : Container of PHY and devices on the switches MDIO 33- mdio : Container of PHY and devices on the switches MDIO
30 bus. 34 bus.
31- mdio? : Container of PHYs and devices on the external MDIO 35- mdio? : Container of PHYs and devices on the external MDIO
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644
index 000000000000..76e016d4d344
--- /dev/null
+++ b/Documentation/networking/dpaa.txt
@@ -0,0 +1,194 @@
1The QorIQ DPAA Ethernet Driver
2==============================
3
4Authors:
5Madalin Bucur <madalin.bucur@nxp.com>
6Camelia Groza <camelia.groza@nxp.com>
7
8Contents
9========
10
11 - DPAA Ethernet Overview
12 - DPAA Ethernet Supported SoCs
13 - Configuring DPAA Ethernet in your kernel
14 - DPAA Ethernet Frame Processing
15 - DPAA Ethernet Features
16 - Debugging
17
18DPAA Ethernet Overview
19======================
20
21DPAA stands for Data Path Acceleration Architecture and it is a
22set of networking acceleration IPs that are available on several
23generations of SoCs, both on PowerPC and ARM64.
24
25The Freescale DPAA architecture consists of a series of hardware blocks
26that support Ethernet connectivity. The Ethernet driver depends upon the
27following drivers in the Linux kernel:
28
29 - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
30 drivers/iommu/fsl_*
31 - Frame Manager (FMan)
32 drivers/net/ethernet/freescale/fman
33 - Queue Manager (QMan), Buffer Manager (BMan)
34 drivers/soc/fsl/qbman
35
36A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
37
38 dpaa_eth /eth0\ ... /ethN\
39 driver | | | |
40 ------------- ---- ----------- ---- -------------
41 -Ports / Tx Rx \ ... / Tx Rx \
42 FMan | | | |
43 -MACs | MAC0 | | MACN |
44 / dtsec0 \ ... / dtsecN \ (or tgec)
45 / \ / \(or memac)
46 --------- -------------- --- -------------- ---------
47 FMan, FMan Port, FMan SP, FMan MURAM drivers
48 ---------------------------------------------------------
49 FMan HW blocks: MURAM, MACs, Ports, SP
50 ---------------------------------------------------------
51
52The dpaa_eth relation to the QMan, BMan and FMan:
53 ________________________________
54 dpaa_eth / eth0 \
55 driver / \
56 --------- -^- -^- -^- --- ---------
57 QMan driver / \ / \ / \ \ / | BMan |
58 |Rx | |Rx | |Tx | |Tx | | driver |
59 --------- |Dfl| |Err| |Cnf| |FQs| | |
60 QMan HW |FQ | |FQ | |FQs| | | | |
61 / \ / \ / \ \ / | |
62 --------- --- --- --- -v- ---------
63 | FMan QMI | |
64 | FMan HW FMan BMI | BMan HW |
65 ----------------------- --------
66
67where the acronyms used above (and in the code) are:
68DPAA = Data Path Acceleration Architecture
69FMan = DPAA Frame Manager
70QMan = DPAA Queue Manager
71BMan = DPAA Buffers Manager
72QMI = QMan interface in FMan
73BMI = BMan interface in FMan
74FMan SP = FMan Storage Profiles
75MURAM = Multi-user RAM in FMan
76FQ = QMan Frame Queue
77Rx Dfl FQ = default reception FQ
78Rx Err FQ = Rx error frames FQ
79Tx Cnf FQ = Tx confirmation FQs
80Tx FQs = transmission frame queues
81dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
82tgec = ten gigabit Ethernet controller (10 Gbps)
83memac = multirate Ethernet MAC (10/100/1000/10000)
84
85DPAA Ethernet Supported SoCs
86============================
87
88The DPAA drivers enable the Ethernet controllers present on the following SoCs:
89
90# PPC
91P1023
92P2041
93P3041
94P4080
95P5020
96P5040
97T1023
98T1024
99T1040
100T1042
101T2080
102T4240
103B4860
104
105# ARM
106LS1043A
107LS1046A
108
109Configuring DPAA Ethernet in your kernel
110========================================
111
112To enable the DPAA Ethernet driver, the following Kconfig options are required:
113
114# common for arch/arm64 and arch/powerpc platforms
115CONFIG_FSL_DPAA=y
116CONFIG_FSL_FMAN=y
117CONFIG_FSL_DPAA_ETH=y
118CONFIG_FSL_XGMAC_MDIO=y
119
120# for arch/powerpc only
121CONFIG_FSL_PAMU=y
122
123# common options needed for the PHYs used on the RDBs
124CONFIG_VITESSE_PHY=y
125CONFIG_REALTEK_PHY=y
126CONFIG_AQUANTIA_PHY=y
127
128DPAA Ethernet Frame Processing
129==============================
130
131On Rx, buffers for the incoming frames are retrieved from one of the three
132existing buffers pools. The driver initializes and seeds these, each with
133buffers of different sizes: 1KB, 2KB and 4KB.
134
135On Tx, all transmitted frames are returned to the driver through Tx
136confirmation frame queues. The driver is then responsible for freeing the
137buffers. In order to do this properly, a backpointer is added to the buffer
138before transmission that points to the skb. When the buffer returns to the
139driver on a confirmation FQ, the skb can be correctly consumed.
140
141DPAA Ethernet Features
142======================
143
144Currently the DPAA Ethernet driver enables the basic features required for
145a Linux Ethernet driver. The support for advanced features will be added
146gradually.
147
148The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
149checksum offload feature is enabled by default and cannot be controlled through
150ethtool.
151
152The driver has support for multiple prioritized Tx traffic classes. Priorities
153range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
154strict priority levels. Each traffic class contains NR_CPU TX queues. By
155default, only one traffic class is enabled and the lowest priority Tx queues
156are used. Higher priority traffic classes can be enabled with the mqprio
157qdisc. For example, all four traffic classes are enabled on an interface with
158the following command. Furthermore, skb priority levels are mapped to traffic
159classes as follows:
160
161 * priorities 0 to 3 - traffic class 0 (low priority)
162 * priorities 4 to 7 - traffic class 1 (medium-low priority)
163 * priorities 8 to 11 - traffic class 2 (medium-high priority)
164 * priorities 12 to 15 - traffic class 3 (high priority)
165
166tc qdisc add dev <int> root handle 1: \
167 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
168
169Debugging
170=========
171
172The following statistics are exported for each interface through ethtool:
173
174 - interrupt count per CPU
175 - Rx packets count per CPU
176 - Tx packets count per CPU
177 - Tx confirmed packets count per CPU
178 - Tx S/G frames count per CPU
179 - Tx error count per CPU
180 - Rx error count per CPU
181 - Rx error count per type
182 - congestion related statistics:
183 - congestion status
184 - time spent in congestion
185 - number of time the device entered congestion
186 - dropped packets count per cause
187
188The driver also exports the following information in sysfs:
189
190 - the FQ IDs for each FQ type
191 /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
192
193 - the IDs of the buffer pools in use
194 /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index bdc4c0db51e1..9c7139d57e57 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -1,7 +1,7 @@
1TCP protocol 1TCP protocol
2============ 2============
3 3
4Last updated: 9 February 2008 4Last updated: 3 June 2017
5 5
6Contents 6Contents
7======== 7========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
29A congestion control mechanism can be registered through functions in 29A congestion control mechanism can be registered through functions in
30tcp_cong.c. The functions used by the congestion control mechanism are 30tcp_cong.c. The functions used by the congestion control mechanism are
31registered via passing a tcp_congestion_ops struct to 31registered via passing a tcp_congestion_ops struct to
32tcp_register_congestion_control. As a minimum name, ssthresh, 32tcp_register_congestion_control. As a minimum, the congestion control
33cong_avoid must be valid. 33mechanism must provide a valid name and must implement either ssthresh,
34cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
34 35
35Private data for a congestion control mechanism is stored in tp->ca_priv. 36Private data for a congestion control mechanism is stored in tp->ca_priv.
36tcp_ca(tp) returns a pointer to this space. This is preallocated space - it 37tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
37is important to check the size of your private data will fit this space, or 38is important to check the size of your private data will fit this space, or
38alternatively space could be allocated elsewhere and a pointer to it could 39alternatively, space could be allocated elsewhere and a pointer to it could
39be stored here. 40be stored here.
40 41
41There are three kinds of congestion control algorithms currently: The 42There are three kinds of congestion control algorithms currently: The
42simplest ones are derived from TCP reno (highspeed, scalable) and just 43simplest ones are derived from TCP reno (highspeed, scalable) and just
43provide an alternative the congestion window calculation. More complex 44provide an alternative congestion window calculation. More complex
44ones like BIC try to look at other events to provide better 45ones like BIC try to look at other events to provide better
45heuristics. There are also round trip time based algorithms like 46heuristics. There are also round trip time based algorithms like
46Vegas and Westwood+. 47Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
49needs to maintain fairness and performance. Please review current 50needs to maintain fairness and performance. Please review current
50research and RFC's before developing new modules. 51research and RFC's before developing new modules.
51 52
52The method that is used to determine which congestion control mechanism is 53The default congestion control mechanism is chosen based on the
53determined by the setting of the sysctl net.ipv4.tcp_congestion_control. 54DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
54The default congestion control will be the last one registered (LIFO); 55value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
55so if you built everything as modules, the default will be reno. If you 56module will be autoloaded if needed and you will get the expected protocol. If
56build with the defaults from Kconfig, then CUBIC will be builtin (not a 57you ask for an unknown congestion method, then the sysctl attempt will fail.
57module) and it will end up the default.
58 58
59If you really want a particular default value then you will need 59If you remove a TCP congestion control module, then you will get the next
60to set it with the sysctl. If you use a sysctl, the module will be autoloaded
61if needed and you will get the expected protocol. If you ask for an
62unknown congestion method, then the sysctl attempt will fail.
63
64If you remove a tcp congestion control module, then you will get the next
65available one. Since reno cannot be built as a module, and cannot be 60available one. Since reno cannot be built as a module, and cannot be
66deleted, it will always be available. 61removed, it will always be available.
67 62
68How the new TCP output machine [nyi] works. 63How the new TCP output machine [nyi] works.
69=========================================== 64===========================================
diff --git a/MAINTAINERS b/MAINTAINERS
index 7a28acd7f525..4d8e525b84ee 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1172,7 +1172,7 @@ N: clps711x
1172 1172
1173ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE 1173ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
1174M: Hartley Sweeten <hsweeten@visionengravers.com> 1174M: Hartley Sweeten <hsweeten@visionengravers.com>
1175M: Ryan Mallon <rmallon@gmail.com> 1175M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
1176L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1176L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1177S: Maintained 1177S: Maintained
1178F: arch/arm/mach-ep93xx/ 1178F: arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@ M: Gregory Clement <gregory.clement@free-electrons.com>
1489M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1489M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1490L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1490L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1491S: Maintained 1491S: Maintained
1492F: arch/arm/mach-mvebu/
1493F: drivers/rtc/rtc-armada38x.c
1494F: arch/arm/boot/dts/armada* 1492F: arch/arm/boot/dts/armada*
1495F: arch/arm/boot/dts/kirkwood* 1493F: arch/arm/boot/dts/kirkwood*
1494F: arch/arm/configs/mvebu_*_defconfig
1495F: arch/arm/mach-mvebu/
1496F: arch/arm64/boot/dts/marvell/armada* 1496F: arch/arm64/boot/dts/marvell/armada*
1497F: drivers/cpufreq/mvebu-cpufreq.c 1497F: drivers/cpufreq/mvebu-cpufreq.c
1498F: arch/arm/configs/mvebu_*_defconfig 1498F: drivers/irqchip/irq-armada-370-xp.c
1499F: drivers/irqchip/irq-mvebu-*
1500F: drivers/rtc/rtc-armada38x.c
1499 1501
1500ARM/Marvell Berlin SoC support 1502ARM/Marvell Berlin SoC support
1501M: Jisheng Zhang <jszhang@marvell.com> 1503M: Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@ N: rockchip
1721ARM/SAMSUNG EXYNOS ARM ARCHITECTURES 1723ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
1722M: Kukjin Kim <kgene@kernel.org> 1724M: Kukjin Kim <kgene@kernel.org>
1723M: Krzysztof Kozlowski <krzk@kernel.org> 1725M: Krzysztof Kozlowski <krzk@kernel.org>
1724R: Javier Martinez Canillas <javier@osg.samsung.com>
1725L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1726L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1726L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 1727L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
1727Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/ 1728Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@ F: drivers/edac/altera_edac.
1829ARM/STI ARCHITECTURE 1830ARM/STI ARCHITECTURE
1830M: Patrice Chotard <patrice.chotard@st.com> 1831M: Patrice Chotard <patrice.chotard@st.com>
1831L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1832L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1832L: kernel@stlinux.com
1833W: http://www.stlinux.com 1833W: http://www.stlinux.com
1834S: Maintained 1834S: Maintained
1835F: arch/arm/mach-sti/ 1835F: arch/arm/mach-sti/
@@ -7707,7 +7707,7 @@ F: drivers/platform/x86/hp_accel.c
7707 7707
7708LIVE PATCHING 7708LIVE PATCHING
7709M: Josh Poimboeuf <jpoimboe@redhat.com> 7709M: Josh Poimboeuf <jpoimboe@redhat.com>
7710M: Jessica Yu <jeyu@redhat.com> 7710M: Jessica Yu <jeyu@kernel.org>
7711M: Jiri Kosina <jikos@kernel.org> 7711M: Jiri Kosina <jikos@kernel.org>
7712M: Miroslav Benes <mbenes@suse.cz> 7712M: Miroslav Benes <mbenes@suse.cz>
7713R: Petr Mladek <pmladek@suse.com> 7713R: Petr Mladek <pmladek@suse.com>
@@ -8508,7 +8508,7 @@ S: Odd Fixes
8508F: drivers/media/radio/radio-miropcm20* 8508F: drivers/media/radio/radio-miropcm20*
8509 8509
8510MELLANOX MLX4 core VPI driver 8510MELLANOX MLX4 core VPI driver
8511M: Yishai Hadas <yishaih@mellanox.com> 8511M: Tariq Toukan <tariqt@mellanox.com>
8512L: netdev@vger.kernel.org 8512L: netdev@vger.kernel.org
8513L: linux-rdma@vger.kernel.org 8513L: linux-rdma@vger.kernel.org
8514W: http://www.mellanox.com 8514W: http://www.mellanox.com
@@ -8516,7 +8516,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8516S: Supported 8516S: Supported
8517F: drivers/net/ethernet/mellanox/mlx4/ 8517F: drivers/net/ethernet/mellanox/mlx4/
8518F: include/linux/mlx4/ 8518F: include/linux/mlx4/
8519F: include/uapi/rdma/mlx4-abi.h
8520 8519
8521MELLANOX MLX4 IB driver 8520MELLANOX MLX4 IB driver
8522M: Yishai Hadas <yishaih@mellanox.com> 8521M: Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8525,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
8526S: Supported 8525S: Supported
8527F: drivers/infiniband/hw/mlx4/ 8526F: drivers/infiniband/hw/mlx4/
8528F: include/linux/mlx4/ 8527F: include/linux/mlx4/
8528F: include/uapi/rdma/mlx4-abi.h
8529 8529
8530MELLANOX MLX5 core VPI driver 8530MELLANOX MLX5 core VPI driver
8531M: Saeed Mahameed <saeedm@mellanox.com> 8531M: Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8538,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8538S: Supported 8538S: Supported
8539F: drivers/net/ethernet/mellanox/mlx5/core/ 8539F: drivers/net/ethernet/mellanox/mlx5/core/
8540F: include/linux/mlx5/ 8540F: include/linux/mlx5/
8541F: include/uapi/rdma/mlx5-abi.h
8542 8541
8543MELLANOX MLX5 IB driver 8542MELLANOX MLX5 IB driver
8544M: Matan Barak <matanb@mellanox.com> 8543M: Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8548,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
8549S: Supported 8548S: Supported
8550F: drivers/infiniband/hw/mlx5/ 8549F: drivers/infiniband/hw/mlx5/
8551F: include/linux/mlx5/ 8550F: include/linux/mlx5/
8551F: include/uapi/rdma/mlx5-abi.h
8552 8552
8553MELEXIS MLX90614 DRIVER 8553MELEXIS MLX90614 DRIVER
8554M: Crt Mori <cmo@melexis.com> 8554M: Crt Mori <cmo@melexis.com>
@@ -8588,7 +8588,7 @@ S: Maintained
8588F: drivers/media/dvb-frontends/mn88473* 8588F: drivers/media/dvb-frontends/mn88473*
8589 8589
8590MODULE SUPPORT 8590MODULE SUPPORT
8591M: Jessica Yu <jeyu@redhat.com> 8591M: Jessica Yu <jeyu@kernel.org>
8592M: Rusty Russell <rusty@rustcorp.com.au> 8592M: Rusty Russell <rusty@rustcorp.com.au>
8593T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next 8593T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
8594S: Maintained 8594S: Maintained
@@ -11268,7 +11268,6 @@ F: drivers/media/rc/serial_ir.c
11268 11268
11269STI CEC DRIVER 11269STI CEC DRIVER
11270M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 11270M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
11271L: kernel@stlinux.com
11272S: Maintained 11271S: Maintained
11273F: drivers/staging/media/st-cec/ 11272F: drivers/staging/media/st-cec/
11274F: Documentation/devicetree/bindings/media/stih-cec.txt 11273F: Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11777,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
11778S: Supported 11777S: Supported
11779F: arch/arm/mach-davinci/ 11778F: arch/arm/mach-davinci/
11780F: drivers/i2c/busses/i2c-davinci.c 11779F: drivers/i2c/busses/i2c-davinci.c
11780F: arch/arm/boot/dts/da850*
11781 11781
11782TI DAVINCI SERIES MEDIA DRIVER 11782TI DAVINCI SERIES MEDIA DRIVER
11783M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com> 11783M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13861,7 +13861,7 @@ S: Odd fixes
13861F: drivers/net/wireless/wl3501* 13861F: drivers/net/wireless/wl3501*
13862 13862
13863WOLFSON MICROELECTRONICS DRIVERS 13863WOLFSON MICROELECTRONICS DRIVERS
13864L: patches@opensource.wolfsonmicro.com 13864L: patches@opensource.cirrus.com
13865T: git https://github.com/CirrusLogic/linux-drivers.git 13865T: git https://github.com/CirrusLogic/linux-drivers.git
13866W: https://github.com/CirrusLogic/linux-drivers/wiki 13866W: https://github.com/CirrusLogic/linux-drivers/wiki
13867S: Supported 13867S: Supported
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 9d5dc4fda3c1..3f7d1b74c5e0 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,14 +17,12 @@
17 @ there. 17 @ there.
18 .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000 18 .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
19#else 19#else
20 mov r0, r0 20 W(mov) r0, r0
21#endif 21#endif
22 .endm 22 .endm
23 23
24 .macro __EFI_HEADER 24 .macro __EFI_HEADER
25#ifdef CONFIG_EFI_STUB 25#ifdef CONFIG_EFI_STUB
26 b __efi_start
27
28 .set start_offset, __efi_start - start 26 .set start_offset, __efi_start - start
29 .org start + 0x3c 27 .org start + 0x3c
30 @ 28 @
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7c711ba61417..8a756870c238 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,19 +130,22 @@ start:
130 .rept 7 130 .rept 7
131 __nop 131 __nop
132 .endr 132 .endr
133 ARM( mov r0, r0 ) 133#ifndef CONFIG_THUMB2_KERNEL
134 ARM( b 1f ) 134 mov r0, r0
135 THUMB( badr r12, 1f ) 135#else
136 THUMB( bx r12 ) 136 AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
137 M_CLASS( nop.w ) @ M: already in Thumb2 mode
138 .thumb
139#endif
140 W(b) 1f
137 141
138 .word _magic_sig @ Magic numbers to help the loader 142 .word _magic_sig @ Magic numbers to help the loader
139 .word _magic_start @ absolute load/run zImage address 143 .word _magic_start @ absolute load/run zImage address
140 .word _magic_end @ zImage end address 144 .word _magic_end @ zImage end address
141 .word 0x04030201 @ endianness flag 145 .word 0x04030201 @ endianness flag
142 146
143 THUMB( .thumb ) 147 __EFI_HEADER
1441: __EFI_HEADER 1481:
145
146 ARM_BE8( setend be ) @ go BE8 if compiled for BE8 149 ARM_BE8( setend be ) @ go BE8 if compiled for BE8
147 AR_CLASS( mrs r9, cpsr ) 150 AR_CLASS( mrs r9, cpsr )
148#ifdef CONFIG_ARM_VIRT_EXT 151#ifdef CONFIG_ARM_VIRT_EXT
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 561f27d8d922..9444a9a9ba10 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -3,6 +3,11 @@
3#include <dt-bindings/clock/bcm2835-aux.h> 3#include <dt-bindings/clock/bcm2835-aux.h>
4#include <dt-bindings/gpio/gpio.h> 4#include <dt-bindings/gpio/gpio.h>
5 5
6/* firmware-provided startup stubs live here, where the secondary CPUs are
7 * spinning.
8 */
9/memreserve/ 0x00000000 0x00001000;
10
6/* This include file covers the common peripherals and configuration between 11/* This include file covers the common peripherals and configuration between
7 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to 12 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
8 * bcm2835.dtsi and bcm2836.dtsi. 13 * bcm2835.dtsi and bcm2836.dtsi.
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index f18e1f1d0ce2..d2be8aa3370b 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -120,10 +120,16 @@
120 120
121 ethphy0: ethernet-phy@2 { 121 ethphy0: ethernet-phy@2 {
122 reg = <2>; 122 reg = <2>;
123 micrel,led-mode = <1>;
124 clocks = <&clks IMX6UL_CLK_ENET_REF>;
125 clock-names = "rmii-ref";
123 }; 126 };
124 127
125 ethphy1: ethernet-phy@1 { 128 ethphy1: ethernet-phy@1 {
126 reg = <1>; 129 reg = <1>;
130 micrel,led-mode = <1>;
131 clocks = <&clks IMX6UL_CLK_ENET2_REF>;
132 clock-names = "rmii-ref";
127 }; 133 };
128 }; 134 };
129}; 135};
diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
index b6f26824e83a..66f615a74118 100644
--- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
137 /* NetCP address range */ 137 /* NetCP address range */
138 ranges = <0 0x26000000 0x1000000>; 138 ranges = <0 0x26000000 0x1000000>;
139 139
140 clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>; 140 clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
141 clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk"; 141 clock-names = "pa_clk", "ethss_clk", "cpts";
142 dma-coherent; 142 dma-coherent;
143 143
144 ti,navigator-dmas = <&dma_gbe 0>, 144 ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index b58e7ebc0919..148650406cf7 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -232,6 +232,14 @@
232 }; 232 };
233 }; 233 };
234 234
235 osr: sram@70000000 {
236 compatible = "mmio-sram";
237 reg = <0x70000000 0x10000>;
238 #address-cells = <1>;
239 #size-cells = <1>;
240 clocks = <&clkosr>;
241 };
242
235 dspgpio0: keystone_dsp_gpio@02620240 { 243 dspgpio0: keystone_dsp_gpio@02620240 {
236 compatible = "ti,keystone-dsp-gpio"; 244 compatible = "ti,keystone-dsp-gpio";
237 gpio-controller; 245 gpio-controller;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 33a8eb28374e..06e2331f666d 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
1#include <versatile-ab.dts> 1#include "versatile-ab.dts"
2 2
3/ { 3/ {
4 model = "ARM Versatile PB"; 4 model = "ARM Versatile PB";
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index cf062472e07b..2b913f17d50f 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
235 return ret; 235 return ret;
236} 236}
237 237
238typedef void (*phys_reset_t)(unsigned long); 238typedef typeof(cpu_reset) phys_reset_t;
239 239
240void mcpm_cpu_power_down(void) 240void mcpm_cpu_power_down(void)
241{ 241{
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
300 * on the CPU. 300 * on the CPU.
301 */ 301 */
302 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 302 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
303 phys_reset(__pa_symbol(mcpm_entry_point)); 303 phys_reset(__pa_symbol(mcpm_entry_point), false);
304 304
305 /* should never get here */ 305 /* should never get here */
306 BUG(); 306 BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
389 __mcpm_cpu_down(cpu, cluster); 389 __mcpm_cpu_down(cpu, cluster);
390 390
391 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 391 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
392 phys_reset(__pa_symbol(mcpm_entry_point)); 392 phys_reset(__pa_symbol(mcpm_entry_point), false);
393 BUG(); 393 BUG();
394} 394}
395 395
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 36ec9c8f6e16..3234fe9bba6e 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -19,7 +19,8 @@ struct dev_archdata {
19#ifdef CONFIG_XEN 19#ifdef CONFIG_XEN
20 const struct dma_map_ops *dev_dma_ops; 20 const struct dma_map_ops *dev_dma_ops;
21#endif 21#endif
22 bool dma_coherent; 22 unsigned int dma_coherent:1;
23 unsigned int dma_ops_setup:1;
23}; 24};
24 25
25struct omap_device; 26struct omap_device;
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 302240c19a5a..a0d726a47c8a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
66#define pgprot_noncached(prot) (prot) 66#define pgprot_noncached(prot) (prot)
67#define pgprot_writecombine(prot) (prot) 67#define pgprot_writecombine(prot) (prot)
68#define pgprot_dmacoherent(prot) (prot) 68#define pgprot_dmacoherent(prot) (prot)
69#define pgprot_device(prot) (prot)
69 70
70 71
71/* 72/*
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 841e924143f9..cbd959b73654 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,6 +1,7 @@
1menuconfig ARCH_AT91 1menuconfig ARCH_AT91
2 bool "Atmel SoCs" 2 bool "Atmel SoCs"
3 depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 3 depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
4 select ARM_CPU_SUSPEND if PM
4 select COMMON_CLK_AT91 5 select COMMON_CLK_AT91
5 select GPIOLIB 6 select GPIOLIB
6 select PINCTRL 7 select PINCTRL
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index efb80354f303..b5cc05dc2cb2 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
153 davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL); 153 davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
154 if (!davinci_sram_suspend) { 154 if (!davinci_sram_suspend) {
155 pr_err("PM: cannot allocate SRAM memory\n"); 155 pr_err("PM: cannot allocate SRAM memory\n");
156 return -ENOMEM; 156 ret = -ENOMEM;
157 goto no_sram_mem;
157 } 158 }
158 159
159 davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend, 160 davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
161 162
162 suspend_set_ops(&davinci_pm_ops); 163 suspend_set_ops(&davinci_pm_ops);
163 164
165 return 0;
166
167no_sram_mem:
168 iounmap(pm_config.ddrpsc_reg_base);
164no_ddrpsc_mem: 169no_ddrpsc_mem:
165 iounmap(pm_config.ddrpll_reg_base); 170 iounmap(pm_config.ddrpll_reg_base);
166no_ddrpll_mem: 171no_ddrpll_mem:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c742dfd2967b..bd83c531828a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
2311} 2311}
2312EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 2312EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
2313 2313
2314static void __arm_iommu_detach_device(struct device *dev) 2314/**
2315 * arm_iommu_detach_device
2316 * @dev: valid struct device pointer
2317 *
2318 * Detaches the provided device from a previously attached map.
2319 * This voids the dma operations (dma_map_ops pointer)
2320 */
2321void arm_iommu_detach_device(struct device *dev)
2315{ 2322{
2316 struct dma_iommu_mapping *mapping; 2323 struct dma_iommu_mapping *mapping;
2317 2324
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
2324 iommu_detach_device(mapping->domain, dev); 2331 iommu_detach_device(mapping->domain, dev);
2325 kref_put(&mapping->kref, release_iommu_mapping); 2332 kref_put(&mapping->kref, release_iommu_mapping);
2326 to_dma_iommu_mapping(dev) = NULL; 2333 to_dma_iommu_mapping(dev) = NULL;
2334 set_dma_ops(dev, NULL);
2327 2335
2328 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 2336 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
2329} 2337}
2330
2331/**
2332 * arm_iommu_detach_device
2333 * @dev: valid struct device pointer
2334 *
2335 * Detaches the provided device from a previously attached map.
2336 * This voids the dma operations (dma_map_ops pointer)
2337 */
2338void arm_iommu_detach_device(struct device *dev)
2339{
2340 __arm_iommu_detach_device(dev);
2341 set_dma_ops(dev, NULL);
2342}
2343EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 2338EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
2344 2339
2345static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 2340static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
2379 if (!mapping) 2374 if (!mapping)
2380 return; 2375 return;
2381 2376
2382 __arm_iommu_detach_device(dev); 2377 arm_iommu_detach_device(dev);
2383 arm_iommu_release_mapping(mapping); 2378 arm_iommu_release_mapping(mapping);
2384} 2379}
2385 2380
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
2430 dev->dma_ops = xen_dma_ops; 2425 dev->dma_ops = xen_dma_ops;
2431 } 2426 }
2432#endif 2427#endif
2428 dev->archdata.dma_ops_setup = true;
2433} 2429}
2434 2430
2435void arch_teardown_dma_ops(struct device *dev) 2431void arch_teardown_dma_ops(struct device *dev)
2436{ 2432{
2433 if (!dev->archdata.dma_ops_setup)
2434 return;
2435
2437 arm_teardown_iommu_dma_ops(dev); 2436 arm_teardown_iommu_dma_ops(dev);
2438} 2437}
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index ac8df5201cd6..b4bc42ece754 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -231,8 +231,7 @@
231 cpm_crypto: crypto@800000 { 231 cpm_crypto: crypto@800000 {
232 compatible = "inside-secure,safexcel-eip197"; 232 compatible = "inside-secure,safexcel-eip197";
233 reg = <0x800000 0x200000>; 233 reg = <0x800000 0x200000>;
234 interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 234 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
235 | IRQ_TYPE_LEVEL_HIGH)>,
236 <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, 235 <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
237 <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, 236 <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
238 <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>, 237 <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 7740a75a8230..6e2058847ddc 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -221,8 +221,7 @@
221 cps_crypto: crypto@800000 { 221 cps_crypto: crypto@800000 {
222 compatible = "inside-secure,safexcel-eip197"; 222 compatible = "inside-secure,safexcel-eip197";
223 reg = <0x800000 0x200000>; 223 reg = <0x800000 0x200000>;
224 interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 224 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
225 | IRQ_TYPE_LEVEL_HIGH)>,
226 <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>, 225 <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
227 <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>, 226 <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
228 <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>, 227 <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 65cdd878cfbd..97c123e09e45 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
68CONFIG_PCIE_ARMADA_8K=y 68CONFIG_PCIE_ARMADA_8K=y
69CONFIG_PCI_AARDVARK=y 69CONFIG_PCI_AARDVARK=y
70CONFIG_PCIE_RCAR=y 70CONFIG_PCIE_RCAR=y
71CONFIG_PCIE_ROCKCHIP=m
71CONFIG_PCI_HOST_GENERIC=y 72CONFIG_PCI_HOST_GENERIC=y
72CONFIG_PCI_XGENE=y 73CONFIG_PCI_XGENE=y
73CONFIG_ARM64_VA_BITS_48=y 74CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
208CONFIG_WL18XX=m 209CONFIG_WL18XX=m
209CONFIG_WLCORE_SDIO=m 210CONFIG_WLCORE_SDIO=m
210CONFIG_INPUT_EVDEV=y 211CONFIG_INPUT_EVDEV=y
212CONFIG_KEYBOARD_ADC=m
213CONFIG_KEYBOARD_CROS_EC=y
211CONFIG_KEYBOARD_GPIO=y 214CONFIG_KEYBOARD_GPIO=y
212CONFIG_INPUT_MISC=y 215CONFIG_INPUT_MISC=y
213CONFIG_INPUT_PM8941_PWRKEY=y 216CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
263CONFIG_SPI_ORION=y 266CONFIG_SPI_ORION=y
264CONFIG_SPI_PL022=y 267CONFIG_SPI_PL022=y
265CONFIG_SPI_QUP=y 268CONFIG_SPI_QUP=y
269CONFIG_SPI_ROCKCHIP=y
266CONFIG_SPI_S3C64XX=y 270CONFIG_SPI_S3C64XX=y
267CONFIG_SPI_SPIDEV=m 271CONFIG_SPI_SPIDEV=m
268CONFIG_SPMI=y 272CONFIG_SPMI=y
@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
292CONFIG_CPU_THERMAL=y 296CONFIG_CPU_THERMAL=y
293CONFIG_THERMAL_EMULATION=y 297CONFIG_THERMAL_EMULATION=y
294CONFIG_EXYNOS_THERMAL=y 298CONFIG_EXYNOS_THERMAL=y
299CONFIG_ROCKCHIP_THERMAL=m
295CONFIG_WATCHDOG=y 300CONFIG_WATCHDOG=y
296CONFIG_S3C2410_WATCHDOG=y 301CONFIG_S3C2410_WATCHDOG=y
297CONFIG_MESON_GXBB_WATCHDOG=m 302CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
300CONFIG_BCM2835_WDT=y 305CONFIG_BCM2835_WDT=y
301CONFIG_MFD_CROS_EC=y 306CONFIG_MFD_CROS_EC=y
302CONFIG_MFD_CROS_EC_I2C=y 307CONFIG_MFD_CROS_EC_I2C=y
308CONFIG_MFD_CROS_EC_SPI=y
303CONFIG_MFD_EXYNOS_LPASS=m 309CONFIG_MFD_EXYNOS_LPASS=m
304CONFIG_MFD_HI655X_PMIC=y 310CONFIG_MFD_HI655X_PMIC=y
305CONFIG_MFD_MAX77620=y 311CONFIG_MFD_MAX77620=y
306CONFIG_MFD_SPMI_PMIC=y 312CONFIG_MFD_SPMI_PMIC=y
307CONFIG_MFD_RK808=y 313CONFIG_MFD_RK808=y
308CONFIG_MFD_SEC_CORE=y 314CONFIG_MFD_SEC_CORE=y
315CONFIG_REGULATOR_FAN53555=y
309CONFIG_REGULATOR_FIXED_VOLTAGE=y 316CONFIG_REGULATOR_FIXED_VOLTAGE=y
310CONFIG_REGULATOR_GPIO=y 317CONFIG_REGULATOR_GPIO=y
311CONFIG_REGULATOR_HI655X=y 318CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
473CONFIG_EXTCON_USB_GPIO=y 480CONFIG_EXTCON_USB_GPIO=y
474CONFIG_IIO=y 481CONFIG_IIO=y
475CONFIG_EXYNOS_ADC=y 482CONFIG_EXYNOS_ADC=y
483CONFIG_ROCKCHIP_SARADC=m
476CONFIG_PWM=y 484CONFIG_PWM=y
477CONFIG_PWM_BCM2835=m 485CONFIG_PWM_BCM2835=m
486CONFIG_PWM_CROS_EC=m
478CONFIG_PWM_MESON=m 487CONFIG_PWM_MESON=m
479CONFIG_PWM_ROCKCHIP=y 488CONFIG_PWM_ROCKCHIP=y
480CONFIG_PWM_SAMSUNG=y 489CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
484CONFIG_PHY_SUN4I_USB=y 493CONFIG_PHY_SUN4I_USB=y
485CONFIG_PHY_ROCKCHIP_INNO_USB2=y 494CONFIG_PHY_ROCKCHIP_INNO_USB2=y
486CONFIG_PHY_ROCKCHIP_EMMC=y 495CONFIG_PHY_ROCKCHIP_EMMC=y
496CONFIG_PHY_ROCKCHIP_PCIE=m
487CONFIG_PHY_XGENE=y 497CONFIG_PHY_XGENE=y
488CONFIG_PHY_TEGRA_XUSB=y 498CONFIG_PHY_TEGRA_XUSB=y
489CONFIG_ARM_SCPI_PROTOCOL=y 499CONFIG_ARM_SCPI_PROTOCOL=y
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f7c8f9972f61..964da1891ea9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
380 380
381menu "Kernel options" 381menu "Kernel options"
382 382
383config PPC_DT_CPU_FTRS
384 bool "Device-tree based CPU feature discovery & setup"
385 depends on PPC_BOOK3S_64
386 default n
387 help
388 This enables code to use a new device tree binding for describing CPU
389 compatibility and features. Saying Y here will attempt to use the new
390 binding if the firmware provides it. Currently only the skiboot
391 firmware provides this binding.
392 If you're not sure say Y.
393
394config PPC_CPUFEATURES_ENABLE_UNKNOWN
395 bool "cpufeatures pass through unknown features to guest/userspace"
396 depends on PPC_DT_CPU_FTRS
397 default y
398
399config HIGHMEM 383config HIGHMEM
400 bool "High memory support" 384 bool "High memory support"
401 depends on PPC32 385 depends on PPC32
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b671ca..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
8#define H_PTE_INDEX_SIZE 9 8#define H_PTE_INDEX_SIZE 9
9#define H_PMD_INDEX_SIZE 7 9#define H_PMD_INDEX_SIZE 7
10#define H_PUD_INDEX_SIZE 9 10#define H_PUD_INDEX_SIZE 9
11#define H_PGD_INDEX_SIZE 12 11#define H_PGD_INDEX_SIZE 9
12 12
13#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
14#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE) 14#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c2d509584a98..d02ad93bf708 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -214,7 +214,6 @@ enum {
214#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) 214#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
215#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) 215#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
216#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000) 216#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
217#define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000)
218#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000) 217#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
219 218
220#ifndef __ASSEMBLY__ 219#ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
463 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 462 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
464 CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 463 CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
465 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ 464 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
466 CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE) 465 CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
467#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) 466#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
468#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL) 467#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
469#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 468#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a2123f291ab0..bb99b651085a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
110#define TASK_SIZE_128TB (0x0000800000000000UL) 110#define TASK_SIZE_128TB (0x0000800000000000UL)
111#define TASK_SIZE_512TB (0x0002000000000000UL) 111#define TASK_SIZE_512TB (0x0002000000000000UL)
112 112
113#ifdef CONFIG_PPC_BOOK3S_64 113/*
114 * For now 512TB is only supported with book3s and 64K linux page size.
115 */
116#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
114/* 117/*
115 * Max value currently used: 118 * Max value currently used:
116 */ 119 */
117#define TASK_SIZE_USER64 TASK_SIZE_512TB 120#define TASK_SIZE_USER64 TASK_SIZE_512TB
121#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB
118#else 122#else
119#define TASK_SIZE_USER64 TASK_SIZE_64TB 123#define TASK_SIZE_USER64 TASK_SIZE_64TB
124#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
120#endif 125#endif
121 126
122/* 127/*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
132 * space during mmap's. 137 * space during mmap's.
133 */ 138 */
134#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4)) 139#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
135#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4)) 140#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
136 141
137#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \ 142#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
138 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 ) 143 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
143 * with 128TB and conditionally enable upto 512TB 148 * with 128TB and conditionally enable upto 512TB
144 */ 149 */
145#ifdef CONFIG_PPC_BOOK3S_64 150#ifdef CONFIG_PPC_BOOK3S_64
146#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \ 151#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
147 TASK_SIZE_USER32 : TASK_SIZE_128TB) 152 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
148#else 153#else
149#define DEFAULT_MAP_WINDOW TASK_SIZE 154#define DEFAULT_MAP_WINDOW TASK_SIZE
150#endif 155#endif
151 156
152#ifdef __powerpc64__ 157#ifdef __powerpc64__
153 158
154#ifdef CONFIG_PPC_BOOK3S_64 159#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
155/* Limit stack to 128TB */
156#define STACK_TOP_USER64 TASK_SIZE_128TB
157#else
158#define STACK_TOP_USER64 TASK_SIZE_USER64
159#endif
160
161#define STACK_TOP_USER32 TASK_SIZE_USER32 160#define STACK_TOP_USER32 TASK_SIZE_USER32
162 161
163#define STACK_TOP (is_32bit_task() ? \ 162#define STACK_TOP (is_32bit_task() ? \
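The processor.h hunk above ties TASK_UNMAPPED_BASE_USER64 and STACK_TOP_USER64 to the new DEFAULT_MAP_WINDOW_USER64 instead of the hard-coded 128TB value. A minimal user-space sketch of the resulting numbers, assuming the 64TB and 128TB windows named in the hunk and ignoring the PAGE_ALIGN step (the quarter-window values are already page aligned):

#include <stdio.h>

#define TB (1UL << 40)          /* assumes an LP64 host */

int main(void)
{
        unsigned long windows[] = { 64 * TB, 128 * TB };

        for (int i = 0; i < 2; i++)
                printf("map window %3luTB: mmap base at %2luTB, stack top at %3luTB\n",
                       windows[i] / TB,          /* DEFAULT_MAP_WINDOW_USER64 */
                       windows[i] / 4 / TB,      /* TASK_UNMAPPED_BASE_USER64 */
                       windows[i] / TB);         /* STACK_TOP_USER64 */
        return 0;
}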
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..329771559cbb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
44extern int sysfs_add_device_to_node(struct device *dev, int nid); 44extern int sysfs_add_device_to_node(struct device *dev, int nid);
45extern void sysfs_remove_device_from_node(struct device *dev, int nid); 45extern void sysfs_remove_device_from_node(struct device *dev, int nid);
46 46
47static inline int early_cpu_to_node(int cpu)
48{
49 int nid;
50
51 nid = numa_cpu_lookup_table[cpu];
52
53 /*
54 * Fall back to node 0 if nid is unset (it should be, except bugs).
55 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
56 */
57 return (nid < 0) ? 0 : nid;
58}
47#else 59#else
48 60
61static inline int early_cpu_to_node(int cpu) { return 0; }
62
49static inline void dump_numa_cpu_topology(void) {} 63static inline void dump_numa_cpu_topology(void) {}
50 64
51static inline int sysfs_add_device_to_node(struct device *dev, int nid) 65static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index fcc7588a96d6..4c7656dc4e04 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -8,6 +8,7 @@
8#include <linux/export.h> 8#include <linux/export.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/jump_label.h> 10#include <linux/jump_label.h>
11#include <linux/libfdt.h>
11#include <linux/memblock.h> 12#include <linux/memblock.h>
12#include <linux/printk.h> 13#include <linux/printk.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
642 {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL}, 643 {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
643 {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, 644 {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
644 {"processor-utilization-of-resources-register", feat_enable_purr, 0}, 645 {"processor-utilization-of-resources-register", feat_enable_purr, 0},
645 {"subcore", feat_enable, CPU_FTR_SUBCORE},
646 {"no-execute", feat_enable, 0}, 646 {"no-execute", feat_enable, 0},
647 {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, 647 {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
648 {"cache-inhibited-large-page", feat_enable_large_ci, 0}, 648 {"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
671 {"wait-v3", feat_enable, 0}, 671 {"wait-v3", feat_enable, 0},
672}; 672};
673 673
674/* XXX: how to configure this? Default + boot time? */ 674static bool __initdata using_dt_cpu_ftrs;
675#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN 675static bool __initdata enable_unknown = true;
676#define CPU_FEATURE_ENABLE_UNKNOWN 1 676
677#else 677static int __init dt_cpu_ftrs_parse(char *str)
678#define CPU_FEATURE_ENABLE_UNKNOWN 0 678{
679#endif 679 if (!str)
680 return 0;
681
682 if (!strcmp(str, "off"))
683 using_dt_cpu_ftrs = false;
684 else if (!strcmp(str, "known"))
685 enable_unknown = false;
686 else
687 return 1;
688
689 return 0;
690}
691early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
680 692
681static void __init cpufeatures_setup_start(u32 isa) 693static void __init cpufeatures_setup_start(u32 isa)
682{ 694{
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
707 } 719 }
708 } 720 }
709 721
710 if (!known && CPU_FEATURE_ENABLE_UNKNOWN) { 722 if (!known && enable_unknown) {
711 if (!feat_try_enable_unknown(f)) { 723 if (!feat_try_enable_unknown(f)) {
712 pr_info("not enabling: %s (unknown and unsupported by kernel)\n", 724 pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
713 f->name); 725 f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
756 cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features); 768 cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
757} 769}
758 770
771static int __init disabled_on_cmdline(void)
772{
773 unsigned long root, chosen;
774 const char *p;
775
776 root = of_get_flat_dt_root();
777 chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
778 if (chosen == -FDT_ERR_NOTFOUND)
779 return false;
780
781 p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
782 if (!p)
783 return false;
784
785 if (strstr(p, "dt_cpu_ftrs=off"))
786 return true;
787
788 return false;
789}
790
759static int __init fdt_find_cpu_features(unsigned long node, const char *uname, 791static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
760 int depth, void *data) 792 int depth, void *data)
761{ 793{
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
766 return 0; 798 return 0;
767} 799}
768 800
769static bool __initdata using_dt_cpu_ftrs = false;
770
771bool __init dt_cpu_ftrs_in_use(void) 801bool __init dt_cpu_ftrs_in_use(void)
772{ 802{
773 return using_dt_cpu_ftrs; 803 return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
775 805
776bool __init dt_cpu_ftrs_init(void *fdt) 806bool __init dt_cpu_ftrs_init(void *fdt)
777{ 807{
808 using_dt_cpu_ftrs = false;
809
778 /* Setup and verify the FDT, if it fails we just bail */ 810 /* Setup and verify the FDT, if it fails we just bail */
779 if (!early_init_dt_verify(fdt)) 811 if (!early_init_dt_verify(fdt))
780 return false; 812 return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
782 if (!of_scan_flat_dt(fdt_find_cpu_features, NULL)) 814 if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
783 return false; 815 return false;
784 816
817 if (disabled_on_cmdline())
818 return false;
819
785 cpufeatures_setup_cpu(); 820 cpufeatures_setup_cpu();
786 821
787 using_dt_cpu_ftrs = true; 822 using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
1027 1062
1028void __init dt_cpu_ftrs_scan(void) 1063void __init dt_cpu_ftrs_scan(void)
1029{ 1064{
1065 if (!using_dt_cpu_ftrs)
1066 return;
1067
1030 of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL); 1068 of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
1031} 1069}
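The dt_cpu_ftrs.c hunk above replaces the build-time PPC_CPUFEATURES_ENABLE_UNKNOWN option with a runtime "dt_cpu_ftrs=" early parameter: "off" ignores the device-tree binding entirely, "known" keeps it but refuses to pass through unrecognised features. A hedged user-space sketch of just that parsing rule (the globals stand in for the kernel's __initdata flags):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool using_dt_cpu_ftrs = true;   /* pretend the firmware offered the binding */
static bool enable_unknown = true;

static int parse_dt_cpu_ftrs(const char *str)
{
        if (!str)
                return 0;

        if (!strcmp(str, "off"))
                using_dt_cpu_ftrs = false;      /* ignore the binding entirely */
        else if (!strcmp(str, "known"))
                enable_unknown = false;         /* keep it, skip unknown features */
        else
                return 1;                       /* unrecognised value */

        return 0;
}

int main(void)
{
        parse_dt_cpu_ftrs("known");
        printf("using=%d pass_unknown=%d\n", using_dt_cpu_ftrs, enable_unknown);
        return 0;
}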
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index baae104b16c7..2ad725ef4368 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1666#ifdef CONFIG_VSX 1666#ifdef CONFIG_VSX
1667 current->thread.used_vsr = 0; 1667 current->thread.used_vsr = 0;
1668#endif 1668#endif
1669 current->thread.load_fp = 0;
1669 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state)); 1670 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1670 current->thread.fp_save_area = NULL; 1671 current->thread.fp_save_area = NULL;
1671#ifdef CONFIG_ALTIVEC 1672#ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1674 current->thread.vr_save_area = NULL; 1675 current->thread.vr_save_area = NULL;
1675 current->thread.vrsave = 0; 1676 current->thread.vrsave = 0;
1676 current->thread.used_vr = 0; 1677 current->thread.used_vr = 0;
1678 current->thread.load_vec = 0;
1677#endif /* CONFIG_ALTIVEC */ 1679#endif /* CONFIG_ALTIVEC */
1678#ifdef CONFIG_SPE 1680#ifdef CONFIG_SPE
1679 memset(current->thread.evr, 0, sizeof(current->thread.evr)); 1681 memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1685 current->thread.tm_tfhar = 0; 1687 current->thread.tm_tfhar = 0;
1686 current->thread.tm_texasr = 0; 1688 current->thread.tm_texasr = 0;
1687 current->thread.tm_tfiar = 0; 1689 current->thread.tm_tfiar = 0;
1690 current->thread.load_tm = 0;
1688#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1691#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1689} 1692}
1690EXPORT_SYMBOL(start_thread); 1693EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 71dcda91755d..857129acf960 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
928 928
929#ifdef CONFIG_PPC_MM_SLICES 929#ifdef CONFIG_PPC_MM_SLICES
930#ifdef CONFIG_PPC64 930#ifdef CONFIG_PPC64
931 init_mm.context.addr_limit = TASK_SIZE_128TB; 931 init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
932#else 932#else
933#error "context.addr_limit not initialized." 933#error "context.addr_limit not initialized."
934#endif 934#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f35ff9dea4fb..a8c1f99e9607 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
661 661
662static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) 662static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
663{ 663{
664 return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, 664 return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
665 __pa(MAX_DMA_ADDRESS)); 665 __pa(MAX_DMA_ADDRESS));
666} 666}
667 667
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
672 672
673static int pcpu_cpu_distance(unsigned int from, unsigned int to) 673static int pcpu_cpu_distance(unsigned int from, unsigned int to)
674{ 674{
675 if (cpu_to_node(from) == cpu_to_node(to)) 675 if (early_cpu_to_node(from) == early_cpu_to_node(to))
676 return LOCAL_DISTANCE; 676 return LOCAL_DISTANCE;
677 else 677 else
678 return REMOTE_DISTANCE; 678 return REMOTE_DISTANCE;
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index c6dca2ae78ef..a3edf813d455 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
99 * mm->context.addr_limit. Default to max task size so that we copy the 99 * mm->context.addr_limit. Default to max task size so that we copy the
100 * default values to paca which will help us to handle slb miss early. 100 * default values to paca which will help us to handle slb miss early.
101 */ 101 */
102 mm->context.addr_limit = TASK_SIZE_128TB; 102 mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
103 103
104 /* 104 /*
105 * The old code would re-promote on fork, we don't do that when using 105 * The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 018f8e90ac35..bb28e1a41257 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
402 .name = "POWER9", 402 .name = "POWER9",
403 .n_counter = MAX_PMU_COUNTERS, 403 .n_counter = MAX_PMU_COUNTERS,
404 .add_fields = ISA207_ADD_FIELDS, 404 .add_fields = ISA207_ADD_FIELDS,
405 .test_adder = ISA207_TEST_ADDER, 405 .test_adder = P9_DD1_TEST_ADDER,
406 .compute_mmcr = isa207_compute_mmcr, 406 .compute_mmcr = isa207_compute_mmcr,
407 .config_bhrb = power9_config_bhrb, 407 .config_bhrb = power9_config_bhrb,
408 .bhrb_filter_map = power9_bhrb_filter_map, 408 .bhrb_filter_map = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
421 .name = "POWER9", 421 .name = "POWER9",
422 .n_counter = MAX_PMU_COUNTERS, 422 .n_counter = MAX_PMU_COUNTERS,
423 .add_fields = ISA207_ADD_FIELDS, 423 .add_fields = ISA207_ADD_FIELDS,
424 .test_adder = P9_DD1_TEST_ADDER, 424 .test_adder = ISA207_TEST_ADDER,
425 .compute_mmcr = isa207_compute_mmcr, 425 .compute_mmcr = isa207_compute_mmcr,
426 .config_bhrb = power9_config_bhrb, 426 .config_bhrb = power9_config_bhrb,
427 .bhrb_filter_map = power9_bhrb_filter_map, 427 .bhrb_filter_map = power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33244e3d9375..4fd64d3f5c44 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
59 59
60 In case of doubt, say Y 60 In case of doubt, say Y
61 61
62config PPC_DT_CPU_FTRS
63 bool "Device-tree based CPU feature discovery & setup"
64 depends on PPC_BOOK3S_64
65 default y
66 help
67 This enables code to use a new device tree binding for describing CPU
68 compatibility and features. Saying Y here will attempt to use the new
69 binding if the firmware provides it. Currently only the skiboot
70 firmware provides this binding.
71 If you're not sure say Y.
72
62config UDBG_RTAS_CONSOLE 73config UDBG_RTAS_CONSOLE
63 bool "RTAS based debug console" 74 bool "RTAS based debug console"
64 depends on PPC_RTAS 75 depends on PPC_RTAS
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index e5a891ae80ee..84b7ac926ce6 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
175 skip = roundup(cprm->pos - total + sz, 4) - cprm->pos; 175 skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
176 if (!dump_skip(cprm, skip)) 176 if (!dump_skip(cprm, skip))
177 goto Eio; 177 goto Eio;
178
179 rc = 0;
178out: 180out:
179 free_page((unsigned long)buf); 181 free_page((unsigned long)buf);
180 return rc; 182 return rc;
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef11136f..8c6119280c13 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
407 407
408static int subcore_init(void) 408static int subcore_init(void)
409{ 409{
410 if (!cpu_has_feature(CPU_FTR_SUBCORE)) 410 unsigned pvr_ver;
411
412 pvr_ver = PVR_VER(mfspr(SPRN_PVR));
413
414 if (pvr_ver != PVR_POWER8 &&
415 pvr_ver != PVR_POWER8E &&
416 pvr_ver != PVR_POWER8NVL)
411 return 0; 417 return 0;
412 418
413 /* 419 /*
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e104c71ea44a..1fb162ba9d1c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
124 for (i = 0; i < num_lmbs; i++) { 124 for (i = 0; i < num_lmbs; i++) {
125 lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr); 125 lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
126 lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index); 126 lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
127 lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
127 lmbs[i].flags = be32_to_cpu(lmbs[i].flags); 128 lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
128 } 129 }
129 130
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
147 for (i = 0; i < num_lmbs; i++) { 148 for (i = 0; i < num_lmbs; i++) {
148 lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr); 149 lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
149 lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index); 150 lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
151 lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
150 lmbs[i].flags = cpu_to_be32(lmbs[i].flags); 152 lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
151 } 153 }
152 154
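The hotplug-memory.c hunk above adds the one field the old conversion loops skipped: aa_index was left big-endian on read and never swapped back on write, which corrupts it on little-endian hosts. A small user-space sketch of the complete round trip, with be32toh/htobe32 standing in for the kernel's be32_to_cpu/cpu_to_be32 and illustrative field values:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct lmb_entry {                      /* stand-in for the firmware LMB record */
        uint64_t base_addr;
        uint32_t drc_index;
        uint32_t aa_index;
        uint32_t flags;
};

int main(void)
{
        struct lmb_entry lmb = {        /* as delivered by firmware: big-endian */
                .base_addr = htobe64(0x20000000UL),
                .drc_index = htobe32(0x8000),
                .aa_index  = htobe32(3),
                .flags     = htobe32(1),
        };

        /* convert every field, including aa_index */
        lmb.base_addr = be64toh(lmb.base_addr);
        lmb.drc_index = be32toh(lmb.drc_index);
        lmb.aa_index  = be32toh(lmb.aa_index);
        lmb.flags     = be32toh(lmb.flags);

        printf("base=%#lx drc=%#x aa=%u flags=%#x\n",
               (unsigned long)lmb.base_addr, lmb.drc_index,
               lmb.aa_index, lmb.flags);
        return 0;
}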
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b470b04..6afddae2fb47 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
75 75
76static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) 76static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
77{ 77{
78 struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc); 78 struct u8_gpio_chip *u8_gc =
79 container_of(mm_gc, struct u8_gpio_chip, mm_gc);
79 80
80 u8_gc->data = in_8(mm_gc->regs); 81 u8_gc->data = in_8(mm_gc->regs);
81} 82}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0d21c0..b558c9e29de3 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@ config NR_CPUS
192 int "Maximum number of CPUs" 192 int "Maximum number of CPUs"
193 depends on SMP 193 depends on SMP
194 range 2 32 if SPARC32 194 range 2 32 if SPARC32
195 range 2 1024 if SPARC64 195 range 2 4096 if SPARC64
196 default 32 if SPARC32 196 default 32 if SPARC32
197 default 64 if SPARC64 197 default 4096 if SPARC64
198 198
199source kernel/Kconfig.hz 199source kernel/Kconfig.hz
200 200
@@ -295,9 +295,13 @@ config NUMA
295 depends on SPARC64 && SMP 295 depends on SPARC64 && SMP
296 296
297config NODES_SHIFT 297config NODES_SHIFT
298 int 298 int "Maximum NUMA Nodes (as a power of 2)"
299 default "4" 299 range 4 5 if SPARC64
300 default "5"
300 depends on NEED_MULTIPLE_NODES 301 depends on NEED_MULTIPLE_NODES
302 help
303 Specify the maximum number of NUMA Nodes available on the target
304 system. Increases memory reserved to accommodate various tables.
301 305
302# Some NUMA nodes have memory ranges that span 306# Some NUMA nodes have memory ranges that span
303# other nodes. Even though a pfn is valid and 307# other nodes. Even though a pfn is valid and
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
52#define CTX_NR_MASK TAG_CONTEXT_BITS 52#define CTX_NR_MASK TAG_CONTEXT_BITS
53#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) 53#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
54 54
55#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL)) 55#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
56#define CTX_VALID(__ctx) \ 56#define CTX_VALID(__ctx) \
57 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) 57 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
58#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK) 58#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6eba11..2cddcda4f85f 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
19extern unsigned long tlb_context_cache; 19extern unsigned long tlb_context_cache;
20extern unsigned long mmu_context_bmap[]; 20extern unsigned long mmu_context_bmap[];
21 21
22DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
22void get_new_mmu_context(struct mm_struct *mm); 23void get_new_mmu_context(struct mm_struct *mm);
23#ifdef CONFIG_SMP
24void smp_new_mmu_context_version(void);
25#else
26#define smp_new_mmu_context_version() do { } while (0)
27#endif
28
29int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 24int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
30void destroy_context(struct mm_struct *mm); 25void destroy_context(struct mm_struct *mm);
31 26
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
76static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 71static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
77{ 72{
78 unsigned long ctx_valid, flags; 73 unsigned long ctx_valid, flags;
79 int cpu; 74 int cpu = smp_processor_id();
80 75
76 per_cpu(per_cpu_secondary_mm, cpu) = mm;
81 if (unlikely(mm == &init_mm)) 77 if (unlikely(mm == &init_mm))
82 return; 78 return;
83 79
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
123 * for the first time, we must flush that context out of the 119 * for the first time, we must flush that context out of the
124 * local TLB. 120 * local TLB.
125 */ 121 */
126 cpu = smp_processor_id();
127 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { 122 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
128 cpumask_set_cpu(cpu, mm_cpumask(mm)); 123 cpumask_set_cpu(cpu, mm_cpumask(mm));
129 __flush_tlb_mm(CTX_HWBITS(mm->context), 124 __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
133} 128}
134 129
135#define deactivate_mm(tsk,mm) do { } while (0) 130#define deactivate_mm(tsk,mm) do { } while (0)
136 131#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
137/* Activate a new MM instance for the current task. */
138static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
139{
140 unsigned long flags;
141 int cpu;
142
143 spin_lock_irqsave(&mm->context.lock, flags);
144 if (!CTX_VALID(mm->context))
145 get_new_mmu_context(mm);
146 cpu = smp_processor_id();
147 if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
148 cpumask_set_cpu(cpu, mm_cpumask(mm));
149
150 load_secondary_context(mm);
151 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
152 tsb_context_switch(mm);
153 spin_unlock_irqrestore(&mm->context.lock, flags);
154}
155
156#endif /* !(__ASSEMBLY__) */ 132#endif /* !(__ASSEMBLY__) */
157 133
158#endif /* !(__SPARC64_MMU_CONTEXT_H) */ 134#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
20#define PIL_SMP_CALL_FUNC 1 20#define PIL_SMP_CALL_FUNC 1
21#define PIL_SMP_RECEIVE_SIGNAL 2 21#define PIL_SMP_RECEIVE_SIGNAL 2
22#define PIL_SMP_CAPTURE 3 22#define PIL_SMP_CAPTURE 3
23#define PIL_SMP_CTX_NEW_VERSION 4
24#define PIL_DEVICE_IRQ 5 23#define PIL_DEVICE_IRQ 5
25#define PIL_SMP_CALL_FUNC_SNGL 6 24#define PIL_SMP_CALL_FUNC_SNGL 6
26#define PIL_DEFERRED_PCR_WORK 7 25#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
327 int compat_len; 327 int compat_len;
328 328
329 u64 dev_no; 329 u64 dev_no;
330 u64 id;
330 331
331 unsigned long channel_id; 332 unsigned long channel_id;
332 333
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7c8d94..f87265afb175 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
909 pbuf.req.handle = cp->handle; 909 pbuf.req.handle = cp->handle;
910 pbuf.req.major = 1; 910 pbuf.req.major = 1;
911 pbuf.req.minor = 0; 911 pbuf.req.minor = 0;
912 strcpy(pbuf.req.svc_id, cp->service_id); 912 strcpy(pbuf.id_buf, cp->service_id);
913 913
914 err = __ds_send(lp, &pbuf, msg_len); 914 err = __ds_send(lp, &pbuf, msg_len);
915 if (err > 0) 915 if (err > 0)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248aa0928..99dd133a029f 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
1034{ 1034{
1035#ifdef CONFIG_SMP 1035#ifdef CONFIG_SMP
1036 unsigned long page; 1036 unsigned long page;
1037 void *mondo, *p;
1037 1038
1038 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); 1039 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
1040
1041 /* Make sure mondo block is 64byte aligned */
1042 p = kzalloc(127, GFP_KERNEL);
1043 if (!p) {
1044 prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
1045 prom_halt();
1046 }
1047 mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
1048 tb->cpu_mondo_block_pa = __pa(mondo);
1039 1049
1040 page = get_zeroed_page(GFP_KERNEL); 1050 page = get_zeroed_page(GFP_KERNEL);
1041 if (!page) { 1051 if (!page) {
1042 prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); 1052 prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
1043 prom_halt(); 1053 prom_halt();
1044 } 1054 }
1045 1055
1046 tb->cpu_mondo_block_pa = __pa(page); 1056 tb->cpu_list_pa = __pa(page);
1047 tb->cpu_list_pa = __pa(page + 64);
1048#endif 1057#endif
1049} 1058}
1050 1059
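The irq_64.c hunk above gives the mondo block its own allocation of 127 bytes (64 for the block plus 63 of slack) and rounds the pointer up, so the block is 64-byte aligned no matter what alignment the allocator returned. A minimal sketch of that align-up idiom in plain user-space C:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        void *p = calloc(1, 127);       /* 64-byte block + 63 bytes of slack */
        uintptr_t aligned;

        if (!p)
                return 1;

        /* round up to the next 64-byte boundary */
        aligned = ((uintptr_t)p + 63) & ~(uintptr_t)0x3f;
        printf("raw %p, 64-byte aligned block at %#lx\n",
               p, (unsigned long)aligned);

        free(p);
        return 0;
}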
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551262c..6ae1e77be0bf 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
37/* smp_64.c */ 37/* smp_64.c */
38void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); 38void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
39void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); 39void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
40void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
41void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); 40void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
42void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); 41void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
43 42
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac757cc..fdf31040a7dc 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
964 preempt_enable(); 964 preempt_enable();
965} 965}
966 966
967void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
968{
969 struct mm_struct *mm;
970 unsigned long flags;
971
972 clear_softint(1 << irq);
973
974 /* See if we need to allocate a new TLB context because
975 * the version of the one we are using is now out of date.
976 */
977 mm = current->active_mm;
978 if (unlikely(!mm || (mm == &init_mm)))
979 return;
980
981 spin_lock_irqsave(&mm->context.lock, flags);
982
983 if (unlikely(!CTX_VALID(mm->context)))
984 get_new_mmu_context(mm);
985
986 spin_unlock_irqrestore(&mm->context.lock, flags);
987
988 load_secondary_context(mm);
989 __flush_tlb_mm(CTX_HWBITS(mm->context),
990 SECONDARY_CONTEXT);
991}
992
993void smp_new_mmu_context_version(void)
994{
995 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
996}
997
998#ifdef CONFIG_KGDB 967#ifdef CONFIG_KGDB
999void kgdb_roundup_cpus(unsigned long flags) 968void kgdb_roundup_cpus(unsigned long flags)
1000{ 969{
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cfd0ad4..07c0df924960 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@ __tsb_context_switch:
455 .type copy_tsb,#function 455 .type copy_tsb,#function
456copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size 456copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
457 * %o2=new_tsb_base, %o3=new_tsb_size 457 * %o2=new_tsb_base, %o3=new_tsb_size
458 * %o4=page_size_shift
458 */ 459 */
459 sethi %uhi(TSB_PASS_BITS), %g7 460 sethi %uhi(TSB_PASS_BITS), %g7
460 srlx %o3, 4, %o3 461 srlx %o3, 4, %o3
461 add %o0, %o1, %g1 /* end of old tsb */ 462 add %o0, %o1, %o1 /* end of old tsb */
462 sllx %g7, 32, %g7 463 sllx %g7, 32, %g7
463 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ 464 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
464 465
466 mov %o4, %g1 /* page_size_shift */
467
465661: prefetcha [%o0] ASI_N, #one_read 468661: prefetcha [%o0] ASI_N, #one_read
466 .section .tsb_phys_patch, "ax" 469 .section .tsb_phys_patch, "ax"
467 .word 661b 470 .word 661b
@@ -486,9 +489,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
486 /* This can definitely be computed faster... */ 489 /* This can definitely be computed faster... */
487 srlx %o0, 4, %o5 /* Build index */ 490 srlx %o0, 4, %o5 /* Build index */
488 and %o5, 511, %o5 /* Mask index */ 491 and %o5, 511, %o5 /* Mask index */
489 sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ 492 sllx %o5, %g1, %o5 /* Put into vaddr position */
490 or %o4, %o5, %o4 /* Full VADDR. */ 493 or %o4, %o5, %o4 /* Full VADDR. */
491 srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ 494 srlx %o4, %g1, %o4 /* Shift down to create index */
492 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ 495 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
493 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ 496 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
494 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ 497 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
496 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ 499 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
497 500
49880: add %o0, 16, %o0 50180: add %o0, 16, %o0
499 cmp %o0, %g1 502 cmp %o0, %o1
500 bne,pt %xcc, 90b 503 bne,pt %xcc, 90b
501 nop 504 nop
502 505
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f6556352..efe93ab4a9c0 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) 50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) 51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) 52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
53tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) 53tl0_irq4: BTRAP(0x44)
54#else 54#else
55tl0_irq1: BTRAP(0x41) 55tl0_irq1: BTRAP(0x41)
56tl0_irq2: BTRAP(0x42) 56tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857254fc..075d38980dee 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
302 if (!id) { 302 if (!id) {
303 dev_set_name(&vdev->dev, "%s", bus_id_name); 303 dev_set_name(&vdev->dev, "%s", bus_id_name);
304 vdev->dev_no = ~(u64)0; 304 vdev->dev_no = ~(u64)0;
305 vdev->id = ~(u64)0;
305 } else if (!cfg_handle) { 306 } else if (!cfg_handle) {
306 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); 307 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
307 vdev->dev_no = *id; 308 vdev->dev_no = *id;
309 vdev->id = ~(u64)0;
308 } else { 310 } else {
309 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, 311 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
310 *cfg_handle, *id); 312 *cfg_handle, *id);
311 vdev->dev_no = *cfg_handle; 313 vdev->dev_no = *cfg_handle;
314 vdev->id = *id;
312 } 315 }
313 316
314 vdev->dev.parent = parent; 317 vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
351 (void) vio_create_one(hp, node, &root_vdev->dev); 354 (void) vio_create_one(hp, node, &root_vdev->dev);
352} 355}
353 356
357struct vio_md_node_query {
358 const char *type;
359 u64 dev_no;
360 u64 id;
361};
362
354static int vio_md_node_match(struct device *dev, void *arg) 363static int vio_md_node_match(struct device *dev, void *arg)
355{ 364{
365 struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
356 struct vio_dev *vdev = to_vio_dev(dev); 366 struct vio_dev *vdev = to_vio_dev(dev);
357 367
358 if (vdev->mp == (u64) arg) 368 if (vdev->dev_no != query->dev_no)
359 return 1; 369 return 0;
370 if (vdev->id != query->id)
371 return 0;
372 if (strcmp(vdev->type, query->type))
373 return 0;
360 374
361 return 0; 375 return 1;
362} 376}
363 377
364static void vio_remove(struct mdesc_handle *hp, u64 node) 378static void vio_remove(struct mdesc_handle *hp, u64 node)
365{ 379{
380 const char *type;
381 const u64 *id, *cfg_handle;
382 u64 a;
383 struct vio_md_node_query query;
366 struct device *dev; 384 struct device *dev;
367 385
368 dev = device_find_child(&root_vdev->dev, (void *) node, 386 type = mdesc_get_property(hp, node, "device-type", NULL);
387 if (!type) {
388 type = mdesc_get_property(hp, node, "name", NULL);
389 if (!type)
390 type = mdesc_node_name(hp, node);
391 }
392
393 query.type = type;
394
395 id = mdesc_get_property(hp, node, "id", NULL);
396 cfg_handle = NULL;
397 mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
398 u64 target;
399
400 target = mdesc_arc_target(hp, a);
401 cfg_handle = mdesc_get_property(hp, target,
402 "cfg-handle", NULL);
403 if (cfg_handle)
404 break;
405 }
406
407 if (!id) {
408 query.dev_no = ~(u64)0;
409 query.id = ~(u64)0;
410 } else if (!cfg_handle) {
411 query.dev_no = *id;
412 query.id = ~(u64)0;
413 } else {
414 query.dev_no = *cfg_handle;
415 query.id = *id;
416 }
417
418 dev = device_find_child(&root_vdev->dev, &query,
369 vio_md_node_match); 419 vio_md_node_match);
370 if (dev) { 420 if (dev) {
371 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); 421 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
372 422
373 device_unregister(dev); 423 device_unregister(dev);
374 put_device(dev); 424 put_device(dev);
425 } else {
426 if (!id)
427 printk(KERN_ERR "VIO: Removed unknown %s node.\n",
428 type);
429 else if (!cfg_handle)
430 printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
431 type, *id);
432 else
433 printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
434 type, *cfg_handle, *id);
375 } 435 }
376} 436}
377 437
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2f8b54..07c03e72d812 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
15lib-$(CONFIG_SPARC64) += atomic_64.o 15lib-$(CONFIG_SPARC64) += atomic_64.o
16lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o 16lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
17lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o 17lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
18lib-$(CONFIG_SPARC64) += multi3.o
18 19
19lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o 20lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
20lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o 21lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 000000000000..d6b6c97fe3c7
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
1#include <linux/linkage.h>
2#include <asm/export.h>
3
4 .text
5 .align 4
6ENTRY(__multi3) /* %o0 = u, %o1 = v */
7 mov %o1, %g1
8 srl %o3, 0, %g4
9 mulx %g4, %g1, %o1
10 srlx %g1, 0x20, %g3
11 mulx %g3, %g4, %g5
12 sllx %g5, 0x20, %o5
13 srl %g1, 0, %g4
14 sub %o1, %o5, %o5
15 srlx %o5, 0x20, %o5
16 addcc %g5, %o5, %g5
17 srlx %o3, 0x20, %o5
18 mulx %g4, %o5, %g4
19 mulx %g3, %o5, %o5
20 sethi %hi(0x80000000), %g3
21 addcc %g5, %g4, %g5
22 srlx %g5, 0x20, %g5
23 add %g3, %g3, %g3
24 movcc %xcc, %g0, %g3
25 addcc %o5, %g5, %o5
26 sllx %g4, 0x20, %g4
27 add %o1, %g4, %o1
28 add %o5, %g3, %g2
29 mulx %g1, %o2, %g1
30 add %g1, %g2, %g1
31 mulx %o0, %o3, %o0
32 retl
33 add %g1, %o0, %o0
34ENDPROC(__multi3)
35EXPORT_SYMBOL(__multi3)
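__multi3 is the libgcc helper the compiler calls to multiply two 128-bit integers; the new multi3.S above provides it in-kernel for sparc64. A hedged C sketch of the same decomposition: since a 64-bit multiply instruction only yields the low 64 bits (as mulx does), the full 64x64->128 product of the low halves is built from 32-bit pieces, and the cross terms only contribute to the high word. Names such as u128 and mul_64x64 are illustrative, not kernel identifiers.

#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t hi, lo; };

/* Full 64x64 -> 128 product built from four 32x32 -> 64 products. */
static struct u128 mul_64x64(uint64_t a, uint64_t b)
{
        uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
        uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
        uint64_t ll = a_lo * b_lo;
        uint64_t lh = a_lo * b_hi;
        uint64_t hl = a_hi * b_lo;
        uint64_t hh = a_hi * b_hi;
        uint64_t mid = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
        struct u128 r;

        r.lo = (mid << 32) | (uint32_t)ll;
        r.hi = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
        return r;
}

/* 128x128 -> low 128 bits, the contract of __multi3. */
static struct u128 multi3(struct u128 u, struct u128 v)
{
        struct u128 r = mul_64x64(u.lo, v.lo);

        r.hi += u.lo * v.hi + u.hi * v.lo;      /* only the low 64 bits matter */
        return r;
}

int main(void)
{
        struct u128 u = { 0, 0xffffffffffffffffULL };
        struct u128 v = { 0, 2 };
        struct u128 r = multi3(u, v);

        printf("hi=%llx lo=%llx\n",
               (unsigned long long)r.hi, (unsigned long long)r.lo);
        return 0;
}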
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
358 } 358 }
359 359
360 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { 360 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
361 pr_warn("hugepagesz=%llu not supported by MMU.\n", 361 hugetlb_bad_size();
362 pr_err("hugepagesz=%llu not supported by MMU.\n",
362 hugepage_size); 363 hugepage_size);
363 goto out; 364 goto out;
364 } 365 }
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
706 707
707/* get_new_mmu_context() uses "cache + 1". */ 708/* get_new_mmu_context() uses "cache + 1". */
708DEFINE_SPINLOCK(ctx_alloc_lock); 709DEFINE_SPINLOCK(ctx_alloc_lock);
709unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 710unsigned long tlb_context_cache = CTX_FIRST_VERSION;
710#define MAX_CTX_NR (1UL << CTX_NR_BITS) 711#define MAX_CTX_NR (1UL << CTX_NR_BITS)
711#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 712#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
712DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 713DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
714DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
715
716static void mmu_context_wrap(void)
717{
718 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
719 unsigned long new_ver, new_ctx, old_ctx;
720 struct mm_struct *mm;
721 int cpu;
722
723 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
724
725 /* Reserve kernel context */
726 set_bit(0, mmu_context_bmap);
727
728 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
729 if (unlikely(new_ver == 0))
730 new_ver = CTX_FIRST_VERSION;
731 tlb_context_cache = new_ver;
732
733 /*
734 * Make sure that any new mm that are added into per_cpu_secondary_mm,
735 * are going to go through get_new_mmu_context() path.
736 */
737 mb();
738
739 /*
740 * Updated versions to current on those CPUs that had valid secondary
741 * contexts
742 */
743 for_each_online_cpu(cpu) {
744 /*
745 * If a new mm is stored after we took this mm from the array,
746 * it will go into get_new_mmu_context() path, because we
747 * already bumped the version in tlb_context_cache.
748 */
749 mm = per_cpu(per_cpu_secondary_mm, cpu);
750
751 if (unlikely(!mm || mm == &init_mm))
752 continue;
753
754 old_ctx = mm->context.sparc64_ctx_val;
755 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
756 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
757 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
758 mm->context.sparc64_ctx_val = new_ctx;
759 }
760 }
761}
713 762
714/* Caller does TLB context flushing on local CPU if necessary. 763/* Caller does TLB context flushing on local CPU if necessary.
715 * The caller also ensures that CTX_VALID(mm->context) is false. 764 * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
725{ 774{
726 unsigned long ctx, new_ctx; 775 unsigned long ctx, new_ctx;
727 unsigned long orig_pgsz_bits; 776 unsigned long orig_pgsz_bits;
728 int new_version;
729 777
730 spin_lock(&ctx_alloc_lock); 778 spin_lock(&ctx_alloc_lock);
779retry:
780 /* wrap might have happened, test again if our context became valid */
781 if (unlikely(CTX_VALID(mm->context)))
782 goto out;
731 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 783 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
732 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 784 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
733 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 785 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
734 new_version = 0;
735 if (new_ctx >= (1 << CTX_NR_BITS)) { 786 if (new_ctx >= (1 << CTX_NR_BITS)) {
736 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 787 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
737 if (new_ctx >= ctx) { 788 if (new_ctx >= ctx) {
738 int i; 789 mmu_context_wrap();
739 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 790 goto retry;
740 CTX_FIRST_VERSION;
741 if (new_ctx == 1)
742 new_ctx = CTX_FIRST_VERSION;
743
744 /* Don't call memset, for 16 entries that's just
745 * plain silly...
746 */
747 mmu_context_bmap[0] = 3;
748 mmu_context_bmap[1] = 0;
749 mmu_context_bmap[2] = 0;
750 mmu_context_bmap[3] = 0;
751 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
752 mmu_context_bmap[i + 0] = 0;
753 mmu_context_bmap[i + 1] = 0;
754 mmu_context_bmap[i + 2] = 0;
755 mmu_context_bmap[i + 3] = 0;
756 }
757 new_version = 1;
758 goto out;
759 } 791 }
760 } 792 }
793 if (mm->context.sparc64_ctx_val)
794 cpumask_clear(mm_cpumask(mm));
761 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 795 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
762 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 796 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
763out:
764 tlb_context_cache = new_ctx; 797 tlb_context_cache = new_ctx;
765 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 798 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
799out:
766 spin_unlock(&ctx_alloc_lock); 800 spin_unlock(&ctx_alloc_lock);
767
768 if (unlikely(new_version))
769 smp_new_mmu_context_version();
770} 801}
771 802
 772static int numa_enabled = 1; 803static int numa_enabled = 1;
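The init_64.c hunk above replaces the cross-call based context-version bump with mmu_context_wrap(), which advances the global version and patches up only the contexts that are live in per_cpu_secondary_mm. The scheme it relies on is the packing checked by CTX_VALID() in the mmu_64.h hunk: low bits are the hardware context number, high bits a generation counter. A small sketch with an assumed field width (CTX_NR_BITS of 13 here is illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTX_NR_BITS        13                          /* assumption for the sketch */
#define CTX_NR_MASK        ((1UL << CTX_NR_BITS) - 1)
#define CTX_FIRST_VERSION  (1UL << CTX_NR_BITS)
#define CTX_VERSION_MASK   (~CTX_NR_MASK)

static unsigned long tlb_context_cache = CTX_FIRST_VERSION;

/* valid only while the stored version matches the global one */
static bool ctx_valid(unsigned long ctx)
{
        return ((ctx ^ tlb_context_cache) & CTX_VERSION_MASK) == 0;
}

int main(void)
{
        unsigned long ctx = (tlb_context_cache & CTX_VERSION_MASK) | 42;

        printf("valid before wrap: %d\n", ctx_valid(ctx));
        tlb_context_cache += CTX_FIRST_VERSION;         /* a wrap bumps the version */
        printf("valid after wrap:  %d\n", ctx_valid(ctx));
        return 0;
}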
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b22a47..0d4b998c7d7b 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ retry_tsb_alloc:
496 extern void copy_tsb(unsigned long old_tsb_base, 496 extern void copy_tsb(unsigned long old_tsb_base,
497 unsigned long old_tsb_size, 497 unsigned long old_tsb_size,
498 unsigned long new_tsb_base, 498 unsigned long new_tsb_base,
499 unsigned long new_tsb_size); 499 unsigned long new_tsb_size,
500 unsigned long page_size_shift);
500 unsigned long old_tsb_base = (unsigned long) old_tsb; 501 unsigned long old_tsb_base = (unsigned long) old_tsb;
501 unsigned long new_tsb_base = (unsigned long) new_tsb; 502 unsigned long new_tsb_base = (unsigned long) new_tsb;
502 503
@@ -504,7 +505,9 @@ retry_tsb_alloc:
504 old_tsb_base = __pa(old_tsb_base); 505 old_tsb_base = __pa(old_tsb_base);
505 new_tsb_base = __pa(new_tsb_base); 506 new_tsb_base = __pa(new_tsb_base);
506 } 507 }
507 copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); 508 copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
509 tsb_index == MM_TSB_BASE ?
510 PAGE_SHIFT : REAL_HPAGE_SHIFT);
508 } 511 }
509 512
510 mm->context.tsb_block[tsb_index].tsb = new_tsb; 513 mm->context.tsb_block[tsb_index].tsb = new_tsb;
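The tsb.c hunk above passes copy_tsb() the page-size shift of the TSB being resized, because the slot an address hashes to is (vaddr >> shift) & (nentries - 1): indexing the huge-page TSB with PAGE_SHIFT would scatter entries into the wrong slots. A tiny sketch of that indexing, assuming sparc64's 8 KB base pages (shift 13) and 4 MB real huge pages (shift 22); the address and table size are illustrative:

#include <stdio.h>

static unsigned long tsb_slot(unsigned long vaddr, unsigned int shift,
                              unsigned long nentries)
{
        return (vaddr >> shift) & (nentries - 1);
}

int main(void)
{
        unsigned long vaddr = 0x7f1234560000UL;

        printf("base TSB slot (shift 13): %lu\n", tsb_slot(vaddr, 13, 512));
        printf("huge TSB slot (shift 22): %lu\n", tsb_slot(vaddr, 22, 512));
        return 0;
}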
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint 971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
972 retry 972 retry
973 973
974 .globl xcall_new_mmu_context_version
975xcall_new_mmu_context_version:
976 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
977 retry
978
979#ifdef CONFIG_KGDB 974#ifdef CONFIG_KGDB
980 .globl xcall_kgdb_capture 975 .globl xcall_kgdb_capture
981xcall_kgdb_capture: 976xcall_kgdb_capture:
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index a70fd61095f8..6f077445647a 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
255 break; 255 break;
256 256
257 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ 257 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
258 case 11: /* GX1 with inverted Device ID */
258#ifdef CONFIG_PCI 259#ifdef CONFIG_PCI
259 { 260 {
260 u32 vendor, device; 261 u32 vendor, device;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index afdfd237b59f..f522415bf9e5 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void)
619 619
620 show_saved_mc(); 620 show_saved_mc();
621 621
622 /* initrd is going away, clear patch ptr. */
623 intel_ucode_patch = NULL;
624
622 return 0; 625 return 0;
623} 626}
624 627
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c8a32fb345cf..78b2e0db4fb2 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
52BFQG_FLAG_FNS(empty) 52BFQG_FLAG_FNS(empty)
53#undef BFQG_FLAG_FNS 53#undef BFQG_FLAG_FNS
54 54
55/* This should be called with the queue_lock held. */ 55/* This should be called with the scheduler lock held. */
56static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) 56static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
57{ 57{
58 unsigned long long now; 58 unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
67 bfqg_stats_clear_waiting(stats); 67 bfqg_stats_clear_waiting(stats);
68} 68}
69 69
70/* This should be called with the queue_lock held. */ 70/* This should be called with the scheduler lock held. */
71static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, 71static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
72 struct bfq_group *curr_bfqg) 72 struct bfq_group *curr_bfqg)
73{ 73{
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
81 bfqg_stats_mark_waiting(stats); 81 bfqg_stats_mark_waiting(stats);
82} 82}
83 83
84/* This should be called with the queue_lock held. */ 84/* This should be called with the scheduler lock held. */
85static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) 85static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
86{ 86{
87 unsigned long long now; 87 unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
203 203
204static void bfqg_get(struct bfq_group *bfqg) 204static void bfqg_get(struct bfq_group *bfqg)
205{ 205{
206 return blkg_get(bfqg_to_blkg(bfqg)); 206 bfqg->ref++;
207} 207}
208 208
209void bfqg_put(struct bfq_group *bfqg) 209void bfqg_put(struct bfq_group *bfqg)
210{ 210{
211 return blkg_put(bfqg_to_blkg(bfqg)); 211 bfqg->ref--;
212
213 if (bfqg->ref == 0)
214 kfree(bfqg);
215}
216
217static void bfqg_and_blkg_get(struct bfq_group *bfqg)
218{
219 /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
220 bfqg_get(bfqg);
221
222 blkg_get(bfqg_to_blkg(bfqg));
223}
224
225void bfqg_and_blkg_put(struct bfq_group *bfqg)
226{
227 bfqg_put(bfqg);
228
229 blkg_put(bfqg_to_blkg(bfqg));
212} 230}
213 231
214void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, 232void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
312 if (bfqq) { 330 if (bfqq) {
313 bfqq->ioprio = bfqq->new_ioprio; 331 bfqq->ioprio = bfqq->new_ioprio;
314 bfqq->ioprio_class = bfqq->new_ioprio_class; 332 bfqq->ioprio_class = bfqq->new_ioprio_class;
315 bfqg_get(bfqg); 333 /*
334 * Make sure that bfqg and its associated blkg do not
335 * disappear before entity.
336 */
337 bfqg_and_blkg_get(bfqg);
316 } 338 }
317 entity->parent = bfqg->my_entity; /* NULL for root group */ 339 entity->parent = bfqg->my_entity; /* NULL for root group */
318 entity->sched_data = &bfqg->sched_data; 340 entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
399 return NULL; 421 return NULL;
400 } 422 }
401 423
424 /* see comments in bfq_bic_update_cgroup for why refcounting */
425 bfqg_get(bfqg);
402 return &bfqg->pd; 426 return &bfqg->pd;
403} 427}
404 428
@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
426 struct bfq_group *bfqg = pd_to_bfqg(pd); 450 struct bfq_group *bfqg = pd_to_bfqg(pd);
427 451
428 bfqg_stats_exit(&bfqg->stats); 452 bfqg_stats_exit(&bfqg->stats);
429 return kfree(bfqg); 453 bfqg_put(bfqg);
430} 454}
431 455
432void bfq_pd_reset_stats(struct blkg_policy_data *pd) 456void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
496 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating 520 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
497 * it on the new one. Avoid putting the entity on the old group idle tree. 521 * it on the new one. Avoid putting the entity on the old group idle tree.
498 * 522 *
499 * Must be called under the queue lock; the cgroup owning @bfqg must 523 * Must be called under the scheduler lock, to make sure that the blkg
500 * not disappear (by now this just means that we are called under 524 * owning @bfqg does not disappear (see comments in
501 * rcu_read_lock()). 525 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
526 * objects).
502 */ 527 */
503void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, 528void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
504 struct bfq_group *bfqg) 529 struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
519 bfq_deactivate_bfqq(bfqd, bfqq, false, false); 544 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
520 else if (entity->on_st) 545 else if (entity->on_st)
521 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); 546 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
522 bfqg_put(bfqq_group(bfqq)); 547 bfqg_and_blkg_put(bfqq_group(bfqq));
523 548
524 /*
525 * Here we use a reference to bfqg. We don't need a refcounter
526 * as the cgroup reference will not be dropped, so that its
527 * destroy() callback will not be invoked.
528 */
529 entity->parent = bfqg->my_entity; 549 entity->parent = bfqg->my_entity;
530 entity->sched_data = &bfqg->sched_data; 550 entity->sched_data = &bfqg->sched_data;
531 bfqg_get(bfqg); 551 /* pin down bfqg and its associated blkg */
552 bfqg_and_blkg_get(bfqg);
532 553
533 if (bfq_bfqq_busy(bfqq)) { 554 if (bfq_bfqq_busy(bfqq)) {
534 bfq_pos_tree_add_move(bfqd, bfqq); 555 bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
545 * @bic: the bic to move. 566 * @bic: the bic to move.
546 * @blkcg: the blk-cgroup to move to. 567 * @blkcg: the blk-cgroup to move to.
547 * 568 *
548 * Move bic to blkcg, assuming that bfqd->queue is locked; the caller 569 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
549 * has to make sure that the reference to cgroup is valid across the call. 570 * sure that the reference to cgroup is valid across the call (see
571 * comments in bfq_bic_update_cgroup on this issue)
550 * 572 *
551 * NOTE: an alternative approach might have been to store the current 573 * NOTE: an alternative approach might have been to store the current
552 * cgroup in bfqq and getting a reference to it, reducing the lookup 574 * cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
604 goto out; 626 goto out;
605 627
606 bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); 628 bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
629 /*
630 * Update blkg_path for bfq_log_* functions. We cache this
631 * path, and update it here, for the following
632 * reasons. Operations on blkg objects in blk-cgroup are
633 * protected with the request_queue lock, and not with the
634 * lock that protects the instances of this scheduler
635 * (bfqd->lock). This exposes BFQ to the following sort of
636 * race.
637 *
638 * The blkg_lookup performed in bfq_get_queue, protected
639 * through rcu, may happen to return the address of a copy of
640 * the original blkg. If this is the case, then the
641 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
642 * the blkg, is useless: it does not prevent blk-cgroup code
643 * from destroying both the original blkg and all objects
644 * directly or indirectly referred by the copy of the
645 * blkg.
646 *
647 * On the bright side, destroy operations on a blkg invoke, as
648 * a first step, hooks of the scheduler associated with the
649 * blkg. And these hooks are executed with bfqd->lock held for
650 * BFQ. As a consequence, for any blkg associated with the
651 * request queue this instance of the scheduler is attached
652 * to, we are guaranteed that such a blkg is not destroyed, and
653 * that all the pointers it contains are consistent, while we
654 * are holding bfqd->lock. A blkg_lookup performed with
655 * bfqd->lock held then returns a fully consistent blkg, which
656 * remains consistent until this lock is held.
657 *
658 * Thanks to the last fact, and to the fact that: (1) bfqg has
659 * been obtained through a blkg_lookup in the above
660 * assignment, and (2) bfqd->lock is being held, here we can
661 * safely use the policy data for the involved blkg (i.e., the
662 * field bfqg->pd) to get to the blkg associated with bfqg,
663 * and then we can safely use any field of blkg. After we
664 * release bfqd->lock, even just getting blkg through this
665 * bfqg may cause dangling references to be traversed, as
666 * bfqg->pd may not exist any more.
667 *
668 * In view of the above facts, here we cache, in the bfqg, any
669 * blkg data we may need for this bic, and for its associated
670 * bfq_queue. As of now, we need to cache only the path of the
671 * blkg, which is used in the bfq_log_* functions.
672 *
673 * Finally, note that bfqg itself needs to be protected from
674 * destruction on the blkg_free of the original blkg (which
675 * invokes bfq_pd_free). We use an additional private
676 * refcounter for bfqg, to let it disappear only after no
677 * bfq_queue refers to it any longer.
678 */
679 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
607 bic->blkcg_serial_nr = serial_nr; 680 bic->blkcg_serial_nr = serial_nr;
608out: 681out:
609 rcu_read_unlock(); 682 rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
640 * @bfqd: the device data structure with the root group. 713 * @bfqd: the device data structure with the root group.
641 * @bfqg: the group to move from. 714 * @bfqg: the group to move from.
642 * @st: the service tree with the entities. 715 * @st: the service tree with the entities.
643 *
644 * Needs queue_lock to be taken and reference to be valid over the call.
645 */ 716 */
646static void bfq_reparent_active_entities(struct bfq_data *bfqd, 717static void bfq_reparent_active_entities(struct bfq_data *bfqd,
647 struct bfq_group *bfqg, 718 struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
692 /* 763 /*
693 * The idle tree may still contain bfq_queues belonging 764 * The idle tree may still contain bfq_queues belonging
694 * to exited task because they never migrated to a different 765 * to exited task because they never migrated to a different
695 * cgroup from the one being destroyed now. No one else 766 * cgroup from the one being destroyed now.
696 * can access them so it's safe to act without any lock.
697 */ 767 */
698 bfq_flush_idle_tree(st); 768 bfq_flush_idle_tree(st);
699 769
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 08ce45096350..ed93da2462ab 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
3665 3665
3666 kmem_cache_free(bfq_pool, bfqq); 3666 kmem_cache_free(bfq_pool, bfqq);
3667#ifdef CONFIG_BFQ_GROUP_IOSCHED 3667#ifdef CONFIG_BFQ_GROUP_IOSCHED
3668 bfqg_put(bfqg); 3668 bfqg_and_blkg_put(bfqg);
3669#endif 3669#endif
3670} 3670}
3671 3671
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index ae783c06dfd9..5c3bf9861492 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -759,6 +759,12 @@ struct bfq_group {
759 /* must be the first member */ 759 /* must be the first member */
760 struct blkg_policy_data pd; 760 struct blkg_policy_data pd;
761 761
762 /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
763 char blkg_path[128];
764
765 /* reference counter (see comments in bfq_bic_update_cgroup) */
766 int ref;
767
762 struct bfq_entity entity; 768 struct bfq_entity entity;
763 struct bfq_sched_data sched_data; 769 struct bfq_sched_data sched_data;
764 770
@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
838struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); 844struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
839struct bfq_group *bfqq_group(struct bfq_queue *bfqq); 845struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
840struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); 846struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
841void bfqg_put(struct bfq_group *bfqg); 847void bfqg_and_blkg_put(struct bfq_group *bfqg);
842 848
843#ifdef CONFIG_BFQ_GROUP_IOSCHED 849#ifdef CONFIG_BFQ_GROUP_IOSCHED
844extern struct cftype bfq_blkcg_legacy_files[]; 850extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
910struct bfq_group *bfqq_group(struct bfq_queue *bfqq); 916struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
911 917
912#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ 918#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
913 char __pbuf[128]; \ 919 blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
914 \
915 blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
916 blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
917 bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ 920 bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
918 __pbuf, ##args); \ 921 bfqq_group(bfqq)->blkg_path, ##args); \
919} while (0) 922} while (0)
920 923
921#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ 924#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
922 char __pbuf[128]; \ 925 blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
923 \
924 blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
925 blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
926} while (0)
927 926
928#else /* CONFIG_BFQ_GROUP_IOSCHED */ 927#else /* CONFIG_BFQ_GROUP_IOSCHED */
929 928
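The hunks above replace per-call blkg_path() lookups with a path cached in struct bfq_group, kept alive past blkg destruction by a private refcount. A minimal, single-threaded user-space sketch of that cache-plus-refcount pattern (names and types are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for bfq_group: caches the cgroup path and carries its own ref. */
struct group {
	char path[128];     /* cached once, while the source is known valid */
	int ref;            /* private refcount, independent of the source object */
};

static struct group *group_create(const char *src_path)
{
	struct group *g = calloc(1, sizeof(*g));

	if (!g)
		return NULL;
	/* Cache the path while we can still read it safely. */
	snprintf(g->path, sizeof(g->path), "%s", src_path);
	g->ref = 1;
	return g;
}

static void group_get(struct group *g)
{
	g->ref++;
}

static void group_put(struct group *g)
{
	if (--g->ref == 0)
		free(g);
}

int main(void)
{
	struct group *g = group_create("/sys/fs/cgroup/blkio/test");

	if (!g)
		return 1;
	group_get(g);                         /* e.g. a queue takes a reference  */
	printf("log: %s event\n", g->path);   /* no lookup needed at log time    */
	group_put(g);                         /* queue drops its reference       */
	group_put(g);                         /* owner drops the last reference  */
	return 0;
}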
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5384713d48bc..b5009a896a7f 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
175 if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE) 175 if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
176 return false; 176 return false;
177 177
178 if (!bio_sectors(bio))
179 return false;
180
178 /* Already protected? */ 181 /* Already protected? */
179 if (bio_integrity(bio)) 182 if (bio_integrity(bio))
180 return false; 183 return false;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1bcccedcc74f..bb66c96850b1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1461,22 +1461,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1461 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); 1461 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1462} 1462}
1463 1463
1464static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, 1464static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1465 bool may_sleep) 1465 struct request *rq,
1466 blk_qc_t *cookie, bool may_sleep)
1466{ 1467{
1467 struct request_queue *q = rq->q; 1468 struct request_queue *q = rq->q;
1468 struct blk_mq_queue_data bd = { 1469 struct blk_mq_queue_data bd = {
1469 .rq = rq, 1470 .rq = rq,
1470 .last = true, 1471 .last = true,
1471 }; 1472 };
1472 struct blk_mq_hw_ctx *hctx;
1473 blk_qc_t new_cookie; 1473 blk_qc_t new_cookie;
1474 int ret; 1474 int ret;
1475 bool run_queue = true;
1476
1477 if (blk_mq_hctx_stopped(hctx)) {
1478 run_queue = false;
1479 goto insert;
1480 }
1475 1481
1476 if (q->elevator) 1482 if (q->elevator)
1477 goto insert; 1483 goto insert;
1478 1484
1479 if (!blk_mq_get_driver_tag(rq, &hctx, false)) 1485 if (!blk_mq_get_driver_tag(rq, NULL, false))
1480 goto insert; 1486 goto insert;
1481 1487
1482 new_cookie = request_to_qc_t(hctx, rq); 1488 new_cookie = request_to_qc_t(hctx, rq);
@@ -1500,7 +1506,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
1500 1506
1501 __blk_mq_requeue_request(rq); 1507 __blk_mq_requeue_request(rq);
1502insert: 1508insert:
1503 blk_mq_sched_insert_request(rq, false, true, false, may_sleep); 1509 blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
1504} 1510}
1505 1511
1506static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1512static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,7 +1514,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1508{ 1514{
1509 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { 1515 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1510 rcu_read_lock(); 1516 rcu_read_lock();
1511 __blk_mq_try_issue_directly(rq, cookie, false); 1517 __blk_mq_try_issue_directly(hctx, rq, cookie, false);
1512 rcu_read_unlock(); 1518 rcu_read_unlock();
1513 } else { 1519 } else {
1514 unsigned int srcu_idx; 1520 unsigned int srcu_idx;
@@ -1516,7 +1522,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1516 might_sleep(); 1522 might_sleep();
1517 1523
1518 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu); 1524 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
1519 __blk_mq_try_issue_directly(rq, cookie, true); 1525 __blk_mq_try_issue_directly(hctx, rq, cookie, true);
1520 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx); 1526 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1521 } 1527 }
1522} 1528}
@@ -1619,9 +1625,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1619 1625
1620 blk_mq_put_ctx(data.ctx); 1626 blk_mq_put_ctx(data.ctx);
1621 1627
1622 if (same_queue_rq) 1628 if (same_queue_rq) {
1629 data.hctx = blk_mq_map_queue(q,
1630 same_queue_rq->mq_ctx->cpu);
1623 blk_mq_try_issue_directly(data.hctx, same_queue_rq, 1631 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1624 &cookie); 1632 &cookie);
1633 }
1625 } else if (q->nr_hw_queues > 1 && is_sync) { 1634 } else if (q->nr_hw_queues > 1 && is_sync) {
1626 blk_mq_put_ctx(data.ctx); 1635 blk_mq_put_ctx(data.ctx);
1627 blk_mq_bio_to_request(rq, bio); 1636 blk_mq_bio_to_request(rq, bio);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fc13dd0c6e39..a7285bf2831c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
27#define MIN_THROTL_IOPS (10) 27#define MIN_THROTL_IOPS (10)
28#define DFL_LATENCY_TARGET (-1L) 28#define DFL_LATENCY_TARGET (-1L)
29#define DFL_IDLE_THRESHOLD (0) 29#define DFL_IDLE_THRESHOLD (0)
30#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
31#define LATENCY_FILTERED_SSD (0)
32/*
33 * For HD, very small latencies come from sequential IO. Such IO tells us
34 * nothing about whether it is being impacted by other IO, hence we ignore it
35 */
36#define LATENCY_FILTERED_HD (1000L) /* 1ms */
30 37
31#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT) 38#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
32 39
@@ -212,6 +219,7 @@ struct throtl_data
212 struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE]; 219 struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
213 struct latency_bucket __percpu *latency_buckets; 220 struct latency_bucket __percpu *latency_buckets;
214 unsigned long last_calculate_time; 221 unsigned long last_calculate_time;
222 unsigned long filtered_latency;
215 223
216 bool track_bio_latency; 224 bool track_bio_latency;
217}; 225};
@@ -698,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
698static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, 706static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
699 unsigned long expires) 707 unsigned long expires)
700{ 708{
701 unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice; 709 unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
702 710
703 /* 711 /*
704 * Since we are adjusting the throttle limit dynamically, the sleep 712 * Since we are adjusting the throttle limit dynamically, the sleep
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
2281 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat), 2289 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2282 bio_op(bio), lat); 2290 bio_op(bio), lat);
2283 2291
2284 if (tg->latency_target) { 2292 if (tg->latency_target && lat >= tg->td->filtered_latency) {
2285 int bucket; 2293 int bucket;
2286 unsigned int threshold; 2294 unsigned int threshold;
2287 2295
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
2417void blk_throtl_register_queue(struct request_queue *q) 2425void blk_throtl_register_queue(struct request_queue *q)
2418{ 2426{
2419 struct throtl_data *td; 2427 struct throtl_data *td;
2428 int i;
2420 2429
2421 td = q->td; 2430 td = q->td;
2422 BUG_ON(!td); 2431 BUG_ON(!td);
2423 2432
2424 if (blk_queue_nonrot(q)) 2433 if (blk_queue_nonrot(q)) {
2425 td->throtl_slice = DFL_THROTL_SLICE_SSD; 2434 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2426 else 2435 td->filtered_latency = LATENCY_FILTERED_SSD;
2436 } else {
2427 td->throtl_slice = DFL_THROTL_SLICE_HD; 2437 td->throtl_slice = DFL_THROTL_SLICE_HD;
2438 td->filtered_latency = LATENCY_FILTERED_HD;
2439 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2440 td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
2441 }
2428#ifndef CONFIG_BLK_DEV_THROTTLING_LOW 2442#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2429 /* if no low limit, use previous default */ 2443 /* if no low limit, use previous default */
2430 td->throtl_slice = DFL_THROTL_SLICE_HD; 2444 td->throtl_slice = DFL_THROTL_SLICE_HD;
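The throttling change above ignores latency samples below a per-device floor (LATENCY_FILTERED_HD) when judging whether a cgroup meets its latency target. A rough user-space sketch of that sample filtering (constants and structures here are illustrative, not the kernel code):

#include <stdio.h>

#define FILTERED_LATENCY_US 1000  /* ignore samples below 1ms, as on HDDs */

struct lat_stats {
	unsigned long long sum;
	unsigned long samples;
};

/* Only samples above the floor say anything about interference. */
static void track_latency(struct lat_stats *s, unsigned long lat_us)
{
	if (lat_us < FILTERED_LATENCY_US)
		return;
	s->sum += lat_us;
	s->samples++;
}

int main(void)
{
	struct lat_stats s = { 0, 0 };
	unsigned long trace[] = { 120, 80, 4200, 150, 9000 };
	size_t i;

	for (i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
		track_latency(&s, trace[i]);

	if (s.samples)
		printf("avg tracked latency: %llu us over %lu samples\n",
		       s.sum / s.samples, s.samples);
	return 0;
}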
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index d3a989e718f5..3cd6e12cfc46 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
141 * signature and returns that to us. 141 * signature and returns that to us.
142 */ 142 */
143 ret = crypto_akcipher_verify(req); 143 ret = crypto_akcipher_verify(req);
144 if (ret == -EINPROGRESS) { 144 if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
145 wait_for_completion(&compl.completion); 145 wait_for_completion(&compl.completion);
146 ret = compl.err; 146 ret = compl.err;
147 } 147 }
diff --git a/crypto/drbg.c b/crypto/drbg.c
index fa749f470135..cdb27ac4b226 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
1767 break; 1767 break;
1768 case -EINPROGRESS: 1768 case -EINPROGRESS:
1769 case -EBUSY: 1769 case -EBUSY:
1770 ret = wait_for_completion_interruptible( 1770 wait_for_completion(&drbg->ctr_completion);
1771 &drbg->ctr_completion); 1771 if (!drbg->ctr_async_err) {
1772 if (!ret && !drbg->ctr_async_err) {
1773 reinit_completion(&drbg->ctr_completion); 1772 reinit_completion(&drbg->ctr_completion);
1774 break; 1773 break;
1775 } 1774 }
diff --git a/crypto/gcm.c b/crypto/gcm.c
index b7ad808be3d4..3841b5eafa7e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
152 152
153 err = crypto_skcipher_encrypt(&data->req); 153 err = crypto_skcipher_encrypt(&data->req);
154 if (err == -EINPROGRESS || err == -EBUSY) { 154 if (err == -EINPROGRESS || err == -EBUSY) {
155 err = wait_for_completion_interruptible( 155 wait_for_completion(&data->result.completion);
156 &data->result.completion); 156 err = data->result.err;
157 if (!err)
158 err = data->result.err;
159 } 157 }
160 158
161 if (err) 159 if (err)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index c5fecf97ee2f..797b28dc7b34 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -666,14 +666,6 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
666 int ret = -ENODEV; 666 int ret = -ENODEV;
667 struct fwnode_handle *iort_fwnode; 667 struct fwnode_handle *iort_fwnode;
668 668
669 /*
670 * If we already translated the fwspec there
671 * is nothing left to do, return the iommu_ops.
672 */
673 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
674 if (ops)
675 return ops;
676
677 if (node) { 669 if (node) {
678 iort_fwnode = iort_get_fwnode(node); 670 iort_fwnode = iort_get_fwnode(node);
679 if (!iort_fwnode) 671 if (!iort_fwnode)
@@ -735,6 +727,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
735 u32 streamid = 0; 727 u32 streamid = 0;
736 int err; 728 int err;
737 729
730 /*
731 * If we already translated the fwspec there
732 * is nothing left to do, return the iommu_ops.
733 */
734 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
735 if (ops)
736 return ops;
737
738 if (dev_is_pci(dev)) { 738 if (dev_is_pci(dev)) {
739 struct pci_bus *bus = to_pci_dev(dev)->bus; 739 struct pci_bus *bus = to_pci_dev(dev)->bus;
740 u32 rid; 740 u32 rid;
@@ -782,6 +782,12 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
782 if (err) 782 if (err)
783 ops = ERR_PTR(err); 783 ops = ERR_PTR(err);
784 784
785 /* Ignore all other errors apart from EPROBE_DEFER */
786 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
787 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
788 ops = NULL;
789 }
790
785 return ops; 791 return ops;
786} 792}
787 793
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a9a9ab3399d4..d42eeef9d928 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
782 if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || 782 if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
783 (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && 783 (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
784 (battery->capacity_now <= battery->alarm))) 784 (battery->capacity_now <= battery->alarm)))
785 pm_wakeup_hard_event(&battery->device->dev); 785 pm_wakeup_event(&battery->device->dev, 0);
786 786
787 return result; 787 return result;
788} 788}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 9ad8cdb58743..e19f530f1083 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
217 } 217 }
218 218
219 if (state) 219 if (state)
220 pm_wakeup_hard_event(&device->dev); 220 pm_wakeup_event(&device->dev, 0);
221 221
222 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); 222 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
223 if (ret == NOTIFY_DONE) 223 if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
402 } else { 402 } else {
403 int keycode; 403 int keycode;
404 404
405 pm_wakeup_hard_event(&device->dev); 405 pm_wakeup_event(&device->dev, 0);
406 if (button->suspended) 406 if (button->suspended)
407 break; 407 break;
408 408
@@ -534,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
534 lid_device = device; 534 lid_device = device;
535 } 535 }
536 536
537 device_init_wakeup(&device->dev, true);
538 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); 537 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
539 return 0; 538 return 0;
540 539
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 798d5003a039..993fd31394c8 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -24,7 +24,6 @@
24#include <linux/pm_qos.h> 24#include <linux/pm_qos.h>
25#include <linux/pm_domain.h> 25#include <linux/pm_domain.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/suspend.h>
28 27
29#include "internal.h" 28#include "internal.h"
30 29
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
400 mutex_lock(&acpi_pm_notifier_lock); 399 mutex_lock(&acpi_pm_notifier_lock);
401 400
402 if (adev->wakeup.flags.notifier_present) { 401 if (adev->wakeup.flags.notifier_present) {
403 pm_wakeup_ws_event(adev->wakeup.ws, 0, true); 402 __pm_wakeup_event(adev->wakeup.ws, 0);
404 if (adev->wakeup.context.work.func) 403 if (adev->wakeup.context.work.func)
405 queue_pm_work(&adev->wakeup.context.work); 404 queue_pm_work(&adev->wakeup.context.work);
406 } 405 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e39ec7b7cb67..3a10d7573477 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1371,8 +1371,8 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
1371 iort_set_dma_mask(dev); 1371 iort_set_dma_mask(dev);
1372 1372
1373 iommu = iort_iommu_configure(dev); 1373 iommu = iort_iommu_configure(dev);
1374 if (IS_ERR(iommu)) 1374 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
1375 return PTR_ERR(iommu); 1375 return -EPROBE_DEFER;
1376 1376
1377 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); 1377 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
1378 /* 1378 /*
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a6574d626340..097d630ab886 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
663 acpi_os_wait_events_complete(); 663 acpi_os_wait_events_complete();
664 if (acpi_sci_irq_valid()) 664 if (acpi_sci_irq_valid())
665 enable_irq_wake(acpi_sci_irq); 665 enable_irq_wake(acpi_sci_irq);
666
667 return 0; 666 return 0;
668} 667}
669 668
670static void acpi_freeze_wake(void)
671{
672 /*
673 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
674 * that the SCI has triggered while suspended, so cancel the wakeup in
675 * case it has not been a wakeup event (the GPEs will be checked later).
676 */
677 if (acpi_sci_irq_valid() &&
678 !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
679 pm_system_cancel_wakeup();
680}
681
682static void acpi_freeze_sync(void)
683{
684 /*
685 * Process all pending events in case there are any wakeup ones.
686 *
687 * The EC driver uses the system workqueue, so that one needs to be
688 * flushed too.
689 */
690 acpi_os_wait_events_complete();
691 flush_scheduled_work();
692}
693
694static void acpi_freeze_restore(void) 669static void acpi_freeze_restore(void)
695{ 670{
696 acpi_disable_wakeup_devices(ACPI_STATE_S0); 671 acpi_disable_wakeup_devices(ACPI_STATE_S0);
697 if (acpi_sci_irq_valid()) 672 if (acpi_sci_irq_valid())
698 disable_irq_wake(acpi_sci_irq); 673 disable_irq_wake(acpi_sci_irq);
699
700 acpi_enable_all_runtime_gpes(); 674 acpi_enable_all_runtime_gpes();
701} 675}
702 676
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
708static const struct platform_freeze_ops acpi_freeze_ops = { 682static const struct platform_freeze_ops acpi_freeze_ops = {
709 .begin = acpi_freeze_begin, 683 .begin = acpi_freeze_begin,
710 .prepare = acpi_freeze_prepare, 684 .prepare = acpi_freeze_prepare,
711 .wake = acpi_freeze_wake,
712 .sync = acpi_freeze_sync,
713 .restore = acpi_freeze_restore, 685 .restore = acpi_freeze_restore,
714 .end = acpi_freeze_end, 686 .end = acpi_freeze_end,
715}; 687};
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 2fc52407306c..c69954023c2e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1364{} 1364{}
1365#endif 1365#endif
1366 1366
1367/*
1368 * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
1369 * as DUMMY, or detected but eventually get a "link down" and never get up
1370 * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
1371 * port_map may hold a value of 0x00.
1372 *
1373 * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
1374 * and can significantly reduce the occurrence of the problem.
1375 *
1376 * https://bugzilla.kernel.org/show_bug.cgi?id=189471
1377 */
1378static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
1379 struct pci_dev *pdev)
1380{
1381 static const struct dmi_system_id sysids[] = {
1382 {
1383 .ident = "Acer Switch Alpha 12",
1384 .matches = {
1385 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1386 DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
1387 },
1388 },
1389 { }
1390 };
1391
1392 if (dmi_check_system(sysids)) {
1393 dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
1394 if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
1395 hpriv->port_map = 0x7;
1396 hpriv->cap = 0xC734FF02;
1397 }
1398 }
1399}
1400
1367#ifdef CONFIG_ARM64 1401#ifdef CONFIG_ARM64
1368/* 1402/*
1369 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently. 1403 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1636 "online status unreliable, applying workaround\n"); 1670 "online status unreliable, applying workaround\n");
1637 } 1671 }
1638 1672
1673
1674 /* Acer SA5-271 workaround modifies private_data */
1675 acer_sa5_271_workaround(hpriv, pdev);
1676
 1639 /* CAP.NP sometimes indicates the index of the last enabled 1677
1640 * port, at other times, that of the last possible port, so 1678 * port, at other times, that of the last possible port, so
1641 * determining the maximum port number requires looking at 1679 * determining the maximum port number requires looking at
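The AHCI hunk above gates the CAP/port_map override on a DMI match for the affected machine. A user-space sketch of the same quirk-table idea (the struct and helper are illustrative stand-ins, not the kernel's dmi_check_system()):

#include <stdio.h>
#include <string.h>

struct quirk_id {
	const char *vendor;
	const char *product;
};

static const struct quirk_id quirks[] = {
	{ "Acer", "Switch SA5-271" },
	{ NULL, NULL }
};

/* Returns 1 if the running system matches one of the quirk entries. */
static int quirk_matches(const char *vendor, const char *product)
{
	const struct quirk_id *q;

	for (q = quirks; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			return 1;
	return 0;
}

int main(void)
{
	/* In the kernel these strings come from the DMI/SMBIOS tables. */
	if (quirk_matches("Acer", "Switch SA5-271"))
		printf("applying workaround: force port_map\n");
	return 0;
}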
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index aaa761b9081c..cd2eab6aa92e 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
514 514
515 irq = platform_get_irq(pdev, 0); 515 irq = platform_get_irq(pdev, 0);
516 if (irq <= 0) { 516 if (irq <= 0) {
517 dev_err(dev, "no irq\n"); 517 if (irq != -EPROBE_DEFER)
518 return -EINVAL; 518 dev_err(dev, "no irq\n");
519 return irq;
519 } 520 }
520 521
521 hpriv->irq = irq; 522 hpriv->irq = irq;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2d83b8c75965..e157a0e44419 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
6800 } 6800 }
6801 6801
6802 force_ent->port = simple_strtoul(id, &endp, 10); 6802 force_ent->port = simple_strtoul(id, &endp, 10);
6803 if (p == endp || *endp != '\0') { 6803 if (id == endp || *endp != '\0') {
6804 *reason = "invalid port/link"; 6804 *reason = "invalid port/link";
6805 return -EINVAL; 6805 return -EINVAL;
6806 } 6806 }
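The libata fix above compares the strtoul end pointer against the string that was actually parsed (id), not an unrelated variable. A small sketch of that end-pointer validation idiom:

#include <stdio.h>
#include <stdlib.h>

/* Returns 0 and stores the value on success, -1 on a malformed number. */
static int parse_port(const char *id, unsigned long *port)
{
	char *endp;

	*port = strtoul(id, &endp, 10);
	/* Reject empty input and trailing junk: compare against 'id' itself. */
	if (id == endp || *endp != '\0')
		return -1;
	return 0;
}

int main(void)
{
	unsigned long port;

	printf("\"3\"  -> %d\n", parse_port("3", &port));   /* 0, valid  */
	printf("\"3x\" -> %d\n", parse_port("3x", &port));  /* -1, junk  */
	printf("\"\"   -> %d\n", parse_port("", &port));    /* -1, empty */
	return 0;
}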
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b66bcda88320..3b2246dded74 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
4067 struct ata_host *host; 4067 struct ata_host *host;
4068 struct mv_host_priv *hpriv; 4068 struct mv_host_priv *hpriv;
4069 struct resource *res; 4069 struct resource *res;
4070 void __iomem *mmio;
4071 int n_ports = 0, irq = 0; 4070 int n_ports = 0, irq = 0;
4072 int rc; 4071 int rc;
4073 int port; 4072 int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
4086 * Get the register base first 4085 * Get the register base first
4087 */ 4086 */
4088 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 4087 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4089 mmio = devm_ioremap_resource(&pdev->dev, res); 4088 if (res == NULL)
4090 if (IS_ERR(mmio)) 4089 return -EINVAL;
4091 return PTR_ERR(mmio);
4092 4090
4093 /* allocate host */ 4091 /* allocate host */
4094 if (pdev->dev.of_node) { 4092 if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
4132 hpriv->board_idx = chip_soc; 4130 hpriv->board_idx = chip_soc;
4133 4131
4134 host->iomap = NULL; 4132 host->iomap = NULL;
4135 hpriv->base = mmio - SATAHC0_REG_BASE; 4133 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4134 resource_size(res));
4135 if (!hpriv->base)
4136 return -ENOMEM;
4137
4138 hpriv->base -= SATAHC0_REG_BASE;
4136 4139
4137 hpriv->clk = clk_get(&pdev->dev, NULL); 4140 hpriv->clk = clk_get(&pdev->dev, NULL);
4138 if (IS_ERR(hpriv->clk)) 4141 if (IS_ERR(hpriv->clk))
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 5d38245a7a73..b7939a2c1fab 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
890 dev_err(&pdev->dev, "failed to get access to sata clock\n"); 890 dev_err(&pdev->dev, "failed to get access to sata clock\n");
891 return PTR_ERR(priv->clk); 891 return PTR_ERR(priv->clk);
892 } 892 }
893 clk_prepare_enable(priv->clk); 893
894 ret = clk_prepare_enable(priv->clk);
895 if (ret)
896 return ret;
894 897
895 host = ata_host_alloc(&pdev->dev, 1); 898 host = ata_host_alloc(&pdev->dev, 1);
896 if (!host) { 899 if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
970 struct ata_host *host = dev_get_drvdata(dev); 973 struct ata_host *host = dev_get_drvdata(dev);
971 struct sata_rcar_priv *priv = host->private_data; 974 struct sata_rcar_priv *priv = host->private_data;
972 void __iomem *base = priv->base; 975 void __iomem *base = priv->base;
976 int ret;
973 977
974 clk_prepare_enable(priv->clk); 978 ret = clk_prepare_enable(priv->clk);
979 if (ret)
980 return ret;
975 981
976 /* ack and mask */ 982 /* ack and mask */
977 iowrite32(0, base + SATAINTSTAT_REG); 983 iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
988{ 994{
989 struct ata_host *host = dev_get_drvdata(dev); 995 struct ata_host *host = dev_get_drvdata(dev);
990 struct sata_rcar_priv *priv = host->private_data; 996 struct sata_rcar_priv *priv = host->private_data;
997 int ret;
991 998
992 clk_prepare_enable(priv->clk); 999 ret = clk_prepare_enable(priv->clk);
1000 if (ret)
1001 return ret;
993 1002
994 sata_rcar_setup_port(host); 1003 sata_rcar_setup_port(host);
995 1004
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e987a6f55d36..9faee1c893e5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
1091 if (async_error) 1091 if (async_error)
1092 goto Complete; 1092 goto Complete;
1093 1093
1094 if (pm_wakeup_pending()) {
1095 async_error = -EBUSY;
1096 goto Complete;
1097 }
1098
1094 if (dev->power.syscore || dev->power.direct_complete) 1099 if (dev->power.syscore || dev->power.direct_complete)
1095 goto Complete; 1100 goto Complete;
1096 1101
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 9c36b27996fc..c313b600d356 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
28/* First wakeup IRQ seen by the kernel in the last cycle. */ 28/* First wakeup IRQ seen by the kernel in the last cycle. */
29unsigned int pm_wakeup_irq __read_mostly; 29unsigned int pm_wakeup_irq __read_mostly;
30 30
31/* If greater than 0 and the system is suspending, terminate the suspend. */ 31/* If set and the system is suspending, terminate the suspend. */
32static atomic_t pm_abort_suspend __read_mostly; 32static bool pm_abort_suspend __read_mostly;
33 33
34/* 34/*
35 * Combined counters of registered wakeup events and wakeup events in progress. 35 * Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void)
855 pm_print_active_wakeup_sources(); 855 pm_print_active_wakeup_sources();
856 } 856 }
857 857
858 return ret || atomic_read(&pm_abort_suspend) > 0; 858 return ret || pm_abort_suspend;
859} 859}
860 860
861void pm_system_wakeup(void) 861void pm_system_wakeup(void)
862{ 862{
863 atomic_inc(&pm_abort_suspend); 863 pm_abort_suspend = true;
864 freeze_wake(); 864 freeze_wake();
865} 865}
866EXPORT_SYMBOL_GPL(pm_system_wakeup); 866EXPORT_SYMBOL_GPL(pm_system_wakeup);
867 867
868void pm_system_cancel_wakeup(void) 868void pm_wakeup_clear(void)
869{
870 atomic_dec(&pm_abort_suspend);
871}
872
873void pm_wakeup_clear(bool reset)
874{ 869{
870 pm_abort_suspend = false;
875 pm_wakeup_irq = 0; 871 pm_wakeup_irq = 0;
876 if (reset)
877 atomic_set(&pm_abort_suspend, 0);
878} 872}
879 873
880void pm_system_irq_wakeup(unsigned int irq_number) 874void pm_system_irq_wakeup(unsigned int irq_number)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d932906f24..ebbd0c3fe0ed 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -608,6 +608,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
608 */ 608 */
609static int loop_flush(struct loop_device *lo) 609static int loop_flush(struct loop_device *lo)
610{ 610{
611 /* loop not yet configured, no running thread, nothing to flush */
612 if (lo->lo_state != Lo_bound)
613 return 0;
611 return loop_switch(lo, NULL); 614 return loop_switch(lo, NULL);
612} 615}
613 616
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7de5bd76a31..eb1158532de3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
571static int min_perf_pct_min(void) 571static int min_perf_pct_min(void)
572{ 572{
573 struct cpudata *cpu = all_cpu_data[0]; 573 struct cpudata *cpu = all_cpu_data[0];
574 int turbo_pstate = cpu->pstate.turbo_pstate;
574 575
575 return DIV_ROUND_UP(cpu->pstate.min_pstate * 100, 576 return turbo_pstate ?
576 cpu->pstate.turbo_pstate); 577 DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
577} 578}
578 579
579static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 580static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
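The intel_pstate change above avoids a division by zero when the turbo P-state is not yet known. A small sketch of the guarded DIV_ROUND_UP computation (the P-state values used here are made up):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int min_perf_pct(int min_pstate, int turbo_pstate)
{
	/* turbo_pstate may still be 0 early in init; report 0% instead of crashing */
	return turbo_pstate ?
		DIV_ROUND_UP(min_pstate * 100, turbo_pstate) : 0;
}

int main(void)
{
	printf("%d%%\n", min_perf_pct(8, 0));   /* 0, guarded */
	printf("%d%%\n", min_perf_pct(8, 32));  /* 25         */
	return 0;
}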
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index 8bf27323f7a3..b58233e4ed71 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -27,6 +27,26 @@ struct bmp_header {
27 u32 size; 27 u32 size;
28} __packed; 28} __packed;
29 29
30static bool efi_bgrt_addr_valid(u64 addr)
31{
32 efi_memory_desc_t *md;
33
34 for_each_efi_memory_desc(md) {
35 u64 size;
36 u64 end;
37
38 if (md->type != EFI_BOOT_SERVICES_DATA)
39 continue;
40
41 size = md->num_pages << EFI_PAGE_SHIFT;
42 end = md->phys_addr + size;
43 if (addr >= md->phys_addr && addr < end)
44 return true;
45 }
46
47 return false;
48}
49
30void __init efi_bgrt_init(struct acpi_table_header *table) 50void __init efi_bgrt_init(struct acpi_table_header *table)
31{ 51{
32 void *image; 52 void *image;
@@ -36,7 +56,7 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
36 if (acpi_disabled) 56 if (acpi_disabled)
37 return; 57 return;
38 58
39 if (!efi_enabled(EFI_BOOT)) 59 if (!efi_enabled(EFI_MEMMAP))
40 return; 60 return;
41 61
42 if (table->length < sizeof(bgrt_tab)) { 62 if (table->length < sizeof(bgrt_tab)) {
@@ -65,6 +85,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
65 goto out; 85 goto out;
66 } 86 }
67 87
88 if (!efi_bgrt_addr_valid(bgrt->image_address)) {
89 pr_notice("Ignoring BGRT: invalid image address\n");
90 goto out;
91 }
68 image = early_memremap(bgrt->image_address, sizeof(bmp_header)); 92 image = early_memremap(bgrt->image_address, sizeof(bmp_header));
69 if (!image) { 93 if (!image) {
70 pr_notice("Ignoring BGRT: failed to map image header memory\n"); 94 pr_notice("Ignoring BGRT: failed to map image header memory\n");
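efi_bgrt_addr_valid() above walks the EFI memory map and only accepts an image address that falls inside an EFI_BOOT_SERVICES_DATA region. A simplified sketch of that range check over a descriptor table (made-up types, not the real EFI structures):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct mem_desc {
	uint64_t phys_addr;
	uint64_t num_pages;
	int boot_services_data;  /* stand-in for the md->type check */
};

static int addr_valid(const struct mem_desc *map, size_t n, uint64_t addr)
{
	size_t i;

	for (i = 0; i < n; i++) {
		uint64_t end = map[i].phys_addr +
			       (map[i].num_pages << PAGE_SHIFT);

		if (!map[i].boot_services_data)
			continue;
		if (addr >= map[i].phys_addr && addr < end)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct mem_desc map[] = {
		{ 0x100000, 16, 0 },
		{ 0x200000,  4, 1 },   /* 0x200000 - 0x204000 */
	};

	printf("%d\n", addr_valid(map, 2, 0x201000));  /* 1: inside  */
	printf("%d\n", addr_valid(map, 2, 0x300000));  /* 0: outside */
	return 0;
}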
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8be9719284b0..aa885a614e27 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
508 bool has_connectors = 508 bool has_connectors =
509 !!new_crtc_state->connector_mask; 509 !!new_crtc_state->connector_mask;
510 510
511 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
512
511 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { 513 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
512 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", 514 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
513 crtc->base.id, crtc->name); 515 crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
551 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 553 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
552 const struct drm_connector_helper_funcs *funcs = connector->helper_private; 554 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
553 555
556 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
557
554 /* 558 /*
555 * This only sets crtc->connectors_changed for routing changes, 559 * This only sets crtc->connectors_changed for routing changes,
556 * drivers must set crtc->connectors_changed themselves when 560 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
650 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 654 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
651 const struct drm_plane_helper_funcs *funcs; 655 const struct drm_plane_helper_funcs *funcs;
652 656
657 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
658
653 funcs = plane->helper_private; 659 funcs = plane->helper_private;
654 660
655 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane); 661 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
2663 2669
2664 drm_modeset_acquire_init(&ctx, 0); 2670 drm_modeset_acquire_init(&ctx, 0);
2665 while (1) { 2671 while (1) {
2672 err = drm_modeset_lock_all_ctx(dev, &ctx);
2673 if (err)
2674 goto out;
2675
2666 err = drm_atomic_helper_commit_duplicated_state(state, &ctx); 2676 err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
2677out:
2667 if (err != -EDEADLK) 2678 if (err != -EDEADLK)
2668 break; 2679 break;
2669 2680
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b5c6bb46a425..37b8ad3e30d8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
358void drm_unplug_dev(struct drm_device *dev) 358void drm_unplug_dev(struct drm_device *dev)
359{ 359{
360 /* for a USB device */ 360 /* for a USB device */
361 drm_dev_unregister(dev); 361 if (drm_core_check_feature(dev, DRIVER_MODESET))
362 drm_modeset_unregister_all(dev);
363
364 drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
365 drm_minor_unregister(dev, DRM_MINOR_RENDER);
366 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
362 367
363 mutex_lock(&drm_global_mutex); 368 mutex_lock(&drm_global_mutex);
364 369
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5abc69c9630f..f77dcfaade6c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
760 * Get the endpoint node. In our case, dsi has one output port1 760 * Get the endpoint node. In our case, dsi has one output port1
761 * to which the external HDMI bridge is connected. 761 * to which the external HDMI bridge is connected.
762 */ 762 */
763 ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge); 763 ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
764 if (ret) 764 if (ret)
765 return ret; 765 return ret;
766 766
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c994fe6e65b2..48428672fc6e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1235 goto out_fini; 1235 goto out_fini;
1236 1236
1237 pci_set_drvdata(pdev, &dev_priv->drm); 1237 pci_set_drvdata(pdev, &dev_priv->drm);
1238 /*
1239 * Disable the system suspend direct complete optimization, which can
 1240 * leave the device suspended, skipping the driver's suspend handlers
1241 * if the device was already runtime suspended. This is needed due to
1242 * the difference in our runtime and system suspend sequence and
 1243 * because the HDA driver may require us to enable the audio power
1244 * domain during system suspend.
1245 */
1246 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
1238 1247
1239 ret = i915_driver_init_early(dev_priv, ent); 1248 ret = i915_driver_init_early(dev_priv, ent);
1240 if (ret < 0) 1249 if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 963f6d4481f7..2c453a4e97d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2991,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2991 return false; 2991 return false;
2992} 2992}
2993 2993
2994static inline bool
2995intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2996{
2997#ifdef CONFIG_INTEL_IOMMU
2998 if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
2999 return true;
3000#endif
3001 return false;
3002}
3003
2994int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 3004int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2995 int enable_ppgtt); 3005 int enable_ppgtt);
2996 3006
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b6ac3df18b58..462031cbd77f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3298,6 +3298,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3298{ 3298{
3299 int ret; 3299 int ret;
3300 3300
3301 /* If the device is asleep, we have no requests outstanding */
3302 if (!READ_ONCE(i915->gt.awake))
3303 return 0;
3304
3301 if (flags & I915_WAIT_LOCKED) { 3305 if (flags & I915_WAIT_LOCKED) {
3302 struct i915_gem_timeline *tl; 3306 struct i915_gem_timeline *tl;
3303 3307
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 50b8f1139ff9..f1989b8792dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2191 gen8_set_pte(&gtt_base[i], scratch_pte); 2191 gen8_set_pte(&gtt_base[i], scratch_pte);
2192} 2192}
2193 2193
2194static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2195{
2196 struct drm_i915_private *dev_priv = vm->i915;
2197
2198 /*
2199 * Make sure the internal GAM fifo has been cleared of all GTT
2200 * writes before exiting stop_machine(). This guarantees that
2201 * any aperture accesses waiting to start in another process
2202 * cannot back up behind the GTT writes causing a hang.
2203 * The register can be any arbitrary GAM register.
2204 */
2205 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2206}
2207
2208struct insert_page {
2209 struct i915_address_space *vm;
2210 dma_addr_t addr;
2211 u64 offset;
2212 enum i915_cache_level level;
2213};
2214
2215static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2216{
2217 struct insert_page *arg = _arg;
2218
2219 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2220 bxt_vtd_ggtt_wa(arg->vm);
2221
2222 return 0;
2223}
2224
2225static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2226 dma_addr_t addr,
2227 u64 offset,
2228 enum i915_cache_level level,
2229 u32 unused)
2230{
2231 struct insert_page arg = { vm, addr, offset, level };
2232
2233 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2234}
2235
2236struct insert_entries {
2237 struct i915_address_space *vm;
2238 struct sg_table *st;
2239 u64 start;
2240 enum i915_cache_level level;
2241};
2242
2243static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2244{
2245 struct insert_entries *arg = _arg;
2246
2247 gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
2248 bxt_vtd_ggtt_wa(arg->vm);
2249
2250 return 0;
2251}
2252
2253static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2254 struct sg_table *st,
2255 u64 start,
2256 enum i915_cache_level level,
2257 u32 unused)
2258{
2259 struct insert_entries arg = { vm, st, start, level };
2260
2261 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2262}
2263
2264struct clear_range {
2265 struct i915_address_space *vm;
2266 u64 start;
2267 u64 length;
2268};
2269
2270static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2271{
2272 struct clear_range *arg = _arg;
2273
2274 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2275 bxt_vtd_ggtt_wa(arg->vm);
2276
2277 return 0;
2278}
2279
2280static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2281 u64 start,
2282 u64 length)
2283{
2284 struct clear_range arg = { vm, start, length };
2285
2286 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2287}
2288
2194static void gen6_ggtt_clear_range(struct i915_address_space *vm, 2289static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2195 u64 start, u64 length) 2290 u64 start, u64 length)
2196{ 2291{
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2785 2880
2786 ggtt->base.insert_entries = gen8_ggtt_insert_entries; 2881 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
2787 2882
2883 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2884 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2885 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2886 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2887 if (ggtt->base.clear_range != nop_clear_range)
2888 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2889 }
2890
2788 ggtt->invalidate = gen6_ggtt_invalidate; 2891 ggtt->invalidate = gen6_ggtt_invalidate;
2789 2892
2790 return ggtt_probe_common(ggtt, size); 2893 return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
2997 3100
2998void i915_ggtt_disable_guc(struct drm_i915_private *i915) 3101void i915_ggtt_disable_guc(struct drm_i915_private *i915)
2999{ 3102{
3000 i915->ggtt.invalidate = gen6_ggtt_invalidate; 3103 if (i915->ggtt.invalidate == guc_ggtt_invalidate)
3104 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3001} 3105}
3002 3106
3003void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) 3107void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
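The i915 hunks above serialize GGTT updates by packing the arguments into a small struct and running the real update through a callback under stop_machine(), followed by a posting read. A user-space sketch of the argument-struct-plus-callback shape (run_serialized() is a stand-in for stop_machine(), not a real API):

#include <stdio.h>
#include <stdint.h>

/* Arguments for the deferred update, packed so one pointer can carry them. */
struct insert_page_args {
	uint64_t addr;
	uint64_t offset;
	int level;
};

/* Stand-in for stop_machine(): in the kernel this runs fn with all CPUs held. */
static int run_serialized(int (*fn)(void *), void *arg)
{
	return fn(arg);
}

static int insert_page_cb(void *_arg)
{
	struct insert_page_args *arg = _arg;

	printf("insert page: addr=%#llx offset=%#llx level=%d\n",
	       (unsigned long long)arg->addr,
	       (unsigned long long)arg->offset, arg->level);
	/* The kernel version ends with a posting read to flush the GAM fifo. */
	return 0;
}

int main(void)
{
	struct insert_page_args arg = { 0x1000, 0x2000, 1 };

	return run_serialized(insert_page_cb, &arg);
}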
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a0d6d4317a49..fb5231f98c0d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
278 obj->mm.quirked = false; 278 obj->mm.quirked = false;
279 } 279 }
280 if (!i915_gem_object_is_tiled(obj)) { 280 if (!i915_gem_object_is_tiled(obj)) {
281 GEM_BUG_ON(!obj->mm.quirked); 281 GEM_BUG_ON(obj->mm.quirked);
282 __i915_gem_object_pin_pages(obj); 282 __i915_gem_object_pin_pages(obj);
283 obj->mm.quirked = true; 283 obj->mm.quirked = true;
284 } 284 }
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f87b0c4e564d..1a78363c7f4a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
208static const struct intel_device_info intel_ironlake_m_info = { 208static const struct intel_device_info intel_ironlake_m_info = {
209 GEN5_FEATURES, 209 GEN5_FEATURES,
210 .platform = INTEL_IRONLAKE, 210 .platform = INTEL_IRONLAKE,
211 .is_mobile = 1, 211 .is_mobile = 1, .has_fbc = 1,
212}; 212};
213 213
214#define GEN6_FEATURES \ 214#define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
390 .has_hw_contexts = 1, \ 390 .has_hw_contexts = 1, \
391 .has_logical_ring_contexts = 1, \ 391 .has_logical_ring_contexts = 1, \
392 .has_guc = 1, \ 392 .has_guc = 1, \
393 .has_decoupled_mmio = 1, \
394 .has_aliasing_ppgtt = 1, \ 393 .has_aliasing_ppgtt = 1, \
395 .has_full_ppgtt = 1, \ 394 .has_full_ppgtt = 1, \
396 .has_full_48bit_ppgtt = 1, \ 395 .has_full_48bit_ppgtt = 1, \
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cabe52a4e3b..569717a12723 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12203,6 +12203,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
12203 * type. For DP ports it behaves like most other platforms, but on HDMI 12203 * type. For DP ports it behaves like most other platforms, but on HDMI
12204 * there's an extra 1 line difference. So we need to add two instead of 12204 * there's an extra 1 line difference. So we need to add two instead of
12205 * one to the value. 12205 * one to the value.
12206 *
12207 * On VLV/CHV DSI the scanline counter would appear to increment
12208 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12209 * that means we can't tell whether we're in vblank or not while
12210 * we're on that particular line. We must still set scanline_offset
12211 * to 1 so that the vblank timestamps come out correct when we query
12212 * the scanline counter from within the vblank interrupt handler.
12213 * However if queried just before the start of vblank we'll get an
12214 * answer that's slightly in the future.
12206 */ 12215 */
12207 if (IS_GEN2(dev_priv)) { 12216 if (IS_GEN2(dev_priv)) {
12208 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 12217 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 854e8e0c836b..f94eacff196c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
1075 return 0; 1075 return 0;
1076} 1076}
1077 1077
1078static bool ring_is_idle(struct intel_engine_cs *engine)
1079{
1080 struct drm_i915_private *dev_priv = engine->i915;
1081 bool idle = true;
1082
1083 intel_runtime_pm_get(dev_priv);
1084
1085 /* No bit for gen2, so assume the CS parser is idle */
1086 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
1087 idle = false;
1088
1089 intel_runtime_pm_put(dev_priv);
1090
1091 return idle;
1092}
1093
1078/** 1094/**
1079 * intel_engine_is_idle() - Report if the engine has finished process all work 1095 * intel_engine_is_idle() - Report if the engine has finished process all work
1080 * @engine: the intel_engine_cs 1096 * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
1084 */ 1100 */
1085bool intel_engine_is_idle(struct intel_engine_cs *engine) 1101bool intel_engine_is_idle(struct intel_engine_cs *engine)
1086{ 1102{
1087 struct drm_i915_private *dev_priv = engine->i915;
1088
1089 /* Any inflight/incomplete requests? */ 1103 /* Any inflight/incomplete requests? */
1090 if (!i915_seqno_passed(intel_engine_get_seqno(engine), 1104 if (!i915_seqno_passed(intel_engine_get_seqno(engine),
1091 intel_engine_last_submit(engine))) 1105 intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
1100 return false; 1114 return false;
1101 1115
1102 /* Ring stopped? */ 1116 /* Ring stopped? */
1103 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) 1117 if (!ring_is_idle(engine))
1104 return false; 1118 return false;
1105 1119
1106 return true; 1120 return true;
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ded2add18b26..d93c58410bff 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
82static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, 82static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
83 int *width, int *height) 83 int *width, int *height)
84{ 84{
85 int w, h;
86
87 if (drm_rotation_90_or_270(cache->plane.rotation)) {
88 w = cache->plane.src_h;
89 h = cache->plane.src_w;
90 } else {
91 w = cache->plane.src_w;
92 h = cache->plane.src_h;
93 }
94
95 if (width) 85 if (width)
96 *width = w; 86 *width = cache->plane.src_w;
97 if (height) 87 if (height)
98 *height = h; 88 *height = cache->plane.src_h;
99} 89}
100 90
101static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, 91static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
746 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; 736 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
747 737
748 cache->plane.rotation = plane_state->base.rotation; 738 cache->plane.rotation = plane_state->base.rotation;
739 /*
740 * Src coordinates are already rotated by 270 degrees for
741 * the 90/270 degree plane rotation cases (to match the
742 * GTT mapping), hence no need to account for rotation here.
743 */
749 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; 744 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
750 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; 745 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
751 cache->plane.visible = plane_state->base.visible; 746 cache->plane.visible = plane_state->base.visible;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 570bd603f401..2ca481b5aa69 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4335,11 +4335,19 @@ skl_compute_wm(struct drm_atomic_state *state)
4335 struct drm_crtc_state *cstate; 4335 struct drm_crtc_state *cstate;
4336 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4336 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4337 struct skl_wm_values *results = &intel_state->wm_results; 4337 struct skl_wm_values *results = &intel_state->wm_results;
4338 struct drm_device *dev = state->dev;
4338 struct skl_pipe_wm *pipe_wm; 4339 struct skl_pipe_wm *pipe_wm;
4339 bool changed = false; 4340 bool changed = false;
4340 int ret, i; 4341 int ret, i;
4341 4342
4342 /* 4343 /*
4344 * When we distrust bios wm we always need to recompute to set the
4345 * expected DDB allocations for each CRTC.
4346 */
4347 if (to_i915(dev)->wm.distrust_bios_wm)
4348 changed = true;
4349
4350 /*
4343 * If this transaction isn't actually touching any CRTC's, don't 4351 * If this transaction isn't actually touching any CRTC's, don't
4344 * bother with watermark calculation. Note that if we pass this 4352 * bother with watermark calculation. Note that if we pass this
4345 * test, we're guaranteed to hold at least one CRTC state mutex, 4353 * test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4349,6 +4357,7 @@ skl_compute_wm(struct drm_atomic_state *state)
4349 */ 4357 */
4350 for_each_new_crtc_in_state(state, crtc, cstate, i) 4358 for_each_new_crtc_in_state(state, crtc, cstate, i)
4351 changed = true; 4359 changed = true;
4360
4352 if (!changed) 4361 if (!changed)
4353 return 0; 4362 return 0;
4354 4363
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3780d0d2baf..559f1ab42bfc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
435 } 435 }
436 436
 437 /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */ 437
438 if (intel_crtc->config->pipe_src_w > 3200 || 438 if (dev_priv->psr.psr2_support &&
439 intel_crtc->config->pipe_src_h > 2000) { 439 (intel_crtc->config->pipe_src_w > 3200 ||
440 intel_crtc->config->pipe_src_h > 2000)) {
440 dev_priv->psr.psr2_support = false; 441 dev_priv->psr.psr2_support = false;
441 return false; 442 return false;
442 } 443 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8c87c717c7cd..e6517edcd16b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
83 */ 83 */
84void intel_pipe_update_start(struct intel_crtc *crtc) 84void intel_pipe_update_start(struct intel_crtc *crtc)
85{ 85{
86 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
86 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 87 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
87 long timeout = msecs_to_jiffies_timeout(1); 88 long timeout = msecs_to_jiffies_timeout(1);
88 int scanline, min, max, vblank_start; 89 int scanline, min, max, vblank_start;
89 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 90 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
91 bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
92 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
90 DEFINE_WAIT(wait); 93 DEFINE_WAIT(wait);
91 94
92 vblank_start = adjusted_mode->crtc_vblank_start; 95 vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
139 142
140 drm_crtc_vblank_put(&crtc->base); 143 drm_crtc_vblank_put(&crtc->base);
141 144
145 /*
146 * On VLV/CHV DSI the scanline counter would appear to
147 * increment approx. 1/3 of a scanline before start of vblank.
148 * The registers still get latched at start of vblank however.
149 * This means we must not write any registers on the first
150 * line of vblank (since not the whole line is actually in
151 * vblank). And unfortunately we can't use the interrupt to
152 * wait here since it will fire too soon. We could use the
153 * frame start interrupt instead since it will fire after the
154 * critical scanline, but that would require more changes
155 * in the interrupt code. So for now we'll just do the nasty
156 * thing and poll for the bad scanline to pass us by.
157 *
158 * FIXME figure out if BXT+ DSI suffers from this as well
159 */
160 while (need_vlv_dsi_wa && scanline == vblank_start)
161 scanline = intel_get_crtc_scanline(crtc);
162
142 crtc->debug.scanline_start = scanline; 163 crtc->debug.scanline_start = scanline;
143 crtc->debug.start_vbl_time = ktime_get(); 164 crtc->debug.start_vbl_time = ktime_get();
144 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); 165 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 4b7f73aeddac..f84115261ae7 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
59 * available in the work queue (note, the queue is shared, 59 * available in the work queue (note, the queue is shared,
60 * not per-engine). It is OK for this to be nonzero, but 60 * not per-engine). It is OK for this to be nonzero, but
61 * it should not be huge! 61 * it should not be huge!
62 * q_fail: failed to enqueue a work item. This should never happen,
63 * because we check for space beforehand.
64 * b_fail: failed to ring the doorbell. This should never happen, unless 62 * b_fail: failed to ring the doorbell. This should never happen, unless
65 * somehow the hardware misbehaves, or maybe if the GuC firmware 63 * somehow the hardware misbehaves, or maybe if the GuC firmware
66 * crashes? We probably need to reset the GPU to recover. 64 * crashes? We probably need to reset the GPU to recover.
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8fb801fab039..8b05ecb8fdef 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
673 ret = drm_of_find_panel_or_bridge(child, 673 ret = drm_of_find_panel_or_bridge(child,
674 imx_ldb->lvds_mux ? 4 : 2, 0, 674 imx_ldb->lvds_mux ? 4 : 2, 0,
675 &channel->panel, &channel->bridge); 675 &channel->panel, &channel->bridge);
676 if (ret) 676 if (ret && ret != -ENODEV)
677 return ret; 677 return ret;
678 678
679 /* panel ddc only if there is no bridge */ 679 /* panel ddc only if there is no bridge */
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 808b995a990f..b5cc6e12334c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,6 +19,7 @@
19#include <drm/drm_of.h> 19#include <drm/drm_of.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h> 21#include <linux/component.h>
22#include <linux/iopoll.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/of.h> 24#include <linux/of.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
900 901
901static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) 902static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
902{ 903{
903 u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */ 904 int ret;
904 905 u32 val;
905 while (timeout_ms--) {
906 if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
907 break;
908
909 usleep_range(2, 4);
910 }
911 906
912 if (timeout_ms == 0) { 907 ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
908 4, 2000000);
909 if (ret) {
913 DRM_WARN("polling dsi wait not busy timeout!\n"); 910 DRM_WARN("polling dsi wait not busy timeout!\n");
914 911
915 mtk_dsi_enable(dsi); 912 mtk_dsi_enable(dsi);
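
The hunk above replaces an open-coded busy-wait whose countdown test was unreliable (the post-decremented unsigned counter wraps before the timeout_ms == 0 check, so an actual timeout could go unreported) with the generic iopoll helper, which returns -ETIMEDOUT itself. A minimal sketch of the same pattern; MY_STATUS and MY_BUSY are placeholder names, not symbols from this driver:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_STATUS	0x0c		/* placeholder register offset */
#define MY_BUSY		BIT(0)		/* placeholder busy flag */

static int my_wait_not_busy(void __iomem *regs)
{
	u32 val;

	/*
	 * Poll MY_STATUS roughly every 4us until MY_BUSY clears, giving up
	 * after 2 seconds.  Returns 0 on success and -ETIMEDOUT otherwise,
	 * so the caller never has to maintain (and mis-check) its own
	 * countdown variable.
	 */
	return readl_poll_timeout(regs + MY_STATUS, val, !(val & MY_BUSY),
				  4, 2000000);
}
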
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 41a1c03b0347..0a4ffd724146 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
1062 } 1062 }
1063 1063
1064 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); 1064 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
1065 if (err) { 1065 if (err < 0) {
1066 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", 1066 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
1067 err); 1067 err);
1068 return err; 1068 return err;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 75382f5f0fce..10b227d83e9a 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
152 .max_register = 0x1000, 152 .max_register = 0x1000,
153}; 153};
154 154
155static int meson_drv_bind(struct device *dev) 155static int meson_drv_bind_master(struct device *dev, bool has_components)
156{ 156{
157 struct platform_device *pdev = to_platform_device(dev); 157 struct platform_device *pdev = to_platform_device(dev);
158 struct meson_drm *priv; 158 struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
233 if (ret) 233 if (ret)
234 goto free_drm; 234 goto free_drm;
235 235
236 ret = component_bind_all(drm->dev, drm); 236 if (has_components) {
237 if (ret) { 237 ret = component_bind_all(drm->dev, drm);
238 dev_err(drm->dev, "Couldn't bind all components\n"); 238 if (ret) {
239 goto free_drm; 239 dev_err(drm->dev, "Couldn't bind all components\n");
240 goto free_drm;
241 }
240 } 242 }
241 243
242 ret = meson_plane_create(priv); 244 ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
276 return ret; 278 return ret;
277} 279}
278 280
281static int meson_drv_bind(struct device *dev)
282{
283 return meson_drv_bind_master(dev, true);
284}
285
279static void meson_drv_unbind(struct device *dev) 286static void meson_drv_unbind(struct device *dev)
280{ 287{
281 struct drm_device *drm = dev_get_drvdata(dev); 288 struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
357 count += meson_probe_remote(pdev, &match, np, remote); 364 count += meson_probe_remote(pdev, &match, np, remote);
358 } 365 }
359 366
367 if (count && !match)
368 return meson_drv_bind_master(&pdev->dev, false);
369
360 /* If some endpoints were found, initialize the nodes */ 370 /* If some endpoints were found, initialize the nodes */
361 if (count) { 371 if (count) {
362 dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count); 372 dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 6a567fe347b3..820a4805916f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
4 4
5struct nvkm_alarm { 5struct nvkm_alarm {
6 struct list_head head; 6 struct list_head head;
7 struct list_head exec;
7 u64 timestamp; 8 u64 timestamp;
8 void (*func)(struct nvkm_alarm *); 9 void (*func)(struct nvkm_alarm *);
9}; 10};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 36268e1802b5..15a13d09d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
80module_param_named(modeset, nouveau_modeset, int, 0400); 80module_param_named(modeset, nouveau_modeset, int, 0400);
81 81
82MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); 82MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
83int nouveau_runtime_pm = -1; 83static int nouveau_runtime_pm = -1;
84module_param_named(runpm, nouveau_runtime_pm, int, 0400); 84module_param_named(runpm, nouveau_runtime_pm, int, 0400);
85 85
86static struct drm_driver driver_stub; 86static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
495 nouveau_fbcon_init(dev); 495 nouveau_fbcon_init(dev);
496 nouveau_led_init(dev); 496 nouveau_led_init(dev);
497 497
498 if (nouveau_runtime_pm != 0) { 498 if (nouveau_pmops_runtime()) {
499 pm_runtime_use_autosuspend(dev->dev); 499 pm_runtime_use_autosuspend(dev->dev);
500 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 500 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
501 pm_runtime_set_active(dev->dev); 501 pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
527{ 527{
528 struct nouveau_drm *drm = nouveau_drm(dev); 528 struct nouveau_drm *drm = nouveau_drm(dev);
529 529
530 if (nouveau_runtime_pm != 0) { 530 if (nouveau_pmops_runtime()) {
531 pm_runtime_get_sync(dev->dev); 531 pm_runtime_get_sync(dev->dev);
532 pm_runtime_forbid(dev->dev); 532 pm_runtime_forbid(dev->dev);
533 } 533 }
@@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
726 return nouveau_do_resume(drm_dev, false); 726 return nouveau_do_resume(drm_dev, false);
727} 727}
728 728
729bool
730nouveau_pmops_runtime()
731{
732 if (nouveau_runtime_pm == -1)
733 return nouveau_is_optimus() || nouveau_is_v1_dsm();
734 return nouveau_runtime_pm == 1;
735}
736
729static int 737static int
730nouveau_pmops_runtime_suspend(struct device *dev) 738nouveau_pmops_runtime_suspend(struct device *dev)
731{ 739{
@@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
733 struct drm_device *drm_dev = pci_get_drvdata(pdev); 741 struct drm_device *drm_dev = pci_get_drvdata(pdev);
734 int ret; 742 int ret;
735 743
736 if (nouveau_runtime_pm == 0) { 744 if (!nouveau_pmops_runtime()) {
737 pm_runtime_forbid(dev);
738 return -EBUSY;
739 }
740
741 /* are we optimus enabled? */
742 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
743 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
744 pm_runtime_forbid(dev); 745 pm_runtime_forbid(dev);
745 return -EBUSY; 746 return -EBUSY;
746 } 747 }
@@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
765 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; 766 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
766 int ret; 767 int ret;
767 768
768 if (nouveau_runtime_pm == 0) 769 if (!nouveau_pmops_runtime()) {
769 return -EINVAL; 770 pm_runtime_forbid(dev);
771 return -EBUSY;
772 }
770 773
771 pci_set_power_state(pdev, PCI_D0); 774 pci_set_power_state(pdev, PCI_D0);
772 pci_restore_state(pdev); 775 pci_restore_state(pdev);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
796 struct nouveau_drm *drm = nouveau_drm(drm_dev); 799 struct nouveau_drm *drm = nouveau_drm(drm_dev);
797 struct drm_crtc *crtc; 800 struct drm_crtc *crtc;
798 801
799 if (nouveau_runtime_pm == 0) { 802 if (!nouveau_pmops_runtime()) {
800 pm_runtime_forbid(dev);
801 return -EBUSY;
802 }
803
804 /* are we optimus enabled? */
805 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
806 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
807 pm_runtime_forbid(dev); 803 pm_runtime_forbid(dev);
808 return -EBUSY; 804 return -EBUSY;
809 } 805 }
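
The new nouveau_pmops_runtime() helper folds the repeated runpm checks into one place. A minimal sketch of the same tri-state module-parameter idiom (-1 auto, 0 off, 1 force on); the names below are placeholders, and the platform probe is only a stand-in for the Optimus/_DSM checks the real helper performs:

#include <linux/module.h>

static int my_runtime_pm = -1;		/* -1 = auto, 0 = off, 1 = force on */
module_param_named(runpm, my_runtime_pm, int, 0400);

/* Stand-in for the hardware/firmware probe the auto case consults. */
static bool my_platform_wants_runtime_pm(void)
{
	return false;
}

/*
 * One helper resolves the tri-state once, so every caller (load, unload,
 * runtime suspend/resume/idle, VGA switcheroo) applies the same policy
 * instead of re-implementing it.
 */
static bool my_runtime_pm_enabled(void)
{
	if (my_runtime_pm == -1)
		return my_platform_wants_runtime_pm();
	return my_runtime_pm == 1;
}
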
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eadec2f49ad3..a11b6aaed325 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
108#include <nvif/object.h> 108#include <nvif/object.h>
109#include <nvif/device.h> 109#include <nvif/device.h>
110 110
111extern int nouveau_runtime_pm;
112
113struct nouveau_drm { 111struct nouveau_drm {
114 struct nouveau_cli client; 112 struct nouveau_cli client;
115 struct drm_device *dev; 113 struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
195 193
196int nouveau_pmops_suspend(struct device *); 194int nouveau_pmops_suspend(struct device *);
197int nouveau_pmops_resume(struct device *); 195int nouveau_pmops_resume(struct device *);
196bool nouveau_pmops_runtime(void);
198 197
199#include <nvkm/core/tegra.h> 198#include <nvkm/core/tegra.h>
200 199
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index a4aacbc0cec8..02fe0efb9e16 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,7 +87,7 @@ void
87nouveau_vga_init(struct nouveau_drm *drm) 87nouveau_vga_init(struct nouveau_drm *drm)
88{ 88{
89 struct drm_device *dev = drm->dev; 89 struct drm_device *dev = drm->dev;
90 bool runtime = false; 90 bool runtime = nouveau_pmops_runtime();
91 91
92 /* only relevant for PCI devices */ 92 /* only relevant for PCI devices */
93 if (!dev->pdev) 93 if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
99 if (pci_is_thunderbolt_attached(dev->pdev)) 99 if (pci_is_thunderbolt_attached(dev->pdev))
100 return; 100 return;
101 101
102 if (nouveau_runtime_pm == 1)
103 runtime = true;
104 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
105 runtime = true;
106 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); 102 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
107 103
108 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) 104 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
113nouveau_vga_fini(struct nouveau_drm *drm) 109nouveau_vga_fini(struct nouveau_drm *drm)
114{ 110{
115 struct drm_device *dev = drm->dev; 111 struct drm_device *dev = drm->dev;
116 bool runtime = false; 112 bool runtime = nouveau_pmops_runtime();
117 113
118 vga_client_register(dev->pdev, NULL, NULL, NULL); 114 vga_client_register(dev->pdev, NULL, NULL, NULL);
119 115
120 if (pci_is_thunderbolt_attached(dev->pdev)) 116 if (pci_is_thunderbolt_attached(dev->pdev))
121 return; 117 return;
122 118
123 if (nouveau_runtime_pm == 1)
124 runtime = true;
125 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
126 runtime = true;
127
128 vga_switcheroo_unregister_client(dev->pdev); 119 vga_switcheroo_unregister_client(dev->pdev);
129 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) 120 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
130 vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); 121 vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a7663249b3ba..06e564a9ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2107 asyc->set.dither = true; 2107 asyc->set.dither = true;
2108 } 2108 }
2109 } else { 2109 } else {
2110 asyc->set.mask = ~0; 2110 if (asyc)
2111 asyc->set.mask = ~0;
2111 asyh->set.mask = ~0; 2112 asyh->set.mask = ~0;
2112 } 2113 }
2113 2114
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index f2a86eae0a0d..2437f7d41ca2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
50 /* Move to completed list. We'll drop the lock before 50 /* Move to completed list. We'll drop the lock before
51 * executing the callback so it can reschedule itself. 51 * executing the callback so it can reschedule itself.
52 */ 52 */
53 list_move_tail(&alarm->head, &exec); 53 list_del_init(&alarm->head);
54 list_add(&alarm->exec, &exec);
54 } 55 }
55 56
56 /* Shut down interrupt if no more pending alarms. */ 57 /* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
59 spin_unlock_irqrestore(&tmr->lock, flags); 60 spin_unlock_irqrestore(&tmr->lock, flags);
60 61
61 /* Execute completed callbacks. */ 62 /* Execute completed callbacks. */
62 list_for_each_entry_safe(alarm, atemp, &exec, head) { 63 list_for_each_entry_safe(alarm, atemp, &exec, exec) {
63 list_del_init(&alarm->head); 64 list_del(&alarm->exec);
64 alarm->func(alarm); 65 alarm->func(alarm);
65 } 66 }
66} 67}
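
The second list_head added to nvkm_alarm lets a callback re-arm itself (re-linking ->head onto the pending list) while the trigger path is still walking its local completed list through ->exec. A simplified sketch of that idiom, assuming a pending list protected by a spinlock and ignoring the timestamp filtering the real code does:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_alarm {
	struct list_head head;		/* on the timer's pending list */
	struct list_head exec;		/* on a local "run now" list */
	void (*func)(struct my_alarm *);
};

static void run_expired(struct list_head *pending, spinlock_t *lock)
{
	struct my_alarm *alarm, *tmp;
	unsigned long flags;
	LIST_HEAD(exec);

	/* Collect entries under the lock, but link them via ->exec. */
	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(alarm, tmp, pending, head) {
		list_del_init(&alarm->head);
		list_add(&alarm->exec, &exec);
	}
	spin_unlock_irqrestore(lock, flags);

	/* Run callbacks outside the lock; they may re-queue via ->head. */
	list_for_each_entry_safe(alarm, tmp, &exec, exec) {
		list_del(&alarm->exec);
		alarm->func(alarm);
	}
}
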
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d8fa7a9c9240..ce5f2d1f9994 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
245 struct drm_connector_state *conn_state) 245 struct drm_connector_state *conn_state)
246{ 246{
247 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 247 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
248 struct rockchip_dp_device *dp = to_dp(encoder);
249 int ret;
250 248
251 /* 249 /*
252 * The hardware IC designed that VOP must output the RGB10 video 250 * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
258 256
259 s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 257 s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
260 s->output_type = DRM_MODE_CONNECTOR_eDP; 258 s->output_type = DRM_MODE_CONNECTOR_eDP;
261 if (dp->data->chip_type == RK3399_EDP) {
262 /*
263 * For RK3399, VOP Lit must code the out mode to RGB888,
264 * VOP Big must code the out mode to RGB10.
265 */
266 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
267 encoder);
268 if (ret > 0)
269 s->output_mode = ROCKCHIP_OUT_MODE_P888;
270 }
271 259
272 return 0; 260 return 0;
273} 261}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a2169dd3d26b..14fa1f8351e8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
615{ 615{
616 struct cdn_dp_device *dp = encoder_to_dp(encoder); 616 struct cdn_dp_device *dp = encoder_to_dp(encoder);
617 int ret, val; 617 int ret, val;
618 struct rockchip_crtc_state *state;
619 618
620 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); 619 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
621 if (ret < 0) { 620 if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
625 624
626 DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", 625 DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
627 (ret) ? "LIT" : "BIG"); 626 (ret) ? "LIT" : "BIG");
628 state = to_rockchip_crtc_state(encoder->crtc->state); 627 if (ret)
629 if (ret) {
630 val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); 628 val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
631 state->output_mode = ROCKCHIP_OUT_MODE_P888; 629 else
632 } else {
633 val = DP_SEL_VOP_LIT << 16; 630 val = DP_SEL_VOP_LIT << 16;
634 state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
635 }
636 631
637 ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); 632 ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
638 if (ret) 633 if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 3f7a82d1e095..45589d6ce65e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
875static void vop_crtc_enable(struct drm_crtc *crtc) 875static void vop_crtc_enable(struct drm_crtc *crtc)
876{ 876{
877 struct vop *vop = to_vop(crtc); 877 struct vop *vop = to_vop(crtc);
878 const struct vop_data *vop_data = vop->data;
878 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); 879 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
879 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 880 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
880 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; 881 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
967 DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", 968 DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
968 s->output_type); 969 s->output_type);
969 } 970 }
971
972 /*
 973	 * If the VOP does not support RGB10 output, force RGB10 to RGB888.
974 */
975 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
976 !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
977 s->output_mode = ROCKCHIP_OUT_MODE_P888;
970 VOP_CTRL_SET(vop, out_mode, s->output_mode); 978 VOP_CTRL_SET(vop, out_mode, s->output_mode);
971 979
972 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len); 980 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
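
The VOP change advertises per-SoC capabilities as a feature bitmask and downgrades the requested output mode when the bit is absent. A small sketch of the pattern, with MY_FEATURE_OUTPUT_RGB10 and the OUT_MODE_* values as placeholders:

#include <linux/bits.h>
#include <linux/types.h>

#define MY_FEATURE_OUTPUT_RGB10	BIT(0)		/* placeholder feature flag */

struct my_soc_data {
	u64 feature;				/* per-SoC capability bitmask */
};

enum { OUT_MODE_RGB888, OUT_MODE_RGB10 };

/* Fall back from 10-bit output to 8-bit when the SoC variant does not
 * advertise the capability, instead of programming an unsupported mode. */
static int my_pick_output_mode(const struct my_soc_data *soc, int requested)
{
	if (requested == OUT_MODE_RGB10 &&
	    !(soc->feature & MY_FEATURE_OUTPUT_RGB10))
		return OUT_MODE_RGB888;
	return requested;
}
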
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 5a4faa85dbd2..9979fd0c2282 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -142,6 +142,9 @@ struct vop_data {
142 const struct vop_intr *intr; 142 const struct vop_intr *intr;
143 const struct vop_win_data *win; 143 const struct vop_win_data *win;
144 unsigned int win_size; 144 unsigned int win_size;
145
146#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
147 u64 feature;
145}; 148};
146 149
147/* interrupt define */ 150/* interrupt define */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 0da44442aab0..bafd698a28b1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
275static const struct vop_data rk3288_vop = { 275static const struct vop_data rk3288_vop = {
276 .init_table = rk3288_init_reg_table, 276 .init_table = rk3288_init_reg_table,
277 .table_size = ARRAY_SIZE(rk3288_init_reg_table), 277 .table_size = ARRAY_SIZE(rk3288_init_reg_table),
278 .feature = VOP_FEATURE_OUTPUT_RGB10,
278 .intr = &rk3288_vop_intr, 279 .intr = &rk3288_vop_intr,
279 .ctrl = &rk3288_ctrl_data, 280 .ctrl = &rk3288_ctrl_data,
280 .win = rk3288_vop_win_data, 281 .win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
343static const struct vop_data rk3399_vop_big = { 344static const struct vop_data rk3399_vop_big = {
344 .init_table = rk3399_init_reg_table, 345 .init_table = rk3399_init_reg_table,
345 .table_size = ARRAY_SIZE(rk3399_init_reg_table), 346 .table_size = ARRAY_SIZE(rk3399_init_reg_table),
347 .feature = VOP_FEATURE_OUTPUT_RGB10,
346 .intr = &rk3399_vop_intr, 348 .intr = &rk3399_vop_intr,
347 .ctrl = &rk3399_ctrl_data, 349 .ctrl = &rk3399_ctrl_data,
348 /* 350 /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 130d51c5ec6a..4b948fba9eec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,9 +41,9 @@
41#include <drm/ttm/ttm_module.h> 41#include <drm/ttm/ttm_module.h>
42#include "vmwgfx_fence.h" 42#include "vmwgfx_fence.h"
43 43
44#define VMWGFX_DRIVER_DATE "20170221" 44#define VMWGFX_DRIVER_DATE "20170607"
45#define VMWGFX_DRIVER_MAJOR 2 45#define VMWGFX_DRIVER_MAJOR 2
46#define VMWGFX_DRIVER_MINOR 12 46#define VMWGFX_DRIVER_MINOR 13
47#define VMWGFX_DRIVER_PATCHLEVEL 0 47#define VMWGFX_DRIVER_PATCHLEVEL 0
48#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 48#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
49#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 49#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806b06bf..a1c68e6a689e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
368 return fifo_state->static_buffer; 368 return fifo_state->static_buffer;
369 else { 369 else {
370 fifo_state->dynamic_buffer = vmalloc(bytes); 370 fifo_state->dynamic_buffer = vmalloc(bytes);
371 if (!fifo_state->dynamic_buffer)
372 goto out_err;
371 return fifo_state->dynamic_buffer; 373 return fifo_state->dynamic_buffer;
372 } 374 }
373 } 375 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef9f3a2a4030..1d2db5d912b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
274} 274}
275 275
276 276
277
278/**
279 * vmw_du_cursor_plane_update() - Update cursor image and location
280 *
281 * @plane: plane object to update
282 * @crtc: owning CRTC of @plane
283 * @fb: framebuffer to flip onto plane
284 * @crtc_x: x offset of plane on crtc
285 * @crtc_y: y offset of plane on crtc
286 * @crtc_w: width of plane rectangle on crtc
287 * @crtc_h: height of plane rectangle on crtc
288 * @src_x: Not used
289 * @src_y: Not used
290 * @src_w: Not used
291 * @src_h: Not used
292 *
293 *
294 * RETURNS:
295 * Zero on success, error code on failure
296 */
297int vmw_du_cursor_plane_update(struct drm_plane *plane,
298 struct drm_crtc *crtc,
299 struct drm_framebuffer *fb,
300 int crtc_x, int crtc_y,
301 unsigned int crtc_w,
302 unsigned int crtc_h,
303 uint32_t src_x, uint32_t src_y,
304 uint32_t src_w, uint32_t src_h)
305{
306 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
307 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
308 struct vmw_surface *surface = NULL;
309 struct vmw_dma_buffer *dmabuf = NULL;
310 s32 hotspot_x, hotspot_y;
311 int ret;
312
313 hotspot_x = du->hotspot_x + fb->hot_x;
314 hotspot_y = du->hotspot_y + fb->hot_y;
315
316 /* A lot of the code assumes this */
317 if (crtc_w != 64 || crtc_h != 64) {
318 ret = -EINVAL;
319 goto out;
320 }
321
322 if (vmw_framebuffer_to_vfb(fb)->dmabuf)
323 dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
324 else
325 surface = vmw_framebuffer_to_vfbs(fb)->surface;
326
327 if (surface && !surface->snooper.image) {
328 DRM_ERROR("surface not suitable for cursor\n");
329 ret = -EINVAL;
330 goto out;
331 }
332
333 /* setup new image */
334 ret = 0;
335 if (surface) {
336 /* vmw_user_surface_lookup takes one reference */
337 du->cursor_surface = surface;
338
339 du->cursor_age = du->cursor_surface->snooper.age;
340
341 ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
342 64, 64, hotspot_x, hotspot_y);
343 } else if (dmabuf) {
344 /* vmw_user_surface_lookup takes one reference */
345 du->cursor_dmabuf = dmabuf;
346
347 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
348 hotspot_x, hotspot_y);
349 } else {
350 vmw_cursor_update_position(dev_priv, false, 0, 0);
351 goto out;
352 }
353
354 if (!ret) {
355 du->cursor_x = crtc_x + du->set_gui_x;
356 du->cursor_y = crtc_y + du->set_gui_y;
357
358 vmw_cursor_update_position(dev_priv, true,
359 du->cursor_x + hotspot_x,
360 du->cursor_y + hotspot_y);
361 }
362
363out:
364 return ret;
365}
366
367
368int vmw_du_cursor_plane_disable(struct drm_plane *plane)
369{
370 if (plane->fb) {
371 drm_framebuffer_unreference(plane->fb);
372 plane->fb = NULL;
373 }
374
375 return -EINVAL;
376}
377
378
379void vmw_du_cursor_plane_destroy(struct drm_plane *plane) 277void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
380{ 278{
381 vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); 279 vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -473,18 +371,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
473 371
474 372
475void 373void
476vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
477 struct drm_plane_state *old_state)
478{
479 struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
480 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
481
482 drm_atomic_set_fb_for_plane(plane->state, NULL);
483 vmw_cursor_update_position(dev_priv, false, 0, 0);
484}
485
486
487void
488vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, 374vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
489 struct drm_plane_state *old_state) 375 struct drm_plane_state *old_state)
490{ 376{
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1498 */ 1384 */
1499 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && 1385 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
1500 dmabuf && only_2d && 1386 dmabuf && only_2d &&
1387 mode_cmd->width > 64 && /* Don't create a proxy for cursor */
1501 dev_priv->active_display_unit == vmw_du_screen_target) { 1388 dev_priv->active_display_unit == vmw_du_screen_target) {
1502 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, 1389 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
1503 dmabuf, &surface); 1390 dmabuf, &surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 13f2f1d2818a..5f8d678ae675 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
256 u16 *r, u16 *g, u16 *b, 256 u16 *r, u16 *g, u16 *b,
257 uint32_t size, 257 uint32_t size,
258 struct drm_modeset_acquire_ctx *ctx); 258 struct drm_modeset_acquire_ctx *ctx);
259int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
260 uint32_t handle, uint32_t width, uint32_t height,
261 int32_t hot_x, int32_t hot_y);
262int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
263int vmw_du_connector_set_property(struct drm_connector *connector, 259int vmw_du_connector_set_property(struct drm_connector *connector,
264 struct drm_property *property, 260 struct drm_property *property,
265 uint64_t val); 261 uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
339/* Universal Plane Helpers */ 335/* Universal Plane Helpers */
340void vmw_du_primary_plane_destroy(struct drm_plane *plane); 336void vmw_du_primary_plane_destroy(struct drm_plane *plane);
341void vmw_du_cursor_plane_destroy(struct drm_plane *plane); 337void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
342int vmw_du_cursor_plane_disable(struct drm_plane *plane);
343int vmw_du_cursor_plane_update(struct drm_plane *plane,
344 struct drm_crtc *crtc,
345 struct drm_framebuffer *fb,
346 int crtc_x, int crtc_y,
347 unsigned int crtc_w,
348 unsigned int crtc_h,
349 uint32_t src_x, uint32_t src_y,
350 uint32_t src_w, uint32_t src_h);
351 338
352/* Atomic Helpers */ 339/* Atomic Helpers */
353int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, 340int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
356 struct drm_plane_state *state); 343 struct drm_plane_state *state);
357void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, 344void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
358 struct drm_plane_state *old_state); 345 struct drm_plane_state *old_state);
359void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
360 struct drm_plane_state *old_state);
361int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, 346int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
362 struct drm_plane_state *new_state); 347 struct drm_plane_state *new_state);
363void vmw_du_plane_cleanup_fb(struct drm_plane *plane, 348void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index bad31bdf09b6..50be1f034f9e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -56,6 +56,8 @@ enum stdu_content_type {
56 * @right: Right side of bounding box. 56 * @right: Right side of bounding box.
57 * @top: Top side of bounding box. 57 * @top: Top side of bounding box.
58 * @bottom: Bottom side of bounding box. 58 * @bottom: Bottom side of bounding box.
59 * @fb_left: Left side of the framebuffer/content bounding box
60 * @fb_top: Top of the framebuffer/content bounding box
59 * @buf: DMA buffer when DMA-ing between buffer and screen targets. 61 * @buf: DMA buffer when DMA-ing between buffer and screen targets.
60 * @sid: Surface ID when copying between surface and screen targets. 62 * @sid: Surface ID when copying between surface and screen targets.
61 */ 63 */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
63 struct vmw_kms_dirty base; 65 struct vmw_kms_dirty base;
64 SVGA3dTransferType transfer; 66 SVGA3dTransferType transfer;
65 s32 left, right, top, bottom; 67 s32 left, right, top, bottom;
68 s32 fb_left, fb_top;
66 u32 pitch; 69 u32 pitch;
67 union { 70 union {
68 struct vmw_dma_buffer *buf; 71 struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
647 * 650 *
648 * @dirty: The closure structure. 651 * @dirty: The closure structure.
649 * 652 *
650 * This function calculates the bounding box for all the incoming clips 653 * This function calculates the bounding box for all the incoming clips.
651 */ 654 */
652static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) 655static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
653{ 656{
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
656 659
657 dirty->num_hits = 1; 660 dirty->num_hits = 1;
658 661
659 /* Calculate bounding box */ 662 /* Calculate destination bounding box */
660 ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1); 663 ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
661 ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1); 664 ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
662 ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2); 665 ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
663 ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2); 666 ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
667
668 /*
669 * Calculate content bounding box. We only need the top-left
670 * coordinate because width and height will be the same as the
671 * destination bounding box above
672 */
673 ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
674 ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y);
664} 675}
665 676
666 677
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
697 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ 708 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
698 src_pitch = stdu->display_srf->base_size.width * stdu->cpp; 709 src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
699 src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used); 710 src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
700 src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp; 711 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
701 712
702 dst_pitch = ddirty->pitch; 713 dst_pitch = ddirty->pitch;
703 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used); 714 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
704 dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp; 715 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
705 716
706 717
707 /* Figure out the real direction */ 718 /* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
760 } 771 }
761 772
762out_cleanup: 773out_cleanup:
763 ddirty->left = ddirty->top = S32_MAX; 774 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
764 ddirty->right = ddirty->bottom = S32_MIN; 775 ddirty->right = ddirty->bottom = S32_MIN;
765} 776}
766 777
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
812 SVGA3D_READ_HOST_VRAM; 823 SVGA3D_READ_HOST_VRAM;
813 ddirty.left = ddirty.top = S32_MAX; 824 ddirty.left = ddirty.top = S32_MAX;
814 ddirty.right = ddirty.bottom = S32_MIN; 825 ddirty.right = ddirty.bottom = S32_MIN;
826 ddirty.fb_left = ddirty.fb_top = S32_MAX;
815 ddirty.pitch = vfb->base.pitches[0]; 827 ddirty.pitch = vfb->base.pitches[0];
816 ddirty.buf = buf; 828 ddirty.buf = buf;
817 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; 829 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1355 DRM_ERROR("Failed to bind surface to STDU.\n"); 1367 DRM_ERROR("Failed to bind surface to STDU.\n");
1356 else 1368 else
1357 crtc->primary->fb = plane->state->fb; 1369 crtc->primary->fb = plane->state->fb;
1370
1371 ret = vmw_stdu_update_st(dev_priv, stdu);
1372
1373 if (ret)
1374 DRM_ERROR("Failed to update STDU.\n");
1358} 1375}
1359 1376
1360 1377
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7681341fe32b..6b70bd259953 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1274 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1274 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1275 int ret; 1275 int ret;
1276 uint32_t size; 1276 uint32_t size;
1277 uint32_t backup_handle; 1277 uint32_t backup_handle = 0;
1278 1278
1279 if (req->multisample_count != 0) 1279 if (req->multisample_count != 0)
1280 return -EINVAL; 1280 return -EINVAL;
1281 1281
1282 if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
1283 return -EINVAL;
1284
1282 if (unlikely(vmw_user_surface_size == 0)) 1285 if (unlikely(vmw_user_surface_size == 0))
1283 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 1286 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1284 128; 1287 128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1314 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1317 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
1315 &res->backup, 1318 &res->backup,
1316 &user_srf->backup_base); 1319 &user_srf->backup_base);
1317 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < 1320 if (ret == 0) {
1318 res->backup_size) { 1321 if (res->backup->base.num_pages * PAGE_SIZE <
1319 DRM_ERROR("Surface backup buffer is too small.\n"); 1322 res->backup_size) {
1320 vmw_dmabuf_unreference(&res->backup); 1323 DRM_ERROR("Surface backup buffer is too small.\n");
1321 ret = -EINVAL; 1324 vmw_dmabuf_unreference(&res->backup);
1322 goto out_unlock; 1325 ret = -EINVAL;
1326 goto out_unlock;
1327 } else {
1328 backup_handle = req->buffer_handle;
1329 }
1323 } 1330 }
1324 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) 1331 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
1325 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 1332 ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1491 dev_priv->stdu_max_height); 1498 dev_priv->stdu_max_height);
1492 1499
1493 if (size.width > max_width || size.height > max_height) { 1500 if (size.width > max_width || size.height > max_height) {
1494 DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u", 1501 DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
1495 size.width, size.height, 1502 size.width, size.height,
1496 max_width, max_height); 1503 max_width, max_height);
1497 return -EINVAL; 1504 return -EINVAL;
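
The vmwgfx_surface.c hunks above add two defensive steps to the define ioctl: the user-supplied mip_levels count is bounds-checked before it is used, and backup_handle starts initialized so it can never be copied back uninitialized. A minimal sketch of that kind of early validation; MY_MAX_MIP_LEVELS is a placeholder limit, not the driver's constant:

#include <linux/errno.h>
#include <linux/types.h>

#define MY_MAX_MIP_LEVELS	15		/* placeholder limit */

/* Reject out-of-range user-controlled fields before they are used to
 * size allocations or index arrays; fail early with -EINVAL. */
static int my_validate_surface_req(u32 mip_levels, u32 multisample_count)
{
	if (multisample_count != 0)
		return -EINVAL;
	if (mip_levels > MY_MAX_MIP_LEVELS)
		return -EINVAL;
	return 0;
}
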
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 16d556816b5f..2fb5f432a54c 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
725 spin_lock_irqsave(&ipu->lock, flags); 725 spin_lock_irqsave(&ipu->lock, flags);
726 726
727 val = ipu_cm_read(ipu, IPU_CONF); 727 val = ipu_cm_read(ipu, IPU_CONF);
728 if (vdi) { 728 if (vdi)
729 val |= IPU_CONF_IC_INPUT; 729 val |= IPU_CONF_IC_INPUT;
730 } else { 730 else
731 val &= ~IPU_CONF_IC_INPUT; 731 val &= ~IPU_CONF_IC_INPUT;
732 if (csi_id == 1) 732
733 val |= IPU_CONF_CSI_SEL; 733 if (csi_id == 1)
734 else 734 val |= IPU_CONF_CSI_SEL;
735 val &= ~IPU_CONF_CSI_SEL; 735 else
736 } 736 val &= ~IPU_CONF_CSI_SEL;
737
737 ipu_cm_write(ipu, val, IPU_CONF); 738 ipu_cm_write(ipu, val, IPU_CONF);
738 739
739 spin_unlock_irqrestore(&ipu->lock, flags); 740 spin_unlock_irqrestore(&ipu->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c55563379e2e..c35f74c83065 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
131 if (pre->in_use) 131 if (pre->in_use)
132 return -EBUSY; 132 return -EBUSY;
133 133
134 clk_prepare_enable(pre->clk_axi);
135
136 /* first get the engine out of reset and remove clock gating */ 134 /* first get the engine out of reset and remove clock gating */
137 writel(0, pre->regs + IPU_PRE_CTRL); 135 writel(0, pre->regs + IPU_PRE_CTRL);
138 136
@@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre)
149 147
150void ipu_pre_put(struct ipu_pre *pre) 148void ipu_pre_put(struct ipu_pre *pre)
151{ 149{
152 u32 val; 150 writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
153
154 val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
155 writel(val, pre->regs + IPU_PRE_CTRL);
156
157 clk_disable_unprepare(pre->clk_axi);
158 151
159 pre->in_use = false; 152 pre->in_use = false;
160} 153}
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
249 if (!pre->buffer_virt) 242 if (!pre->buffer_virt)
250 return -ENOMEM; 243 return -ENOMEM;
251 244
245 clk_prepare_enable(pre->clk_axi);
246
252 pre->dev = dev; 247 pre->dev = dev;
253 platform_set_drvdata(pdev, pre); 248 platform_set_drvdata(pdev, pre);
254 mutex_lock(&ipu_pre_list_mutex); 249 mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
268 available_pres--; 263 available_pres--;
269 mutex_unlock(&ipu_pre_list_mutex); 264 mutex_unlock(&ipu_pre_list_mutex);
270 265
266 clk_disable_unprepare(pre->clk_axi);
267
271 if (pre->buffer_virt) 268 if (pre->buffer_virt)
272 gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt, 269 gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
273 IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4); 270 IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
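
Moving clk_prepare_enable() of the AXI clock from ipu_pre_get()/ipu_pre_put() to probe()/remove() keeps the enable count balanced over the device's whole lifetime. A sketch of the same pairing, assuming a clock named "axi" obtained with devm_clk_get(); my_priv and the function names are illustrative:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct my_priv {
	struct clk *clk_axi;
};

static int my_probe_clocks(struct platform_device *pdev, struct my_priv *priv)
{
	priv->clk_axi = devm_clk_get(&pdev->dev, "axi");
	if (IS_ERR(priv->clk_axi))
		return PTR_ERR(priv->clk_axi);

	/* Enabled once here, disabled exactly once in remove(). */
	return clk_prepare_enable(priv->clk_axi);
}

static void my_remove_clocks(struct my_priv *priv)
{
	clk_disable_unprepare(priv->clk_axi);
}
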
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e73d968023f7..f1fa1f172107 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1119 * Avatar AVIU-145A2 0x361f00 ? clickpad 1119 * Avatar AVIU-145A2 0x361f00 ? clickpad
1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons 1122 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
1122 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1123 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1124 * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
1123 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons 1125 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1124 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1126 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
1125 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons 1127 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
@@ -1525,6 +1527,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1525 }, 1527 },
1526 }, 1528 },
1527 { 1529 {
1530 /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
1531 .matches = {
1532 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1533 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
1534 },
1535 },
1536 {
1528 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ 1537 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
1529 .matches = { 1538 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1539 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1546,6 +1555,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1546 }, 1555 },
1547 }, 1556 },
1548 { 1557 {
1558 /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
1559 .matches = {
1560 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1561 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
1562 },
1563 },
1564 {
1549 /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ 1565 /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
1550 .matches = { 1566 .matches = {
1551 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1567 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
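
The Fujitsu E546/E557 entries above extend a DMI quirk table. For reference, a sketch of the same shape, reusing one of the product strings from this hunk and checked with dmi_check_system(); the table and helper names are placeholders:

#include <linux/dmi.h>

/* The array must end with an empty terminator entry. */
static const struct dmi_system_id my_crc_quirks[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
		},
	},
	{ }
};

static bool my_needs_crc_enabled(void)
{
	return dmi_check_system(my_crc_quirks) > 0;
}
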
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 77dad045a468..ad71a5e768dc 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -146,7 +146,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
146 if (!serio) 146 if (!serio)
147 return -ENOMEM; 147 return -ENOMEM;
148 148
149 serio->id.type = SERIO_8042; 149 serio->id.type = SERIO_PS_PSTHRU;
150 serio->write = rmi_f03_pt_write; 150 serio->write = rmi_f03_pt_write;
151 serio->port_data = f03; 151 serio->port_data = f03;
152 152
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 9f44ee8ea1bc..19779b88a479 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -118,6 +118,7 @@ static const struct iommu_ops
118 118
119 ops = iommu_ops_from_fwnode(fwnode); 119 ops = iommu_ops_from_fwnode(fwnode);
120 if ((ops && !ops->of_xlate) || 120 if ((ops && !ops->of_xlate) ||
121 !of_device_is_available(iommu_spec->np) ||
121 (!ops && !of_iommu_driver_present(iommu_spec->np))) 122 (!ops && !of_iommu_driver_present(iommu_spec->np)))
122 return NULL; 123 return NULL;
123 124
@@ -236,6 +237,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
236 ops = ERR_PTR(err); 237 ops = ERR_PTR(err);
237 } 238 }
238 239
240 /* Ignore all other errors apart from EPROBE_DEFER */
241 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
242 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
243 ops = NULL;
244 }
245
239 return ops; 246 return ops;
240} 247}
241 248
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index d07dd5196ffc..8aa158a09180 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
2364 id); 2364 id);
2365 return NULL; 2365 return NULL;
2366 } else { 2366 } else {
2367 rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); 2367 rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
2368 if (!rs) 2368 if (!rs)
2369 return NULL; 2369 return NULL;
2370 rs->state = CCPResetIdle; 2370 rs->state = CCPResetIdle;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 8b7faea2ddf8..422dced7c90a 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
75 if (sk->sk_state != MISDN_BOUND) 75 if (sk->sk_state != MISDN_BOUND)
76 continue; 76 continue;
77 if (!cskb) 77 if (!cskb)
78 cskb = skb_copy(skb, GFP_KERNEL); 78 cskb = skb_copy(skb, GFP_ATOMIC);
79 if (!cskb) { 79 if (!cskb) {
80 printk(KERN_WARNING "%s no skb\n", __func__); 80 printk(KERN_WARNING "%s no skb\n", __func__);
81 break; 81 break;
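
Both ISDN changes swap GFP_KERNEL for GFP_ATOMIC because these paths can run in atomic (softirq or lock-held) context, where a sleeping allocation is not allowed. A minimal sketch of the rule, with my_state and my_alloc_locked as illustrative names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_state {
	int id;
};

static struct my_state *my_alloc_locked(spinlock_t *lock)
{
	struct my_state *st;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* GFP_KERNEL could sleep here and trigger "scheduling while atomic". */
	st = kzalloc(sizeof(*st), GFP_ATOMIC);
	spin_unlock_irqrestore(lock, flags);

	return st;
}
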
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 212a6777ff31..87edc342ccb3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws)
5174 5174
5175static void no_op(struct percpu_ref *r) {} 5175static void no_op(struct percpu_ref *r) {}
5176 5176
5177int mddev_init_writes_pending(struct mddev *mddev)
5178{
5179 if (mddev->writes_pending.percpu_count_ptr)
5180 return 0;
5181 if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
5182 return -ENOMEM;
5183 /* We want to start with the refcount at zero */
5184 percpu_ref_put(&mddev->writes_pending);
5185 return 0;
5186}
5187EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5188
5177static int md_alloc(dev_t dev, char *name) 5189static int md_alloc(dev_t dev, char *name)
5178{ 5190{
5179 /* 5191 /*
@@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name)
5239 blk_queue_make_request(mddev->queue, md_make_request); 5251 blk_queue_make_request(mddev->queue, md_make_request);
5240 blk_set_stacking_limits(&mddev->queue->limits); 5252 blk_set_stacking_limits(&mddev->queue->limits);
5241 5253
5242 if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
5243 goto abort;
5244 /* We want to start with the refcount at zero */
5245 percpu_ref_put(&mddev->writes_pending);
5246 disk = alloc_disk(1 << shift); 5254 disk = alloc_disk(1 << shift);
5247 if (!disk) { 5255 if (!disk) {
5248 blk_cleanup_queue(mddev->queue); 5256 blk_cleanup_queue(mddev->queue);
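
mddev_init_writes_pending() moves the writes_pending setup out of md_alloc() so the RAID personalities can call it from their run() methods, and it keeps the "start at zero" idiom: percpu_ref_init() leaves the count at one, so an immediate put establishes the zero baseline. A minimal sketch of that idiom with placeholder names:

#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

static void my_noop_release(struct percpu_ref *ref) {}

static int my_init_pending(struct percpu_ref *ref)
{
	int ret;

	/* Callers that may run more than once need their own double-init
	 * guard, as md does by checking percpu_count_ptr above. */
	ret = percpu_ref_init(ref, my_noop_release, 0, GFP_KERNEL);
	if (ret < 0)
		return ret;

	percpu_ref_put(ref);	/* drop the initial reference: baseline is 0 */
	return 0;
}
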
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 11f15146ce51..0fa1de42c42b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp);
648extern void md_wakeup_thread(struct md_thread *thread); 648extern void md_wakeup_thread(struct md_thread *thread);
649extern void md_check_recovery(struct mddev *mddev); 649extern void md_check_recovery(struct mddev *mddev);
650extern void md_reap_sync_thread(struct mddev *mddev); 650extern void md_reap_sync_thread(struct mddev *mddev);
651extern int mddev_init_writes_pending(struct mddev *mddev);
651extern void md_write_start(struct mddev *mddev, struct bio *bi); 652extern void md_write_start(struct mddev *mddev, struct bio *bi);
652extern void md_write_inc(struct mddev *mddev, struct bio *bi); 653extern void md_write_inc(struct mddev *mddev, struct bio *bi);
653extern void md_write_end(struct mddev *mddev); 654extern void md_write_end(struct mddev *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index af5056d56878..e1a7e3d4c5e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3063,6 +3063,8 @@ static int raid1_run(struct mddev *mddev)
3063 mdname(mddev)); 3063 mdname(mddev));
3064 return -EIO; 3064 return -EIO;
3065 } 3065 }
3066 if (mddev_init_writes_pending(mddev) < 0)
3067 return -ENOMEM;
3066 /* 3068 /*
3067 * copy the already verified devices into our private RAID1 3069 * copy the already verified devices into our private RAID1
3068 * bookkeeping area. [whatever we allocate in run(), 3070 * bookkeeping area. [whatever we allocate in run(),
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4343d7ff9916..797ed60abd5e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3611,6 +3611,9 @@ static int raid10_run(struct mddev *mddev)
3611 int first = 1; 3611 int first = 1;
3612 bool discard_supported = false; 3612 bool discard_supported = false;
3613 3613
3614 if (mddev_init_writes_pending(mddev) < 0)
3615 return -ENOMEM;
3616
3614 if (mddev->private == NULL) { 3617 if (mddev->private == NULL) {
3615 conf = setup_conf(mddev); 3618 conf = setup_conf(mddev);
3616 if (IS_ERR(conf)) 3619 if (IS_ERR(conf))
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 722064689e82..ec0f951ae19f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7118,6 +7118,9 @@ static int raid5_run(struct mddev *mddev)
7118 long long min_offset_diff = 0; 7118 long long min_offset_diff = 0;
7119 int first = 1; 7119 int first = 1;
7120 7120
7121 if (mddev_init_writes_pending(mddev) < 0)
7122 return -ENOMEM;
7123
7121 if (mddev->recovery_cp != MaxSector) 7124 if (mddev->recovery_cp != MaxSector)
7122 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", 7125 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
7123 mdname(mddev)); 7126 mdname(mddev));
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b72edd27f880..55d9c2b82b7e 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -2,6 +2,12 @@
2# Multimedia device configuration 2# Multimedia device configuration
3# 3#
4 4
5config CEC_CORE
6 tristate
7
8config CEC_NOTIFIER
9 bool
10
5menuconfig MEDIA_SUPPORT 11menuconfig MEDIA_SUPPORT
6 tristate "Multimedia support" 12 tristate "Multimedia support"
7 depends on HAS_IOMEM 13 depends on HAS_IOMEM
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 523fea3648ad..044503aa8801 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -4,8 +4,6 @@
4 4
5media-objs := media-device.o media-devnode.o media-entity.o 5media-objs := media-device.o media-devnode.o media-entity.o
6 6
7obj-$(CONFIG_CEC_CORE) += cec/
8
9# 7#
10# I2C drivers should come before other drivers, otherwise they'll fail 8# I2C drivers should come before other drivers, otherwise they'll fail
11# when compiled as builtin drivers 9# when compiled as builtin drivers
@@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE) += dvb-core/
26# There are both core and drivers at RC subtree - merge before drivers 24# There are both core and drivers at RC subtree - merge before drivers
27obj-y += rc/ 25obj-y += rc/
28 26
27obj-$(CONFIG_CEC_CORE) += cec/
28
29# 29#
30# Finally, merge the drivers that require the core 30# Finally, merge the drivers that require the core
31# 31#
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index f944d93e3167..4e25a950ae6f 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -1,19 +1,5 @@
1config CEC_CORE
2 tristate
3 depends on MEDIA_CEC_SUPPORT
4 default y
5
6config MEDIA_CEC_NOTIFIER
7 bool
8
9config MEDIA_CEC_RC 1config MEDIA_CEC_RC
10 bool "HDMI CEC RC integration" 2 bool "HDMI CEC RC integration"
11 depends on CEC_CORE && RC_CORE 3 depends on CEC_CORE && RC_CORE
12 ---help--- 4 ---help---
13 Pass on CEC remote control messages to the RC framework. 5 Pass on CEC remote control messages to the RC framework.
14
15config MEDIA_CEC_DEBUG
16 bool "HDMI CEC debugfs interface"
17 depends on CEC_CORE && DEBUG_FS
18 ---help---
19 Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index 402a6c62a3e8..eaf408e64669 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,6 +1,6 @@
1cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o 1cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
2 2
3ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y) 3ifeq ($(CONFIG_CEC_NOTIFIER),y)
4 cec-objs += cec-notifier.o 4 cec-objs += cec-notifier.o
5endif 5endif
6 6
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f5fe01c9da8a..9dfc79800c71 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
1864 WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); 1864 WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
1865} 1865}
1866 1866
1867#ifdef CONFIG_MEDIA_CEC_DEBUG 1867#ifdef CONFIG_DEBUG_FS
1868/* 1868/*
1869 * Log the current state of the CEC adapter. 1869 * Log the current state of the CEC adapter.
1870 * Very useful for debugging. 1870 * Very useful for debugging.
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index f9ebff90f8eb..2f87748ba4fc 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
187 put_device(&devnode->dev); 187 put_device(&devnode->dev);
188} 188}
189 189
190#ifdef CONFIG_MEDIA_CEC_NOTIFIER 190#ifdef CONFIG_CEC_NOTIFIER
191static void cec_cec_notify(struct cec_adapter *adap, u16 pa) 191static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
192{ 192{
193 cec_s_phys_addr(adap, pa, false); 193 cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap,
323 } 323 }
324 324
325 dev_set_drvdata(&adap->devnode.dev, adap); 325 dev_set_drvdata(&adap->devnode.dev, adap);
326#ifdef CONFIG_MEDIA_CEC_DEBUG 326#ifdef CONFIG_DEBUG_FS
327 if (!top_cec_dir) 327 if (!top_cec_dir)
328 return 0; 328 return 0;
329 329
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
355 adap->rc = NULL; 355 adap->rc = NULL;
356#endif 356#endif
357 debugfs_remove_recursive(adap->cec_dir); 357 debugfs_remove_recursive(adap->cec_dir);
358#ifdef CONFIG_MEDIA_CEC_NOTIFIER 358#ifdef CONFIG_CEC_NOTIFIER
359 if (adap->notifier) 359 if (adap->notifier)
360 cec_notifier_unregister(adap->notifier); 360 cec_notifier_unregister(adap->notifier);
361#endif 361#endif
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void)
395 return ret; 395 return ret;
396 } 396 }
397 397
398#ifdef CONFIG_MEDIA_CEC_DEBUG 398#ifdef CONFIG_DEBUG_FS
399 top_cec_dir = debugfs_create_dir("cec", NULL); 399 top_cec_dir = debugfs_create_dir("cec", NULL);
400 if (IS_ERR_OR_NULL(top_cec_dir)) { 400 if (IS_ERR_OR_NULL(top_cec_dir)) {
401 pr_warn("cec: Failed to create debugfs cec dir\n"); 401 pr_warn("cec: Failed to create debugfs cec dir\n");
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index fd181c99ce11..aaa9471c7d11 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -220,7 +220,8 @@ config VIDEO_ADV7604
220 220
221config VIDEO_ADV7604_CEC 221config VIDEO_ADV7604_CEC
222 bool "Enable Analog Devices ADV7604 CEC support" 222 bool "Enable Analog Devices ADV7604 CEC support"
223 depends on VIDEO_ADV7604 && CEC_CORE 223 depends on VIDEO_ADV7604
224 select CEC_CORE
224 ---help--- 225 ---help---
225 When selected the adv7604 will support the optional 226 When selected the adv7604 will support the optional
226 HDMI CEC feature. 227 HDMI CEC feature.
@@ -240,7 +241,8 @@ config VIDEO_ADV7842
240 241
241config VIDEO_ADV7842_CEC 242config VIDEO_ADV7842_CEC
242 bool "Enable Analog Devices ADV7842 CEC support" 243 bool "Enable Analog Devices ADV7842 CEC support"
243 depends on VIDEO_ADV7842 && CEC_CORE 244 depends on VIDEO_ADV7842
245 select CEC_CORE
244 ---help--- 246 ---help---
245 When selected the adv7842 will support the optional 247 When selected the adv7842 will support the optional
246 HDMI CEC feature. 248 HDMI CEC feature.
@@ -478,7 +480,8 @@ config VIDEO_ADV7511
478 480
479config VIDEO_ADV7511_CEC 481config VIDEO_ADV7511_CEC
480 bool "Enable Analog Devices ADV7511 CEC support" 482 bool "Enable Analog Devices ADV7511 CEC support"
481 depends on VIDEO_ADV7511 && CEC_CORE 483 depends on VIDEO_ADV7511
484 select CEC_CORE
482 ---help--- 485 ---help---
483 When selected the adv7511 will support the optional 486 When selected the adv7511 will support the optional
484 HDMI CEC feature. 487 HDMI CEC feature.
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ac026ee1ca07..041cb80a26b1 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS
501 501
502config VIDEO_SAMSUNG_S5P_CEC 502config VIDEO_SAMSUNG_S5P_CEC
503 tristate "Samsung S5P CEC driver" 503 tristate "Samsung S5P CEC driver"
504 depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) 504 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
505 select MEDIA_CEC_NOTIFIER 505 select CEC_CORE
506 select CEC_NOTIFIER
506 ---help--- 507 ---help---
507 This is a driver for Samsung S5P HDMI CEC interface. It uses the 508 This is a driver for Samsung S5P HDMI CEC interface. It uses the
508 generic CEC framework interface. 509 generic CEC framework interface.
@@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC
511 512
512config VIDEO_STI_HDMI_CEC 513config VIDEO_STI_HDMI_CEC
513 tristate "STMicroelectronics STiH4xx HDMI CEC driver" 514 tristate "STMicroelectronics STiH4xx HDMI CEC driver"
514 depends on CEC_CORE && (ARCH_STI || COMPILE_TEST) 515 depends on ARCH_STI || COMPILE_TEST
515 select MEDIA_CEC_NOTIFIER 516 select CEC_CORE
517 select CEC_NOTIFIER
516 ---help--- 518 ---help---
517 This is a driver for STIH4xx HDMI CEC interface. It uses the 519 This is a driver for STIH4xx HDMI CEC interface. It uses the
518 generic CEC framework interface. 520 generic CEC framework interface.
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index b36ac19dc6e4..154de92dd809 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -26,7 +26,8 @@ config VIDEO_VIVID
26 26
27config VIDEO_VIVID_CEC 27config VIDEO_VIVID_CEC
28 bool "Enable CEC emulation support" 28 bool "Enable CEC emulation support"
29 depends on VIDEO_VIVID && CEC_CORE 29 depends on VIDEO_VIVID
30 select CEC_CORE
30 ---help--- 31 ---help---
31 When selected the vivid module will emulate the optional 32 When selected the vivid module will emulate the optional
32 HDMI CEC feature. 33 HDMI CEC feature.
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 90f66dc7c0d7..a2fc1a1d58b0 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
211 */ 211 */
212void ir_raw_event_handle(struct rc_dev *dev) 212void ir_raw_event_handle(struct rc_dev *dev)
213{ 213{
214 if (!dev->raw) 214 if (!dev->raw || !dev->raw->thread)
215 return; 215 return;
216 216
217 wake_up_process(dev->raw->thread); 217 wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
490{ 490{
491 int rc; 491 int rc;
492 struct ir_raw_handler *handler; 492 struct ir_raw_handler *handler;
493 struct task_struct *thread;
493 494
494 if (!dev) 495 if (!dev)
495 return -EINVAL; 496 return -EINVAL;
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
507 * because the event is coming from userspace 508 * because the event is coming from userspace
508 */ 509 */
509 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { 510 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
510 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, 511 thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
511 "rc%u", dev->minor); 512 dev->minor);
512 513
513 if (IS_ERR(dev->raw->thread)) { 514 if (IS_ERR(thread)) {
514 rc = PTR_ERR(dev->raw->thread); 515 rc = PTR_ERR(thread);
515 goto out; 516 goto out;
516 } 517 }
518
519 dev->raw->thread = thread;
517 } 520 }
518 521
519 mutex_lock(&ir_raw_handler_lock); 522 mutex_lock(&ir_raw_handler_lock);
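
Editor's note: the rc-ir-raw.c hunk stores the kthread_run() result in a local first and assigns dev->raw->thread only on success, so the new !dev->raw->thread test in ir_raw_event_handle() can never see an ERR_PTR value. A sketch of that publish-after-validation pattern, using hypothetical foo_* names and a trimmed-down device struct:

#include <linux/kthread.h>
#include <linux/err.h>

struct foo_dev {
        struct task_struct *thread;     /* NULL until the thread is running */
        unsigned int minor;
};

static int foo_thread_fn(void *data)
{
        /* ... process queued events until kthread_should_stop() ... */
        return 0;
}

static int foo_start_thread(struct foo_dev *dev)
{
        struct task_struct *thread;

        thread = kthread_run(foo_thread_fn, dev, "foo%u", dev->minor);
        if (IS_ERR(thread))
                return PTR_ERR(thread); /* dev->thread stays NULL */

        /* Publish only a valid pointer; readers test for NULL, not ERR_PTR. */
        dev->thread = thread;
        return 0;
}
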
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index 8937f3986a01..18ead44824ba 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,7 @@
1config USB_PULSE8_CEC 1config USB_PULSE8_CEC
2 tristate "Pulse Eight HDMI CEC" 2 tristate "Pulse Eight HDMI CEC"
3 depends on USB_ACM && CEC_CORE 3 depends on USB_ACM
4 select CEC_CORE
4 select SERIO 5 select SERIO
5 select SERIO_SERPORT 6 select SERIO_SERPORT
6 ---help--- 7 ---help---
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig
index 3eb86607efb8..030ef01b1ff0 100644
--- a/drivers/media/usb/rainshadow-cec/Kconfig
+++ b/drivers/media/usb/rainshadow-cec/Kconfig
@@ -1,6 +1,7 @@
1config USB_RAINSHADOW_CEC 1config USB_RAINSHADOW_CEC
2 tristate "RainShadow Tech HDMI CEC" 2 tristate "RainShadow Tech HDMI CEC"
3 depends on USB_ACM && CEC_CORE 3 depends on USB_ACM
4 select CEC_CORE
4 select SERIO 5 select SERIO
5 select SERIO_SERPORT 6 select SERIO_SERPORT
6 ---help--- 7 ---help---
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index 541ca543f71f..71bd68548c9c 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work)
119 119
120 while (true) { 120 while (true) {
121 unsigned long flags; 121 unsigned long flags;
122 bool exit_loop; 122 bool exit_loop = false;
123 char data; 123 char data;
124 124
125 spin_lock_irqsave(&rain->buf_lock, flags); 125 spin_lock_irqsave(&rain->buf_lock, flags);
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 35910f945bfa..99e644cda4d1 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
581 return of_platform_populate(np, NULL, NULL, dev); 581 return of_platform_populate(np, NULL, NULL, dev);
582} 582}
583 583
584static int atmel_ebi_resume(struct device *dev) 584static __maybe_unused int atmel_ebi_resume(struct device *dev)
585{ 585{
586 struct atmel_ebi *ebi = dev_get_drvdata(dev); 586 struct atmel_ebi *ebi = dev_get_drvdata(dev);
587 struct atmel_ebi_dev *ebid; 587 struct atmel_ebi_dev *ebid;
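
Editor's note: the atmel-ebi.c hunk marks the resume callback __maybe_unused so it does not trigger a "defined but not used" warning on configurations where the PM ops table that references it compiles away. A sketch of the idiom with placeholder foo_* names (the dev_pm_ops wiring is an assumption, not taken from atmel-ebi.c):

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_resume(struct device *dev)
{
        /* Restore controller state here. */
        return 0;
}

/* SET_SYSTEM_SLEEP_PM_OPS() expands to nothing without CONFIG_PM_SLEEP,
 * leaving foo_resume() unreferenced; __maybe_unused keeps the warning
 * quiet without wrapping the whole function in an #ifdef. */
static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(NULL, foo_resume)
};
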
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 17b433f1ce23..0761271d68c5 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
159 159
160 /* Do this outside the status_mutex to avoid a circular dependency with 160 /* Do this outside the status_mutex to avoid a circular dependency with
161 * the locking in cxl_mmap_fault() */ 161 * the locking in cxl_mmap_fault() */
162 if (copy_from_user(&work, uwork, 162 if (copy_from_user(&work, uwork, sizeof(work)))
163 sizeof(struct cxl_ioctl_start_work))) { 163 return -EFAULT;
164 rc = -EFAULT;
165 goto out;
166 }
167 164
168 mutex_lock(&ctx->status_mutex); 165 mutex_lock(&ctx->status_mutex);
169 if (ctx->status != OPENED) { 166 if (ctx->status != OPENED) {
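
Editor's note: the cxl/file.c hunk copies the user buffer before the mutex is taken, and with sizeof(work) rather than a repeated struct name, so the -EFAULT path needs no unlock/goto. A sketch of the simplified shape, with hypothetical foo_* types:

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_work { u64 flags; };

struct foo_ctx {
        struct mutex lock;
        int status;
};

static long foo_ioctl_start(struct foo_ctx *ctx, void __user *uarg)
{
        struct foo_work work;

        /* Copy before taking ctx->lock: a fault here can simply return,
         * and sizeof(work) stays in sync with the local variable. */
        if (copy_from_user(&work, uarg, sizeof(work)))
                return -EFAULT;

        mutex_lock(&ctx->lock);
        /* ... validate ctx->status and consume "work" ... */
        mutex_unlock(&ctx->lock);
        return 0;
}
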
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 871a2f09c718..8d6ea9712dbd 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -1302,13 +1302,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
1302 1302
1303void cxl_native_release_psl_err_irq(struct cxl *adapter) 1303void cxl_native_release_psl_err_irq(struct cxl *adapter)
1304{ 1304{
1305 if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) 1305 if (adapter->native->err_virq == 0 ||
1306 adapter->native->err_virq !=
1307 irq_find_mapping(NULL, adapter->native->err_hwirq))
1306 return; 1308 return;
1307 1309
1308 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); 1310 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
1309 cxl_unmap_irq(adapter->native->err_virq, adapter); 1311 cxl_unmap_irq(adapter->native->err_virq, adapter);
1310 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); 1312 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
1311 kfree(adapter->irq_name); 1313 kfree(adapter->irq_name);
1314 adapter->native->err_virq = 0;
1312} 1315}
1313 1316
1314int cxl_native_register_serr_irq(struct cxl_afu *afu) 1317int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
1346 1349
1347void cxl_native_release_serr_irq(struct cxl_afu *afu) 1350void cxl_native_release_serr_irq(struct cxl_afu *afu)
1348{ 1351{
1349 if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) 1352 if (afu->serr_virq == 0 ||
1353 afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
1350 return; 1354 return;
1351 1355
1352 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); 1356 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
1353 cxl_unmap_irq(afu->serr_virq, afu); 1357 cxl_unmap_irq(afu->serr_virq, afu);
1354 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 1358 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
1355 kfree(afu->err_irq_name); 1359 kfree(afu->err_irq_name);
1360 afu->serr_virq = 0;
1356} 1361}
1357 1362
1358int cxl_native_register_psl_irq(struct cxl_afu *afu) 1363int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
1375 1380
1376void cxl_native_release_psl_irq(struct cxl_afu *afu) 1381void cxl_native_release_psl_irq(struct cxl_afu *afu)
1377{ 1382{
1378 if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) 1383 if (afu->native->psl_virq == 0 ||
1384 afu->native->psl_virq !=
1385 irq_find_mapping(NULL, afu->native->psl_hwirq))
1379 return; 1386 return;
1380 1387
1381 cxl_unmap_irq(afu->native->psl_virq, afu); 1388 cxl_unmap_irq(afu->native->psl_virq, afu);
1382 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); 1389 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
1383 kfree(afu->psl_irq_name); 1390 kfree(afu->psl_irq_name);
1391 afu->native->psl_virq = 0;
1384} 1392}
1385 1393
1386static void recover_psl_err(struct cxl_afu *afu, u64 errstat) 1394static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
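
Editor's note: all three cxl release helpers above now bail out when the stored virq is 0 and clear it again after the teardown, which makes a second release call a harmless no-op. A sketch of that idempotent-release pattern with an invented foo_adapter layout (the real code uses cxl_unmap_irq()/release_one_irq(), not the generic calls shown here):

#include <linux/interrupt.h>
#include <linux/irqdomain.h>

struct foo_adapter {
        unsigned int virq;              /* 0 when no IRQ is mapped */
        irq_hw_number_t hwirq;
};

static void foo_release_irq(struct foo_adapter *adapter)
{
        /* Nothing mapped, or the mapping already belongs to someone else:
         * returning early makes repeated or unbalanced releases safe. */
        if (adapter->virq == 0 ||
            adapter->virq != irq_find_mapping(NULL, adapter->hwirq))
                return;

        free_irq(adapter->virq, adapter);
        irq_dispose_mapping(adapter->virq);
        adapter->virq = 0;      /* the next call becomes a no-op */
}
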
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 96046bb12ca1..14c0be98e0a4 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
114 return -EOPNOTSUPP; 114 return -EOPNOTSUPP;
115} 115}
116 116
117int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, 117static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
118 int src_port, u16 data) 118 int src_dev, int src_port, u16 data)
119{ 119{
120 return -EOPNOTSUPP; 120 return -EOPNOTSUPP;
121} 121}
122 122
123int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) 123static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
124{ 124{
125 return -EOPNOTSUPP; 125 return -EOPNOTSUPP;
126} 126}
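
Editor's note: the global2.h hunk adds the missing "static inline" to the compiled-out stubs; without it every translation unit that includes the header emits its own external definition and the link fails with multiple-definition errors. The stub idiom, sketched with hypothetical MYDRV names:

#include <linux/errno.h>
#include <linux/types.h>

struct mydrv_chip;

#ifdef CONFIG_MYDRV_FEATURE
int mydrv_feature_write(struct mydrv_chip *chip, u16 data);
#else
/* Header stubs must be "static inline" so each includer gets a private,
 * usually optimised-away copy instead of a clashing global symbol. */
static inline int mydrv_feature_write(struct mydrv_chip *chip, u16 data)
{
        return -EOPNOTSUPP;
}
#endif
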
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index b3bc87fe3764..0a98c369df20 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
324 struct xgbe_ring *ring, 324 struct xgbe_ring *ring,
325 struct xgbe_ring_data *rdata) 325 struct xgbe_ring_data *rdata)
326{ 326{
327 int order, ret; 327 int ret;
328 328
329 if (!ring->rx_hdr_pa.pages) { 329 if (!ring->rx_hdr_pa.pages) {
330 ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); 330 ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
333 } 333 }
334 334
335 if (!ring->rx_buf_pa.pages) { 335 if (!ring->rx_buf_pa.pages) {
336 order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
337 ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, 336 ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
338 order); 337 PAGE_ALLOC_COSTLY_ORDER);
339 if (ret) 338 if (ret)
340 return ret; 339 return ret;
341 } 340 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 099b374c1b17..5274501428e4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
2026 priv->num_rx_desc_words = params->num_rx_desc_words; 2026 priv->num_rx_desc_words = params->num_rx_desc_words;
2027 2027
2028 priv->irq0 = platform_get_irq(pdev, 0); 2028 priv->irq0 = platform_get_irq(pdev, 0);
2029 if (!priv->is_lite) 2029 if (!priv->is_lite) {
2030 priv->irq1 = platform_get_irq(pdev, 1); 2030 priv->irq1 = platform_get_irq(pdev, 1);
2031 priv->wol_irq = platform_get_irq(pdev, 2); 2031 priv->wol_irq = platform_get_irq(pdev, 2);
2032 } else {
2033 priv->wol_irq = platform_get_irq(pdev, 1);
2034 }
2032 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { 2035 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2033 dev_err(&pdev->dev, "invalid interrupts\n"); 2036 dev_err(&pdev->dev, "invalid interrupts\n");
2034 ret = -EINVAL; 2037 ret = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index eccb3d1b6abb..5f49334dcad5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1926 } 1926 }
1927 1927
1928 /* select a non-FCoE queue */ 1928 /* select a non-FCoE queue */
1929 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1929 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1930} 1930}
1931 1931
1932void bnx2x_set_num_queues(struct bnx2x *bp) 1932void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38a5c6764bb5..77ed2f628f9c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2196,10 +2196,14 @@ static int cxgb_up(struct adapter *adap)
2196 if (err) 2196 if (err)
2197 goto irq_err; 2197 goto irq_err;
2198 } 2198 }
2199
2200 mutex_lock(&uld_mutex);
2199 enable_rx(adap); 2201 enable_rx(adap);
2200 t4_sge_start(adap); 2202 t4_sge_start(adap);
2201 t4_intr_enable(adap); 2203 t4_intr_enable(adap);
2202 adap->flags |= FULL_INIT_DONE; 2204 adap->flags |= FULL_INIT_DONE;
2205 mutex_unlock(&uld_mutex);
2206
2203 notify_ulds(adap, CXGB4_STATE_UP); 2207 notify_ulds(adap, CXGB4_STATE_UP);
2204#if IS_ENABLED(CONFIG_IPV6) 2208#if IS_ENABLED(CONFIG_IPV6)
2205 update_clip(adap); 2209 update_clip(adap);
@@ -2771,6 +2775,9 @@ void t4_fatal_err(struct adapter *adap)
2771{ 2775{
2772 int port; 2776 int port;
2773 2777
2778 if (pci_channel_offline(adap->pdev))
2779 return;
2780
2774 /* Disable the SGE since ULDs are going to free resources that 2781 /* Disable the SGE since ULDs are going to free resources that
2775 * could be exposed to the adapter. RDMA MWs for example... 2782 * could be exposed to the adapter. RDMA MWs for example...
2776 */ 2783 */
@@ -3882,9 +3889,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3882 spin_lock(&adap->stats_lock); 3889 spin_lock(&adap->stats_lock);
3883 for_each_port(adap, i) { 3890 for_each_port(adap, i) {
3884 struct net_device *dev = adap->port[i]; 3891 struct net_device *dev = adap->port[i];
3885 3892 if (dev) {
3886 netif_device_detach(dev); 3893 netif_device_detach(dev);
3887 netif_carrier_off(dev); 3894 netif_carrier_off(dev);
3895 }
3888 } 3896 }
3889 spin_unlock(&adap->stats_lock); 3897 spin_unlock(&adap->stats_lock);
3890 disable_interrupts(adap); 3898 disable_interrupts(adap);
@@ -3963,12 +3971,13 @@ static void eeh_resume(struct pci_dev *pdev)
3963 rtnl_lock(); 3971 rtnl_lock();
3964 for_each_port(adap, i) { 3972 for_each_port(adap, i) {
3965 struct net_device *dev = adap->port[i]; 3973 struct net_device *dev = adap->port[i];
3966 3974 if (dev) {
3967 if (netif_running(dev)) { 3975 if (netif_running(dev)) {
3968 link_start(dev); 3976 link_start(dev);
3969 cxgb_set_rxmode(dev); 3977 cxgb_set_rxmode(dev);
3978 }
3979 netif_device_attach(dev);
3970 } 3980 }
3971 netif_device_attach(dev);
3972 } 3981 }
3973 rtnl_unlock(); 3982 rtnl_unlock();
3974} 3983}
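
Editor's note: in cxgb_up() the FULL_INIT_DONE flag is now set while holding uld_mutex, and the EEH callbacks skip ports whose net_device was never allocated. A sketch of the first pattern only, publishing a ready flag under the same lock its readers take; the foo_* names and flag value are invented:

#include <linux/mutex.h>

#define FOO_FULL_INIT_DONE      (1U << 0)

struct foo_adapter {
        unsigned int flags;
};

static DEFINE_MUTEX(foo_uld_mutex);

static void foo_mark_up(struct foo_adapter *adap)
{
        /* Upper-layer drivers test FOO_FULL_INIT_DONE under foo_uld_mutex;
         * flipping it under the same mutex ensures they never act on a
         * half-initialised adapter. */
        mutex_lock(&foo_uld_mutex);
        adap->flags |= FOO_FULL_INIT_DONE;
        mutex_unlock(&foo_uld_mutex);
}
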
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index aded42b96f6d..3a34aa629f7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
4557 */ 4557 */
4558void t4_intr_disable(struct adapter *adapter) 4558void t4_intr_disable(struct adapter *adapter)
4559{ 4559{
4560 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A); 4560 u32 whoami, pf;
4561 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? 4561
4562 if (pci_channel_offline(adapter->pdev))
4563 return;
4564
4565 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4566 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4562 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); 4567 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4563 4568
4564 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); 4569 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
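
Editor's note: t4_intr_disable() now returns early when pci_channel_offline() reports the function as isolated, since MMIO to an offlined device reads back all-ones and writes are discarded. A hedged sketch of that guard; the register offset and struct layout below are made up:

#include <linux/io.h>
#include <linux/pci.h>

#define FOO_INT_ENABLE  0x100   /* hypothetical register offset */

struct foo_adapter {
        struct pci_dev *pdev;
        void __iomem *regs;
};

static void foo_intr_disable(struct foo_adapter *adapter)
{
        /* After an EEH/AER error the slot is frozen; touching the BARs is
         * pointless and can confuse the error-recovery state machine. */
        if (pci_channel_offline(adapter->pdev))
                return;

        writel(0, adapter->regs + FOO_INT_ENABLE);
}
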
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 3549d3876278..f2d623a7aee0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x10 39#define T4FW_VERSION_MINOR 0x10
40#define T4FW_VERSION_MICRO 0x2B 40#define T4FW_VERSION_MICRO 0x2D
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T4FW_MIN_VERSION_MAJOR 0x01 43#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
46 46
47#define T5FW_VERSION_MAJOR 0x01 47#define T5FW_VERSION_MAJOR 0x01
48#define T5FW_VERSION_MINOR 0x10 48#define T5FW_VERSION_MINOR 0x10
49#define T5FW_VERSION_MICRO 0x2B 49#define T5FW_VERSION_MICRO 0x2D
50#define T5FW_VERSION_BUILD 0x00 50#define T5FW_VERSION_BUILD 0x00
51 51
52#define T5FW_MIN_VERSION_MAJOR 0x00 52#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
55 55
56#define T6FW_VERSION_MAJOR 0x01 56#define T6FW_VERSION_MAJOR 0x01
57#define T6FW_VERSION_MINOR 0x10 57#define T6FW_VERSION_MINOR 0x10
58#define T6FW_VERSION_MICRO 0x2B 58#define T6FW_VERSION_MICRO 0x2D
59#define T6FW_VERSION_BUILD 0x00 59#define T6FW_VERSION_BUILD 0x00
60 60
61#define T6FW_MIN_VERSION_MAJOR 0x00 61#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index e863ba74d005..8bb0db990c8f 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
739 if (ret) 739 if (ret)
740 return ret; 740 return ret;
741 741
742 napi_enable(&priv->napi);
743
742 ethoc_init_ring(priv, dev->mem_start); 744 ethoc_init_ring(priv, dev->mem_start);
743 ethoc_reset(priv); 745 ethoc_reset(priv);
744 746
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
754 priv->old_duplex = -1; 756 priv->old_duplex = -1;
755 757
756 phy_start(dev->phydev); 758 phy_start(dev->phydev);
757 napi_enable(&priv->napi);
758 759
759 if (netif_msg_ifup(priv)) { 760 if (netif_msg_ifup(priv)) {
760 dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", 761 dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 446c7b374ff5..a10de1e9c157 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
381{ 381{
382 const struct of_device_id *id = 382 const struct of_device_id *id =
383 of_match_device(fsl_pq_mdio_match, &pdev->dev); 383 of_match_device(fsl_pq_mdio_match, &pdev->dev);
384 const struct fsl_pq_mdio_data *data = id->data; 384 const struct fsl_pq_mdio_data *data;
385 struct device_node *np = pdev->dev.of_node; 385 struct device_node *np = pdev->dev.of_node;
386 struct resource res; 386 struct resource res;
387 struct device_node *tbi; 387 struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
389 struct mii_bus *new_bus; 389 struct mii_bus *new_bus;
390 int err; 390 int err;
391 391
392 if (!id) {
393 dev_err(&pdev->dev, "Failed to match device\n");
394 return -ENODEV;
395 }
396
397 data = id->data;
398
392 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); 399 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
393 400
394 new_bus = mdiobus_alloc_size(sizeof(*priv)); 401 new_bus = mdiobus_alloc_size(sizeof(*priv));
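
Editor's note: fsl_pq_mdio_probe() no longer dereferences the of_match_device() result at its declaration; the pointer is checked first, since matching can legitimately fail. The shape of that fix, sketched with placeholder names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_data { unsigned int quirks; };

static const struct foo_data foo_data_v1 = { .quirks = 0 };

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo-v1", .data = &foo_data_v1 },
        { /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
        const struct of_device_id *id =
                of_match_device(foo_of_match, &pdev->dev);
        const struct foo_data *data;

        /* of_match_device() returns NULL when nothing matches (e.g. a
         * device instantiated without an of_node), so only dereference
         * id->data after the check. */
        if (!id) {
                dev_err(&pdev->dev, "failed to match device\n");
                return -ENODEV;
        }
        data = id->data;

        /* ... use data->quirks ... */
        return 0;
}
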
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f2d329dba99..a93757c255f7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -81,7 +81,7 @@
81static const char ibmvnic_driver_name[] = "ibmvnic"; 81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; 82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83 83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); 84MODULE_AUTHOR("Santiago Leon");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); 85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL"); 86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION); 87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d5c9c9e06ff5..150caf6ca2b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
295 **/ 295 **/
296void i40e_service_event_schedule(struct i40e_pf *pf) 296void i40e_service_event_schedule(struct i40e_pf *pf)
297{ 297{
298 if (!test_bit(__I40E_VSI_DOWN, pf->state) && 298 if (!test_bit(__I40E_DOWN, pf->state) &&
299 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 299 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
300 queue_work(i40e_wq, &pf->service_task); 300 queue_work(i40e_wq, &pf->service_task);
301} 301}
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
3611 * this is not a performance path and napi_schedule() 3611 * this is not a performance path and napi_schedule()
3612 * can deal with rescheduling. 3612 * can deal with rescheduling.
3613 */ 3613 */
3614 if (!test_bit(__I40E_VSI_DOWN, pf->state)) 3614 if (!test_bit(__I40E_DOWN, pf->state))
3615 napi_schedule_irqoff(&q_vector->napi); 3615 napi_schedule_irqoff(&q_vector->napi);
3616 } 3616 }
3617 3617
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
3687enable_intr: 3687enable_intr:
3688 /* re-enable interrupt causes */ 3688 /* re-enable interrupt causes */
3689 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3689 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3690 if (!test_bit(__I40E_VSI_DOWN, pf->state)) { 3690 if (!test_bit(__I40E_DOWN, pf->state)) {
3691 i40e_service_event_schedule(pf); 3691 i40e_service_event_schedule(pf);
3692 i40e_irq_dynamic_enable_icr0(pf, false); 3692 i40e_irq_dynamic_enable_icr0(pf, false);
3693 } 3693 }
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6203{ 6203{
6204 6204
6205 /* if interface is down do nothing */ 6205 /* if interface is down do nothing */
6206 if (test_bit(__I40E_VSI_DOWN, pf->state)) 6206 if (test_bit(__I40E_DOWN, pf->state))
6207 return; 6207 return;
6208 6208
6209 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) 6209 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
6344 int i; 6344 int i;
6345 6345
6346 /* if interface is down do nothing */ 6346 /* if interface is down do nothing */
6347 if (test_bit(__I40E_VSI_DOWN, pf->state) || 6347 if (test_bit(__I40E_DOWN, pf->state) ||
6348 test_bit(__I40E_CONFIG_BUSY, pf->state)) 6348 test_bit(__I40E_CONFIG_BUSY, pf->state))
6349 return; 6349 return;
6350 6350
@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
6399 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 6399 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6400 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); 6400 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
6401 } 6401 }
6402 if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { 6402 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
6403 reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); 6403 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6404 clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); 6404 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
6405 } 6405 }
6406 6406
6407 /* If there's a recovery already waiting, it takes 6407 /* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
6415 6415
6416 /* If we're already down or resetting, just bail */ 6416 /* If we're already down or resetting, just bail */
6417 if (reset_flags && 6417 if (reset_flags &&
6418 !test_bit(__I40E_VSI_DOWN, pf->state) && 6418 !test_bit(__I40E_DOWN, pf->state) &&
6419 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { 6419 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
6420 rtnl_lock(); 6420 rtnl_lock();
6421 i40e_do_reset(pf, reset_flags, true); 6421 i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7002 u32 val; 7002 u32 val;
7003 int v; 7003 int v;
7004 7004
7005 if (test_bit(__I40E_VSI_DOWN, pf->state)) 7005 if (test_bit(__I40E_DOWN, pf->state))
7006 goto clear_recovery; 7006 goto clear_recovery;
7007 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); 7007 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
7008 7008
@@ -9767,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
9767 return -ENODEV; 9767 return -ENODEV;
9768 } 9768 }
9769 if (vsi == pf->vsi[pf->lan_vsi] && 9769 if (vsi == pf->vsi[pf->lan_vsi] &&
9770 !test_bit(__I40E_VSI_DOWN, pf->state)) { 9770 !test_bit(__I40E_DOWN, pf->state)) {
9771 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9771 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9772 return -ENODEV; 9772 return -ENODEV;
9773 } 9773 }
@@ -11003,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11003 } 11003 }
11004 pf->next_vsi = 0; 11004 pf->next_vsi = 0;
11005 pf->pdev = pdev; 11005 pf->pdev = pdev;
11006 set_bit(__I40E_VSI_DOWN, pf->state); 11006 set_bit(__I40E_DOWN, pf->state);
11007 11007
11008 hw = &pf->hw; 11008 hw = &pf->hw;
11009 hw->back = pf; 11009 hw->back = pf;
@@ -11293,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11293 * before setting up the misc vector or we get a race and the vector 11293 * before setting up the misc vector or we get a race and the vector
11294 * ends up disabled forever. 11294 * ends up disabled forever.
11295 */ 11295 */
11296 clear_bit(__I40E_VSI_DOWN, pf->state); 11296 clear_bit(__I40E_DOWN, pf->state);
11297 11297
11298 /* In case of MSIX we are going to setup the misc vector right here 11298 /* In case of MSIX we are going to setup the misc vector right here
11299 * to handle admin queue events etc. In case of legacy and MSI 11299 * to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11448 11448
11449 /* Unwind what we've done if something failed in the setup */ 11449 /* Unwind what we've done if something failed in the setup */
11450err_vsis: 11450err_vsis:
11451 set_bit(__I40E_VSI_DOWN, pf->state); 11451 set_bit(__I40E_DOWN, pf->state);
11452 i40e_clear_interrupt_scheme(pf); 11452 i40e_clear_interrupt_scheme(pf);
11453 kfree(pf->vsi); 11453 kfree(pf->vsi);
11454err_switch_setup: 11454err_switch_setup:
@@ -11500,7 +11500,7 @@ static void i40e_remove(struct pci_dev *pdev)
11500 11500
11501 /* no more scheduling of any task */ 11501 /* no more scheduling of any task */
11502 set_bit(__I40E_SUSPENDED, pf->state); 11502 set_bit(__I40E_SUSPENDED, pf->state);
11503 set_bit(__I40E_VSI_DOWN, pf->state); 11503 set_bit(__I40E_DOWN, pf->state);
11504 if (pf->service_timer.data) 11504 if (pf->service_timer.data)
11505 del_timer_sync(&pf->service_timer); 11505 del_timer_sync(&pf->service_timer);
11506 if (pf->service_task.func) 11506 if (pf->service_task.func)
@@ -11740,7 +11740,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
11740 struct i40e_hw *hw = &pf->hw; 11740 struct i40e_hw *hw = &pf->hw;
11741 11741
11742 set_bit(__I40E_SUSPENDED, pf->state); 11742 set_bit(__I40E_SUSPENDED, pf->state);
11743 set_bit(__I40E_VSI_DOWN, pf->state); 11743 set_bit(__I40E_DOWN, pf->state);
11744 rtnl_lock(); 11744 rtnl_lock();
11745 i40e_prep_for_reset(pf, true); 11745 i40e_prep_for_reset(pf, true);
11746 rtnl_unlock(); 11746 rtnl_unlock();
@@ -11789,7 +11789,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11789 int retval = 0; 11789 int retval = 0;
11790 11790
11791 set_bit(__I40E_SUSPENDED, pf->state); 11791 set_bit(__I40E_SUSPENDED, pf->state);
11792 set_bit(__I40E_VSI_DOWN, pf->state); 11792 set_bit(__I40E_DOWN, pf->state);
11793 11793
11794 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) 11794 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
11795 i40e_enable_mc_magic_wake(pf); 11795 i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11841,7 @@ static int i40e_resume(struct pci_dev *pdev)
11841 11841
11842 /* handling the reset will rebuild the device state */ 11842 /* handling the reset will rebuild the device state */
11843 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { 11843 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
11844 clear_bit(__I40E_VSI_DOWN, pf->state); 11844 clear_bit(__I40E_DOWN, pf->state);
11845 rtnl_lock(); 11845 rtnl_lock();
11846 i40e_reset_and_rebuild(pf, false, true); 11846 i40e_reset_and_rebuild(pf, false, true);
11847 rtnl_unlock(); 11847 rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 29321a6167a6..cd894f4023b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1854#if (PAGE_SIZE < 8192) 1854#if (PAGE_SIZE < 8192)
1855 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1855 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1856#else 1856#else
1857 unsigned int truesize = SKB_DATA_ALIGN(size); 1857 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1858 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1858#endif 1859#endif
1859 struct sk_buff *skb; 1860 struct sk_buff *skb;
1860 1861
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index dfe241a12ad0..12b02e530503 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1190#if (PAGE_SIZE < 8192) 1190#if (PAGE_SIZE < 8192)
1191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1192#else 1192#else
1193 unsigned int truesize = SKB_DATA_ALIGN(size); 1193 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1194 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1194#endif 1195#endif
1195 struct sk_buff *skb; 1196 struct sk_buff *skb;
1196 1197
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae5fdc2df654..ffbcb27c05e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
1562 qpn = priv->drop_qp.qpn; 1562 qpn = priv->drop_qp.qpn;
1563 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { 1563 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
1564 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 1564 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
1565 if (qpn < priv->rss_map.base_qpn ||
1566 qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
1567 en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
1568 return -EINVAL;
1569 }
1570 } else { 1565 } else {
1571 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 1566 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
1572 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", 1567 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1a670b681555..0710b3677464 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36 36
37#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
38#include <linux/mlx4/qp.h>
38#include <linux/export.h> 39#include <linux/export.h>
39 40
40#include "mlx4.h" 41#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
985 if (IS_ERR(mailbox)) 986 if (IS_ERR(mailbox))
986 return PTR_ERR(mailbox); 987 return PTR_ERR(mailbox);
987 988
989 if (!mlx4_qp_lookup(dev, rule->qpn)) {
990 mlx4_err_rule(dev, "QP doesn't exist\n", rule);
991 ret = -EINVAL;
992 goto out;
993 }
994
988 trans_rule_ctrl_to_hw(rule, mailbox->buf); 995 trans_rule_ctrl_to_hw(rule, mailbox->buf);
989 996
990 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 997 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
991 998
992 list_for_each_entry(cur, &rule->list, list) { 999 list_for_each_entry(cur, &rule->list, list) {
993 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 1000 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
994 if (ret < 0) { 1001 if (ret < 0)
995 mlx4_free_cmd_mailbox(dev, mailbox); 1002 goto out;
996 return ret; 1003
997 }
998 size += ret; 1004 size += ret;
999 } 1005 }
1000 1006
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
1021 } 1027 }
1022 } 1028 }
1023 1029
1030out:
1024 mlx4_free_cmd_mailbox(dev, mailbox); 1031 mlx4_free_cmd_mailbox(dev, mailbox);
1025 1032
1026 return ret; 1033 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2d6abd4662b1..5a310d313e94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
384 __mlx4_qp_free_icm(dev, qpn); 384 __mlx4_qp_free_icm(dev, qpn);
385} 385}
386 386
387struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
388{
389 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
390 struct mlx4_qp *qp;
391
392 spin_lock(&qp_table->lock);
393
394 qp = __mlx4_qp_lookup(dev, qpn);
395
396 spin_unlock(&qp_table->lock);
397 return qp;
398}
399
387int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) 400int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
388{ 401{
389 struct mlx4_priv *priv = mlx4_priv(dev); 402 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
471 } 484 }
472 485
473 if (attr & MLX4_UPDATE_QP_QOS_VPORT) { 486 if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
487 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
488 mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
489 err = -EOPNOTSUPP;
490 goto out;
491 }
492
474 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; 493 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
475 cmd->qp_context.qos_vport = params->qos_vport; 494 cmd->qp_context.qos_vport = params->qos_vport;
476 } 495 }
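
Editor's note: mlx4_flow_attach() can now refuse rules whose QP does not exist because qp.c grows a small lookup helper that takes the QP table spinlock around the radix-tree search. A generic sketch of such a lock-protected lookup; the foo_* table is an assumption, not the mlx4 layout:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_obj;

struct foo_table {
        spinlock_t              lock;
        struct radix_tree_root  tree;
};

static struct foo_obj *foo_lookup(struct foo_table *table, u32 id)
{
        struct foo_obj *obj;

        /* Writers insert and remove under the same lock, so the object
         * cannot be freed out from under this lookup. */
        spin_lock(&table->lock);
        obj = radix_tree_lookup(&table->tree, id);
        spin_unlock(&table->lock);

        return obj;
}

As in the mlx4 hunk, the returned pointer is only useful as an existence check; holding it past the unlock would need its own reference counting.
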
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 07516545474f..812783865205 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5255 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 5255 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5256} 5256}
5257 5257
5258static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5259 struct mlx4_vf_immed_vlan_work *work)
5260{
5261 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5262 ctx->qp_context.qos_vport = work->qos_vport;
5263}
5264
5258void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) 5265void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5259{ 5266{
5260 struct mlx4_vf_immed_vlan_work *work = 5267 struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5369 qp->sched_queue & 0xC7; 5376 qp->sched_queue & 0xC7;
5370 upd_context->qp_context.pri_path.sched_queue |= 5377 upd_context->qp_context.pri_path.sched_queue |=
5371 ((work->qos & 0x7) << 3); 5378 ((work->qos & 0x7) << 3);
5372 upd_context->qp_mask |= 5379
5373 cpu_to_be64(1ULL << 5380 if (dev->caps.flags2 &
5374 MLX4_UPD_QP_MASK_QOS_VPP); 5381 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5375 upd_context->qp_context.qos_vport = 5382 update_qos_vpp(upd_context, work);
5376 work->qos_vport;
5377 } 5383 }
5378 5384
5379 err = mlx4_cmd(dev, mailbox->dma, 5385 err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index fe5546bb4153..af945edfee19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -621,10 +621,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
621 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), 621 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
622 priv->irq_info[i].mask); 622 priv->irq_info[i].mask);
623 623
624#ifdef CONFIG_SMP 624 if (IS_ENABLED(CONFIG_SMP) &&
625 if (irq_set_affinity_hint(irq, priv->irq_info[i].mask)) 625 irq_set_affinity_hint(irq, priv->irq_info[i].mask))
626 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); 626 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
627#endif
628 627
629 return 0; 628 return 0;
630} 629}
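
Editor's note: the mlx5 hunk converts a preprocessor guard into an IS_ENABLED(CONFIG_SMP) test, so the affinity-hint branch is still parsed and type-checked on uniprocessor builds and then eliminated as dead code. Sketched with placeholder names:

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kconfig.h>
#include <linux/printk.h>

static void foo_set_irq_affinity(int irq, const struct cpumask *mask)
{
        /* Unlike #ifdef CONFIG_SMP, IS_ENABLED() keeps the call visible
         * to the compiler on !SMP builds; the constant-false condition is
         * then optimised away. */
        if (IS_ENABLED(CONFIG_SMP) && irq_set_affinity_hint(irq, mask))
                pr_warn("irq_set_affinity_hint failed, irq %d\n", irq);
}
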
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 537d1236a4fe..715b3aaf83ac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
1730 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 1730 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
1731 break; 1731 break;
1732 default: 1732 default:
1733 DP_ERR(cdev, "Invalid protocol type = %d\n", type); 1733 DP_VERBOSE(cdev, QED_MSG_SP,
1734 "Invalid protocol type = %d\n", type);
1734 return; 1735 return;
1735 } 1736 }
1736} 1737}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7245b1072518..81312924df14 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
1824 u32 (*get_cap_size)(void *, int); 1824 u32 (*get_cap_size)(void *, int);
1825 void (*set_sys_info)(void *, int, u32); 1825 void (*set_sys_info)(void *, int, u32);
1826 void (*store_cap_mask)(void *, u32); 1826 void (*store_cap_mask)(void *, u32);
1827 bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
1828 bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
1827}; 1829};
1828 1830
1829extern struct qlcnic_nic_template qlcnic_vf_ops; 1831extern struct qlcnic_nic_template qlcnic_vf_ops;
1830 1832
1831static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) 1833static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
1832{ 1834{
1833 return adapter->ahw->extra_capability[0] & 1835 return adapter->ahw->extra_capability[0] &
1834 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; 1836 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
1835} 1837}
1836 1838
1837static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) 1839static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
1838{ 1840{
1839 return adapter->ahw->extra_capability[0] & 1841 return adapter->ahw->extra_capability[0] &
1840 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; 1842 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
1841} 1843}
1842 1844
1845static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
1846{
1847 return false;
1848}
1849
1850static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
1851{
1852 return false;
1853}
1854
1855static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
1856{
1857 return adapter->ahw->hw_ops->encap_rx_offload(adapter);
1858}
1859
1860static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
1861{
1862 return adapter->ahw->hw_ops->encap_tx_offload(adapter);
1863}
1864
1843static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) 1865static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1844{ 1866{
1845 return adapter->nic_ops->start_firmware(adapter); 1867 return adapter->nic_ops->start_firmware(adapter);
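
Editor's note: qlcnic.h replaces the direct capability-bit test, which only exists on 83xx firmware, with per-variant hw_ops callbacks; 82xx simply reports no encapsulation offload. A sketch of that ops-table indirection with invented foo_* types:

#include <linux/types.h>

struct foo_adapter;

struct foo_hw_ops {
        bool (*encap_rx_offload)(struct foo_adapter *adapter);
        bool (*encap_tx_offload)(struct foo_adapter *adapter);
};

/* Older silicon: no encapsulation offload, so the answer is constant. */
static bool foo_gen1_encap_rx_offload(struct foo_adapter *adapter)
{
        return false;
}

/* Generic callers go through the ops table and never read variant-specific
 * capability registers directly. */
static inline bool foo_encap_rx_offload(const struct foo_hw_ops *ops,
                                        struct foo_adapter *adapter)
{
        return ops->encap_rx_offload(adapter);
}
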
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 4fb68797630e..f7080d0ab874 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
242 .get_cap_size = qlcnic_83xx_get_cap_size, 242 .get_cap_size = qlcnic_83xx_get_cap_size,
243 .set_sys_info = qlcnic_83xx_set_sys_info, 243 .set_sys_info = qlcnic_83xx_set_sys_info,
244 .store_cap_mask = qlcnic_83xx_store_cap_mask, 244 .store_cap_mask = qlcnic_83xx_store_cap_mask,
245 .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
246 .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
245}; 247};
246 248
247static struct qlcnic_nic_template qlcnic_83xx_ops = { 249static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 838cc0ceafd8..7848cf04b29a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
341 } 341 }
342 return -EIO; 342 return -EIO;
343 } 343 }
344 usleep_range(1000, 1500); 344 udelay(1200);
345 } 345 }
346 346
347 if (id_reg) 347 if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b6628aaa6e4a..1b5f7d57b6f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
632 .get_cap_size = qlcnic_82xx_get_cap_size, 632 .get_cap_size = qlcnic_82xx_get_cap_size,
633 .set_sys_info = qlcnic_82xx_set_sys_info, 633 .set_sys_info = qlcnic_82xx_set_sys_info,
634 .store_cap_mask = qlcnic_82xx_store_cap_mask, 634 .store_cap_mask = qlcnic_82xx_store_cap_mask,
635 .encap_rx_offload = qlcnic_82xx_encap_rx_offload,
636 .encap_tx_offload = qlcnic_82xx_encap_tx_offload,
635}; 637};
636 638
637static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) 639static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 2f656f395f39..c58180f40844 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
77 .free_mac_list = qlcnic_sriov_vf_free_mac_list, 77 .free_mac_list = qlcnic_sriov_vf_free_mac_list,
78 .enable_sds_intr = qlcnic_83xx_enable_sds_intr, 78 .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
79 .disable_sds_intr = qlcnic_83xx_disable_sds_intr, 79 .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
80 .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
81 .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
80}; 82};
81 83
82static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { 84static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index cc065ffbe4b5..bcd4708b3745 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
931 emac_mac_config(adpt); 931 emac_mac_config(adpt);
932 emac_mac_rx_descs_refill(adpt, &adpt->rx_q); 932 emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
933 933
934 adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 934 adpt->phydev->irq = PHY_POLL;
935 ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, 935 ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
936 PHY_INTERFACE_MODE_SGMII); 936 PHY_INTERFACE_MODE_SGMII);
937 if (ret) { 937 if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 441c19366489..18461fcb9815 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -13,15 +13,11 @@
13/* Qualcomm Technologies, Inc. EMAC PHY Controller driver. 13/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
14 */ 14 */
15 15
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_net.h>
19#include <linux/of_mdio.h> 16#include <linux/of_mdio.h>
20#include <linux/phy.h> 17#include <linux/phy.h>
21#include <linux/iopoll.h> 18#include <linux/iopoll.h>
22#include <linux/acpi.h> 19#include <linux/acpi.h>
23#include "emac.h" 20#include "emac.h"
24#include "emac-mac.h"
25 21
26/* EMAC base register offsets */ 22/* EMAC base register offsets */
27#define EMAC_MDIO_CTRL 0x001414 23#define EMAC_MDIO_CTRL 0x001414
@@ -52,62 +48,10 @@
52 48
53#define MDIO_WAIT_TIMES 1000 49#define MDIO_WAIT_TIMES 1000
54 50
55#define EMAC_LINK_SPEED_DEFAULT (\
56 EMAC_LINK_SPEED_10_HALF |\
57 EMAC_LINK_SPEED_10_FULL |\
58 EMAC_LINK_SPEED_100_HALF |\
59 EMAC_LINK_SPEED_100_FULL |\
60 EMAC_LINK_SPEED_1GB_FULL)
61
62/**
63 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
64 * @adpt: the emac adapter
65 *
66 * The autopoll feature takes over the MDIO bus. In order for
67 * the PHY driver to be able to talk to the PHY over the MDIO
68 * bus, we need to temporarily disable the autopoll feature.
69 */
70static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
71{
72 u32 val;
73
74 /* disable autopoll */
75 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
76
77 /* wait for any mdio polling to complete */
78 if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
79 !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
80 return 0;
81
82 /* failed to disable; ensure it is enabled before returning */
83 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
84
85 return -EBUSY;
86}
87
88/**
89 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
90 * @adpt: the emac adapter
91 *
92 * The EMAC has the ability to poll the external PHY on the MDIO
93 * bus for link state changes. This eliminates the need for the
94 * driver to poll the phy. If if the link state does change,
95 * the EMAC issues an interrupt on behalf of the PHY.
96 */
97static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
98{
99 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
100}
101
102static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 51static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
103{ 52{
104 struct emac_adapter *adpt = bus->priv; 53 struct emac_adapter *adpt = bus->priv;
105 u32 reg; 54 u32 reg;
106 int ret;
107
108 ret = emac_phy_mdio_autopoll_disable(adpt);
109 if (ret)
110 return ret;
111 55
112 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 56 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
113 (addr << PHY_ADDR_SHFT)); 57 (addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
122 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 66 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
123 !(reg & (MDIO_START | MDIO_BUSY)), 67 !(reg & (MDIO_START | MDIO_BUSY)),
124 100, MDIO_WAIT_TIMES * 100)) 68 100, MDIO_WAIT_TIMES * 100))
125 ret = -EIO; 69 return -EIO;
126 else
127 ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
128 70
129 emac_phy_mdio_autopoll_enable(adpt); 71 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
130
131 return ret;
132} 72}
133 73
134static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) 74static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
135{ 75{
136 struct emac_adapter *adpt = bus->priv; 76 struct emac_adapter *adpt = bus->priv;
137 u32 reg; 77 u32 reg;
138 int ret;
139
140 ret = emac_phy_mdio_autopoll_disable(adpt);
141 if (ret)
142 return ret;
143 78
144 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 79 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
145 (addr << PHY_ADDR_SHFT)); 80 (addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
155 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 90 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
156 !(reg & (MDIO_START | MDIO_BUSY)), 100, 91 !(reg & (MDIO_START | MDIO_BUSY)), 100,
157 MDIO_WAIT_TIMES * 100)) 92 MDIO_WAIT_TIMES * 100))
158 ret = -EIO; 93 return -EIO;
159 94
160 emac_phy_mdio_autopoll_enable(adpt); 95 return 0;
161
162 return ret;
163} 96}
164 97
165/* Configure the MDIO bus and connect the external PHY */ 98/* Configure the MDIO bus and connect the external PHY */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 28a8cdc36485..98a326faea29 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -50,19 +50,7 @@
50#define DMAR_DLY_CNT_DEF 15 50#define DMAR_DLY_CNT_DEF 15
51#define DMAW_DLY_CNT_DEF 4 51#define DMAW_DLY_CNT_DEF 4
52 52
53#define IMR_NORMAL_MASK (\ 53#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
54 ISR_ERROR |\
55 ISR_GPHY_LINK |\
56 ISR_TX_PKT |\
57 GPHY_WAKEUP_INT)
58
59#define IMR_EXTENDED_MASK (\
60 SW_MAN_INT |\
61 ISR_OVER |\
62 ISR_ERROR |\
63 ISR_GPHY_LINK |\
64 ISR_TX_PKT |\
65 GPHY_WAKEUP_INT)
66 54
67#define ISR_TX_PKT (\ 55#define ISR_TX_PKT (\
68 TX_PKT_INT |\ 56 TX_PKT_INT |\
@@ -70,10 +58,6 @@
70 TX_PKT_INT2 |\ 58 TX_PKT_INT2 |\
71 TX_PKT_INT3) 59 TX_PKT_INT3)
72 60
73#define ISR_GPHY_LINK (\
74 GPHY_LINK_UP_INT |\
75 GPHY_LINK_DOWN_INT)
76
77#define ISR_OVER (\ 61#define ISR_OVER (\
78 RFD0_UR_INT |\ 62 RFD0_UR_INT |\
79 RFD1_UR_INT |\ 63 RFD1_UR_INT |\
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
187 if (status & ISR_OVER) 171 if (status & ISR_OVER)
188 net_warn_ratelimited("warning: TX/RX overflow\n"); 172 net_warn_ratelimited("warning: TX/RX overflow\n");
189 173
190 /* link event */
191 if (status & ISR_GPHY_LINK)
192 phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
193
194exit: 174exit:
195 /* enable the interrupt */ 175 /* enable the interrupt */
196 writel(irq->mask, adpt->base + EMAC_INT_MASK); 176 writel(irq->mask, adpt->base + EMAC_INT_MASK);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 3cd7989c007d..784782da3a85 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
230 int ring_size; 230 int ring_size;
231 int i; 231 int i;
232 232
233 /* Free RX skb ringbuffer */
234 if (priv->rx_skb[q]) {
235 for (i = 0; i < priv->num_rx_ring[q]; i++)
236 dev_kfree_skb(priv->rx_skb[q][i]);
237 }
238 kfree(priv->rx_skb[q]);
239 priv->rx_skb[q] = NULL;
240
241 /* Free aligned TX buffers */
242 kfree(priv->tx_align[q]);
243 priv->tx_align[q] = NULL;
244
245 if (priv->rx_ring[q]) { 233 if (priv->rx_ring[q]) {
246 for (i = 0; i < priv->num_rx_ring[q]; i++) { 234 for (i = 0; i < priv->num_rx_ring[q]; i++) {
247 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; 235 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
270 priv->tx_ring[q] = NULL; 258 priv->tx_ring[q] = NULL;
271 } 259 }
272 260
261 /* Free RX skb ringbuffer */
262 if (priv->rx_skb[q]) {
263 for (i = 0; i < priv->num_rx_ring[q]; i++)
264 dev_kfree_skb(priv->rx_skb[q][i]);
265 }
266 kfree(priv->rx_skb[q]);
267 priv->rx_skb[q] = NULL;
268
269 /* Free aligned TX buffers */
270 kfree(priv->tx_align[q]);
271 priv->tx_align[q] = NULL;
272
273 /* Free TX skb ringbuffer. 273 /* Free TX skb ringbuffer.
274 * SKBs are freed by ravb_tx_free() call above. 274 * SKBs are freed by ravb_tx_free() call above.
275 */ 275 */
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 489ef146201e..6a9c954492f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -37,6 +37,7 @@
37#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) 37#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
38#define TSE_PCS_CONTROL_REG 0x00 38#define TSE_PCS_CONTROL_REG 0x00
39#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) 39#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
40#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
40#define TSE_PCS_IF_MODE_REG 0x28 41#define TSE_PCS_IF_MODE_REG 0x28
41#define TSE_PCS_LINK_TIMER_0_REG 0x24 42#define TSE_PCS_LINK_TIMER_0_REG 0x24
42#define TSE_PCS_LINK_TIMER_1_REG 0x26 43#define TSE_PCS_LINK_TIMER_1_REG 0x26
@@ -65,6 +66,7 @@
65#define TSE_PCS_SW_RESET_TIMEOUT 100 66#define TSE_PCS_SW_RESET_TIMEOUT 100
66#define TSE_PCS_USE_SGMII_AN_MASK BIT(1) 67#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
67#define TSE_PCS_USE_SGMII_ENA BIT(0) 68#define TSE_PCS_USE_SGMII_ENA BIT(0)
69#define TSE_PCS_IF_USE_SGMII 0x03
68 70
69#define SGMII_ADAPTER_CTRL_REG 0x00 71#define SGMII_ADAPTER_CTRL_REG 0x00
70#define SGMII_ADAPTER_DISABLE 0x0001 72#define SGMII_ADAPTER_DISABLE 0x0001
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
101{ 103{
102 int ret = 0; 104 int ret = 0;
103 105
104 writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); 106 writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
107
108 writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
105 109
106 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); 110 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
107 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); 111 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a74c481401c4..12236daf7bb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1208,7 +1208,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1208 u32 rx_count = priv->plat->rx_queues_to_use; 1208 u32 rx_count = priv->plat->rx_queues_to_use;
1209 unsigned int bfsize = 0; 1209 unsigned int bfsize = 0;
1210 int ret = -ENOMEM; 1210 int ret = -ENOMEM;
1211 u32 queue; 1211 int queue;
1212 int i; 1212 int i;
1213 1213
1214 if (priv->hw->mode->set_16kib_bfsize) 1214 if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2724,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2724 2724
2725 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 2725 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2726 0, 1, 2726 0, 1,
2727 (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), 2727 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2728 0, 0); 2728 0, 0);
2729 2729
2730 tmp_len -= TSO_MAX_BUFF_SIZE; 2730 tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2947,7 +2947,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2947 int i, csum_insertion = 0, is_jumbo = 0; 2947 int i, csum_insertion = 0, is_jumbo = 0;
2948 u32 queue = skb_get_queue_mapping(skb); 2948 u32 queue = skb_get_queue_mapping(skb);
2949 int nfrags = skb_shinfo(skb)->nr_frags; 2949 int nfrags = skb_shinfo(skb)->nr_frags;
2950 unsigned int entry, first_entry; 2950 int entry;
2951 unsigned int first_entry;
2951 struct dma_desc *desc, *first; 2952 struct dma_desc *desc, *first;
2952 struct stmmac_tx_queue *tx_q; 2953 struct stmmac_tx_queue *tx_q;
2953 unsigned int enh_desc; 2954 unsigned int enh_desc;
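The stmmac TSO fix above changes the last-segment test from the per-descriptor buffer size to the remaining length: a final chunk that is exactly one full descriptor long never satisfies buff_size < TSO_MAX_BUFF_SIZE, while tmp_len <= TSO_MAX_BUFF_SIZE catches it (the driver additionally ANDs this with its last_segment flag). A standalone sketch of the loop, using an illustrative chunk size rather than the driver's real constant:

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE 16384			/* illustrative only, not the driver value */

int main(void)
{
	int tmp_len = 2 * TSO_MAX_BUFF_SIZE;	/* payload splits into two full chunks */

	while (tmp_len > 0) {
		int buff_size = (tmp_len > TSO_MAX_BUFF_SIZE) ?
				TSO_MAX_BUFF_SIZE : tmp_len;
		int old_last = buff_size < TSO_MAX_BUFF_SIZE;	/* never true for a full final chunk */
		int new_last = tmp_len <= TSO_MAX_BUFF_SIZE;	/* true on the real last chunk */

		printf("chunk=%d old_last=%d new_last=%d\n",
		       buff_size, old_last, new_last);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
	return 0;
}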
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 959fd12d2e67..6ebb0f559a42 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
1133 1133
1134 /* make enough headroom for basic scenario */ 1134 /* make enough headroom for basic scenario */
1135 encap_len = GENEVE_BASE_HLEN + ETH_HLEN; 1135 encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
1136 if (ip_tunnel_info_af(info) == AF_INET) { 1136 if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
1137 encap_len += sizeof(struct iphdr); 1137 encap_len += sizeof(struct iphdr);
1138 dev->max_mtu -= sizeof(struct iphdr); 1138 dev->max_mtu -= sizeof(struct iphdr);
1139 } else { 1139 } else {
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8c3633c1d078..97e3bc60c3e7 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
576 case HDLCDRVCTL_CALIBRATE: 576 case HDLCDRVCTL_CALIBRATE:
577 if(!capable(CAP_SYS_RAWIO)) 577 if(!capable(CAP_SYS_RAWIO))
578 return -EPERM; 578 return -EPERM;
579 if (s->par.bitrate <= 0)
580 return -EINVAL;
579 if (bi.data.calibrate > INT_MAX / s->par.bitrate) 581 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
580 return -EINVAL; 582 return -EINVAL;
581 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 583 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
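The new bitrate check guards the overflow test that follows it: dividing INT_MAX by a zero or negative bitrate is undefined, and only after both checks is calibrate * bitrate known to fit in an int. A minimal userspace sketch of the same guard pattern (function and variable names are illustrative, not the driver's):

#include <limits.h>
#include <stdio.h>

/* Returns -1 on invalid input, otherwise stores the scaled value in *out. */
static int scaled_calibrate(int calibrate, int bitrate, long *out)
{
	if (bitrate <= 0)
		return -1;			/* avoid divide-by-zero below */
	if (calibrate > INT_MAX / bitrate)
		return -1;			/* calibrate * bitrate would overflow */
	*out = (long)calibrate * bitrate / 16;
	return 0;
}

int main(void)
{
	long v;

	if (scaled_calibrate(100, 9600, &v) == 0)
		printf("calibrate ticks: %ld\n", v);
	return 0;
}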
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 9097e42bec2e..57297ba23987 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1127,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
1127 if (adv < 0) 1127 if (adv < 0)
1128 return adv; 1128 return adv;
1129 1129
1130 lpa &= adv;
1131
1132 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) 1130 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
1133 phydev->duplex = DUPLEX_FULL; 1131 phydev->duplex = DUPLEX_FULL;
1134 else 1132 else
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8e73f5f36e71..f99c21f78b63 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
658 return 0; 658 return 0;
659} 659}
660 660
661static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
662{
663 int rc;
664
665 /* Some devices have extra OF data and an OF-style MODALIAS */
666 rc = of_device_uevent_modalias(dev, env);
667 if (rc != -ENODEV)
668 return rc;
669
670 return 0;
671}
672
661#ifdef CONFIG_PM 673#ifdef CONFIG_PM
662static int mdio_bus_suspend(struct device *dev) 674static int mdio_bus_suspend(struct device *dev)
663{ 675{
@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
708struct bus_type mdio_bus_type = { 720struct bus_type mdio_bus_type = {
709 .name = "mdio_bus", 721 .name = "mdio_bus",
710 .match = mdio_bus_match, 722 .match = mdio_bus_match,
723 .uevent = mdio_uevent,
711 .pm = MDIO_BUS_PM_OPS, 724 .pm = MDIO_BUS_PM_OPS,
712}; 725};
713EXPORT_SYMBOL(mdio_bus_type); 726EXPORT_SYMBOL(mdio_bus_type);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6a5fd18f062c..b9252b8d81ff 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,23 +268,12 @@ out:
268 return ret; 268 return ret;
269} 269}
270 270
271static int kszphy_config_init(struct phy_device *phydev) 271/* Some config bits need to be set again on resume, handle them here. */
272static int kszphy_config_reset(struct phy_device *phydev)
272{ 273{
273 struct kszphy_priv *priv = phydev->priv; 274 struct kszphy_priv *priv = phydev->priv;
274 const struct kszphy_type *type;
275 int ret; 275 int ret;
276 276
277 if (!priv)
278 return 0;
279
280 type = priv->type;
281
282 if (type->has_broadcast_disable)
283 kszphy_broadcast_disable(phydev);
284
285 if (type->has_nand_tree_disable)
286 kszphy_nand_tree_disable(phydev);
287
288 if (priv->rmii_ref_clk_sel) { 277 if (priv->rmii_ref_clk_sel) {
289 ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); 278 ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
290 if (ret) { 279 if (ret) {
@@ -295,11 +284,30 @@ static int kszphy_config_init(struct phy_device *phydev)
295 } 284 }
296 285
297 if (priv->led_mode >= 0) 286 if (priv->led_mode >= 0)
298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 287 kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
299 288
300 return 0; 289 return 0;
301} 290}
302 291
292static int kszphy_config_init(struct phy_device *phydev)
293{
294 struct kszphy_priv *priv = phydev->priv;
295 const struct kszphy_type *type;
296
297 if (!priv)
298 return 0;
299
300 type = priv->type;
301
302 if (type->has_broadcast_disable)
303 kszphy_broadcast_disable(phydev);
304
305 if (type->has_nand_tree_disable)
306 kszphy_nand_tree_disable(phydev);
307
308 return kszphy_config_reset(phydev);
309}
310
303static int ksz8041_config_init(struct phy_device *phydev) 311static int ksz8041_config_init(struct phy_device *phydev)
304{ 312{
305 struct device_node *of_node = phydev->mdio.dev.of_node; 313 struct device_node *of_node = phydev->mdio.dev.of_node;
@@ -700,8 +708,14 @@ static int kszphy_suspend(struct phy_device *phydev)
700 708
701static int kszphy_resume(struct phy_device *phydev) 709static int kszphy_resume(struct phy_device *phydev)
702{ 710{
711 int ret;
712
703 genphy_resume(phydev); 713 genphy_resume(phydev);
704 714
715 ret = kszphy_config_reset(phydev);
716 if (ret)
717 return ret;
718
705 /* Enable PHY Interrupts */ 719 /* Enable PHY Interrupts */
706 if (phy_interrupt_is_valid(phydev)) { 720 if (phy_interrupt_is_valid(phydev)) {
707 phydev->interrupts = PHY_INTERRUPT_ENABLED; 721 phydev->interrupts = PHY_INTERRUPT_ENABLED;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 82ab8fb82587..7524caa0f29d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -241,7 +241,7 @@ static const struct phy_setting settings[] = {
241 * phy_lookup_setting - lookup a PHY setting 241 * phy_lookup_setting - lookup a PHY setting
242 * @speed: speed to match 242 * @speed: speed to match
243 * @duplex: duplex to match 243 * @duplex: duplex to match
244 * @feature: allowed link modes 244 * @features: allowed link modes
245 * @exact: an exact match is required 245 * @exact: an exact match is required
246 * 246 *
247 * Search the settings array for a setting that matches the speed and 247 * Search the settings array for a setting that matches the speed and
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3e9246cc49c3..a871f45ecc79 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
869 unsigned int len; 869 unsigned int len;
870 870
871 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 871 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
872 rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); 872 rq->min_buf_len, PAGE_SIZE - hdr_len);
873 return ALIGN(len, L1_CACHE_BYTES); 873 return ALIGN(len, L1_CACHE_BYTES);
874} 874}
875 875
@@ -2144,7 +2144,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
2144 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 2144 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2145 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 2145 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2146 2146
2147 return max(min_buf_len, hdr_len); 2147 return max(max(min_buf_len, hdr_len) - hdr_len,
2148 (unsigned int)GOOD_PACKET_LEN);
2148} 2149}
2149 2150
2150static int virtnet_find_vqs(struct virtnet_info *vi) 2151static int virtnet_find_vqs(struct virtnet_info *vi)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 328b4712683c..a6b5052c1d36 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
59 59
60static int vxlan_sock_add(struct vxlan_dev *vxlan); 60static int vxlan_sock_add(struct vxlan_dev *vxlan);
61 61
62static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
63
62/* per-network namespace private data for this module */ 64/* per-network namespace private data for this module */
63struct vxlan_net { 65struct vxlan_net {
64 struct list_head vxlan_list; 66 struct list_head vxlan_list;
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
740 call_rcu(&f->rcu, vxlan_fdb_free); 742 call_rcu(&f->rcu, vxlan_fdb_free);
741} 743}
742 744
745static void vxlan_dst_free(struct rcu_head *head)
746{
747 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
748
749 dst_cache_destroy(&rd->dst_cache);
750 kfree(rd);
751}
752
753static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
754 struct vxlan_rdst *rd)
755{
756 list_del_rcu(&rd->list);
757 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
758 call_rcu(&rd->rcu, vxlan_dst_free);
759}
760
743static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 761static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
744 union vxlan_addr *ip, __be16 *port, __be32 *src_vni, 762 union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
745 __be32 *vni, u32 *ifindex) 763 __be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
864 * otherwise destroy the fdb entry 882 * otherwise destroy the fdb entry
865 */ 883 */
866 if (rd && !list_is_singular(&f->remotes)) { 884 if (rd && !list_is_singular(&f->remotes)) {
867 list_del_rcu(&rd->list); 885 vxlan_fdb_dst_destroy(vxlan, f, rd);
868 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
869 kfree_rcu(rd, rcu);
870 goto out; 886 goto out;
871 } 887 }
872 888
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
1067 rcu_assign_pointer(vxlan->vn4_sock, NULL); 1083 rcu_assign_pointer(vxlan->vn4_sock, NULL);
1068 synchronize_net(); 1084 synchronize_net();
1069 1085
1086 vxlan_vs_del_dev(vxlan);
1087
1070 if (__vxlan_sock_release_prep(sock4)) { 1088 if (__vxlan_sock_release_prep(sock4)) {
1071 udp_tunnel_sock_release(sock4->sock); 1089 udp_tunnel_sock_release(sock4->sock);
1072 kfree(sock4); 1090 kfree(sock4);
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
2342 mod_timer(&vxlan->age_timer, next_timer); 2360 mod_timer(&vxlan->age_timer, next_timer);
2343} 2361}
2344 2362
2363static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
2364{
2365 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2366
2367 spin_lock(&vn->sock_lock);
2368 hlist_del_init_rcu(&vxlan->hlist);
2369 spin_unlock(&vn->sock_lock);
2370}
2371
2345static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) 2372static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2346{ 2373{
2347 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2374 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3286static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3313static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3287{ 3314{
3288 struct vxlan_dev *vxlan = netdev_priv(dev); 3315 struct vxlan_dev *vxlan = netdev_priv(dev);
3289 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3290 3316
3291 vxlan_flush(vxlan, true); 3317 vxlan_flush(vxlan, true);
3292 3318
3293 spin_lock(&vn->sock_lock);
3294 if (!hlist_unhashed(&vxlan->hlist))
3295 hlist_del_rcu(&vxlan->hlist);
3296 spin_unlock(&vn->sock_lock);
3297
3298 gro_cells_destroy(&vxlan->gro_cells); 3319 gro_cells_destroy(&vxlan->gro_cells);
3299 list_del(&vxlan->next); 3320 list_del(&vxlan->next);
3300 unregister_netdevice_queue(dev, head); 3321 unregister_netdevice_queue(dev, head);
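The vxlan change swaps kfree_rcu() for call_rcu() with an explicit callback so the rdst's dst_cache can be torn down once readers are gone, before the memory is released. The general kernel pattern, sketched here as a fragment with a hypothetical structure (not vxlan's):

#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct remote {
	struct list_head list;
	struct rcu_head rcu;
	/* ... per-remote state that needs its own cleanup ... */
};

static void remote_free_rcu(struct rcu_head *head)
{
	struct remote *r = container_of(head, struct remote, rcu);

	/* extra teardown that kfree_rcu() could not perform */
	kfree(r);
}

static void remote_del(struct remote *r)
{
	list_del_rcu(&r->list);			/* unlink under the writer lock */
	call_rcu(&r->rcu, remote_free_rcu);	/* free only after readers drop out */
}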
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index d5e993dc9b23..517a315e259b 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
1271 qcom_smem_state_put(wcn->tx_enable_state); 1271 qcom_smem_state_put(wcn->tx_enable_state);
1272 qcom_smem_state_put(wcn->tx_rings_empty_state); 1272 qcom_smem_state_put(wcn->tx_rings_empty_state);
1273 1273
1274 rpmsg_destroy_ept(wcn->smd_channel);
1275
1274 iounmap(wcn->dxe_base); 1276 iounmap(wcn->dxe_base);
1275 iounmap(wcn->ccu_base); 1277 iounmap(wcn->ccu_base);
1276 1278
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fc64b8913aa6..e03450059b06 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
3422 /* otherwise, set txglomalign */ 3422 /* otherwise, set txglomalign */
3423 value = sdiodev->settings->bus.sdio.sd_sgentry_align; 3423 value = sdiodev->settings->bus.sdio.sd_sgentry_align;
3424 /* SDIO ADMA requires at least 32 bit alignment */ 3424 /* SDIO ADMA requires at least 32 bit alignment */
3425 value = max_t(u32, value, 4); 3425 value = max_t(u32, value, ALIGNMENT);
3426 err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, 3426 err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
3427 sizeof(u32)); 3427 sizeof(u32));
3428 } 3428 }
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 3b3e076571d6..45e2efc70d19 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -79,8 +79,8 @@
79/* Lowest firmware API version supported */ 79/* Lowest firmware API version supported */
80#define IWL7260_UCODE_API_MIN 17 80#define IWL7260_UCODE_API_MIN 17
81#define IWL7265_UCODE_API_MIN 17 81#define IWL7265_UCODE_API_MIN 17
82#define IWL7265D_UCODE_API_MIN 17 82#define IWL7265D_UCODE_API_MIN 22
83#define IWL3168_UCODE_API_MIN 20 83#define IWL3168_UCODE_API_MIN 22
84 84
85/* NVM versions */ 85/* NVM versions */
86#define IWL7260_NVM_VERSION 0x0a1d 86#define IWL7260_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index b9718c0cf174..89137717c1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -74,8 +74,8 @@
74#define IWL8265_UCODE_API_MAX 30 74#define IWL8265_UCODE_API_MAX 30
75 75
76/* Lowest firmware API version supported */ 76/* Lowest firmware API version supported */
77#define IWL8000_UCODE_API_MIN 17 77#define IWL8000_UCODE_API_MIN 22
78#define IWL8265_UCODE_API_MIN 20 78#define IWL8265_UCODE_API_MIN 22
79 79
80/* NVM versions */ 80/* NVM versions */
81#define IWL8000_NVM_VERSION 0x0a1d 81#define IWL8000_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 306bc967742e..77efbb78e867 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -370,6 +370,7 @@
370#define MON_DMARB_RD_DATA_ADDR (0xa03c5c) 370#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
371 371
372#define DBGC_IN_SAMPLE (0xa03c00) 372#define DBGC_IN_SAMPLE (0xa03c00)
373#define DBGC_OUT_CTRL (0xa03c0c)
373 374
374/* enable the ID buf for read */ 375/* enable the ID buf for read */
375#define WFPM_PS_CTL_CLR 0xA0300C 376#define WFPM_PS_CTL_CLR 0xA0300C
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index 1b7d265ffb0a..a10c6aae9ab9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -307,6 +307,11 @@ enum {
307/* Bit 1-3: LQ command color. Used to match responses to LQ commands */ 307/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
308#define LQ_FLAG_COLOR_POS 1 308#define LQ_FLAG_COLOR_POS 1
309#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) 309#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
310#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\
311 LQ_FLAG_COLOR_POS)
312#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
313 LQ_FLAG_COLOR_MSK)
314#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
310 315
311/* Bit 4-5: Tx RTS BW Signalling 316/* Bit 4-5: Tx RTS BW Signalling
312 * (0) No RTS BW signalling 317 * (0) No RTS BW signalling
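The LQ color is a 3-bit counter kept in bits 1-3 of the LQ command flags; LQ_FLAGS_COLOR_INC() returns the incremented value already shifted into place and masked, so it wraps from 7 back to 0 and can be OR-ed in with LQ_FLAG_COLOR_SET(). A quick userspace check of that wrap, with the macros copied from the hunk above:

#include <stdio.h>

#define LQ_FLAG_COLOR_POS	1
#define LQ_FLAG_COLOR_MSK	(7 << LQ_FLAG_COLOR_POS)
#define LQ_FLAG_COLOR_GET(_f)	(((_f) & LQ_FLAG_COLOR_MSK) >> LQ_FLAG_COLOR_POS)
#define LQ_FLAGS_COLOR_INC(_c)	((((_c) + 1) << LQ_FLAG_COLOR_POS) & LQ_FLAG_COLOR_MSK)
#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))

int main(void)
{
	unsigned int flags = LQ_FLAG_COLOR_MSK;		/* color currently 7 */
	unsigned int color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(flags));

	flags = LQ_FLAG_COLOR_SET(flags, color);
	printf("new color = %u\n", LQ_FLAG_COLOR_GET(flags));	/* wraps to 0 */
	return 0;
}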
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 81b98915b1a4..1360ebfdc51b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -519,8 +519,11 @@ struct agg_tx_status {
519 * bit-7 invalid rate indication 519 * bit-7 invalid rate indication
520 */ 520 */
521#define TX_RES_INIT_RATE_INDEX_MSK 0x0f 521#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
522#define TX_RES_RATE_TABLE_COLOR_POS 4
522#define TX_RES_RATE_TABLE_COLOR_MSK 0x70 523#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
523#define TX_RES_INV_RATE_INDEX_MSK 0x80 524#define TX_RES_INV_RATE_INDEX_MSK 0x80
525#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
526 TX_RES_RATE_TABLE_COLOR_POS)
524 527
525#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) 528#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
526#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) 529#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 7b86a4f1b574..c8712e6eea74 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
1002 return 0; 1002 return 0;
1003} 1003}
1004 1004
1005static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
1006{
1007 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
1008 iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
1009 else
1010 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
1011}
1012
1013int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) 1005int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
1014{ 1006{
1015 u8 *ptr; 1007 u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
1023 /* EARLY START - firmware's configuration is hard coded */ 1015 /* EARLY START - firmware's configuration is hard coded */
1024 if ((!mvm->fw->dbg_conf_tlv[conf_id] || 1016 if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
1025 !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && 1017 !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
1026 conf_id == FW_DBG_START_FROM_ALIVE) { 1018 conf_id == FW_DBG_START_FROM_ALIVE)
1027 iwl_mvm_restart_early_start(mvm);
1028 return 0; 1019 return 0;
1029 }
1030 1020
1031 if (!mvm->fw->dbg_conf_tlv[conf_id]) 1021 if (!mvm->fw->dbg_conf_tlv[conf_id])
1032 return -EINVAL; 1022 return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 0f1831b41915..fd2fc46e2fe5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
1040 struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; 1040 struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
1041 struct iwl_mac_beacon_cmd_v7 beacon_cmd; 1041 struct iwl_mac_beacon_cmd_v7 beacon_cmd;
1042 } u = {}; 1042 } u = {};
1043 struct iwl_mac_beacon_cmd beacon_cmd; 1043 struct iwl_mac_beacon_cmd beacon_cmd = {};
1044 struct ieee80211_tx_info *info; 1044 struct ieee80211_tx_info *info;
1045 u32 beacon_skb_len; 1045 u32 beacon_skb_len;
1046 u32 rate, tx_flags; 1046 u32 rate, tx_flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4e74a6b90e70..52f8d7a6a7dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
1730 */ 1730 */
1731static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) 1731static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
1732{ 1732{
1733 u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
1734 IWL_MVM_CMD_QUEUE;
1735
1733 return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & 1736 return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
1734 ~BIT(IWL_MVM_CMD_QUEUE)); 1737 ~BIT(cmd_queue));
1735} 1738}
1736 1739
1737static inline 1740static inline
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1753 if (!iwl_mvm_has_new_tx_api(mvm)) 1756 if (!iwl_mvm_has_new_tx_api(mvm))
1754 iwl_free_fw_paging(mvm); 1757 iwl_free_fw_paging(mvm);
1755 mvm->ucode_loaded = false; 1758 mvm->ucode_loaded = false;
1759 mvm->fw_dbg_conf = FW_DBG_INVALID;
1756 iwl_trans_stop_device(mvm->trans); 1760 iwl_trans_stop_device(mvm->trans);
1757} 1761}
1758 1762
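In iwl_mvm_flushable_queues() the first hunk keeps the same mask arithmetic but picks which command-queue bit to clear depending on whether DQA is in use. Isolated, the expression is just an all-queues bitmap minus one bit; a tiny sketch with made-up queue numbers:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int num_of_queues = 8;		/* illustrative */
	unsigned int cmd_queue = 4;		/* e.g. the DQA command queue */
	uint32_t flushable = (BIT(num_of_queues) - 1) & ~BIT(cmd_queue);

	printf("flushable mask = 0x%02x\n", (unsigned)flushable);	/* 0xef: queues 0-7 minus 4 */
	return 0;
}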
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 9ffff6ed8133..3da5ec40aaea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
1149 1149
1150 mutex_lock(&mvm->mutex); 1150 mutex_lock(&mvm->mutex);
1151 1151
1152 /* stop recording */
1153 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1152 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1153 /* stop recording */
1154 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); 1154 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
1155
1156 iwl_mvm_fw_error_dump(mvm);
1157
1158 /* start recording again if the firmware is not crashed */
1159 if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
1160 mvm->fw->dbg_dest_tlv)
1161 iwl_clear_bits_prph(mvm->trans,
1162 MON_BUFF_SAMPLE_CTL, 0x100);
1155 } else { 1163 } else {
1164 u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
1165 u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
1166
1167 /* stop recording */
1156 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); 1168 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
1157 /* wait before we collect the data till the DBGC stop */
1158 udelay(100); 1169 udelay(100);
1159 } 1170 iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
 1171 /* wait for the DBGC to stop before we collect the data */
1172 udelay(500);
1160 1173
1161 iwl_mvm_fw_error_dump(mvm); 1174 iwl_mvm_fw_error_dump(mvm);
1162 1175
1163 /* start recording again if the firmware is not crashed */ 1176 /* start recording again if the firmware is not crashed */
1164 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && 1177 if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
1165 mvm->fw->dbg_dest_tlv && 1178 mvm->fw->dbg_dest_tlv) {
1166 iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); 1179 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
1180 iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
1181 }
1182 }
1167 1183
1168 mutex_unlock(&mvm->mutex); 1184 mutex_unlock(&mvm->mutex);
1169 1185
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 7788eefcd2bd..aa785cf3cf68 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 Intel Deutschland GmbH 5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as 8 * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
1083 rs_get_lower_rate_in_column(lq_sta, rate); 1083 rs_get_lower_rate_in_column(lq_sta, rate);
1084} 1084}
1085 1085
1086/* Check if both rates are identical
1087 * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
1088 * with a rate indicating STBC/BFER and ANT_AB.
1089 */
1090static inline bool rs_rate_equal(struct rs_rate *a,
1091 struct rs_rate *b,
1092 bool allow_ant_mismatch)
1093
1094{
1095 bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
1096 (a->bfer == b->bfer);
1097
1098 if (allow_ant_mismatch) {
1099 if (a->stbc || a->bfer) {
1100 WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
1101 a->stbc, a->bfer, a->ant);
1102 ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
1103 } else if (b->stbc || b->bfer) {
1104 WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
1105 b->stbc, b->bfer, b->ant);
1106 ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
1107 }
1108 }
1109
1110 return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
1111 (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
1112}
1113
1114/* Check if both rates share the same column */ 1086/* Check if both rates share the same column */
1115static inline bool rs_rate_column_match(struct rs_rate *a, 1087static inline bool rs_rate_column_match(struct rs_rate *a,
1116 struct rs_rate *b) 1088 struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1182 u32 lq_hwrate; 1154 u32 lq_hwrate;
1183 struct rs_rate lq_rate, tx_resp_rate; 1155 struct rs_rate lq_rate, tx_resp_rate;
1184 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 1156 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
1185 u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; 1157 u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
1158 u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
1159 u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
1186 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; 1160 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
1187 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1161 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1188 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; 1162 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
1189 bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
1190 IWL_UCODE_TLV_API_LQ_SS_PARAMS);
1191 1163
1192 /* Treat uninitialized rate scaling data same as non-existing. */ 1164 /* Treat uninitialized rate scaling data same as non-existing. */
1193 if (!lq_sta) { 1165 if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1262 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); 1234 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
1263 1235
1264 /* Here we actually compare this rate to the latest LQ command */ 1236 /* Here we actually compare this rate to the latest LQ command */
1265 if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { 1237 if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
1266 IWL_DEBUG_RATE(mvm, 1238 IWL_DEBUG_RATE(mvm,
1267 "initial tx resp rate 0x%x does not match 0x%x\n", 1239 "tx resp color 0x%x does not match 0x%x\n",
1268 tx_resp_hwrate, lq_hwrate); 1240 lq_color, LQ_FLAG_COLOR_GET(table->flags));
1269 1241
1270 /* 1242 /*
1271 * Since rates mis-match, the last LQ command may have failed. 1243 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
3326 u8 valid_tx_ant = 0; 3298 u8 valid_tx_ant = 0;
3327 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; 3299 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
3328 bool toggle_ant = false; 3300 bool toggle_ant = false;
3301 u32 color;
3329 3302
3330 memcpy(&rate, initial_rate, sizeof(rate)); 3303 memcpy(&rate, initial_rate, sizeof(rate));
3331 3304
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
3380 num_rates, num_retries, valid_tx_ant, 3353 num_rates, num_retries, valid_tx_ant,
3381 toggle_ant); 3354 toggle_ant);
3382 3355
3356 /* update the color of the LQ command (as a counter at bits 1-3) */
3357 color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
3358 lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
3383} 3359}
3384 3360
3385struct rs_bfer_active_iter_data { 3361struct rs_bfer_active_iter_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index ee207f2c0a90..3abde1cb0303 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2017 Intel Deutschland GmbH
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as 8 * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
357 } pers; 358 } pers;
358}; 359};
359 360
361/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
362 * Note, it's iwlmvm <-> mac80211 interface.
363 * bits 0-7: reduced tx power
364 * bits 8-10: LQ command's color
365 */
366#define RS_DRV_DATA_TXP_MSK 0xff
367#define RS_DRV_DATA_LQ_COLOR_POS 8
368#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
369#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
370 RS_DRV_DATA_LQ_COLOR_POS)
371#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
372 (((uintptr_t)_p) |\
373 ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
374
360/* Initialize station's rate scaling information after adding station */ 375/* Initialize station's rate scaling information after adding station */
361void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 376void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
362 enum nl80211_band band, bool init); 377 enum nl80211_band band, bool init);
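The RS_DRV_DATA_* macros let the driver carry both the reduced tx power (bits 0-7) and the LQ command color (bits 8-10) through the single pointer-sized status_driver_data[0] slot that rs.c later unpacks. A standalone round trip of that bit layout, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout added above: bits 0-7 reduced txp, bits 8-10 LQ color. */
#define RS_DRV_DATA_TXP_MSK		0xff
#define RS_DRV_DATA_LQ_COLOR_POS	8
#define RS_DRV_DATA_LQ_COLOR_MSK	(7 << RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_LQ_COLOR_GET(_f)	(((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >> \
					 RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_PACK(_c, _p)	((void *)(uintptr_t)\
					 (((uintptr_t)(_p)) | \
					  ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))

int main(void)
{
	void *slot = RS_DRV_DATA_PACK(5, 23);	/* color 5, reduced txp 23 */
	uint32_t v = (uint32_t)(uintptr_t)slot;

	printf("color=%u txp=%u\n",
	       (unsigned)RS_DRV_DATA_LQ_COLOR_GET(v),
	       (unsigned)(v & RS_DRV_DATA_TXP_MSK));
	return 0;
}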
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f5c786ddc526..614d67810d05 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2120 if (!iwl_mvm_is_dqa_supported(mvm)) 2120 if (!iwl_mvm_is_dqa_supported(mvm))
2121 return 0; 2121 return 0;
2122 2122
2123 if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) 2123 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2124 vif->type != NL80211_IFTYPE_ADHOC))
2124 return -ENOTSUPP; 2125 return -ENOTSUPP;
2125 2126
2126 /* 2127 /*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2155 mvmvif->cab_queue = queue; 2156 mvmvif->cab_queue = queue;
2156 } else if (!fw_has_api(&mvm->fw->ucode_capa, 2157 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2157 IWL_UCODE_TLV_API_STA_TYPE)) { 2158 IWL_UCODE_TLV_API_STA_TYPE)) {
2159 /*
2160 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2161 * invalid, so make sure we use the queue we want.
2162 * Note that this is done here as we want to avoid making DQA
2163 * changes in mac80211 layer.
2164 */
2165 if (vif->type == NL80211_IFTYPE_ADHOC) {
2166 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2167 mvmvif->cab_queue = vif->cab_queue;
2168 }
2158 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, 2169 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2159 &cfg, timeout); 2170 &cfg, timeout);
2160 } 2171 }
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3321 3332
3322 /* Get the station from the mvm local station table */ 3333 /* Get the station from the mvm local station table */
3323 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3334 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3324 if (!mvm_sta) { 3335 if (mvm_sta)
3325 IWL_ERR(mvm, "Failed to find station\n"); 3336 sta_id = mvm_sta->sta_id;
3326 return -EINVAL;
3327 }
3328 sta_id = mvm_sta->sta_id;
3329 3337
3330 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3338 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3331 keyconf->keyidx, sta_id); 3339 keyconf->keyidx, sta_id);
3332 3340
3333 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3341 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3334 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3342 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3335 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3343 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
3336 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3344 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3337 3345
3338 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { 3346 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 2716cb5483bf..ad62b67dceb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
313 * This is basically (last acked packet++). 313 * This is basically (last acked packet++).
314 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the 314 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
315 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 315 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
316 * @lq_color: the color of the LQ command as it appears in tx response.
316 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. 317 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
317 * @state: state of the BA agreement establishment / tear down. 318 * @state: state of the BA agreement establishment / tear down.
318 * @txq_id: Tx queue used by the BA session / DQA 319 * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
331 u16 next_reclaimed; 332 u16 next_reclaimed;
332 /* The rest is Tx AGG related */ 333 /* The rest is Tx AGG related */
333 u32 rate_n_flags; 334 u32 rate_n_flags;
335 u8 lq_color;
334 bool amsdu_in_ampdu_allowed; 336 bool amsdu_in_ampdu_allowed;
335 enum iwl_mvm_agg_state state; 337 enum iwl_mvm_agg_state state;
336 u16 txq_id; 338 u16 txq_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f9cbd197246f..506d58104e1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
790 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); 790 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
791 int ret; 791 int ret;
792 792
793 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
794 return -EIO;
795
796 mutex_lock(&mvm->mutex); 793 mutex_lock(&mvm->mutex);
797 794
795 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
796 ret = -EIO;
797 goto unlock;
798 }
799
798 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { 800 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
799 ret = -EINVAL; 801 ret = -EINVAL;
800 goto unlock; 802 goto unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bcaceb64a6e8..f21901cd4a4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1323 struct iwl_mvm_sta *mvmsta; 1323 struct iwl_mvm_sta *mvmsta;
1324 struct sk_buff_head skbs; 1324 struct sk_buff_head skbs;
1325 u8 skb_freed = 0; 1325 u8 skb_freed = 0;
1326 u8 lq_color;
1326 u16 next_reclaimed, seq_ctl; 1327 u16 next_reclaimed, seq_ctl;
1327 bool is_ndp = false; 1328 bool is_ndp = false;
1328 1329
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1405 info->status.tx_time = 1406 info->status.tx_time =
1406 le16_to_cpu(tx_resp->wireless_media_time); 1407 le16_to_cpu(tx_resp->wireless_media_time);
1407 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); 1408 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
1409 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1408 info->status.status_driver_data[0] = 1410 info->status.status_driver_data[0] =
1409 (void *)(uintptr_t)tx_resp->reduced_tpc; 1411 RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1410 1412
1411 ieee80211_tx_status(mvm->hw, skb); 1413 ieee80211_tx_status(mvm->hw, skb);
1412 } 1414 }
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1638 le32_to_cpu(tx_resp->initial_rate); 1640 le32_to_cpu(tx_resp->initial_rate);
1639 mvmsta->tid_data[tid].tx_time = 1641 mvmsta->tid_data[tid].tx_time =
1640 le16_to_cpu(tx_resp->wireless_media_time); 1642 le16_to_cpu(tx_resp->wireless_media_time);
1643 mvmsta->tid_data[tid].lq_color =
1644 (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
1645 TX_RES_RATE_TABLE_COLOR_POS;
1641 } 1646 }
1642 1647
1643 rcu_read_unlock(); 1648 rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1707 iwl_mvm_check_ratid_empty(mvm, sta, tid); 1712 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1708 1713
1709 freed = 0; 1714 freed = 0;
1715
1716 /* pack lq color from tid_data along the reduced txp */
1717 ba_info->status.status_driver_data[0] =
1718 RS_DRV_DATA_PACK(tid_data->lq_color,
1719 ba_info->status.status_driver_data[0]);
1710 ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; 1720 ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
1711 1721
1712 skb_queue_walk(&reclaimed_skbs, skb) { 1722 skb_queue_walk(&reclaimed_skbs, skb) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 70acf850a9f1..93cbc7a69bcd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
2803#ifdef CONFIG_PM_SLEEP 2803#ifdef CONFIG_PM_SLEEP
2804static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 2804static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
2805{ 2805{
2806 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2806 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
2807 (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
2807 return iwl_pci_fw_enter_d0i3(trans); 2808 return iwl_pci_fw_enter_d0i3(trans);
2808 2809
2809 return 0; 2810 return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
2811 2812
2812static void iwl_trans_pcie_resume(struct iwl_trans *trans) 2813static void iwl_trans_pcie_resume(struct iwl_trans *trans)
2813{ 2814{
2814 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2815 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
2816 (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
2815 iwl_pci_fw_exit_d0i3(trans); 2817 iwl_pci_fw_exit_d0i3(trans);
2816} 2818}
2817#endif /* CONFIG_PM_SLEEP */ 2819#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9fb46a6f47cf..9c9bfbbabdf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
906 906
907 if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { 907 if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
908 ret = -EINVAL; 908 ret = -EINVAL;
909 goto error; 909 goto error_free_resp;
910 } 910 }
911 911
912 rsp = (void *)hcmd.resp_pkt->data; 912 rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
915 if (qid > ARRAY_SIZE(trans_pcie->txq)) { 915 if (qid > ARRAY_SIZE(trans_pcie->txq)) {
916 WARN_ONCE(1, "queue index %d unsupported", qid); 916 WARN_ONCE(1, "queue index %d unsupported", qid);
917 ret = -EIO; 917 ret = -EIO;
918 goto error; 918 goto error_free_resp;
919 } 919 }
920 920
921 if (test_and_set_bit(qid, trans_pcie->queue_used)) { 921 if (test_and_set_bit(qid, trans_pcie->queue_used)) {
922 WARN_ONCE(1, "queue %d already used", qid); 922 WARN_ONCE(1, "queue %d already used", qid);
923 ret = -EIO; 923 ret = -EIO;
924 goto error; 924 goto error_free_resp;
925 } 925 }
926 926
927 txq->id = qid; 927 txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
934 (txq->write_ptr) | (qid << 16)); 934 (txq->write_ptr) | (qid << 16));
935 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); 935 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
936 936
937 iwl_free_resp(&hcmd);
937 return qid; 938 return qid;
938 939
940error_free_resp:
941 iwl_free_resp(&hcmd);
939error: 942error:
940 iwl_pcie_gen2_txq_free_memory(trans, txq); 943 iwl_pcie_gen2_txq_free_memory(trans, txq);
941 return ret; 944 return ret;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a60926410438..903d5813023a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
56static int nvme_char_major; 56static int nvme_char_major;
57module_param(nvme_char_major, int, 0); 57module_param(nvme_char_major, int, 0);
58 58
59static unsigned long default_ps_max_latency_us = 25000; 59static unsigned long default_ps_max_latency_us = 100000;
60module_param(default_ps_max_latency_us, ulong, 0644); 60module_param(default_ps_max_latency_us, ulong, 0644);
61MODULE_PARM_DESC(default_ps_max_latency_us, 61MODULE_PARM_DESC(default_ps_max_latency_us,
62 "max power saving latency for new devices; use PM QOS to change per device"); 62 "max power saving latency for new devices; use PM QOS to change per device");
@@ -1342,7 +1342,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1342 * transitioning between power states. Therefore, when running 1342 * transitioning between power states. Therefore, when running
1343 * in any given state, we will enter the next lower-power 1343 * in any given state, we will enter the next lower-power
1344 * non-operational state after waiting 50 * (enlat + exlat) 1344 * non-operational state after waiting 50 * (enlat + exlat)
1345 * microseconds, as long as that state's total latency is under 1345 * microseconds, as long as that state's exit latency is under
1346 * the requested maximum latency. 1346 * the requested maximum latency.
1347 * 1347 *
1348 * We will not autonomously enter any non-operational state for 1348 * We will not autonomously enter any non-operational state for
@@ -1387,7 +1387,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1387 * lowest-power state, not the number of states. 1387 * lowest-power state, not the number of states.
1388 */ 1388 */
1389 for (state = (int)ctrl->npss; state >= 0; state--) { 1389 for (state = (int)ctrl->npss; state >= 0; state--) {
1390 u64 total_latency_us, transition_ms; 1390 u64 total_latency_us, exit_latency_us, transition_ms;
1391 1391
1392 if (target) 1392 if (target)
1393 table->entries[state] = target; 1393 table->entries[state] = target;
@@ -1408,12 +1408,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1408 NVME_PS_FLAGS_NON_OP_STATE)) 1408 NVME_PS_FLAGS_NON_OP_STATE))
1409 continue; 1409 continue;
1410 1410
1411 total_latency_us = 1411 exit_latency_us =
1412 (u64)le32_to_cpu(ctrl->psd[state].entry_lat) + 1412 (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
1413 + le32_to_cpu(ctrl->psd[state].exit_lat); 1413 if (exit_latency_us > ctrl->ps_max_latency_us)
1414 if (total_latency_us > ctrl->ps_max_latency_us)
1415 continue; 1414 continue;
1416 1415
1416 total_latency_us =
1417 exit_latency_us +
1418 le32_to_cpu(ctrl->psd[state].entry_lat);
1419
1417 /* 1420 /*
1418 * This state is good. Use it as the APST idle 1421 * This state is good. Use it as the APST idle
1419 * target for higher power states. 1422 * target for higher power states.
@@ -2438,6 +2441,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
2438 struct nvme_ns *ns; 2441 struct nvme_ns *ns;
2439 2442
2440 mutex_lock(&ctrl->namespaces_mutex); 2443 mutex_lock(&ctrl->namespaces_mutex);
2444
2445 /* Forcibly start all queues to avoid having stuck requests */
2446 blk_mq_start_hw_queues(ctrl->admin_q);
2447
2441 list_for_each_entry(ns, &ctrl->namespaces, list) { 2448 list_for_each_entry(ns, &ctrl->namespaces, list) {
2442 /* 2449 /*
2443 * Revalidating a dead namespace sets capacity to 0. This will 2450 * Revalidating a dead namespace sets capacity to 0. This will
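After this change a non-operational power state qualifies as the APST target whenever its exit latency alone fits within ps_max_latency_us (default now 100000 us); the entry latency still contributes to the 50 * (enlat + exlat) idle timer. A much-simplified selection loop under those assumptions, with invented latency numbers:

#include <stdint.h>
#include <stdio.h>

struct ps_desc {
	uint32_t entry_lat_us;
	uint32_t exit_lat_us;
	int non_operational;
};

int main(void)
{
	struct ps_desc psd[] = {
		{ .entry_lat_us = 5,    .exit_lat_us = 5,     .non_operational = 0 },
		{ .entry_lat_us = 400,  .exit_lat_us = 1000,  .non_operational = 1 },
		{ .entry_lat_us = 6000, .exit_lat_us = 60000, .non_operational = 1 },
	};
	uint64_t max_latency_us = 100000;	/* new default */
	int state;

	for (state = 2; state >= 0; state--) {
		uint64_t exit_lat = psd[state].exit_lat_us;
		uint64_t total_lat;

		if (!psd[state].non_operational)
			continue;
		if (exit_lat > max_latency_us)
			continue;		/* only exit latency gates eligibility now */

		total_lat = exit_lat + psd[state].entry_lat_us;
		printf("state %d ok, idle timer = %llu us\n",
		       state, (unsigned long long)(50 * total_lat));
	}
	return 0;
}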
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5b14cbefb724..92964cef0f4b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1139,6 +1139,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1139/* *********************** NVME Ctrl Routines **************************** */ 1139/* *********************** NVME Ctrl Routines **************************** */
1140 1140
1141static void __nvme_fc_final_op_cleanup(struct request *rq); 1141static void __nvme_fc_final_op_cleanup(struct request *rq);
1142static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1142 1143
1143static int 1144static int
1144nvme_fc_reinit_request(void *data, struct request *rq) 1145nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1265,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1265 struct nvme_command *sqe = &op->cmd_iu.sqe; 1266 struct nvme_command *sqe = &op->cmd_iu.sqe;
1266 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1267 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1267 union nvme_result result; 1268 union nvme_result result;
1268 bool complete_rq; 1269 bool complete_rq, terminate_assoc = true;
1269 1270
1270 /* 1271 /*
1271 * WARNING: 1272 * WARNING:
@@ -1294,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1294 * fabricate a CQE, the following fields will not be set as they 1295 * fabricate a CQE, the following fields will not be set as they
1295 * are not referenced: 1296 * are not referenced:
1296 * cqe.sqid, cqe.sqhd, cqe.command_id 1297 * cqe.sqid, cqe.sqhd, cqe.command_id
1298 *
1299 * Failure or error of an individual i/o, in a transport
1300 * detected fashion unrelated to the nvme completion status,
 1301 * potentially causes the initiator and target sides to get out
1302 * of sync on SQ head/tail (aka outstanding io count allowed).
1303 * Per FC-NVME spec, failure of an individual command requires
1304 * the connection to be terminated, which in turn requires the
1305 * association to be terminated.
1297 */ 1306 */
1298 1307
1299 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, 1308 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1359,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1359 goto done; 1368 goto done;
1360 } 1369 }
1361 1370
1371 terminate_assoc = false;
1372
1362done: 1373done:
1363 if (op->flags & FCOP_FLAGS_AEN) { 1374 if (op->flags & FCOP_FLAGS_AEN) {
1364 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 1375 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1366,7 +1377,7 @@ done:
1366 atomic_set(&op->state, FCPOP_STATE_IDLE); 1377 atomic_set(&op->state, FCPOP_STATE_IDLE);
1367 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 1378 op->flags = FCOP_FLAGS_AEN; /* clear other flags */
1368 nvme_fc_ctrl_put(ctrl); 1379 nvme_fc_ctrl_put(ctrl);
1369 return; 1380 goto check_error;
1370 } 1381 }
1371 1382
1372 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); 1383 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
@@ -1379,6 +1390,10 @@ done:
1379 nvme_end_request(rq, status, result); 1390 nvme_end_request(rq, status, result);
1380 } else 1391 } else
1381 __nvme_fc_final_op_cleanup(rq); 1392 __nvme_fc_final_op_cleanup(rq);
1393
1394check_error:
1395 if (terminate_assoc)
1396 nvme_fc_error_recovery(ctrl, "transport detected io error");
1382} 1397}
1383 1398
1384static int 1399static int
@@ -2791,6 +2806,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2791 ctrl->ctrl.opts = NULL; 2806 ctrl->ctrl.opts = NULL;
2792 /* initiate nvme ctrl ref counting teardown */ 2807 /* initiate nvme ctrl ref counting teardown */
2793 nvme_uninit_ctrl(&ctrl->ctrl); 2808 nvme_uninit_ctrl(&ctrl->ctrl);
2809 nvme_put_ctrl(&ctrl->ctrl);
2794 2810
2795 /* as we're past the point where we transition to the ref 2811 /* as we're past the point where we transition to the ref
2796 * counting teardown path, if we return a bad pointer here, 2812 * counting teardown path, if we return a bad pointer here,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d52701df7245..951042a375d6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1367,7 +1367,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1367 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1367 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1368 1368
1369 /* If there is a reset ongoing, we shouldn't reset again. */ 1369 /* If there is a reset ongoing, we shouldn't reset again. */
1370 if (work_busy(&dev->reset_work)) 1370 if (dev->ctrl.state == NVME_CTRL_RESETTING)
1371 return false; 1371 return false;
1372 1372
1373 /* We shouldn't reset unless the controller is on fatal error state 1373 /* We shouldn't reset unless the controller is on fatal error state
@@ -1903,7 +1903,7 @@ static void nvme_reset_work(struct work_struct *work)
1903 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 1903 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
1904 int result = -ENODEV; 1904 int result = -ENODEV;
1905 1905
1906 if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) 1906 if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
1907 goto out; 1907 goto out;
1908 1908
1909 /* 1909 /*
@@ -1913,9 +1913,6 @@ static void nvme_reset_work(struct work_struct *work)
1913 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 1913 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1914 nvme_dev_disable(dev, false); 1914 nvme_dev_disable(dev, false);
1915 1915
1916 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
1917 goto out;
1918
1919 result = nvme_pci_enable(dev); 1916 result = nvme_pci_enable(dev);
1920 if (result) 1917 if (result)
1921 goto out; 1918 goto out;
@@ -2009,8 +2006,8 @@ static int nvme_reset(struct nvme_dev *dev)
2009{ 2006{
2010 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 2007 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
2011 return -ENODEV; 2008 return -ENODEV;
2012 if (work_busy(&dev->reset_work)) 2009 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
2013 return -ENODEV; 2010 return -EBUSY;
2014 if (!queue_work(nvme_workq, &dev->reset_work)) 2011 if (!queue_work(nvme_workq, &dev->reset_work))
2015 return -EBUSY; 2012 return -EBUSY;
2016 return 0; 2013 return 0;
@@ -2136,6 +2133,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2136 if (result) 2133 if (result)
2137 goto release_pools; 2134 goto release_pools;
2138 2135
2136 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
2139 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2137 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2140 2138
2141 queue_work(nvme_workq, &dev->reset_work); 2139 queue_work(nvme_workq, &dev->reset_work);
@@ -2179,6 +2177,7 @@ static void nvme_remove(struct pci_dev *pdev)
2179 2177
2180 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2178 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2181 2179
2180 cancel_work_sync(&dev->reset_work);
2182 pci_set_drvdata(pdev, NULL); 2181 pci_set_drvdata(pdev, NULL);
2183 2182
2184 if (!pci_device_is_present(pdev)) { 2183 if (!pci_device_is_present(pdev)) {
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 28bd255c144d..24397d306d53 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
753 if (ret) 753 if (ret)
754 goto requeue; 754 goto requeue;
755 755
756 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
757
758 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 756 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
759 if (ret) 757 if (ret)
760 goto stop_admin_q; 758 goto requeue;
761 759
762 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 760 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
763 761
764 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 762 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
765 if (ret) 763 if (ret)
766 goto stop_admin_q; 764 goto requeue;
767 765
768 nvme_start_keep_alive(&ctrl->ctrl); 766 nvme_start_keep_alive(&ctrl->ctrl);
769 767
770 if (ctrl->queue_count > 1) { 768 if (ctrl->queue_count > 1) {
771 ret = nvme_rdma_init_io_queues(ctrl); 769 ret = nvme_rdma_init_io_queues(ctrl);
772 if (ret) 770 if (ret)
773 goto stop_admin_q; 771 goto requeue;
774 772
775 ret = nvme_rdma_connect_io_queues(ctrl); 773 ret = nvme_rdma_connect_io_queues(ctrl);
776 if (ret) 774 if (ret)
777 goto stop_admin_q; 775 goto requeue;
778 } 776 }
779 777
780 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 778 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
782 ctrl->ctrl.opts->nr_reconnects = 0; 780 ctrl->ctrl.opts->nr_reconnects = 0;
783 781
784 if (ctrl->queue_count > 1) { 782 if (ctrl->queue_count > 1) {
785 nvme_start_queues(&ctrl->ctrl);
786 nvme_queue_scan(&ctrl->ctrl); 783 nvme_queue_scan(&ctrl->ctrl);
787 nvme_queue_async_events(&ctrl->ctrl); 784 nvme_queue_async_events(&ctrl->ctrl);
788 } 785 }
@@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
791 788
792 return; 789 return;
793 790
794stop_admin_q:
795 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
796requeue: 791requeue:
797 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", 792 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
798 ctrl->ctrl.opts->nr_reconnects); 793 ctrl->ctrl.opts->nr_reconnects);
@@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
823 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 818 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
824 nvme_cancel_request, &ctrl->ctrl); 819 nvme_cancel_request, &ctrl->ctrl);
825 820
821 /*
822 * queues are not alive anymore, so restart the queues to fail fast
823 * new IO
824 */
825 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
826 nvme_start_queues(&ctrl->ctrl);
827
826 nvme_rdma_reconnect_or_remove(ctrl); 828 nvme_rdma_reconnect_or_remove(ctrl);
827} 829}
828 830
@@ -1433,7 +1435,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
1433/* 1435/*
1434 * We cannot accept any other command until the Connect command has completed. 1436 * We cannot accept any other command until the Connect command has completed.
1435 */ 1437 */
1436static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, 1438static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1437 struct request *rq) 1439 struct request *rq)
1438{ 1440{
1439 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1441 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
@@ -1441,11 +1443,22 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1441 1443
1442 if (!blk_rq_is_passthrough(rq) || 1444 if (!blk_rq_is_passthrough(rq) ||
1443 cmd->common.opcode != nvme_fabrics_command || 1445 cmd->common.opcode != nvme_fabrics_command ||
1444 cmd->fabrics.fctype != nvme_fabrics_type_connect) 1446 cmd->fabrics.fctype != nvme_fabrics_type_connect) {
1445 return false; 1447 /*
1448 * reconnecting state means transport disruption, which
1449 * can take a long time and might even fail permanently,
1450 * so we can't let incoming I/O be requeued forever.
1451 * Fail it fast to allow upper layers a chance to
1452 * failover.
1453 */
1454 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
1455 return -EIO;
1456 else
1457 return -EAGAIN;
1458 }
1446 } 1459 }
1447 1460
1448 return true; 1461 return 0;
1449} 1462}
1450 1463
1451static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1464static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1463,8 +1476,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1463 1476
1464 WARN_ON_ONCE(rq->tag < 0); 1477 WARN_ON_ONCE(rq->tag < 0);
1465 1478
1466 if (!nvme_rdma_queue_is_ready(queue, rq)) 1479 ret = nvme_rdma_queue_is_ready(queue, rq);
1467 return BLK_MQ_RQ_QUEUE_BUSY; 1480 if (unlikely(ret))
1481 goto err;
1468 1482
1469 dev = queue->device->dev; 1483 dev = queue->device->dev;
1470 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1484 ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 9416d052cb89..28c38c756f92 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -144,8 +144,8 @@ int of_dma_configure(struct device *dev, struct device_node *np)
144 coherent ? " " : " not "); 144 coherent ? " " : " not ");
145 145
146 iommu = of_iommu_configure(dev, np); 146 iommu = of_iommu_configure(dev, np);
147 if (IS_ERR(iommu)) 147 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
148 return PTR_ERR(iommu); 148 return -EPROBE_DEFER;
149 149
150 dev_dbg(dev, "device is%sbehind an iommu\n", 150 dev_dbg(dev, "device is%sbehind an iommu\n",
151 iommu ? " " : " not "); 151 iommu ? " " : " not ");
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 35ce53edabf9..d5e5229308f2 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
155} 155}
156 156
157postcore_initcall(hi6220_reset_init); 157postcore_initcall(hi6220_reset_init);
158
159MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 8ea01904c0ea..466517c7c8e6 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
19 19
20obj-$(CONFIG_VIDEO_LM3554) += lm3554.o 20obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
21 21
22ccflags-y += -Werror
23
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
index 1d7f7ab94cac..6b13a3a66e49 100644
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ b/drivers/staging/media/atomisp/i2c/imx/Makefile
@@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o
4 4
5ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o 5ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
6obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o 6obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
7
8ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index fceb9e9b881b..c9c0e1245858 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,3 +1 @@
1obj-$(CONFIG_VIDEO_OV5693) += ov5693.o 1obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
2
3ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 3fa7c1c1479f..f126a89a08e9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
351DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 351DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
352DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 352DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
353 353
354ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror 354ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
355 355
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 7a92a5e1d40c..feca75b07fdd 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
362 st->global_error = 1; 362 st->global_error = 1;
363 } 363 }
364 } 364 }
365 st->va += PAGE_SIZE * nr; 365 st->va += XEN_PAGE_SIZE * nr;
366 st->index += nr; 366 st->index += nr / XEN_PFN_PER_PAGE;
367 367
368 return 0; 368 return 0;
369} 369}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 643c70d2b2e6..4f8f75d9e839 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2563,7 +2563,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
2563static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, 2563static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
2564 unsigned num_items) 2564 unsigned num_items)
2565{ 2565{
2566 return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 2566 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
2567} 2567}
2568 2568
2569/* 2569/*
@@ -2573,7 +2573,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
2573static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, 2573static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
2574 unsigned num_items) 2574 unsigned num_items)
2575{ 2575{
2576 return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; 2576 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
2577} 2577}
2578 2578
2579int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 2579int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 60a750678a82..c24d615e3d7f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -468,7 +468,7 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
468 468
469 if (btrfs_dir_name_len(leaf, dir_item) > namelen) { 469 if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
470 btrfs_crit(fs_info, "invalid dir item name len: %u", 470 btrfs_crit(fs_info, "invalid dir item name len: %u",
471 (unsigned)btrfs_dir_data_len(leaf, dir_item)); 471 (unsigned)btrfs_dir_name_len(leaf, dir_item));
472 return 1; 472 return 1;
473 } 473 }
474 474
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8685d67185d0..5f678dcb20e6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3467,10 +3467,12 @@ static int write_dev_supers(struct btrfs_device *device,
3467 * we fua the first super. The others we allow 3467 * we fua the first super. The others we allow
3468 * to go down lazy. 3468 * to go down lazy.
3469 */ 3469 */
3470 if (i == 0) 3470 if (i == 0) {
3471 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh); 3471 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3472 else 3472 REQ_SYNC | REQ_FUA, bh);
3473 } else {
3473 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); 3474 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
3475 }
3474 if (ret) 3476 if (ret)
3475 errors++; 3477 errors++;
3476 } 3478 }
@@ -3535,7 +3537,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
3535 3537
3536 bio->bi_end_io = btrfs_end_empty_barrier; 3538 bio->bi_end_io = btrfs_end_empty_barrier;
3537 bio->bi_bdev = device->bdev; 3539 bio->bi_bdev = device->bdev;
3538 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 3540 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3539 init_completion(&device->flush_wait); 3541 init_completion(&device->flush_wait);
3540 bio->bi_private = &device->flush_wait; 3542 bio->bi_private = &device->flush_wait;
3541 device->flush_bio = bio; 3543 device->flush_bio = bio;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e390451c72e6..33d979e9ea2a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3993,6 +3993,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3993 info->space_info_kobj, "%s", 3993 info->space_info_kobj, "%s",
3994 alloc_name(found->flags)); 3994 alloc_name(found->flags));
3995 if (ret) { 3995 if (ret) {
3996 percpu_counter_destroy(&found->total_bytes_pinned);
3996 kfree(found); 3997 kfree(found);
3997 return ret; 3998 return ret;
3998 } 3999 }
@@ -4844,7 +4845,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
4844 spin_unlock(&delayed_rsv->lock); 4845 spin_unlock(&delayed_rsv->lock);
4845 4846
4846commit: 4847commit:
4847 trans = btrfs_join_transaction(fs_info->fs_root); 4848 trans = btrfs_join_transaction(fs_info->extent_root);
4848 if (IS_ERR(trans)) 4849 if (IS_ERR(trans))
4849 return -ENOSPC; 4850 return -ENOSPC;
4850 4851
@@ -4862,7 +4863,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
4862 struct btrfs_space_info *space_info, u64 num_bytes, 4863 struct btrfs_space_info *space_info, u64 num_bytes,
4863 u64 orig_bytes, int state) 4864 u64 orig_bytes, int state)
4864{ 4865{
4865 struct btrfs_root *root = fs_info->fs_root; 4866 struct btrfs_root *root = fs_info->extent_root;
4866 struct btrfs_trans_handle *trans; 4867 struct btrfs_trans_handle *trans;
4867 int nr; 4868 int nr;
4868 int ret = 0; 4869 int ret = 0;
@@ -5062,7 +5063,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5062 int flush_state = FLUSH_DELAYED_ITEMS_NR; 5063 int flush_state = FLUSH_DELAYED_ITEMS_NR;
5063 5064
5064 spin_lock(&space_info->lock); 5065 spin_lock(&space_info->lock);
5065 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root, 5066 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
5066 space_info); 5067 space_info);
5067 if (!to_reclaim) { 5068 if (!to_reclaim) {
5068 spin_unlock(&space_info->lock); 5069 spin_unlock(&space_info->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d8da3edf2ac3..d3619e010005 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2458,7 +2458,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2458 if (!uptodate) { 2458 if (!uptodate) {
2459 ClearPageUptodate(page); 2459 ClearPageUptodate(page);
2460 SetPageError(page); 2460 SetPageError(page);
2461 ret = ret < 0 ? ret : -EIO; 2461 ret = err < 0 ? err : -EIO;
2462 mapping_set_error(page->mapping, ret); 2462 mapping_set_error(page->mapping, ret);
2463 } 2463 }
2464} 2464}
@@ -4377,6 +4377,123 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
4377 return NULL; 4377 return NULL;
4378} 4378}
4379 4379
4380/*
4381 * To cache the previous fiemap extent
4382 *
4383 * Will be used for merging fiemap extents
4384 */
4385struct fiemap_cache {
4386 u64 offset;
4387 u64 phys;
4388 u64 len;
4389 u32 flags;
4390 bool cached;
4391};
4392
4393/*
4394 * Helper to submit fiemap extent.
4395 *
4396 * Will try to merge the current fiemap extent, specified by @offset,
4397 * @phys, @len and @flags, with the cached one.
4398 * Only when we fail to merge will the cached one be submitted as a
4399 * fiemap extent.
4400 *
4401 * Return value is the same as fiemap_fill_next_extent().
4402 */
4403static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4404 struct fiemap_cache *cache,
4405 u64 offset, u64 phys, u64 len, u32 flags)
4406{
4407 int ret = 0;
4408
4409 if (!cache->cached)
4410 goto assign;
4411
4412 /*
4413 * Sanity check: extent_fiemap() should have ensured that the new
4414 * fiemap extent won't overlap with the cached one.
4415 * Not recoverable.
4416 *
4417 * NOTE: Physical addresses can overlap, due to compression
4418 */
4419 if (cache->offset + cache->len > offset) {
4420 WARN_ON(1);
4421 return -EINVAL;
4422 }
4423
4424 /*
4425 * Only merge fiemap extents if
4426 * 1) Their logical addresses are continuous
4427 *
4428 * 2) Their physical addresses are continuous
4429 * So truly compressed (physical size smaller than logical size)
4430 * extents won't get merged with each other
4431 *
4432 * 3) They share the same flags except FIEMAP_EXTENT_LAST
4433 * So a regular extent won't get merged with a prealloc extent
4434 */
4435 if (cache->offset + cache->len == offset &&
4436 cache->phys + cache->len == phys &&
4437 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4438 (flags & ~FIEMAP_EXTENT_LAST)) {
4439 cache->len += len;
4440 cache->flags |= flags;
4441 goto try_submit_last;
4442 }
4443
4444 /* Not mergeable, need to submit cached one */
4445 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4446 cache->len, cache->flags);
4447 cache->cached = false;
4448 if (ret)
4449 return ret;
4450assign:
4451 cache->cached = true;
4452 cache->offset = offset;
4453 cache->phys = phys;
4454 cache->len = len;
4455 cache->flags = flags;
4456try_submit_last:
4457 if (cache->flags & FIEMAP_EXTENT_LAST) {
4458 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4459 cache->phys, cache->len, cache->flags);
4460 cache->cached = false;
4461 }
4462 return ret;
4463}
4464
4465/*
4466 * Sanity check for fiemap cache
4467 *
4468 * All fiemap cache entries should be submitted by emit_fiemap_extent().
4469 * Iteration should be terminated either by the last fiemap extent or by
4470 * fieinfo->fi_extents_max.
4471 * So no cached fiemap extent should remain.
4472 */
4473static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
4474 struct fiemap_extent_info *fieinfo,
4475 struct fiemap_cache *cache)
4476{
4477 int ret;
4478
4479 if (!cache->cached)
4480 return 0;
4481
4482 /* Small and recoverable problem, only to inform the developer */
4483#ifdef CONFIG_BTRFS_DEBUG
4484 WARN_ON(1);
4485#endif
4486 btrfs_warn(fs_info,
4487 "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
4488 cache->offset, cache->phys, cache->len, cache->flags);
4489 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4490 cache->len, cache->flags);
4491 cache->cached = false;
4492 if (ret > 0)
4493 ret = 0;
4494 return ret;
4495}
4496
4380int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4497int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4381 __u64 start, __u64 len, get_extent_t *get_extent) 4498 __u64 start, __u64 len, get_extent_t *get_extent)
4382{ 4499{
@@ -4394,6 +4511,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4394 struct extent_state *cached_state = NULL; 4511 struct extent_state *cached_state = NULL;
4395 struct btrfs_path *path; 4512 struct btrfs_path *path;
4396 struct btrfs_root *root = BTRFS_I(inode)->root; 4513 struct btrfs_root *root = BTRFS_I(inode)->root;
4514 struct fiemap_cache cache = { 0 };
4397 int end = 0; 4515 int end = 0;
4398 u64 em_start = 0; 4516 u64 em_start = 0;
4399 u64 em_len = 0; 4517 u64 em_len = 0;
@@ -4573,8 +4691,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4573 flags |= FIEMAP_EXTENT_LAST; 4691 flags |= FIEMAP_EXTENT_LAST;
4574 end = 1; 4692 end = 1;
4575 } 4693 }
4576 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 4694 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4577 em_len, flags); 4695 em_len, flags);
4578 if (ret) { 4696 if (ret) {
4579 if (ret == 1) 4697 if (ret == 1)
4580 ret = 0; 4698 ret = 0;
@@ -4582,6 +4700,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4582 } 4700 }
4583 } 4701 }
4584out_free: 4702out_free:
4703 if (!ret)
4704 ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
4585 free_extent_map(em); 4705 free_extent_map(em);
4586out: 4706out:
4587 btrfs_free_path(path); 4707 btrfs_free_path(path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 17cbe9306faf..ef3c98c527c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2952,7 +2952,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2952 2952
2953 ret = test_range_bit(io_tree, ordered_extent->file_offset, 2953 ret = test_range_bit(io_tree, ordered_extent->file_offset,
2954 ordered_extent->file_offset + ordered_extent->len - 1, 2954 ordered_extent->file_offset + ordered_extent->len - 1,
2955 EXTENT_DEFRAG, 1, cached_state); 2955 EXTENT_DEFRAG, 0, cached_state);
2956 if (ret) { 2956 if (ret) {
2957 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2957 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2958 if (0 && last_snapshot >= BTRFS_I(inode)->generation) 2958 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
@@ -7483,8 +7483,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7483 int found = false; 7483 int found = false;
7484 void **pagep = NULL; 7484 void **pagep = NULL;
7485 struct page *page = NULL; 7485 struct page *page = NULL;
7486 int start_idx; 7486 unsigned long start_idx;
7487 int end_idx; 7487 unsigned long end_idx;
7488 7488
7489 start_idx = start >> PAGE_SHIFT; 7489 start_idx = start >> PAGE_SHIFT;
7490 7490
diff --git a/fs/stat.c b/fs/stat.c
index f494b182c7c7..c35610845ab1 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -672,6 +672,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
672 inode->i_bytes -= 512; 672 inode->i_bytes -= 512;
673 } 673 }
674} 674}
675EXPORT_SYMBOL(__inode_add_bytes);
675 676
676void inode_add_bytes(struct inode *inode, loff_t bytes) 677void inode_add_bytes(struct inode *inode, loff_t bytes)
677{ 678{
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index a0376a2c1c29..d642cc0a8271 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -82,7 +82,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
82 ufs_error (sb, "ufs_free_fragments", 82 ufs_error (sb, "ufs_free_fragments",
83 "bit already cleared for fragment %u", i); 83 "bit already cleared for fragment %u", i);
84 } 84 }
85 85
86 inode_sub_bytes(inode, count << uspi->s_fshift);
86 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 87 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
87 uspi->cs_total.cs_nffree += count; 88 uspi->cs_total.cs_nffree += count;
88 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 89 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -184,6 +185,7 @@ do_more:
184 ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); 185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
185 } 186 }
186 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 187 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
188 inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
187 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 189 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
188 ufs_clusteracct (sb, ucpi, blkno, 1); 190 ufs_clusteracct (sb, ucpi, blkno, 1);
189 191
@@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
494 return 0; 496 return 0;
495} 497}
496 498
499static bool try_add_frags(struct inode *inode, unsigned frags)
500{
501 unsigned size = frags * i_blocksize(inode);
502 spin_lock(&inode->i_lock);
503 __inode_add_bytes(inode, size);
504 if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
505 __inode_sub_bytes(inode, size);
506 spin_unlock(&inode->i_lock);
507 return false;
508 }
509 spin_unlock(&inode->i_lock);
510 return true;
511}
512
497static u64 ufs_add_fragments(struct inode *inode, u64 fragment, 513static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
498 unsigned oldcount, unsigned newcount) 514 unsigned oldcount, unsigned newcount)
499{ 515{
@@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
530 for (i = oldcount; i < newcount; i++) 546 for (i = oldcount; i < newcount; i++)
531 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) 547 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
532 return 0; 548 return 0;
549
550 if (!try_add_frags(inode, count))
551 return 0;
533 /* 552 /*
534 * Block can be extended 553 * Block can be extended
535 */ 554 */
@@ -647,6 +666,7 @@ cg_found:
647 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 666 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
648 i = uspi->s_fpb - count; 667 i = uspi->s_fpb - count;
649 668
669 inode_sub_bytes(inode, i << uspi->s_fshift);
650 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 670 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
651 uspi->cs_total.cs_nffree += i; 671 uspi->cs_total.cs_nffree += i;
652 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); 672 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +677,8 @@ cg_found:
657 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 677 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
658 if (result == INVBLOCK) 678 if (result == INVBLOCK)
659 return 0; 679 return 0;
680 if (!try_add_frags(inode, count))
681 return 0;
660 for (i = 0; i < count; i++) 682 for (i = 0; i < count; i++)
661 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); 683 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
662 684
@@ -716,6 +738,8 @@ norot:
716 return INVBLOCK; 738 return INVBLOCK;
717 ucpi->c_rotor = result; 739 ucpi->c_rotor = result;
718gotit: 740gotit:
741 if (!try_add_frags(inode, uspi->s_fpb))
742 return 0;
719 blkno = ufs_fragstoblks(result); 743 blkno = ufs_fragstoblks(result);
720 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 744 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
721 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 745 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7e41aee7b69a..da553ffec85b 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
235 235
236 p = ufs_get_direct_data_ptr(uspi, ufsi, block); 236 p = ufs_get_direct_data_ptr(uspi, ufsi, block);
237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), 237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
238 new_size, err, locked_page); 238 new_size - (lastfrag & uspi->s_fpbmask), err,
239 locked_page);
239 return tmp != 0; 240 return tmp != 0;
240} 241}
241 242
@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
284 goal += uspi->s_fpb; 285 goal += uspi->s_fpb;
285 } 286 }
286 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), 287 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
287 goal, uspi->s_fpb, err, locked_page); 288 goal, nfrags, err, locked_page);
288 289
289 if (!tmp) { 290 if (!tmp) {
290 *err = -ENOSPC; 291 *err = -ENOSPC;
@@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
402 403
403 if (!create) { 404 if (!create) {
404 phys64 = ufs_frag_map(inode, offsets, depth); 405 phys64 = ufs_frag_map(inode, offsets, depth);
405 goto out; 406 if (phys64)
407 map_bh(bh_result, sb, phys64 + frag);
408 return 0;
406 } 409 }
407 410
408 /* This code entered only while writing ....? */ 411 /* This code entered only while writing ....? */
@@ -841,8 +844,11 @@ void ufs_evict_inode(struct inode * inode)
841 truncate_inode_pages_final(&inode->i_data); 844 truncate_inode_pages_final(&inode->i_data);
842 if (want_delete) { 845 if (want_delete) {
843 inode->i_size = 0; 846 inode->i_size = 0;
844 if (inode->i_blocks) 847 if (inode->i_blocks &&
848 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
849 S_ISLNK(inode->i_mode)))
845 ufs_truncate_blocks(inode); 850 ufs_truncate_blocks(inode);
851 ufs_update_inode(inode, inode_needs_sync(inode));
846 } 852 }
847 853
848 invalidate_inode_buffers(inode); 854 invalidate_inode_buffers(inode);
@@ -1100,7 +1106,7 @@ out:
1100 return err; 1106 return err;
1101} 1107}
1102 1108
1103static void __ufs_truncate_blocks(struct inode *inode) 1109static void ufs_truncate_blocks(struct inode *inode)
1104{ 1110{
1105 struct ufs_inode_info *ufsi = UFS_I(inode); 1111 struct ufs_inode_info *ufsi = UFS_I(inode);
1106 struct super_block *sb = inode->i_sb; 1112 struct super_block *sb = inode->i_sb;
@@ -1183,7 +1189,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
1183 1189
1184 truncate_setsize(inode, size); 1190 truncate_setsize(inode, size);
1185 1191
1186 __ufs_truncate_blocks(inode); 1192 ufs_truncate_blocks(inode);
1187 inode->i_mtime = inode->i_ctime = current_time(inode); 1193 inode->i_mtime = inode->i_ctime = current_time(inode);
1188 mark_inode_dirty(inode); 1194 mark_inode_dirty(inode);
1189out: 1195out:
@@ -1191,16 +1197,6 @@ out:
1191 return err; 1197 return err;
1192} 1198}
1193 1199
1194static void ufs_truncate_blocks(struct inode *inode)
1195{
1196 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1197 S_ISLNK(inode->i_mode)))
1198 return;
1199 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1200 return;
1201 __ufs_truncate_blocks(inode);
1202}
1203
1204int ufs_setattr(struct dentry *dentry, struct iattr *attr) 1200int ufs_setattr(struct dentry *dentry, struct iattr *attr)
1205{ 1201{
1206 struct inode *inode = d_inode(dentry); 1202 struct inode *inode = d_inode(dentry);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 29ecaf739449..878cc6264f1a 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb)
746 return; 746 return;
747} 747}
748 748
749static u64 ufs_max_bytes(struct super_block *sb)
750{
751 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
752 int bits = uspi->s_apbshift;
753 u64 res;
754
755 if (bits > 21)
756 res = ~0ULL;
757 else
758 res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
759 (1LL << (3*bits));
760
761 if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
762 return MAX_LFS_FILESIZE;
763 return res << uspi->s_bshift;
764}
765
749static int ufs_fill_super(struct super_block *sb, void *data, int silent) 766static int ufs_fill_super(struct super_block *sb, void *data, int silent)
750{ 767{
751 struct ufs_sb_info * sbi; 768 struct ufs_sb_info * sbi;
@@ -1211,6 +1228,7 @@ magic_found:
1211 "fast symlink size (%u)\n", uspi->s_maxsymlinklen); 1228 "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
1212 uspi->s_maxsymlinklen = maxsymlen; 1229 uspi->s_maxsymlinklen = maxsymlen;
1213 } 1230 }
1231 sb->s_maxbytes = ufs_max_bytes(sb);
1214 sb->s_max_links = UFS_LINK_MAX; 1232 sb->s_max_links = UFS_LINK_MAX;
1215 1233
1216 inode = ufs_iget(sb, UFS_ROOTINO); 1234 inode = ufs_iget(sb, UFS_ROOTINO);
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index b7fbf53dbc81..398019fb1448 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
473static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi, 473static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
474 struct ufs_buffer_head * ubh, unsigned begin, unsigned block) 474 struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
475{ 475{
476 u8 mask;
476 switch (uspi->s_fpb) { 477 switch (uspi->s_fpb) {
477 case 8: 478 case 8:
478 return (*ubh_get_addr (ubh, begin + block) == 0xff); 479 return (*ubh_get_addr (ubh, begin + block) == 0xff);
479 case 4: 480 case 4:
480 return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2))); 481 mask = 0x0f << ((block & 0x01) << 2);
482 return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
481 case 2: 483 case 2:
482 return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1))); 484 mask = 0x03 << ((block & 0x03) << 1);
485 return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
483 case 1: 486 case 1:
484 return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07))); 487 mask = 0x01 << (block & 0x07);
488 return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
485 } 489 }
486 return 0; 490 return 0;
487} 491}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 21745946cae1..ec47101cb1bf 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -48,6 +48,7 @@ enum {
48 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ 48 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
49 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ 49 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
50 CSS_VISIBLE = (1 << 3), /* css is visible to userland */ 50 CSS_VISIBLE = (1 << 3), /* css is visible to userland */
51 CSS_DYING = (1 << 4), /* css is dying */
51}; 52};
52 53
53/* bits in struct cgroup flags field */ 54/* bits in struct cgroup flags field */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed2573e149fa..710a005c6b7a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
344} 344}
345 345
346/** 346/**
347 * css_is_dying - test whether the specified css is dying
348 * @css: target css
349 *
350 * Test whether @css is in the process of offlining or already offline. In
351 * most cases, ->css_online() and ->css_offline() callbacks should be
352 * enough; however, the actual offline operations are RCU delayed and this
353 * test returns %true also when @css is scheduled to be offlined.
354 *
355 * This is useful, for example, when the use case requires synchronous
356 * behavior with respect to cgroup removal. cgroup removal schedules css
357 * offlining but the css can seem alive while the operation is being
358 * delayed. If the delay affects user visible semantics, this test can be
359 * used to resolve the situation.
360 */
361static inline bool css_is_dying(struct cgroup_subsys_state *css)
362{
363 return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
364}
365
366/**
347 * css_put - put a css reference 367 * css_put - put a css reference
348 * @css: target css 368 * @css: target css
349 * 369 *
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de179993e039..ea9126006a69 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,3 +15,10 @@
15 * with any version that can compile the kernel 15 * with any version that can compile the kernel
16 */ 16 */
17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) 17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
18
19/*
20 * GCC does not warn about unused static inline functions for
21 * -Wunused-function. This turns out to avoid the need for complex #ifdef
22 * directives. Suppress the warning in clang as well.
23 */
24#define inline inline __attribute__((unused))
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 4eac2670bfa1..92f20832fd28 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
78 78
79struct iommu_domain; 79struct iommu_domain;
80struct msi_msg; 80struct msi_msg;
81struct device;
81 82
82static inline int iommu_dma_init(void) 83static inline int iommu_dma_init(void)
83{ 84{
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 9ec5e22846e0..0e306c5a86d6 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -153,7 +153,7 @@ struct elevator_type
153#endif 153#endif
154 154
155 /* managed by elevator core */ 155 /* managed by elevator core */
156 char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ 156 char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
157 struct list_head list; 157 struct list_head list;
158}; 158};
159 159
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4ee8f62ce8d..8e2828d48d7f 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
470 u16 rate_val; 470 u16 rate_val;
471}; 471};
472 472
473struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
473int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 474int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
474 enum mlx4_update_qp_attr attr, 475 enum mlx4_update_qp_attr attr,
475 struct mlx4_update_qp_params *params); 476 struct mlx4_update_qp_params *params);
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 167ad8831aaf..4c1d5f7e62c4 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -172,9 +172,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
172{ 172{
173 int retval; 173 int retval;
174 174
175 preempt_disable();
176 retval = __srcu_read_lock(sp); 175 retval = __srcu_read_lock(sp);
177 preempt_enable();
178 rcu_lock_acquire(&(sp)->dep_map); 176 rcu_lock_acquire(&(sp)->dep_map);
179 return retval; 177 return retval;
180} 178}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0b1cf32edfd7..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,8 +189,6 @@ struct platform_suspend_ops {
189struct platform_freeze_ops { 189struct platform_freeze_ops {
190 int (*begin)(void); 190 int (*begin)(void);
191 int (*prepare)(void); 191 int (*prepare)(void);
192 void (*wake)(void);
193 void (*sync)(void);
194 void (*restore)(void); 192 void (*restore)(void);
195 void (*end)(void); 193 void (*end)(void);
196}; 194};
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
430 428
431extern bool pm_wakeup_pending(void); 429extern bool pm_wakeup_pending(void);
432extern void pm_system_wakeup(void); 430extern void pm_system_wakeup(void);
433extern void pm_system_cancel_wakeup(void); 431extern void pm_wakeup_clear(void);
434extern void pm_wakeup_clear(bool reset);
435extern void pm_system_irq_wakeup(unsigned int irq_number); 432extern void pm_system_irq_wakeup(unsigned int irq_number);
436extern bool pm_get_wakeup_count(unsigned int *count, bool block); 433extern bool pm_get_wakeup_count(unsigned int *count, bool block);
437extern bool pm_save_wakeup_count(unsigned int count); 434extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
481 478
482static inline bool pm_wakeup_pending(void) { return false; } 479static inline bool pm_wakeup_pending(void) { return false; }
483static inline void pm_system_wakeup(void) {} 480static inline void pm_system_wakeup(void) {}
484static inline void pm_wakeup_clear(bool reset) {} 481static inline void pm_wakeup_clear(void) {}
485static inline void pm_system_irq_wakeup(unsigned int irq_number) {} 482static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
486 483
487static inline void lock_system_sleep(void) {} 484static inline void lock_system_sleep(void) {}
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index eb50ce54b759..413335c8cb52 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -29,7 +29,7 @@ struct edid;
29struct cec_adapter; 29struct cec_adapter;
30struct cec_notifier; 30struct cec_notifier;
31 31
32#ifdef CONFIG_MEDIA_CEC_NOTIFIER 32#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
33 33
34/** 34/**
35 * cec_notifier_get - find or create a new cec_notifier for the given device. 35 * cec_notifier_get - find or create a new cec_notifier for the given device.
diff --git a/include/media/cec.h b/include/media/cec.h
index b8eb895731d5..bfa88d4d67e1 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -173,7 +173,7 @@ struct cec_adapter {
173 bool passthrough; 173 bool passthrough;
174 struct cec_log_addrs log_addrs; 174 struct cec_log_addrs log_addrs;
175 175
176#ifdef CONFIG_MEDIA_CEC_NOTIFIER 176#ifdef CONFIG_CEC_NOTIFIER
177 struct cec_notifier *notifier; 177 struct cec_notifier *notifier;
178#endif 178#endif
179 179
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
300 */ 300 */
301int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); 301int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
302 302
303#ifdef CONFIG_MEDIA_CEC_NOTIFIER 303#ifdef CONFIG_CEC_NOTIFIER
304void cec_register_cec_notifier(struct cec_adapter *adap, 304void cec_register_cec_notifier(struct cec_adapter *adap,
305 struct cec_notifier *notifier); 305 struct cec_notifier *notifier);
306#endif 306#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index dbf0abba33b8..3e505bbff8ca 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
1007 */ 1007 */
1008extern const struct proto_ops inet6_stream_ops; 1008extern const struct proto_ops inet6_stream_ops;
1009extern const struct proto_ops inet6_dgram_ops; 1009extern const struct proto_ops inet6_dgram_ops;
1010extern const struct proto_ops inet6_sockraw_ops;
1010 1011
1011struct group_source_req; 1012struct group_source_req;
1012struct group_filter; 1013struct group_filter;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38a7427ae902..be6223c586fa 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
924 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); 924 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
925 /* call when ack arrives (optional) */ 925 /* call when ack arrives (optional) */
926 void (*in_ack_event)(struct sock *sk, u32 flags); 926 void (*in_ack_event)(struct sock *sk, u32 flags);
927 /* new value of cwnd after loss (optional) */ 927 /* new value of cwnd after loss (required) */
928 u32 (*undo_cwnd)(struct sock *sk); 928 u32 (*undo_cwnd)(struct sock *sk);
929 /* hook for packet ack accounting (optional) */ 929 /* hook for packet ack accounting (optional) */
930 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); 930 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index c3c9a0e1b3c9..8d4e85eae42c 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
4265{ 4265{
4266 lockdep_assert_held(&cgroup_mutex); 4266 lockdep_assert_held(&cgroup_mutex);
4267 4267
4268 if (css->flags & CSS_DYING)
4269 return;
4270
4271 css->flags |= CSS_DYING;
4272
4268 /* 4273 /*
4269 * This must happen before css is disassociated with its cgroup. 4274 * This must happen before css is disassociated with its cgroup.
4270 * See seq_css() for details. 4275 * See seq_css() for details.
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index f6501f4f6040..ae643412948a 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -176,9 +176,9 @@ typedef enum {
176} cpuset_flagbits_t; 176} cpuset_flagbits_t;
177 177
178/* convenient tests for these bits */ 178/* convenient tests for these bits */
179static inline bool is_cpuset_online(const struct cpuset *cs) 179static inline bool is_cpuset_online(struct cpuset *cs)
180{ 180{
181 return test_bit(CS_ONLINE, &cs->flags); 181 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
182} 182}
183 183
184static inline int is_cpu_exclusive(const struct cpuset *cs) 184static inline int is_cpu_exclusive(const struct cpuset *cs)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9ae6fbe5b5cf..cb5103413bd8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1658,13 +1658,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
1658 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; 1658 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1659 mutex_unlock(&cpuhp_state_mutex); 1659 mutex_unlock(&cpuhp_state_mutex);
1660 if (ret) 1660 if (ret)
1661 return ret; 1661 goto out;
1662 1662
1663 if (st->state < target) 1663 if (st->state < target)
1664 ret = do_cpu_up(dev->id, target); 1664 ret = do_cpu_up(dev->id, target);
1665 else 1665 else
1666 ret = do_cpu_down(dev->id, target); 1666 ret = do_cpu_down(dev->id, target);
1667 1667out:
1668 unlock_device_hotplug(); 1668 unlock_device_hotplug();
1669 return ret ? ret : count; 1669 return ret ? ret : count;
1670} 1670}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6e75a5c9412d..6c4e523dc1e2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
7316 return __perf_event_account_interrupt(event, 1); 7316 return __perf_event_account_interrupt(event, 1);
7317} 7317}
7318 7318
7319static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
7320{
7321 /*
7322 * Due to interrupt latency (AKA "skid"), we may enter the
7323 * kernel before taking an overflow, even if the PMU is only
7324 * counting user events.
7325 * To avoid leaking information to userspace, we must always
7326 * reject kernel samples when exclude_kernel is set.
7327 */
7328 if (event->attr.exclude_kernel && !user_mode(regs))
7329 return false;
7330
7331 return true;
7332}
7333
7319/* 7334/*
7320 * Generic event overflow handling, sampling. 7335 * Generic event overflow handling, sampling.
7321 */ 7336 */
@@ -7337,6 +7352,12 @@ static int __perf_event_overflow(struct perf_event *event,
7337 ret = __perf_event_account_interrupt(event, throttle); 7352 ret = __perf_event_account_interrupt(event, throttle);
7338 7353
7339 /* 7354 /*
7355 * For security, drop the skid kernel samples if necessary.
7356 */
7357 if (!sample_is_allowed(event, regs))
7358 return ret;
7359
7360 /*
7340 * XXX event_limit might not quite work as expected on inherited 7361 * XXX event_limit might not quite work as expected on inherited
7341 * events 7362 * events
7342 */ 7363 */
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 78672d324a6e..c7209f060eeb 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,7 @@ int freeze_processes(void)
132 if (!pm_freezing) 132 if (!pm_freezing)
133 atomic_inc(&system_freezing_cnt); 133 atomic_inc(&system_freezing_cnt);
134 134
135 pm_wakeup_clear(true); 135 pm_wakeup_clear();
136 pr_info("Freezing user space processes ... "); 136 pr_info("Freezing user space processes ... ");
137 pm_freezing = true; 137 pm_freezing = true;
138 error = try_to_freeze_tasks(true); 138 error = try_to_freeze_tasks(true);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c0248c74d6d4..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -72,8 +72,6 @@ static void freeze_begin(void)
72 72
73static void freeze_enter(void) 73static void freeze_enter(void)
74{ 74{
75 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
76
77 spin_lock_irq(&suspend_freeze_lock); 75 spin_lock_irq(&suspend_freeze_lock);
78 if (pm_wakeup_pending()) 76 if (pm_wakeup_pending())
79 goto out; 77 goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
100 out: 98 out:
101 suspend_freeze_state = FREEZE_STATE_NONE; 99 suspend_freeze_state = FREEZE_STATE_NONE;
102 spin_unlock_irq(&suspend_freeze_lock); 100 spin_unlock_irq(&suspend_freeze_lock);
103
104 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
105}
106
107static void s2idle_loop(void)
108{
109 do {
110 freeze_enter();
111
112 if (freeze_ops && freeze_ops->wake)
113 freeze_ops->wake();
114
115 dpm_resume_noirq(PMSG_RESUME);
116 if (freeze_ops && freeze_ops->sync)
117 freeze_ops->sync();
118
119 if (pm_wakeup_pending())
120 break;
121
122 pm_wakeup_clear(false);
123 } while (!dpm_suspend_noirq(PMSG_SUSPEND));
124} 101}
125 102
126void freeze_wake(void) 103void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
394 * all the devices are suspended. 371 * all the devices are suspended.
395 */ 372 */
396 if (state == PM_SUSPEND_FREEZE) { 373 if (state == PM_SUSPEND_FREEZE) {
397 s2idle_loop(); 374 trace_suspend_resume(TPS("machine_suspend"), state, true);
398 goto Platform_early_resume; 375 freeze_enter();
376 trace_suspend_resume(TPS("machine_suspend"), state, false);
377 goto Platform_wake;
399 } 378 }
400 379
401 error = disable_nonboot_cpus(); 380 error = disable_nonboot_cpus();
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a1aecf44ab07..a1db38abac5b 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
269#define MAX_CMDLINECONSOLES 8 269#define MAX_CMDLINECONSOLES 8
270 270
271static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; 271static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
272static int console_cmdline_cnt;
273 272
274static int preferred_console = -1; 273static int preferred_console = -1;
275int console_set_on_cmdline; 274int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
1906 * See if this tty is not yet registered, and 1905 * See if this tty is not yet registered, and
1907 * if we have a slot free. 1906 * if we have a slot free.
1908 */ 1907 */
1909 for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) { 1908 for (i = 0, c = console_cmdline;
1909 i < MAX_CMDLINECONSOLES && c->name[0];
1910 i++, c++) {
1910 if (strcmp(c->name, name) == 0 && c->index == idx) { 1911 if (strcmp(c->name, name) == 0 && c->index == idx) {
1911 if (brl_options) 1912 if (!brl_options)
1912 return 0; 1913 preferred_console = i;
1913
1914 /*
1915 * Maintain an invariant that will help to find if
1916 * the matching console is preferred, see
1917 * register_console():
1918 *
1919 * The last non-braille console is always
1920 * the preferred one.
1921 */
1922 if (i != console_cmdline_cnt - 1)
1923 swap(console_cmdline[i],
1924 console_cmdline[console_cmdline_cnt - 1]);
1925
1926 preferred_console = console_cmdline_cnt - 1;
1927
1928 return 0; 1914 return 0;
1929 } 1915 }
1930 } 1916 }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
1937 braille_set_options(c, brl_options); 1923 braille_set_options(c, brl_options);
1938 1924
1939 c->index = idx; 1925 c->index = idx;
1940 console_cmdline_cnt++;
1941 return 0; 1926 return 0;
1942} 1927}
1943/* 1928/*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
2477 } 2462 }
2478 2463
2479 /* 2464 /*
2480 * See if this console matches one we selected on the command line. 2465 * See if this console matches one we selected on
2481 * 2466 * the command line.
2482 * There may be several entries in the console_cmdline array matching
2483 * with the same console, one with newcon->match(), another by
2484 * name/index:
2485 *
2486 * pl011,mmio,0x87e024000000,115200 -- added from SPCR
2487 * ttyAMA0 -- added from command line
2488 *
2489 * Traverse the console_cmdline array in reverse order to be
2490 * sure that if this console is preferred then it will be the first
2491 * matching entry. We use the invariant that is maintained in
2492 * __add_preferred_console().
2493 */ 2467 */
2494 for (i = console_cmdline_cnt - 1; i >= 0; i--) { 2468 for (i = 0, c = console_cmdline;
2495 c = console_cmdline + i; 2469 i < MAX_CMDLINECONSOLES && c->name[0];
2496 2470 i++, c++) {
2497 if (!newcon->match || 2471 if (!newcon->match ||
2498 newcon->match(newcon, c->name, c->index, c->options) != 0) { 2472 newcon->match(newcon, c->name, c->index, c->options) != 0) {
2499 /* default matching */ 2473 /* default matching */
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 584d8a983883..dea03614263f 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -263,7 +263,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
263 263
264/* 264/*
265 * Counts the new reader in the appropriate per-CPU element of the 265 * Counts the new reader in the appropriate per-CPU element of the
266 * srcu_struct. Must be called from process context. 266 * srcu_struct.
267 * Returns an index that must be passed to the matching srcu_read_unlock(). 267 * Returns an index that must be passed to the matching srcu_read_unlock().
268 */ 268 */
269int __srcu_read_lock(struct srcu_struct *sp) 269int __srcu_read_lock(struct srcu_struct *sp)
@@ -271,7 +271,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
271 int idx; 271 int idx;
272 272
273 idx = READ_ONCE(sp->completed) & 0x1; 273 idx = READ_ONCE(sp->completed) & 0x1;
274 __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); 274 this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
275 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 275 smp_mb(); /* B */ /* Avoid leaking the critical section. */
276 return idx; 276 return idx;
277} 277}
@@ -281,7 +281,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
281 * Removes the count for the old reader from the appropriate per-CPU 281 * Removes the count for the old reader from the appropriate per-CPU
282 * element of the srcu_struct. Note that this may well be a different 282 * element of the srcu_struct. Note that this may well be a different
283 * CPU than that which was incremented by the corresponding srcu_read_lock(). 283 * CPU than that which was incremented by the corresponding srcu_read_lock().
284 * Must be called from process context.
285 */ 284 */
286void __srcu_read_unlock(struct srcu_struct *sp, int idx) 285void __srcu_read_unlock(struct srcu_struct *sp, int idx)
287{ 286{
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 36e1f82faed1..32798eb14853 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -97,8 +97,9 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
97 97
98/* 98/*
99 * Counts the new reader in the appropriate per-CPU element of the 99 * Counts the new reader in the appropriate per-CPU element of the
100 * srcu_struct. Must be called from process context. 100 * srcu_struct. Can be invoked from irq/bh handlers, but the matching
101 * Returns an index that must be passed to the matching srcu_read_unlock(). 101 * __srcu_read_unlock() must be in the same handler instance. Returns an
102 * index that must be passed to the matching srcu_read_unlock().
102 */ 103 */
103int __srcu_read_lock(struct srcu_struct *sp) 104int __srcu_read_lock(struct srcu_struct *sp)
104{ 105{
@@ -112,7 +113,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
112 113
113/* 114/*
114 * Removes the count for the old reader from the appropriate element of 115 * Removes the count for the old reader from the appropriate element of
115 * the srcu_struct. Must be called from process context. 116 * the srcu_struct.
116 */ 117 */
117void __srcu_read_unlock(struct srcu_struct *sp, int idx) 118void __srcu_read_unlock(struct srcu_struct *sp, int idx)
118{ 119{
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3ae8474557df..157654fa436a 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -357,7 +357,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
357 357
358/* 358/*
359 * Counts the new reader in the appropriate per-CPU element of the 359 * Counts the new reader in the appropriate per-CPU element of the
360 * srcu_struct. Must be called from process context. 360 * srcu_struct.
361 * Returns an index that must be passed to the matching srcu_read_unlock(). 361 * Returns an index that must be passed to the matching srcu_read_unlock().
362 */ 362 */
363int __srcu_read_lock(struct srcu_struct *sp) 363int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +365,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
365 int idx; 365 int idx;
366 366
367 idx = READ_ONCE(sp->srcu_idx) & 0x1; 367 idx = READ_ONCE(sp->srcu_idx) & 0x1;
368 __this_cpu_inc(sp->sda->srcu_lock_count[idx]); 368 this_cpu_inc(sp->sda->srcu_lock_count[idx]);
369 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 369 smp_mb(); /* B */ /* Avoid leaking the critical section. */
370 return idx; 370 return idx;
371} 371}
@@ -375,7 +375,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
375 * Removes the count for the old reader from the appropriate per-CPU 375 * Removes the count for the old reader from the appropriate per-CPU
376 * element of the srcu_struct. Note that this may well be a different 376 * element of the srcu_struct. Note that this may well be a different
377 * CPU than that which was incremented by the corresponding srcu_read_lock(). 377 * CPU than that which was incremented by the corresponding srcu_read_lock().
378 * Must be called from process context.
379 */ 378 */
380void __srcu_read_unlock(struct srcu_struct *sp, int idx) 379void __srcu_read_unlock(struct srcu_struct *sp, int idx)
381{ 380{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 803c3bc274c4..326d4f88e2b1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5605,7 +5605,7 @@ void idle_task_exit(void)
5605 BUG_ON(cpu_online(smp_processor_id())); 5605 BUG_ON(cpu_online(smp_processor_id()));
5606 5606
5607 if (mm != &init_mm) { 5607 if (mm != &init_mm) {
5608 switch_mm_irqs_off(mm, &init_mm, current); 5608 switch_mm(mm, &init_mm, current);
5609 finish_arch_post_lock_switch(); 5609 finish_arch_post_lock_switch();
5610 } 5610 }
5611 mmdrop(mm); 5611 mmdrop(mm);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d71109321841..c77e4b1d51c0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3563,7 +3563,7 @@ static inline void check_schedstat_required(void)
3563 trace_sched_stat_runtime_enabled()) { 3563 trace_sched_stat_runtime_enabled()) {
3564 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3564 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
3565 "stat_blocked and stat_runtime require the " 3565 "stat_blocked and stat_runtime require the "
3566 "kernel parameter schedstats=enabled or " 3566 "kernel parameter schedstats=enable or "
3567 "kernel.sched_schedstats=1\n"); 3567 "kernel.sched_schedstats=1\n");
3568 } 3568 }
3569#endif 3569#endif
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 574f78824d8a..32bd3ead9ba1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
595 err = 0; 595 err = 0;
596 switch (nla_type(attr)) { 596 switch (nla_type(attr)) {
597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO: 597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
598 if (!(p->flags & BR_VLAN_TUNNEL)) 598 if (!p || !(p->flags & BR_VLAN_TUNNEL))
599 return -EINVAL; 599 return -EINVAL;
600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); 600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
601 if (err) 601 if (err)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 0db8102995a5..6f12a5271219 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br)
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
-		mod_timer(&br->hello_timer, jiffies + br->hello_time);
+		if (br->dev->flags & IFF_UP)
+			mod_timer(&br->hello_timer, jiffies + br->hello_time);
 		br_port_state_selection(br);
 	}
 
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b0b87a292e7c..a0adfc31a3fe 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1680,8 +1680,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
 
 	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
 			  &devlink_nl_family, NLM_F_MULTI, cmd);
-	if (!hdr)
+	if (!hdr) {
+		nlmsg_free(skb);
 		return -EMSGSIZE;
+	}
 
 	if (devlink_nl_put_handle(skb, devlink))
 		goto nla_put_failure;
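
Both devlink hunks fix the same leak: when genlmsg_put() cannot start the message header during a dump, the freshly allocated skb must be freed before returning -EMSGSIZE. A hedged sketch of the pattern (example_genl_family, portid, seq and cmd are placeholders, not the driver's symbols):

	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, portid, seq, &example_genl_family, NLM_F_MULTI, cmd);
	if (!hdr) {
		nlmsg_free(skb);	/* don't leak the buffer on the error path */
		return -EMSGSIZE;
	}
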
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e85dfbc..b1be7c01efe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
 	spin_lock_irqsave(&q->lock, flags);
 	skb = __skb_dequeue(q);
-	if (skb && (skb_next = skb_peek(q)))
+	if (skb && (skb_next = skb_peek(q))) {
 		icmp_next = is_icmp_err_skb(skb_next);
+		if (icmp_next)
+			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+	}
 	spin_unlock_irqrestore(&q->lock, flags);
 
 	if (is_icmp_err_skb(skb) && !icmp_next)
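
The skbuff hunk updates sk->sk_err from the next queued ICMP error while the queue lock is still held, so the error state a reader observes matches what is still on the error queue. For context, a rough user-space sketch (assumed usage, not derived from this patch) of draining that queue:

	#include <sys/socket.h>
	#include <sys/uio.h>

	static int drain_one_error(int fd)
	{
		char cbuf[512], data[512];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};

		/* each call dequeues one error skb; sk_err drives what poll()/send() report next */
		return recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT);
	}
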
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae438da..90038d45a547 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
223 return 0; 223 return 0;
224} 224}
225 225
226#ifdef CONFIG_PM_SLEEP
227int dsa_switch_suspend(struct dsa_switch *ds)
228{
229 int i, ret = 0;
230
231 /* Suspend slave network devices */
232 for (i = 0; i < ds->num_ports; i++) {
233 if (!dsa_is_port_initialized(ds, i))
234 continue;
235
236 ret = dsa_slave_suspend(ds->ports[i].netdev);
237 if (ret)
238 return ret;
239 }
240
241 if (ds->ops->suspend)
242 ret = ds->ops->suspend(ds);
243
244 return ret;
245}
246EXPORT_SYMBOL_GPL(dsa_switch_suspend);
247
248int dsa_switch_resume(struct dsa_switch *ds)
249{
250 int i, ret = 0;
251
252 if (ds->ops->resume)
253 ret = ds->ops->resume(ds);
254
255 if (ret)
256 return ret;
257
258 /* Resume slave network devices */
259 for (i = 0; i < ds->num_ports; i++) {
260 if (!dsa_is_port_initialized(ds, i))
261 continue;
262
263 ret = dsa_slave_resume(ds->ports[i].netdev);
264 if (ret)
265 return ret;
266 }
267
268 return 0;
269}
270EXPORT_SYMBOL_GPL(dsa_switch_resume);
271#endif
272
226static struct packet_type dsa_pack_type __read_mostly = { 273static struct packet_type dsa_pack_type __read_mostly = {
227 .type = cpu_to_be16(ETH_P_XDSA), 274 .type = cpu_to_be16(ETH_P_XDSA),
228 .func = dsa_switch_rcv, 275 .func = dsa_switch_rcv,
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bfb63dc..7796580e99ee 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
 			dsa_ds_unapply(dst, ds);
 	}
 
-	if (dst->cpu_switch)
+	if (dst->cpu_switch) {
 		dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+		dst->cpu_switch = NULL;
+	}
 
 	pr_info("DSA: tree %d unapplied\n", dst->tree);
 	dst->applied = false;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8b0b06..7281098df04e 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
289 dsa_switch_unregister_notifier(ds); 289 dsa_switch_unregister_notifier(ds);
290} 290}
291 291
292#ifdef CONFIG_PM_SLEEP
293int dsa_switch_suspend(struct dsa_switch *ds)
294{
295 int i, ret = 0;
296
297 /* Suspend slave network devices */
298 for (i = 0; i < ds->num_ports; i++) {
299 if (!dsa_is_port_initialized(ds, i))
300 continue;
301
302 ret = dsa_slave_suspend(ds->ports[i].netdev);
303 if (ret)
304 return ret;
305 }
306
307 if (ds->ops->suspend)
308 ret = ds->ops->suspend(ds);
309
310 return ret;
311}
312EXPORT_SYMBOL_GPL(dsa_switch_suspend);
313
314int dsa_switch_resume(struct dsa_switch *ds)
315{
316 int i, ret = 0;
317
318 if (ds->ops->resume)
319 ret = ds->ops->resume(ds);
320
321 if (ret)
322 return ret;
323
324 /* Resume slave network devices */
325 for (i = 0; i < ds->num_ports; i++) {
326 if (!dsa_is_port_initialized(ds, i))
327 continue;
328
329 ret = dsa_slave_resume(ds->ports[i].netdev);
330 if (ret)
331 return ret;
332 }
333
334 return 0;
335}
336EXPORT_SYMBOL_GPL(dsa_switch_resume);
337#endif
338
339/* platform driver init and cleanup *****************************************/ 292/* platform driver init and cleanup *****************************************/
340static int dev_is_class(struct device *dev, void *class) 293static int dev_is_class(struct device *dev, void *class)
341{ 294{
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f3dad1661343..58925b6597de 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
 		.type =       SOCK_DGRAM,
 		.protocol =   IPPROTO_ICMP,
 		.prot =       &ping_prot,
-		.ops =        &inet_dgram_ops,
+		.ops =        &inet_sockraw_ops,
 		.flags =      INET_PROTOSW_REUSE,
        },
 
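
The entry patched above is the IPv4 ping socket type (SOCK_DGRAM over IPPROTO_ICMP); the change only swaps its proto_ops to the raw-socket set so it stops going through the UDP poll path. A user-space sketch of the socket this entry serves (opening it is expected to require net.ipv4.ping_group_range to cover the caller's group):

	#include <sys/socket.h>
	#include <netinet/in.h>

	static int open_ping_socket(void)
	{
		/* served by the inetsw entry whose .ops the hunk above changes */
		return socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	}
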
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 59792d283ff8..b5ea036ca781 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2381,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
 	return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
 		struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_repair_opt opt;
 
 	while (len >= sizeof(opt)) {
@@ -2396,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
 		switch (opt.opt_code) {
 		case TCPOPT_MSS:
 			tp->rx_opt.mss_clamp = opt.opt_val;
+			tcp_mtup_init(sk);
 			break;
 		case TCPOPT_WINDOW:
 		{
@@ -2555,7 +2557,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EINVAL;
 		else if (sk->sk_state == TCP_ESTABLISHED)
-			err = tcp_repair_options_est(tp,
+			err = tcp_repair_options_est(sk,
 					(struct tcp_repair_opt __user *)optval,
 					optlen);
 		else
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6e3c512054a6..324c9bcc5456 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
+	tcp_sk(sk)->prior_ssthresh = 0;
 	if (icsk->icsk_ca_ops->init)
 		icsk->icsk_ca_ops->init(sk);
 	if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de713c6..8d772fea1dde 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	struct ipv6hdr *ip6_hdr;
 	struct ipv6_opt_hdr *hop;
 	unsigned char buf[CALIPSO_MAX_BUFFER];
-	int len_delta, new_end, pad;
+	int len_delta, new_end, pad, payload;
 	unsigned int start, end;
 
 	ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 	if (ret_val < 0)
 		return ret_val;
 
+	ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
 	if (len_delta) {
 		if (len_delta > 0)
 			skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
 				sizeof(*ip6_hdr) + start);
 		skb_reset_network_header(skb);
 		ip6_hdr = ipv6_hdr(skb);
+		payload = ntohs(ip6_hdr->payload_len);
+		ip6_hdr->payload_len = htons(payload + len_delta);
 	}
 
 	hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
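
The last calipso hunk keeps the IPv6 header honest: payload_len counts everything after the fixed 40-byte header, so resizing the hop-by-hop option area by len_delta has to be mirrored there. The arithmetic, as a small stand-alone sketch (hypothetical helper name):

	static void example_fixup_payload_len(struct ipv6hdr *ip6_hdr, int len_delta)
	{
		int payload = ntohs(ip6_hdr->payload_len);

		/* option area grew (len_delta > 0) or shrank (len_delta < 0) */
		ip6_hdr->payload_len = htons(payload + len_delta);
	}
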
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 280268f1dd7b..cdb3728faca7 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
 	if (udpfrag) {
 		int err = ip6_find_1stfragopt(skb, &prevhdr);
-		if (err < 0)
+		if (err < 0) {
+			kfree_skb_list(segs);
 			return ERR_PTR(err);
+		}
 		fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
 		fptr->frag_off = htons(offset);
 		if (skb->next)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7ae6c503f1ca..9b37f9747fc6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
 	if (!dst) {
 route_lookup:
+		/* add dsfield to flowlabel for route lookup */
+		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
 		dst = ip6_route_output(net, NULL, fl6);
 
 		if (dst->error)
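
The ip6_tunnel hunk folds the DSCP bits into the flow information before the route lookup so that policy routing keyed on the traffic class can see them. ip6_make_flowinfo() packs the traffic class above the 20-bit flow label; roughly (illustrative sketch of the assumed layout, not the header's exact definition):

	/* flowinfo word: traffic class in bits 27..20, flow label in bits 19..0 */
	static inline __be32 example_make_flowinfo(unsigned int tclass, __be32 flowlabel)
	{
		return htonl(tclass << 20) | flowlabel;
	}
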
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 9b522fa90e6d..ac826dd338ff 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
 	.type =      SOCK_DGRAM,
 	.protocol =  IPPROTO_ICMPV6,
 	.prot =      &pingv6_prot,
-	.ops =       &inet6_dgram_ops,
+	.ops =       &inet6_sockraw_ops,
 	.flags =     INET_PROTOSW_REUSE,
 };
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f992d9e261d..60be012fe708 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif	/* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll.  */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
 	.family		   = PF_INET6,
 	.owner		   = THIS_MODULE,
 	.release	   = inet6_release,
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e015906f9ca..07d36573f50b 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
 	iph = ipv6_hdr(skb);
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 7a92c0f31912..9ad07a91708e 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 60e2a62f7bef..cf2392b2ac71 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,7 +7,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
741 ieee80211_agg_start_txq(sta, tid, true); 741 ieee80211_agg_start_txq(sta, tid, true);
742} 742}
743 743
744void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) 744void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
745 struct tid_ampdu_tx *tid_tx)
745{ 746{
746 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 747 struct ieee80211_sub_if_data *sdata = sta->sdata;
747 struct ieee80211_local *local = sdata->local; 748 struct ieee80211_local *local = sdata->local;
748 struct sta_info *sta;
749 struct tid_ampdu_tx *tid_tx;
750 749
751 trace_api_start_tx_ba_cb(sdata, ra, tid); 750 if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
751 return;
752
753 if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
754 ieee80211_agg_tx_operational(local, sta, tid);
755}
756
757static struct tid_ampdu_tx *
758ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
759 const u8 *ra, u16 tid, struct sta_info **sta)
760{
761 struct tid_ampdu_tx *tid_tx;
752 762
753 if (tid >= IEEE80211_NUM_TIDS) { 763 if (tid >= IEEE80211_NUM_TIDS) {
754 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 764 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
755 tid, IEEE80211_NUM_TIDS); 765 tid, IEEE80211_NUM_TIDS);
756 return; 766 return NULL;
757 } 767 }
758 768
759 mutex_lock(&local->sta_mtx); 769 *sta = sta_info_get_bss(sdata, ra);
760 sta = sta_info_get_bss(sdata, ra); 770 if (!*sta) {
761 if (!sta) {
762 mutex_unlock(&local->sta_mtx);
763 ht_dbg(sdata, "Could not find station: %pM\n", ra); 771 ht_dbg(sdata, "Could not find station: %pM\n", ra);
764 return; 772 return NULL;
765 } 773 }
766 774
767 mutex_lock(&sta->ampdu_mlme.mtx); 775 tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
768 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
769 776
770 if (WARN_ON(!tid_tx)) { 777 if (WARN_ON(!tid_tx))
771 ht_dbg(sdata, "addBA was not requested!\n"); 778 ht_dbg(sdata, "addBA was not requested!\n");
772 goto unlock;
773 }
774 779
775 if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) 780 return tid_tx;
776 goto unlock;
777
778 if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
779 ieee80211_agg_tx_operational(local, sta, tid);
780
781 unlock:
782 mutex_unlock(&sta->ampdu_mlme.mtx);
783 mutex_unlock(&local->sta_mtx);
784} 781}
785 782
786void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 783void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
788{ 785{
789 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 786 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
790 struct ieee80211_local *local = sdata->local; 787 struct ieee80211_local *local = sdata->local;
791 struct ieee80211_ra_tid *ra_tid; 788 struct sta_info *sta;
792 struct sk_buff *skb = dev_alloc_skb(0); 789 struct tid_ampdu_tx *tid_tx;
793 790
794 if (unlikely(!skb)) 791 trace_api_start_tx_ba_cb(sdata, ra, tid);
795 return;
796 792
797 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 793 rcu_read_lock();
798 memcpy(&ra_tid->ra, ra, ETH_ALEN); 794 tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
799 ra_tid->tid = tid; 795 if (!tid_tx)
796 goto out;
800 797
801 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; 798 set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
802 skb_queue_tail(&sdata->skb_queue, skb); 799 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
803 ieee80211_queue_work(&local->hw, &sdata->work); 800 out:
801 rcu_read_unlock();
804} 802}
805EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); 803EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
806 804
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
860} 858}
861EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); 859EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
862 860
863void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) 861void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
862 struct tid_ampdu_tx *tid_tx)
864{ 863{
865 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 864 struct ieee80211_sub_if_data *sdata = sta->sdata;
866 struct ieee80211_local *local = sdata->local;
867 struct sta_info *sta;
868 struct tid_ampdu_tx *tid_tx;
869 bool send_delba = false; 865 bool send_delba = false;
870 866
871 trace_api_stop_tx_ba_cb(sdata, ra, tid); 867 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
872 868 sta->sta.addr, tid);
873 if (tid >= IEEE80211_NUM_TIDS) {
874 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
875 tid, IEEE80211_NUM_TIDS);
876 return;
877 }
878
879 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
880
881 mutex_lock(&local->sta_mtx);
882
883 sta = sta_info_get_bss(sdata, ra);
884 if (!sta) {
885 ht_dbg(sdata, "Could not find station: %pM\n", ra);
886 goto unlock;
887 }
888 869
889 mutex_lock(&sta->ampdu_mlme.mtx);
890 spin_lock_bh(&sta->lock); 870 spin_lock_bh(&sta->lock);
891 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
892 871
893 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 872 if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
894 ht_dbg(sdata, 873 ht_dbg(sdata,
895 "unexpected callback to A-MPDU stop for %pM tid %d\n", 874 "unexpected callback to A-MPDU stop for %pM tid %d\n",
896 sta->sta.addr, tid); 875 sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
906 spin_unlock_bh(&sta->lock); 885 spin_unlock_bh(&sta->lock);
907 886
908 if (send_delba) 887 if (send_delba)
909 ieee80211_send_delba(sdata, ra, tid, 888 ieee80211_send_delba(sdata, sta->sta.addr, tid,
910 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 889 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
911
912 mutex_unlock(&sta->ampdu_mlme.mtx);
913 unlock:
914 mutex_unlock(&local->sta_mtx);
915} 890}
916 891
917void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 892void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
919{ 894{
920 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 895 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
921 struct ieee80211_local *local = sdata->local; 896 struct ieee80211_local *local = sdata->local;
922 struct ieee80211_ra_tid *ra_tid; 897 struct sta_info *sta;
923 struct sk_buff *skb = dev_alloc_skb(0); 898 struct tid_ampdu_tx *tid_tx;
924 899
925 if (unlikely(!skb)) 900 trace_api_stop_tx_ba_cb(sdata, ra, tid);
926 return;
927 901
928 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 902 rcu_read_lock();
929 memcpy(&ra_tid->ra, ra, ETH_ALEN); 903 tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
930 ra_tid->tid = tid; 904 if (!tid_tx)
905 goto out;
931 906
932 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; 907 set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
933 skb_queue_tail(&sdata->skb_queue, skb); 908 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
934 ieee80211_queue_work(&local->hw, &sdata->work); 909 out:
910 rcu_read_unlock();
935} 911}
936EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); 912EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
937 913
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f4a528773563..6ca5442b1e03 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -7,6 +7,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017	Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
 	int i;
 
-	cancel_work_sync(&sta->ampdu_mlme.work);
-
 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, reason);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 				       reason != AGG_STOP_DESTROY_STA &&
 				       reason != AGG_STOP_PEER_REQUEST);
 	}
+
+	/* stopping might queue the work again - so cancel only afterwards */
+	cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
 		spin_unlock_bh(&sta->lock);
 
 		tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-		if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-						 &tid_tx->state))
+		if (!tid_tx)
+			continue;
+
+		if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+			ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+		if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
 			___ieee80211_stop_tx_ba_session(sta, tid,
 							AGG_STOP_LOCAL_REQUEST);
+		if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+			ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
 	}
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 }
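
Taken together, the mac80211 changes stop bouncing the TX aggregation start/stop callbacks through the interface skb queue; the irqsafe entry points now set the new HT_AGG_STATE_START_CB/HT_AGG_STATE_STOP_CB bits and kick the per-station ampdu_mlme work, which the hunk above consumes with test_and_clear_bit(). A condensed sketch of that request/consume flow (simplified from the hunks, not a literal copy):

	/* requester, may run in atomic context */
	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* worker, process context, under sta->ampdu_mlme.mtx */
	if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
		ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
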
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f8f6c148f554..665501ac358f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
1036 1036
1037enum sdata_queue_type { 1037enum sdata_queue_type {
1038 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, 1038 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
1039 IEEE80211_SDATA_QUEUE_AGG_START = 1,
1040 IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
1041 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, 1039 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
1042 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, 1040 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
1043}; 1041};
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
1427 return local->hw.wiphy->bands[band]; 1425 return local->hw.wiphy->bands[band];
1428} 1426}
1429 1427
1430/* this struct represents 802.11n's RA/TID combination */
1431struct ieee80211_ra_tid {
1432 u8 ra[ETH_ALEN];
1433 u16 tid;
1434};
1435
1436/* this struct holds the value parsing from channel switch IE */ 1428/* this struct holds the value parsing from channel switch IE */
1437struct ieee80211_csa_ie { 1429struct ieee80211_csa_ie {
1438 struct cfg80211_chan_def chandef; 1430 struct cfg80211_chan_def chandef;
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1794 enum ieee80211_agg_stop_reason reason); 1786 enum ieee80211_agg_stop_reason reason);
1795int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 1787int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1796 enum ieee80211_agg_stop_reason reason); 1788 enum ieee80211_agg_stop_reason reason);
1797void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); 1789void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
1798void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); 1790 struct tid_ampdu_tx *tid_tx);
1791void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
1792 struct tid_ampdu_tx *tid_tx);
1799void ieee80211_ba_session_work(struct work_struct *work); 1793void ieee80211_ba_session_work(struct work_struct *work);
1800void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); 1794void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
1801void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); 1795void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bd5b81f5d81..8fae1a72e6a7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
1237 struct ieee80211_local *local = sdata->local; 1237 struct ieee80211_local *local = sdata->local;
1238 struct sk_buff *skb; 1238 struct sk_buff *skb;
1239 struct sta_info *sta; 1239 struct sta_info *sta;
1240 struct ieee80211_ra_tid *ra_tid;
1241 struct ieee80211_rx_agg *rx_agg; 1240 struct ieee80211_rx_agg *rx_agg;
1242 1241
1243 if (!ieee80211_sdata_running(sdata)) 1242 if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
1253 while ((skb = skb_dequeue(&sdata->skb_queue))) { 1252 while ((skb = skb_dequeue(&sdata->skb_queue))) {
1254 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1253 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1255 1254
1256 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { 1255 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1257 ra_tid = (void *)&skb->cb;
1258 ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
1259 ra_tid->tid);
1260 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
1261 ra_tid = (void *)&skb->cb;
1262 ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
1263 ra_tid->tid);
1264 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1265 rx_agg = (void *)&skb->cb; 1256 rx_agg = (void *)&skb->cb;
1266 mutex_lock(&local->sta_mtx); 1257 mutex_lock(&local->sta_mtx);
1267 sta = sta_info_get_bss(sdata, rx_agg->addr); 1258 sta = sta_info_get_bss(sdata, rx_agg->addr);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7cdf7a835bb0..403e3cc58b57 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			struct ieee80211_sta_rx_stats *cpurxs;
 
 			cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-			sinfo->rx_packets += cpurxs->dropped;
+			sinfo->rx_dropped_misc += cpurxs->dropped;
 		}
 	}
 
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5609cacb20d5..ea0747d6a6da 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
 #define HT_AGG_STATE_STOPPING		3
 #define HT_AGG_STATE_WANT_START		4
 #define HT_AGG_STATE_WANT_STOP		5
+#define HT_AGG_STATE_START_CB		6
+#define HT_AGG_STATE_STOP_CB		7
 
 enum ieee80211_agg_stop_reason {
 	AGG_STOP_DECLINED,
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 257ec66009da..7b05fd1497ce 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
 			continue;
 		alive++;
 		nh_flags &= ~flags;
-		WRITE_ONCE(nh->nh_flags, flags);
+		WRITE_ONCE(nh->nh_flags, nh_flags);
 	} endfor_nexthops(rt);
 
 	WRITE_ONCE(rt->rt_nhn_alive, alive);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9799a50bc604..a8be9b72e6cd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -890,8 +890,13 @@ restart:
 	}
 out:
 	local_bh_enable();
-	if (last)
+	if (last) {
+		/* nf ct hash resize happened, now clear the leftover. */
+		if ((struct nf_conn *)cb->args[1] == last)
+			cb->args[1] = 0;
+
 		nf_ct_put(last);
+	}
 
 	while (i) {
 		i--;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 13875d599a85..1c5b14a6cab3 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
 		      u8 pf, unsigned int hooknum)
 {
 	const struct sctphdr *sh;
-	struct sctphdr _sctph;
 	const char *logmsg;
 
-	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-	if (!sh) {
+	if (skb->len < dataoff + sizeof(struct sctphdr)) {
 		logmsg = "nf_ct_sctp: short packet ";
 		goto out_invalid;
 	}
 	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
 	    skb->ip_summed == CHECKSUM_NONE) {
+		if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+			logmsg = "nf_ct_sctp: failed to read header ";
+			goto out_invalid;
+		}
+		sh = (const struct sctphdr *)(skb->data + dataoff);
 		if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
 			logmsg = "nf_ct_sctp: bad CRC ";
 			goto out_invalid;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index ef0be325a0c6..6c72922d20ca 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -566,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
 	 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
 	 * will delete entry from already-freed table.
 	 */
-	ct->status &= ~IPS_NAT_DONE_MASK;
+	clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
 	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
 			nf_nat_bysource_params);
 
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb53f0a..fbdbaa00dd5f 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 		else if (d > 0)
 			p = &parent->rb_right;
 		else {
-			if (nft_set_elem_active(&rbe->ext, genmask)) {
-				if (nft_rbtree_interval_end(rbe) &&
-				    !nft_rbtree_interval_end(new))
-					p = &parent->rb_left;
-				else if (!nft_rbtree_interval_end(rbe) &&
-					 nft_rbtree_interval_end(new))
-					p = &parent->rb_right;
-				else {
-					*ext = &rbe->ext;
-					return -EEXIST;
-				}
+			if (nft_rbtree_interval_end(rbe) &&
+			    !nft_rbtree_interval_end(new)) {
+				p = &parent->rb_left;
+			} else if (!nft_rbtree_interval_end(rbe) &&
+				   nft_rbtree_interval_end(new)) {
+				p = &parent->rb_right;
+			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
+				*ext = &rbe->ext;
+				return -EEXIST;
+			} else {
+				p = &parent->rb_left;
 			}
 		}
 	}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ee841f00a6ec..7586d446d7dc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
 		goto out;
 	}
 	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
-	NETLINK_CB(p->skb2).nsid_is_set = true;
+	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+		NETLINK_CB(p->skb2).nsid_is_set = true;
 	val = netlink_broadcast_deliver(sk, p->skb2);
 	if (val < 0) {
 		netlink_overrun(sk);
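
The newly included <linux/net_namespace.h> supplies NETNSA_NSID_NOT_ASSIGNED, the sentinel peernet2id() returns when the peer namespace has no id in the sender's namespace; the second hunk then only marks the id as set when it is real. The guarded form, with an explanatory comment (same logic as the patched lines):

	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	/* NETNSA_NSID_NOT_ASSIGNED means "no id allocated", so don't claim one */
	if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
		NETLINK_CB(p->skb2).nsid_is_set = true;
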
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 2f836ca09860..cd67d1c12cf1 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1618,6 +1618,7 @@ static int snd_timer_user_tselect(struct file *file,
 	if (err < 0)
 		goto __err;
 
+	tu->qhead = tu->qtail = tu->qused = 0;
 	kfree(tu->queue);
 	tu->queue = NULL;
 	kfree(tu->tqueue);
@@ -1959,6 +1960,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1959 1960
1960 tu = file->private_data; 1961 tu = file->private_data;
1961 unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); 1962 unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
1963 mutex_lock(&tu->ioctl_lock);
1962 spin_lock_irq(&tu->qlock); 1964 spin_lock_irq(&tu->qlock);
1963 while ((long)count - result >= unit) { 1965 while ((long)count - result >= unit) {
1964 while (!tu->qused) { 1966 while (!tu->qused) {
@@ -1974,7 +1976,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1974 add_wait_queue(&tu->qchange_sleep, &wait); 1976 add_wait_queue(&tu->qchange_sleep, &wait);
1975 1977
1976 spin_unlock_irq(&tu->qlock); 1978 spin_unlock_irq(&tu->qlock);
1979 mutex_unlock(&tu->ioctl_lock);
1977 schedule(); 1980 schedule();
1981 mutex_lock(&tu->ioctl_lock);
1978 spin_lock_irq(&tu->qlock); 1982 spin_lock_irq(&tu->qlock);
1979 1983
1980 remove_wait_queue(&tu->qchange_sleep, &wait); 1984 remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1994 tu->qused--; 1998 tu->qused--;
1995 spin_unlock_irq(&tu->qlock); 1999 spin_unlock_irq(&tu->qlock);
1996 2000
1997 mutex_lock(&tu->ioctl_lock);
1998 if (tu->tread) { 2001 if (tu->tread) {
1999 if (copy_to_user(buffer, &tu->tqueue[qhead], 2002 if (copy_to_user(buffer, &tu->tqueue[qhead],
2000 sizeof(struct snd_timer_tread))) 2003 sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
2004 sizeof(struct snd_timer_read))) 2007 sizeof(struct snd_timer_read)))
2005 err = -EFAULT; 2008 err = -EFAULT;
2006 } 2009 }
2007 mutex_unlock(&tu->ioctl_lock);
2008 2010
2009 spin_lock_irq(&tu->qlock); 2011 spin_lock_irq(&tu->qlock);
2010 if (err < 0) 2012 if (err < 0)
@@ -2014,6 +2016,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
2014 } 2016 }
2015 _error: 2017 _error:
2016 spin_unlock_irq(&tu->qlock); 2018 spin_unlock_irq(&tu->qlock);
2019 mutex_unlock(&tu->ioctl_lock);
2017 return result > 0 ? result : err; 2020 return result > 0 ? result : err;
2018} 2021}
2019 2022
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a57988d617e9..cbeebc0a9711 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5854,7 +5854,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5854 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 5854 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5855 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5855 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5856 SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 5856 SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5857 SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5857 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5858 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5859 SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5860 SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5861 SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5858 SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), 5862 SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
5859 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), 5863 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
5860 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), 5864 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5862,13 +5866,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5862 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 5866 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
5863 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 5867 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
5864 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 5868 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
5869 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
5865 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 5870 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
5866 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5867 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5871 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5868 SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5872 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5869 SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5870 SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5871 SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5872 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 5873 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
5873 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), 5874 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
5874 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), 5875 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 7ae46c2647d4..b7ef8c59b49a 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -301,6 +301,14 @@ static int atmel_classd_codec_probe(struct snd_soc_codec *codec)
 	return 0;
 }
 
+static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
+{
+	struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
+	struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
+
+	return regcache_sync(dd->regmap);
+}
+
 static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 {
 	return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 
 static struct snd_soc_codec_driver soc_codec_dev_classd = {
 	.probe = atmel_classd_codec_probe,
+	.resume = atmel_classd_codec_resume,
 	.get_regmap = atmel_classd_codec_get_remap,
 	.component_driver = {
 		.controls = atmel_classd_snd_controls,
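
The new resume handler simply replays the register cache once the Class-D block regains power, which is the usual regmap pattern for codecs that lose register state in suspend. A generic sketch with invented names (the driver's own version is the hunk above):

	static int example_codec_resume(struct snd_soc_codec *codec)
	{
		struct example_priv *priv = snd_soc_codec_get_drvdata(codec);

		/* rewrite every cached register that differs from the hardware default */
		return regcache_sync(priv->regmap);
	}
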
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 6dd7578f0bb8..024d83fa6a7f 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -772,7 +772,7 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
 			++i;
 			msleep(50);
 		}
-	} while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock));
+	} while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
 
 	if (!srm_lock)
 		dev_warn(codec->dev, "SRM failed to lock\n");
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9c365a7f758d..7899a2cdeb42 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1108,6 +1108,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
 		}
 	},
+	{
+		.ident = "Thinkpad Helix 2nd",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+		}
+	},
 
 	{ }
 };
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2c9dedab5184..bc136d2bd7cd 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -202,7 +202,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
 	if (ret < 0)
 		return ret;
 
-	ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+	ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 58c525096a7c..498b15345b1a 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -413,8 +413,11 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
 	u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
 	u64 *ipc_header = (u64 *)(&header);
 	struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+	unsigned long flags;
 
+	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
 	msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 	if (msg == NULL) {
 		dev_dbg(ipc->dev, "ipc: rx list is empty\n");
 		return;
@@ -456,8 +459,10 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
 		}
 	}
 
+	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
 	list_del(&msg->list);
 	sst_ipc_tx_msg_reply_complete(ipc, msg);
+	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 }
 
 irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 3a99712e44a8..64a0f8ed33e1 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2502,7 +2502,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
 
 		if (ret < 0)
 			return ret;
-		tkn_count += ret;
+		tkn_count = ret;
 
 		tuple_size += tkn_count *
 			sizeof(struct snd_soc_tplg_vendor_string_elem);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 6df3b317a476..4c9b5781282b 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -410,7 +410,7 @@ static int skl_free(struct hdac_ext_bus *ebus)
 	struct skl *skl = ebus_to_skl(ebus);
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
 
-	skl->init_failed = 1; /* to be sure */
+	skl->init_done = 0; /* to be sure */
 
 	snd_hdac_ext_stop_streams(ebus);
 
@@ -428,8 +428,10 @@ static int skl_free(struct hdac_ext_bus *ebus)
428 428
429 snd_hdac_ext_bus_exit(ebus); 429 snd_hdac_ext_bus_exit(ebus);
430 430
431 cancel_work_sync(&skl->probe_work);
431 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 432 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
432 snd_hdac_i915_exit(&ebus->bus); 433 snd_hdac_i915_exit(&ebus->bus);
434
433 return 0; 435 return 0;
434} 436}
435 437
@@ -566,6 +568,84 @@ static const struct hdac_bus_ops bus_core_ops = {
566 .get_response = snd_hdac_bus_get_response, 568 .get_response = snd_hdac_bus_get_response,
567}; 569};
568 570
571static int skl_i915_init(struct hdac_bus *bus)
572{
573 int err;
574
575 /*
576 * The HDMI codec is in GPU so we need to ensure that it is powered
577 * up and ready for probe
578 */
579 err = snd_hdac_i915_init(bus);
580 if (err < 0)
581 return err;
582
583 err = snd_hdac_display_power(bus, true);
584 if (err < 0)
585 dev_err(bus->dev, "Cannot turn on display power on i915\n");
586
587 return err;
588}
589
590static void skl_probe_work(struct work_struct *work)
591{
592 struct skl *skl = container_of(work, struct skl, probe_work);
593 struct hdac_ext_bus *ebus = &skl->ebus;
594 struct hdac_bus *bus = ebus_to_hbus(ebus);
595 struct hdac_ext_link *hlink = NULL;
596 int err;
597
598 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
599 err = skl_i915_init(bus);
600 if (err < 0)
601 return;
602 }
603
604 err = skl_init_chip(bus, true);
605 if (err < 0) {
606 dev_err(bus->dev, "Init chip failed with err: %d\n", err);
607 goto out_err;
608 }
609
610 /* codec detection */
611 if (!bus->codec_mask)
612 dev_info(bus->dev, "no hda codecs found!\n");
613
614 /* create codec instances */
615 err = skl_codec_create(ebus);
616 if (err < 0)
617 goto out_err;
618
619 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
620 err = snd_hdac_display_power(bus, false);
621 if (err < 0) {
622 dev_err(bus->dev, "Cannot turn off display power on i915\n");
623 return;
624 }
625 }
626
627 /* register platform dai and controls */
628 err = skl_platform_register(bus->dev);
629 if (err < 0)
630 return;
631 /*
632 * we are done probing so decrement link counts
633 */
634 list_for_each_entry(hlink, &ebus->hlink_list, list)
635 snd_hdac_ext_bus_link_put(ebus, hlink);
636
637 /* configure PM */
638 pm_runtime_put_noidle(bus->dev);
639 pm_runtime_allow(bus->dev);
640 skl->init_done = 1;
641
642 return;
643
644out_err:
645 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
646 err = snd_hdac_display_power(bus, false);
647}
648
569/* 649/*
570 * constructor 650 * constructor
571 */ 651 */
@@ -593,6 +673,7 @@ static int skl_create(struct pci_dev *pci,
593 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops); 673 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
594 ebus->bus.use_posbuf = 1; 674 ebus->bus.use_posbuf = 1;
595 skl->pci = pci; 675 skl->pci = pci;
676 INIT_WORK(&skl->probe_work, skl_probe_work);
596 677
597 ebus->bus.bdl_pos_adj = 0; 678 ebus->bus.bdl_pos_adj = 0;
598 679
@@ -601,27 +682,6 @@ static int skl_create(struct pci_dev *pci,
601 return 0; 682 return 0;
602} 683}
603 684
604static int skl_i915_init(struct hdac_bus *bus)
605{
606 int err;
607
608 /*
609 * The HDMI codec is in GPU so we need to ensure that it is powered
610 * up and ready for probe
611 */
612 err = snd_hdac_i915_init(bus);
613 if (err < 0)
614 return err;
615
616 err = snd_hdac_display_power(bus, true);
617 if (err < 0) {
618 dev_err(bus->dev, "Cannot turn on display power on i915\n");
619 return err;
620 }
621
622 return err;
623}
624
625static int skl_first_init(struct hdac_ext_bus *ebus) 685static int skl_first_init(struct hdac_ext_bus *ebus)
626{ 686{
627 struct skl *skl = ebus_to_skl(ebus); 687 struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
684 /* initialize chip */ 744 /* initialize chip */
685 skl_init_pci(skl); 745 skl_init_pci(skl);
686 746
687 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 747 return skl_init_chip(bus, true);
688 err = skl_i915_init(bus);
689 if (err < 0)
690 return err;
691 }
692
693 skl_init_chip(bus, true);
694
695 /* codec detection */
696 if (!bus->codec_mask) {
697 dev_info(bus->dev, "no hda codecs found!\n");
698 }
699
700 return 0;
701} 748}
702 749
703static int skl_probe(struct pci_dev *pci, 750static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@ static int skl_probe(struct pci_dev *pci,
706 struct skl *skl; 753 struct skl *skl;
707 struct hdac_ext_bus *ebus = NULL; 754 struct hdac_ext_bus *ebus = NULL;
708 struct hdac_bus *bus = NULL; 755 struct hdac_bus *bus = NULL;
709 struct hdac_ext_link *hlink = NULL;
710 int err; 756 int err;
711 757
712 /* we use ext core ops, so provide NULL for ops here */ 758 /* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@ static int skl_probe(struct pci_dev *pci,
729 775
730 if (skl->nhlt == NULL) { 776 if (skl->nhlt == NULL) {
731 err = -ENODEV; 777 err = -ENODEV;
732 goto out_display_power_off; 778 goto out_free;
733 } 779 }
734 780
735 err = skl_nhlt_create_sysfs(skl); 781 err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@ static int skl_probe(struct pci_dev *pci,
760 if (bus->mlcap) 806 if (bus->mlcap)
761 snd_hdac_ext_bus_get_ml_capabilities(ebus); 807 snd_hdac_ext_bus_get_ml_capabilities(ebus);
762 808
809 snd_hdac_bus_stop_chip(bus);
810
763 /* create device for soc dmic */ 811 /* create device for soc dmic */
764 err = skl_dmic_device_register(skl); 812 err = skl_dmic_device_register(skl);
765 if (err < 0) 813 if (err < 0)
766 goto out_dsp_free; 814 goto out_dsp_free;
767 815
768 /* register platform dai and controls */ 816 schedule_work(&skl->probe_work);
769 err = skl_platform_register(bus->dev);
770 if (err < 0)
771 goto out_dmic_free;
772
773 /* create codec instances */
774 err = skl_codec_create(ebus);
775 if (err < 0)
776 goto out_unregister;
777
778 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
779 err = snd_hdac_display_power(bus, false);
780 if (err < 0) {
781 dev_err(bus->dev, "Cannot turn off display power on i915\n");
782 return err;
783 }
784 }
785
786 /*
787 * we are done probling so decrement link counts
788 */
789 list_for_each_entry(hlink, &ebus->hlink_list, list)
790 snd_hdac_ext_bus_link_put(ebus, hlink);
791
792 /* configure PM */
793 pm_runtime_put_noidle(bus->dev);
794 pm_runtime_allow(bus->dev);
795 817
796 return 0; 818 return 0;
797 819
798out_unregister:
799 skl_platform_unregister(bus->dev);
800out_dmic_free:
801 skl_dmic_device_unregister(skl);
802out_dsp_free: 820out_dsp_free:
803 skl_free_dsp(skl); 821 skl_free_dsp(skl);
804out_mach_free: 822out_mach_free:
805 skl_machine_device_unregister(skl); 823 skl_machine_device_unregister(skl);
806out_nhlt_free: 824out_nhlt_free:
807 skl_nhlt_free(skl->nhlt); 825 skl_nhlt_free(skl->nhlt);
808out_display_power_off:
809 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
810 snd_hdac_display_power(bus, false);
811out_free: 826out_free:
812 skl->init_failed = 1;
813 skl_free(ebus); 827 skl_free(ebus);
814 828
815 return err; 829 return err;
@@ -828,7 +842,7 @@ static void skl_shutdown(struct pci_dev *pci)
828 842
829 skl = ebus_to_skl(ebus); 843 skl = ebus_to_skl(ebus);
830 844
831 if (skl->init_failed) 845 if (!skl->init_done)
832 return; 846 return;
833 847
834 snd_hdac_ext_stop_streams(ebus); 848 snd_hdac_ext_stop_streams(ebus);
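
The skl.c rework defers the slow part of probe into skl_probe_work(): skl_probe() now stops the chip, registers the DMIC device and schedules the work, while the work item powers up i915, re-initializes the chip, creates the codecs, registers the platform driver and finally sets init_done; skl_free() cancels the work before tearing anything down. A generic sketch of that deferred-probe shape (example_* names are placeholders, not the driver's API):

	struct example_dev {
		struct work_struct probe_work;
		bool init_done;
	};

	static void example_probe_work(struct work_struct *work)
	{
		struct example_dev *dev =
			container_of(work, struct example_dev, probe_work);

		/* sleeping/slow initialization runs here, off the PCI probe path */
		dev->init_done = true;
	}

	static int example_probe(struct example_dev *dev)
	{
		INIT_WORK(&dev->probe_work, example_probe_work);
		schedule_work(&dev->probe_work);
		return 0;
	}

	static void example_remove(struct example_dev *dev)
	{
		/* mirror skl_free(): never tear down while the work may still run */
		cancel_work_sync(&dev->probe_work);
	}
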
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index a454f6035f3e..2a630fcb7f08 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -46,7 +46,7 @@ struct skl {
 	struct hdac_ext_bus ebus;
 	struct pci_dev *pci;
 
-	unsigned int init_failed:1; /* delayed init failed */
+	unsigned int init_done:1; /* delayed init status */
 	struct platform_device *dmic_dev;
 	struct platform_device *i2s_dev;
 	struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@ struct skl {
64 const struct firmware *tplg; 64 const struct firmware *tplg;
65 65
66 int supend_active; 66 int supend_active;
67
68 struct work_struct probe_work;
67}; 69};
68 70
69#define skl_to_ebus(s) (&(s)->ebus) 71#define skl_to_ebus(s) (&(s)->ebus)
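
The Skylake hunks above turn the tail of skl_probe() into deferred work: the codec and platform registration that used to run inline is now queued with schedule_work(&skl->probe_work), and the old init_failed flag becomes init_done so skl_shutdown() can tell whether the deferred part ever completed. A rough userspace analogy of that pattern, using a plain thread in place of the kernel workqueue (structure and function names below are made up for illustration, not the driver's):

----
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-ins for the driver state; not the kernel structures. */
struct fake_skl {
        atomic_bool init_done;  /* mirrors the init_failed -> init_done flip */
        pthread_t probe_work;   /* plays the role of schedule_work() here */
};

static void *probe_work_fn(void *arg)
{
        struct fake_skl *skl = arg;

        sleep(1);               /* codec/platform registration would go here */
        atomic_store(&skl->init_done, true);
        return NULL;
}

static int fake_probe(struct fake_skl *skl)
{
        atomic_init(&skl->init_done, false);
        /* fast bus setup done; defer the slow part, as skl_probe() now does */
        return pthread_create(&skl->probe_work, NULL, probe_work_fn, skl);
}

static void fake_shutdown(struct fake_skl *skl)
{
        if (!atomic_load(&skl->init_done)) {
                printf("deferred init not finished -> nothing to stop\n");
                return;
        }
        printf("stopping streams and chip\n");
}

int main(void)
{
        struct fake_skl skl;

        fake_probe(&skl);
        fake_shutdown(&skl);                    /* very likely still pending */
        pthread_join(skl.probe_work, NULL);
        fake_shutdown(&skl);                    /* now init_done is set */
        return 0;
}
----

The point of the flag rename is the same as in the patch: shutdown and error paths treat "not yet initialized" and "initialization failed" identically and simply bail out.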
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 66203d107a11..d3b0dc145a56 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -507,7 +507,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
507 rbga = rbgx; 507 rbga = rbgx;
508 adg->rbga_rate_for_441khz = rate / div; 508 adg->rbga_rate_for_441khz = rate / div;
509 ckr |= brg_table[i] << 20; 509 ckr |= brg_table[i] << 20;
510 if (req_441kHz_rate) 510 if (req_441kHz_rate &&
511 !(adg_mode_flags(adg) & AUDIO_OUT_48))
511 parent_clk_name = __clk_get_name(clk); 512 parent_clk_name = __clk_get_name(clk);
512 } 513 }
513 } 514 }
@@ -522,7 +523,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
522 rbgb = rbgx; 523 rbgb = rbgx;
523 adg->rbgb_rate_for_48khz = rate / div; 524 adg->rbgb_rate_for_48khz = rate / div;
524 ckr |= brg_table[i] << 16; 525 ckr |= brg_table[i] << 16;
525 if (req_48kHz_rate) 526 if (req_48kHz_rate &&
527 (adg_mode_flags(adg) & AUDIO_OUT_48))
526 parent_clk_name = __clk_get_name(clk); 528 parent_clk_name = __clk_get_name(clk);
527 } 529 }
528 } 530 }
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 7d92a24b7cfa..d879c010cf03 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -89,6 +89,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
89 dev_dbg(dev, "ctu/mix path = 0x%08x", data); 89 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
90 90
91 rsnd_mod_write(mod, CMD_ROUTE_SLCT, data); 91 rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
92 rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
92 rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 93 rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
93 94
94 rsnd_adg_set_cmd_timsel_gen2(mod, io); 95 rsnd_adg_set_cmd_timsel_gen2(mod, io);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1744015408c3..8c1f4e2e0c4f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -343,6 +343,57 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
343 return 0x76543210; 343 return 0x76543210;
344} 344}
345 345
346u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
347{
348 enum rsnd_mod_type playback_mods[] = {
349 RSND_MOD_SRC,
350 RSND_MOD_CMD,
351 RSND_MOD_SSIU,
352 };
353 enum rsnd_mod_type capture_mods[] = {
354 RSND_MOD_CMD,
355 RSND_MOD_SRC,
356 RSND_MOD_SSIU,
357 };
358 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
359 struct rsnd_mod *tmod = NULL;
360 enum rsnd_mod_type *mods =
361 rsnd_io_is_play(io) ?
362 playback_mods : capture_mods;
363 int i;
364
365 /*
366 * This is needed for 24bit data
367 * We need to shift 8bit
368 *
369 * Linux 24bit data is located as 0x00******
370 * HW 24bit data is located as 0x******00
371 *
372 */
373 switch (runtime->sample_bits) {
374 case 16:
375 return 0;
376 case 32:
377 break;
378 }
379
380 for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
381 tmod = rsnd_io_to_mod(io, mods[i]);
382 if (tmod)
383 break;
384 }
385
386 if (tmod != mod)
387 return 0;
388
389 if (rsnd_io_is_play(io))
390 return (0 << 20) | /* shift to Left */
391 (8 << 16); /* 8bit */
392 else
393 return (1 << 20) | /* shift to Right */
394 (8 << 16); /* 8bit */
395}
396
346/* 397/*
347 * rsnd_dai functions 398 * rsnd_dai functions
348 */ 399 */
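
The comment inside the new rsnd_get_busif_shift() carries the whole idea: ALSA keeps 24-bit samples as 0x00XXXXXX in a 32-bit word while the R-Car BUSIF expects 0xXXXXXX00, so the helper asks the hardware for an 8-bit shift, left (0 << 20) for playback and right (1 << 20) for capture. A standalone sketch of the same repacking arithmetic (the sample value is arbitrary):

----
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* arbitrary 24-bit sample, stored the way ALSA keeps S24_LE data */
        uint32_t linux_sample = 0x00123456;             /* 0x00****** */
        uint32_t hw_sample    = linux_sample << 8;      /* 0x******00, playback */
        uint32_t captured     = hw_sample >> 8;         /* capture direction */

        printf("linux 0x%08x -> hw 0x%08x -> back 0x%08x\n",
               linux_sample, hw_sample, captured);

        /* the mode bits the helper returns: bit 20 picks the direction
         * (0 = shift left for playback, 1 = shift right for capture),
         * and the 8 placed at bit 16 is the shift width */
        printf("playback mode bits 0x%08x, capture mode bits 0x%08x\n",
               (0u << 20) | (8u << 16), (1u << 20) | (8u << 16));
        return 0;
}
----

In the SRC, CMD and SSIU hunks that follow, this return value is OR'd with 1 before being written to the respective *_BUSIF_MODE registers.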
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 63b6d3c28021..4b0980728e13 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -236,6 +236,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
236 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20), 236 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20),
237 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20), 237 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20),
238 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20), 238 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20),
239 RSND_GEN_M_REG(CMD_BUSIF_MODE, 0x184, 0x20),
239 RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20), 240 RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20),
240 RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20), 241 RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20),
241 RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20), 242 RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index dbf4163427e8..323af41ecfcb 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -73,6 +73,7 @@ enum rsnd_reg {
73 RSND_REG_SCU_SYS_INT_EN0, 73 RSND_REG_SCU_SYS_INT_EN0,
74 RSND_REG_SCU_SYS_INT_EN1, 74 RSND_REG_SCU_SYS_INT_EN1,
75 RSND_REG_CMD_CTRL, 75 RSND_REG_CMD_CTRL,
76 RSND_REG_CMD_BUSIF_MODE,
76 RSND_REG_CMD_BUSIF_DALIGN, 77 RSND_REG_CMD_BUSIF_DALIGN,
77 RSND_REG_CMD_ROUTE_SLCT, 78 RSND_REG_CMD_ROUTE_SLCT,
78 RSND_REG_CMDOUT_TIMSEL, 79 RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
204 u32 mask, u32 data); 205 u32 mask, u32 data);
205u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 206u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
206u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 207u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
208u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
207 209
208/* 210/*
209 * R-Car DMA 211 * R-Car DMA
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 20b5b2ec625e..76a477a3ccb5 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -190,11 +190,13 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
190 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 190 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
191 struct device *dev = rsnd_priv_to_dev(priv); 191 struct device *dev = rsnd_priv_to_dev(priv);
192 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 192 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
193 int is_play = rsnd_io_is_play(io);
193 int use_src = 0; 194 int use_src = 0;
194 u32 fin, fout; 195 u32 fin, fout;
195 u32 ifscr, fsrate, adinr; 196 u32 ifscr, fsrate, adinr;
196 u32 cr, route; 197 u32 cr, route;
197 u32 bsdsr, bsisr; 198 u32 bsdsr, bsisr;
199 u32 i_busif, o_busif, tmp;
198 uint ratio; 200 uint ratio;
199 201
200 if (!runtime) 202 if (!runtime)
@@ -270,6 +272,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
270 break; 272 break;
271 } 273 }
272 274
275 /* BUSIF_MODE */
276 tmp = rsnd_get_busif_shift(io, mod);
277 i_busif = ( is_play ? tmp : 0) | 1;
278 o_busif = (!is_play ? tmp : 0) | 1;
279
273 rsnd_mod_write(mod, SRC_ROUTE_MODE0, route); 280 rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
274 281
275 rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */ 282 rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */
@@ -281,8 +288,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
281 rsnd_mod_write(mod, SRC_BSISR, bsisr); 288 rsnd_mod_write(mod, SRC_BSISR, bsisr);
282 rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */ 289 rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */
283 290
284 rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1); 291 rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
285 rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1); 292 rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
293
286 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 294 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
287 295
288 rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout); 296 rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 135c5669f796..91e5c07911b4 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -302,7 +302,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
302 * always use 32bit system word. 302 * always use 32bit system word.
303 * see also rsnd_ssi_master_clk_enable() 303 * see also rsnd_ssi_master_clk_enable()
304 */ 304 */
305 cr_own = FORCE | SWL_32 | PDTA; 305 cr_own = FORCE | SWL_32;
306 306
307 if (rdai->bit_clk_inv) 307 if (rdai->bit_clk_inv)
308 cr_own |= SCKP; 308 cr_own |= SCKP;
@@ -550,6 +550,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
550 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 550 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
551 u32 *buf = (u32 *)(runtime->dma_area + 551 u32 *buf = (u32 *)(runtime->dma_area +
552 rsnd_dai_pointer_offset(io, 0)); 552 rsnd_dai_pointer_offset(io, 0));
553 int shift = 0;
554
555 switch (runtime->sample_bits) {
556 case 32:
557 shift = 8;
558 break;
559 }
553 560
554 /* 561 /*
555 * 8/16/32 data can be assesse to TDR/RDR register 562 * 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
557 * see rsnd_ssi_init() 564 * see rsnd_ssi_init()
558 */ 565 */
559 if (rsnd_io_is_play(io)) 566 if (rsnd_io_is_play(io))
560 rsnd_mod_write(mod, SSITDR, *buf); 567 rsnd_mod_write(mod, SSITDR, (*buf) << shift);
561 else 568 else
562 *buf = rsnd_mod_read(mod, SSIRDR); 569 *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
563 570
564 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf)); 571 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
565 } 572 }
@@ -709,6 +716,11 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
709 struct rsnd_priv *priv) 716 struct rsnd_priv *priv)
710{ 717{
711 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 718 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
719 struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
720
721 /* Do nothing for SSI parent mod */
722 if (ssi_parent_mod == mod)
723 return 0;
712 724
713 /* PIO will request IRQ again */ 725 /* PIO will request IRQ again */
714 free_irq(ssi->irq, mod); 726 free_irq(ssi->irq, mod);
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 14fafdaf1395..512d238b79e2 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -144,7 +144,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
144 (rsnd_io_is_play(io) ? 144 (rsnd_io_is_play(io) ?
145 rsnd_runtime_channel_after_ctu(io) : 145 rsnd_runtime_channel_after_ctu(io) :
146 rsnd_runtime_channel_original(io))); 146 rsnd_runtime_channel_original(io)));
147 rsnd_mod_write(mod, SSI_BUSIF_MODE, 1); 147 rsnd_mod_write(mod, SSI_BUSIF_MODE,
148 rsnd_get_busif_shift(io, mod) | 1);
148 rsnd_mod_write(mod, SSI_BUSIF_DALIGN, 149 rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
149 rsnd_get_dalign(mod, io)); 150 rsnd_get_dalign(mod, io));
150 } 151 }
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index aae099c0e502..754e3ef8d7ae 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
2286 list_for_each_entry(rtd, &card->rtd_list, list) 2286 list_for_each_entry(rtd, &card->rtd_list, list)
2287 flush_delayed_work(&rtd->delayed_work); 2287 flush_delayed_work(&rtd->delayed_work);
2288 2288
2289 /* free the ALSA card at first; this syncs with pending operations */
2290 snd_card_free(card->snd_card);
2291
2289 /* remove and free each DAI */ 2292 /* remove and free each DAI */
2290 soc_remove_dai_links(card); 2293 soc_remove_dai_links(card);
2291 soc_remove_pcm_runtimes(card); 2294 soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
2300 if (card->remove) 2303 if (card->remove)
2301 card->remove(card); 2304 card->remove(card);
2302 2305
2303 snd_card_free(card->snd_card);
2304 return 0; 2306 return 0;
2305
2306} 2307}
2307 2308
2308/* removes a socdev */ 2309/* removes a socdev */
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index e6c9902c6d82..165c2b1d4317 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -240,9 +240,13 @@ Add a probe on schedule() function 12th line with recording cpu local variable:
240 or 240 or
241 ./perf probe --add='schedule:12 cpu' 241 ./perf probe --add='schedule:12 cpu'
242 242
243 this will add one or more probes which has the name start with "schedule". 243Add one or more probes which has the name start with "schedule".
244 244
245 Add probes on lines in schedule() function which calls update_rq_clock(). 245 ./perf probe schedule*
246 or
247 ./perf probe --add='schedule*'
248
249Add probes on lines in schedule() function which calls update_rq_clock().
246 250
247 ./perf probe 'schedule;update_rq_clock*' 251 ./perf probe 'schedule;update_rq_clock*'
248 or 252 or
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index dfbb506d2c34..142606c0ec9c 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -39,7 +39,7 @@ EVENT HANDLERS
39When perf script is invoked using a trace script, a user-defined 39When perf script is invoked using a trace script, a user-defined
40'handler function' is called for each event in the trace. If there's 40'handler function' is called for each event in the trace. If there's
41no handler function defined for a given event type, the event is 41no handler function defined for a given event type, the event is
42ignored (or passed to a 'trace_handled' function, see below) and the 42ignored (or passed to a 'trace_unhandled' function, see below) and the
43next event is processed. 43next event is processed.
44 44
45Most of the event's field values are passed as arguments to the 45Most of the event's field values are passed as arguments to the
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 54acba221558..51ec2d20068a 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -149,10 +149,8 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
149 print "id=%d, args=%s\n" % \ 149 print "id=%d, args=%s\n" % \
150 (id, args), 150 (id, args),
151 151
152def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, 152def trace_unhandled(event_name, context, event_fields_dict):
153 common_pid, common_comm): 153 print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
154 print_header(event_name, common_cpu, common_secs, common_nsecs,
155 common_pid, common_comm)
156 154
157def print_header(event_name, cpu, secs, nsecs, pid, comm): 155def print_header(event_name, cpu, secs, nsecs, pid, comm):
158 print "%-20s %5u %05u.%09u %8u %-20s " % \ 156 print "%-20s %5u %05u.%09u %8u %-20s " % \
@@ -321,7 +319,7 @@ So those are the essential steps in writing and running a script. The
321process can be generalized to any tracepoint or set of tracepoints 319process can be generalized to any tracepoint or set of tracepoints
322you're interested in - basically find the tracepoint(s) you're 320you're interested in - basically find the tracepoint(s) you're
323interested in by looking at the list of available events shown by 321interested in by looking at the list of available events shown by
324'perf list' and/or look in /sys/kernel/debug/tracing events for 322'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
325detailed event and field info, record the corresponding trace data 323detailed event and field info, record the corresponding trace data
326using 'perf record', passing it the list of interesting events, 324using 'perf record', passing it the list of interesting events,
327generate a skeleton script using 'perf script -g python' and modify the 325generate a skeleton script using 'perf script -g python' and modify the
@@ -334,7 +332,7 @@ right place, you can have your script listed alongside the other
334scripts listed by the 'perf script -l' command e.g.: 332scripts listed by the 'perf script -l' command e.g.:
335 333
336---- 334----
337root@tropicana:~# perf script -l 335# perf script -l
338List of available trace scripts: 336List of available trace scripts:
339 wakeup-latency system-wide min/max/avg wakeup latency 337 wakeup-latency system-wide min/max/avg wakeup latency
340 rw-by-file <comm> r/w activity for a program, by file 338 rw-by-file <comm> r/w activity for a program, by file
@@ -383,8 +381,6 @@ source tree:
383 381
384---- 382----
385# ls -al kernel-source/tools/perf/scripts/python 383# ls -al kernel-source/tools/perf/scripts/python
386
387root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
388total 32 384total 32
389drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 . 385drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
390drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. 386drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
@@ -399,7 +395,7 @@ otherwise your script won't show up at run-time), 'perf script -l'
399should show a new entry for your script: 395should show a new entry for your script:
400 396
401---- 397----
402root@tropicana:~# perf script -l 398# perf script -l
403List of available trace scripts: 399List of available trace scripts:
404 wakeup-latency system-wide min/max/avg wakeup latency 400 wakeup-latency system-wide min/max/avg wakeup latency
405 rw-by-file <comm> r/w activity for a program, by file 401 rw-by-file <comm> r/w activity for a program, by file
@@ -437,7 +433,7 @@ EVENT HANDLERS
437When perf script is invoked using a trace script, a user-defined 433When perf script is invoked using a trace script, a user-defined
438'handler function' is called for each event in the trace. If there's 434'handler function' is called for each event in the trace. If there's
439no handler function defined for a given event type, the event is 435no handler function defined for a given event type, the event is
440ignored (or passed to a 'trace_handled' function, see below) and the 436ignored (or passed to a 'trace_unhandled' function, see below) and the
441next event is processed. 437next event is processed.
442 438
443Most of the event's field values are passed as arguments to the 439Most of the event's field values are passed as arguments to the
@@ -532,7 +528,7 @@ can implement a set of optional functions:
532gives scripts a chance to do setup tasks: 528gives scripts a chance to do setup tasks:
533 529
534---- 530----
535def trace_begin: 531def trace_begin():
536 pass 532 pass
537---- 533----
538 534
@@ -541,7 +537,7 @@ def trace_begin:
541 as display results: 537 as display results:
542 538
543---- 539----
544def trace_end: 540def trace_end():
545 pass 541 pass
546---- 542----
547 543
@@ -550,8 +546,7 @@ def trace_end:
550 of common arguments are passed into it: 546 of common arguments are passed into it:
551 547
552---- 548----
553def trace_unhandled(event_name, context, common_cpu, common_secs, 549def trace_unhandled(event_name, context, event_fields_dict):
554 common_nsecs, common_pid, common_comm):
555 pass 550 pass
556---- 551----
557 552
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 837067f48a4c..6b40e9f01740 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -26,6 +26,7 @@ const char *const arm64_triplets[] = {
26 26
27const char *const powerpc_triplets[] = { 27const char *const powerpc_triplets[] = {
28 "powerpc-unknown-linux-gnu-", 28 "powerpc-unknown-linux-gnu-",
29 "powerpc-linux-gnu-",
29 "powerpc64-unknown-linux-gnu-", 30 "powerpc64-unknown-linux-gnu-",
30 "powerpc64-linux-gnu-", 31 "powerpc64-linux-gnu-",
31 "powerpc64le-linux-gnu-", 32 "powerpc64le-linux-gnu-",
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a935b5023732..ad9324d1daf9 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1578,6 +1578,7 @@ static void print_header(int argc, const char **argv)
1578static void print_footer(void) 1578static void print_footer(void)
1579{ 1579{
1580 FILE *output = stat_config.output; 1580 FILE *output = stat_config.output;
1581 int n;
1581 1582
1582 if (!null_run) 1583 if (!null_run)
1583 fprintf(output, "\n"); 1584 fprintf(output, "\n");
@@ -1590,7 +1591,9 @@ static void print_footer(void)
1590 } 1591 }
1591 fprintf(output, "\n\n"); 1592 fprintf(output, "\n\n");
1592 1593
1593 if (print_free_counters_hint) 1594 if (print_free_counters_hint &&
1595 sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
1596 n > 0)
1594 fprintf(output, 1597 fprintf(output,
1595"Some events weren't counted. Try disabling the NMI watchdog:\n" 1598"Some events weren't counted. Try disabling the NMI watchdog:\n"
1596" echo 0 > /proc/sys/kernel/nmi_watchdog\n" 1599" echo 0 > /proc/sys/kernel/nmi_watchdog\n"
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d014350adc52..4b2a5d298197 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -681,6 +681,10 @@ static struct syscall_fmt {
681 { .name = "mlockall", .errmsg = true, 681 { .name = "mlockall", .errmsg = true,
682 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 682 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
683 { .name = "mmap", .hexret = true, 683 { .name = "mmap", .hexret = true,
684/* The standard mmap maps to old_mmap on s390x */
685#if defined(__s390x__)
686 .alias = "old_mmap",
687#endif
684 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ 688 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
685 [2] = SCA_MMAP_PROT, /* prot */ 689 [2] = SCA_MMAP_PROT, /* prot */
686 [3] = SCA_MMAP_FLAGS, /* flags */ }, }, 690 [3] = SCA_MMAP_FLAGS, /* flags */ }, },
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index e7664fe3bd33..8ba2c4618fe9 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -288,3 +288,17 @@ int test__bp_signal(int subtest __maybe_unused)
288 return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ? 288 return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
289 TEST_OK : TEST_FAIL; 289 TEST_OK : TEST_FAIL;
290} 290}
291
292bool test__bp_signal_is_supported(void)
293{
294/*
295 * The powerpc so far does not have support to even create
296 * instruction breakpoint using the perf event interface.
297 * Once it's there we can release this.
298 */
299#ifdef __powerpc__
300 return false;
301#else
302 return true;
303#endif
304}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 9e08d297f1a9..3ccfd58a8c3c 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -97,10 +97,12 @@ static struct test generic_tests[] = {
97 { 97 {
98 .desc = "Breakpoint overflow signal handler", 98 .desc = "Breakpoint overflow signal handler",
99 .func = test__bp_signal, 99 .func = test__bp_signal,
100 .is_supported = test__bp_signal_is_supported,
100 }, 101 },
101 { 102 {
102 .desc = "Breakpoint overflow sampling", 103 .desc = "Breakpoint overflow sampling",
103 .func = test__bp_signal_overflow, 104 .func = test__bp_signal_overflow,
105 .is_supported = test__bp_signal_is_supported,
104 }, 106 },
105 { 107 {
106 .desc = "Number of exit events of a simple workload", 108 .desc = "Number of exit events of a simple workload",
@@ -401,6 +403,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
401 if (!perf_test__matches(t, curr, argc, argv)) 403 if (!perf_test__matches(t, curr, argc, argv))
402 continue; 404 continue;
403 405
406 if (t->is_supported && !t->is_supported()) {
407 pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
408 continue;
409 }
410
404 pr_info("%2d: %-*s:", i, width, t->desc); 411 pr_info("%2d: %-*s:", i, width, t->desc);
405 412
406 if (intlist__find(skiplist, i)) { 413 if (intlist__find(skiplist, i)) {
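
The two test hunks add an optional is_supported() predicate to struct test and teach the runner to print "Disabled" and skip rather than fail, which test__bp_signal_is_supported() uses to opt powerpc out of the breakpoint tests. A self-contained model of that hook (the extra test entry and the always_ok() callback below are illustrative only):

----
#include <stdbool.h>
#include <stdio.h>

struct test {
        const char *desc;
        int (*func)(void);
        bool (*is_supported)(void);     /* optional, as in tests/tests.h */
};

static int always_ok(void) { return 0; }

static bool bp_signal_supported(void)
{
#ifdef __powerpc__
        return false;   /* no instruction breakpoints via perf events yet */
#else
        return true;
#endif
}

int main(void)
{
        const struct test tests[] = {
                { "Some other test",                    always_ok, NULL },
                { "Breakpoint overflow signal handler", always_ok, bp_signal_supported },
        };

        for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                if (tests[i].is_supported && !tests[i].is_supported()) {
                        printf("%2u: %s: Disabled\n", i + 1, tests[i].desc);
                        continue;
                }
                printf("%2u: %s: %s\n", i + 1, tests[i].desc,
                       tests[i].func() == 0 ? "Ok" : "FAILED!");
        }
        return 0;
}
----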
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 1f14e7612cbb..94b7c7b02bde 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -229,6 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
229 unsigned char buf2[BUFSZ]; 229 unsigned char buf2[BUFSZ];
230 size_t ret_len; 230 size_t ret_len;
231 u64 objdump_addr; 231 u64 objdump_addr;
232 const char *objdump_name;
233 char decomp_name[KMOD_DECOMP_LEN];
232 int ret; 234 int ret;
233 235
234 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); 236 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -289,9 +291,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
289 state->done[state->done_cnt++] = al.map->start; 291 state->done[state->done_cnt++] = al.map->start;
290 } 292 }
291 293
294 objdump_name = al.map->dso->long_name;
295 if (dso__needs_decompress(al.map->dso)) {
296 if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
297 decomp_name,
298 sizeof(decomp_name)) < 0) {
299 pr_debug("decompression failed\n");
300 return -1;
301 }
302
303 objdump_name = decomp_name;
304 }
305
292 /* Read the object code using objdump */ 306 /* Read the object code using objdump */
293 objdump_addr = map__rip_2objdump(al.map, al.addr); 307 objdump_addr = map__rip_2objdump(al.map, al.addr);
294 ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); 308 ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
309
310 if (dso__needs_decompress(al.map->dso))
311 unlink(objdump_name);
312
295 if (ret > 0) { 313 if (ret > 0) {
296 /* 314 /*
297 * The kernel maps are inaccurate - assume objdump is right in 315 * The kernel maps are inaccurate - assume objdump is right in
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 631859629403..577363809c9b 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -34,6 +34,7 @@ struct test {
34 int (*get_nr)(void); 34 int (*get_nr)(void);
35 const char *(*get_desc)(int subtest); 35 const char *(*get_desc)(int subtest);
36 } subtest; 36 } subtest;
37 bool (*is_supported)(void);
37}; 38};
38 39
39/* Tests */ 40/* Tests */
@@ -99,6 +100,8 @@ const char *test__clang_subtest_get_desc(int subtest);
99int test__clang_subtest_get_nr(void); 100int test__clang_subtest_get_nr(void);
100int test__unit_number__scnprint(int subtest); 101int test__unit_number__scnprint(int subtest);
101 102
103bool test__bp_signal_is_supported(void);
104
102#if defined(__arm__) || defined(__aarch64__) 105#if defined(__arm__) || defined(__aarch64__)
103#ifdef HAVE_DWARF_UNWIND_SUPPORT 106#ifdef HAVE_DWARF_UNWIND_SUPPORT
104struct thread; 107struct thread;
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 683f8340460c..ddbd56df9187 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -239,10 +239,20 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
239 const char *s = strchr(ops->raw, '+'); 239 const char *s = strchr(ops->raw, '+');
240 const char *c = strchr(ops->raw, ','); 240 const char *c = strchr(ops->raw, ',');
241 241
242 if (c++ != NULL) 242 /*
243 * skip over possible up to 2 operands to get to address, e.g.:
244 * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
245 */
246 if (c++ != NULL) {
243 ops->target.addr = strtoull(c, NULL, 16); 247 ops->target.addr = strtoull(c, NULL, 16);
244 else 248 if (!ops->target.addr) {
249 c = strchr(c, ',');
250 if (c++ != NULL)
251 ops->target.addr = strtoull(c, NULL, 16);
252 }
253 } else {
245 ops->target.addr = strtoull(ops->raw, NULL, 16); 254 ops->target.addr = strtoull(ops->raw, NULL, 16);
255 }
246 256
247 if (s++ != NULL) { 257 if (s++ != NULL) {
248 ops->target.offset = strtoull(s, NULL, 16); 258 ops->target.offset = strtoull(s, NULL, 16);
@@ -257,10 +267,27 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
257static int jump__scnprintf(struct ins *ins, char *bf, size_t size, 267static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
258 struct ins_operands *ops) 268 struct ins_operands *ops)
259{ 269{
270 const char *c = strchr(ops->raw, ',');
271
260 if (!ops->target.addr || ops->target.offset < 0) 272 if (!ops->target.addr || ops->target.offset < 0)
261 return ins__raw_scnprintf(ins, bf, size, ops); 273 return ins__raw_scnprintf(ins, bf, size, ops);
262 274
263 return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset); 275 if (c != NULL) {
276 const char *c2 = strchr(c + 1, ',');
277
278 /* check for 3-op insn */
279 if (c2 != NULL)
280 c = c2;
281 c++;
282
283 /* mirror arch objdump's space-after-comma style */
284 if (*c == ' ')
285 c++;
286 }
287
288 return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
289 ins->name, c ? c - ops->raw : 0, ops->raw,
290 ops->target.offset);
264} 291}
265 292
266static struct ins_ops jump_ops = { 293static struct ins_ops jump_ops = {
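
The new jump__parse() logic walks past up to two operands to find the branch target: take the text after the first comma, and if it does not parse as a hex address (for instance the "#26" immediate in the tbnz example from the added comment), retry after the next comma. The same two-step scan as a standalone snippet:

----
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirrors the comma handling added to jump__parse(); helper name is ours. */
static unsigned long long parse_jump_target(const char *raw)
{
        const char *c = strchr(raw, ',');
        unsigned long long addr;

        if (c == NULL)
                return strtoull(raw, NULL, 16);

        addr = strtoull(c + 1, NULL, 16);
        if (!addr) {
                /* first operand after the comma was not an address,
                 * e.g. the "#26" in a 3-operand tbnz; try the next one */
                c = strchr(c + 1, ',');
                if (c != NULL)
                        addr = strtoull(c + 1, NULL, 16);
        }
        return addr;
}

int main(void)
{
        /* operand strings modelled on the example in the patch comment */
        printf("%llx\n", parse_jump_target(
                "w0, #26, ffff0000083cd190 <security_file_permission+0xd0>"));
        printf("%llx\n", parse_jump_target(
                "ffff0000083cd190 <security_file_permission+0xd0>"));
        return 0;
}
----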
@@ -1294,6 +1321,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
1294 char linkname[PATH_MAX]; 1321 char linkname[PATH_MAX];
1295 char *build_id_filename; 1322 char *build_id_filename;
1296 char *build_id_path = NULL; 1323 char *build_id_path = NULL;
1324 char *pos;
1297 1325
1298 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && 1326 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
1299 !dso__is_kcore(dso)) 1327 !dso__is_kcore(dso))
@@ -1313,7 +1341,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
1313 if (!build_id_path) 1341 if (!build_id_path)
1314 return -1; 1342 return -1;
1315 1343
1316 dirname(build_id_path); 1344 /*
1345 * old style build-id cache has name of XX/XXXXXXX.. while
1346 * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
1347 * extract the build-id part of dirname in the new style only.
1348 */
1349 pos = strrchr(build_id_path, '/');
1350 if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
1351 dirname(build_id_path);
1317 1352
1318 if (dso__is_kcore(dso) || 1353 if (dso__is_kcore(dso) ||
1319 readlink(build_id_path, linkname, sizeof(linkname)) < 0 || 1354 readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
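
The dirname() call is now conditional: only a new-style build-id cache entry, whose last path component is a short name such as elf, kallsyms or vdso rather than the 38-character tail of the build-id, has that component stripped. A sketch of the check, assuming SBUILD_ID_SIZE is 41 (40 hex digits plus the terminating NUL, as in perf) and with made-up cache paths:

----
#include <libgen.h>
#include <stdio.h>
#include <string.h>

#define SBUILD_ID_SIZE 41       /* assumption: 40 hex chars + NUL */

static void maybe_strip(char *build_id_path)
{
        char *pos = strrchr(build_id_path, '/');

        /* short last component -> new-style entry -> drop it;
         * glibc dirname() truncates in place, which the perf code relies on */
        if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
                dirname(build_id_path);
        printf("%s\n", build_id_path);
}

int main(void)
{
        char new_style[] =
                "/root/.debug/.build-id/ab/cdef0123456789abcdef0123456789abcdef01/elf";
        char old_style[] =
                "/root/.debug/.build-id/ab/cdef0123456789abcdef0123456789abcdef01";

        maybe_strip(new_style); /* "/elf" is short -> stripped */
        maybe_strip(old_style); /* 38-char build-id tail -> kept as is */
        return 0;
}
----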
@@ -1396,31 +1431,10 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
1396 sizeof(symfs_filename)); 1431 sizeof(symfs_filename));
1397 } 1432 }
1398 } else if (dso__needs_decompress(dso)) { 1433 } else if (dso__needs_decompress(dso)) {
1399 char tmp[PATH_MAX]; 1434 char tmp[KMOD_DECOMP_LEN];
1400 struct kmod_path m;
1401 int fd;
1402 bool ret;
1403
1404 if (kmod_path__parse_ext(&m, symfs_filename))
1405 goto out;
1406
1407 snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
1408
1409 fd = mkstemp(tmp);
1410 if (fd < 0) {
1411 free(m.ext);
1412 goto out;
1413 }
1414
1415 ret = decompress_to_file(m.ext, symfs_filename, fd);
1416
1417 if (ret)
1418 pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
1419
1420 free(m.ext);
1421 close(fd);
1422 1435
1423 if (!ret) 1436 if (dso__decompress_kmodule_path(dso, symfs_filename,
1437 tmp, sizeof(tmp)) < 0)
1424 goto out; 1438 goto out;
1425 1439
1426 strcpy(symfs_filename, tmp); 1440 strcpy(symfs_filename, tmp);
@@ -1429,7 +1443,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
1429 snprintf(command, sizeof(command), 1443 snprintf(command, sizeof(command),
1430 "%s %s%s --start-address=0x%016" PRIx64 1444 "%s %s%s --start-address=0x%016" PRIx64
1431 " --stop-address=0x%016" PRIx64 1445 " --stop-address=0x%016" PRIx64
1432 " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand", 1446 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
1433 objdump_path ? objdump_path : "objdump", 1447 objdump_path ? objdump_path : "objdump",
1434 disassembler_style ? "-M " : "", 1448 disassembler_style ? "-M " : "",
1435 disassembler_style ? disassembler_style : "", 1449 disassembler_style ? disassembler_style : "",
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 168cc49654e7..e0148b081bdf 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -278,51 +278,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
278 return bf; 278 return bf;
279} 279}
280 280
281bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
282{
283 char *id_name = NULL, *ch;
284 struct stat sb;
285 char sbuild_id[SBUILD_ID_SIZE];
286
287 if (!dso->has_build_id)
288 goto err;
289
290 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
291 id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
292 if (!id_name)
293 goto err;
294 if (access(id_name, F_OK))
295 goto err;
296 if (lstat(id_name, &sb) == -1)
297 goto err;
298 if ((size_t)sb.st_size > size - 1)
299 goto err;
300 if (readlink(id_name, bf, size - 1) < 0)
301 goto err;
302
303 bf[sb.st_size] = '\0';
304
305 /*
306 * link should be:
307 * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
308 */
309 ch = strrchr(bf, '/');
310 if (!ch)
311 goto err;
312 if (ch - 3 < bf)
313 goto err;
314
315 free(id_name);
316 return strncmp(".ko", ch - 3, 3) == 0;
317err:
318 pr_err("Invalid build id: %s\n", id_name ? :
319 dso->long_name ? :
320 dso->short_name ? :
321 "[unknown]");
322 free(id_name);
323 return false;
324}
325
326#define dsos__for_each_with_build_id(pos, head) \ 281#define dsos__for_each_with_build_id(pos, head) \
327 list_for_each_entry(pos, head, node) \ 282 list_for_each_entry(pos, head, node) \
328 if (!pos->has_build_id) \ 283 if (!pos->has_build_id) \
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 8a89b195c1fc..96690a55c62c 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -17,7 +17,6 @@ char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
17 size_t size); 17 size_t size);
18 18
19char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); 19char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
20bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
21 20
22int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, 21int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
23 struct perf_sample *sample, struct perf_evsel *evsel, 22 struct perf_sample *sample, struct perf_evsel *evsel,
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index a96a99d2369f..4e7ab611377a 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -248,6 +248,64 @@ bool dso__needs_decompress(struct dso *dso)
248 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 248 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
249} 249}
250 250
251static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
252{
253 int fd = -1;
254 struct kmod_path m;
255
256 if (!dso__needs_decompress(dso))
257 return -1;
258
259 if (kmod_path__parse_ext(&m, dso->long_name))
260 return -1;
261
262 if (!m.comp)
263 goto out;
264
265 fd = mkstemp(tmpbuf);
266 if (fd < 0) {
267 dso->load_errno = errno;
268 goto out;
269 }
270
271 if (!decompress_to_file(m.ext, name, fd)) {
272 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
273 close(fd);
274 fd = -1;
275 }
276
277out:
278 free(m.ext);
279 return fd;
280}
281
282int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
283{
284 char tmpbuf[] = KMOD_DECOMP_NAME;
285 int fd;
286
287 fd = decompress_kmodule(dso, name, tmpbuf);
288 unlink(tmpbuf);
289 return fd;
290}
291
292int dso__decompress_kmodule_path(struct dso *dso, const char *name,
293 char *pathname, size_t len)
294{
295 char tmpbuf[] = KMOD_DECOMP_NAME;
296 int fd;
297
298 fd = decompress_kmodule(dso, name, tmpbuf);
299 if (fd < 0) {
300 unlink(tmpbuf);
301 return -1;
302 }
303
304 strncpy(pathname, tmpbuf, len);
305 close(fd);
306 return 0;
307}
308
251/* 309/*
252 * Parses kernel module specified in @path and updates 310 * Parses kernel module specified in @path and updates
253 * @m argument like: 311 * @m argument like:
@@ -335,6 +393,21 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
335 return 0; 393 return 0;
336} 394}
337 395
396void dso__set_module_info(struct dso *dso, struct kmod_path *m,
397 struct machine *machine)
398{
399 if (machine__is_host(machine))
400 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
401 else
402 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
403
404 /* _KMODULE_COMP should be next to _KMODULE */
405 if (m->kmod && m->comp)
406 dso->symtab_type++;
407
408 dso__set_short_name(dso, strdup(m->name), true);
409}
410
338/* 411/*
339 * Global list of open DSOs and the counter. 412 * Global list of open DSOs and the counter.
340 */ 413 */
@@ -381,7 +454,7 @@ static int do_open(char *name)
381 454
382static int __open_dso(struct dso *dso, struct machine *machine) 455static int __open_dso(struct dso *dso, struct machine *machine)
383{ 456{
384 int fd; 457 int fd = -EINVAL;
385 char *root_dir = (char *)""; 458 char *root_dir = (char *)"";
386 char *name = malloc(PATH_MAX); 459 char *name = malloc(PATH_MAX);
387 460
@@ -392,15 +465,30 @@ static int __open_dso(struct dso *dso, struct machine *machine)
392 root_dir = machine->root_dir; 465 root_dir = machine->root_dir;
393 466
394 if (dso__read_binary_type_filename(dso, dso->binary_type, 467 if (dso__read_binary_type_filename(dso, dso->binary_type,
395 root_dir, name, PATH_MAX)) { 468 root_dir, name, PATH_MAX))
396 free(name); 469 goto out;
397 return -EINVAL;
398 }
399 470
400 if (!is_regular_file(name)) 471 if (!is_regular_file(name))
401 return -EINVAL; 472 goto out;
473
474 if (dso__needs_decompress(dso)) {
475 char newpath[KMOD_DECOMP_LEN];
476 size_t len = sizeof(newpath);
477
478 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
479 fd = -dso->load_errno;
480 goto out;
481 }
482
483 strcpy(name, newpath);
484 }
402 485
403 fd = do_open(name); 486 fd = do_open(name);
487
488 if (dso__needs_decompress(dso))
489 unlink(name);
490
491out:
404 free(name); 492 free(name);
405 return fd; 493 return fd;
406} 494}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 12350b171727..bd061ba7b47c 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -244,6 +244,12 @@ bool is_supported_compression(const char *ext);
244bool is_kernel_module(const char *pathname, int cpumode); 244bool is_kernel_module(const char *pathname, int cpumode);
245bool decompress_to_file(const char *ext, const char *filename, int output_fd); 245bool decompress_to_file(const char *ext, const char *filename, int output_fd);
246bool dso__needs_decompress(struct dso *dso); 246bool dso__needs_decompress(struct dso *dso);
247int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
248int dso__decompress_kmodule_path(struct dso *dso, const char *name,
249 char *pathname, size_t len);
250
251#define KMOD_DECOMP_NAME "/tmp/perf-kmod-XXXXXX"
252#define KMOD_DECOMP_LEN sizeof(KMOD_DECOMP_NAME)
247 253
248struct kmod_path { 254struct kmod_path {
249 char *name; 255 char *name;
@@ -259,6 +265,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
259#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false) 265#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
260#define kmod_path__parse_ext(__m, __p) __kmod_path__parse(__m, __p, false, true) 266#define kmod_path__parse_ext(__m, __p) __kmod_path__parse(__m, __p, false, true)
261 267
268void dso__set_module_info(struct dso *dso, struct kmod_path *m,
269 struct machine *machine);
270
262/* 271/*
263 * The dso__data_* external interface provides following functions: 272 * The dso__data_* external interface provides following functions:
264 * dso__data_get_fd 273 * dso__data_get_fd
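
dso.h now exports dso__decompress_kmodule_fd()/_path() together with the KMOD_DECOMP_NAME mkstemp template, replacing the open-coded copies previously living in annotate.c and symbol-elf.c. The shape of the path variant, reduced to a runnable userspace sketch in which decompress_into() merely stands in for perf's decompress_to_file() and the module name is invented:

----
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define KMOD_DECOMP_NAME "/tmp/perf-kmod-XXXXXX"
#define KMOD_DECOMP_LEN  sizeof(KMOD_DECOMP_NAME)

/* Stand-in for perf's decompress_to_file(ext, name, fd). */
static int decompress_into(const char *src, int fd)
{
        (void)src;
        return write(fd, "module text\n", 12) == 12;
}

/* Same shape as the new dso__decompress_kmodule_path(): decompress into a
 * mkstemp() file and hand the path back; the caller unlinks it when done. */
static int decompress_kmodule_path(const char *src, char *pathname, size_t len)
{
        char tmpbuf[] = KMOD_DECOMP_NAME;
        int fd = mkstemp(tmpbuf);

        if (fd < 0)
                return -1;

        if (!decompress_into(src, fd)) {
                close(fd);
                unlink(tmpbuf);
                return -1;
        }

        strncpy(pathname, tmpbuf, len);
        close(fd);
        return 0;
}

int main(void)
{
        char path[KMOD_DECOMP_LEN];

        if (decompress_kmodule_path("nf_nat_ipv4.ko.gz", path, sizeof(path)) == 0) {
                printf("temporary copy at %s\n", path);
                unlink(path);   /* as code-reading.c now does after objdump */
        }
        return 0;
}
----

The fd variant follows the same pattern but unlinks the temporary name immediately and returns only the open descriptor, which is what symsrc__init() consumes below.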
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 314a07151fb7..5cac8d5e009a 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1469,8 +1469,16 @@ static int __event_process_build_id(struct build_id_event *bev,
1469 1469
1470 dso__set_build_id(dso, &bev->build_id); 1470 dso__set_build_id(dso, &bev->build_id);
1471 1471
1472 if (!is_kernel_module(filename, cpumode)) 1472 if (dso_type != DSO_TYPE_USER) {
1473 dso->kernel = dso_type; 1473 struct kmod_path m = { .name = NULL, };
1474
1475 if (!kmod_path__parse_name(&m, filename) && m.kmod)
1476 dso__set_module_info(dso, &m, machine);
1477 else
1478 dso->kernel = dso_type;
1479
1480 free(m.name);
1481 }
1474 1482
1475 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1483 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1476 sbuild_id); 1484 sbuild_id);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index d97e014c3df3..d7f31cb0a4cb 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -572,16 +572,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
572 if (dso == NULL) 572 if (dso == NULL)
573 goto out_unlock; 573 goto out_unlock;
574 574
575 if (machine__is_host(machine)) 575 dso__set_module_info(dso, m, machine);
576 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
577 else
578 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
579
580 /* _KMODULE_COMP should be next to _KMODULE */
581 if (m->kmod && m->comp)
582 dso->symtab_type++;
583
584 dso__set_short_name(dso, strdup(m->name), true);
585 dso__set_long_name(dso, strdup(filename), true); 576 dso__set_long_name(dso, strdup(filename), true);
586 } 577 }
587 578
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9d92af7d0718..40de3cb40d21 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1219,7 +1219,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
1219 fprintf(ofp, "# be retrieved using Python functions of the form " 1219 fprintf(ofp, "# be retrieved using Python functions of the form "
1220 "common_*(context).\n"); 1220 "common_*(context).\n");
1221 1221
1222 fprintf(ofp, "# See the perf-trace-python Documentation for the list " 1222 fprintf(ofp, "# See the perf-script-python Documentation for the list "
1223 "of available functions.\n\n"); 1223 "of available functions.\n\n");
1224 1224
1225 fprintf(ofp, "import os\n"); 1225 fprintf(ofp, "import os\n");
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index e7ee47f7377a..502505cf236a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -637,43 +637,6 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
637 return 0; 637 return 0;
638} 638}
639 639
640static int decompress_kmodule(struct dso *dso, const char *name,
641 enum dso_binary_type type)
642{
643 int fd = -1;
644 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
645 struct kmod_path m;
646
647 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
648 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
649 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
650 return -1;
651
652 if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
653 name = dso->long_name;
654
655 if (kmod_path__parse_ext(&m, name) || !m.comp)
656 return -1;
657
658 fd = mkstemp(tmpbuf);
659 if (fd < 0) {
660 dso->load_errno = errno;
661 goto out;
662 }
663
664 if (!decompress_to_file(m.ext, name, fd)) {
665 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
666 close(fd);
667 fd = -1;
668 }
669
670 unlink(tmpbuf);
671
672out:
673 free(m.ext);
674 return fd;
675}
676
677bool symsrc__possibly_runtime(struct symsrc *ss) 640bool symsrc__possibly_runtime(struct symsrc *ss)
678{ 641{
679 return ss->dynsym || ss->opdsec; 642 return ss->dynsym || ss->opdsec;
@@ -705,9 +668,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
705 int fd; 668 int fd;
706 669
707 if (dso__needs_decompress(dso)) { 670 if (dso__needs_decompress(dso)) {
708 fd = decompress_kmodule(dso, name, type); 671 fd = dso__decompress_kmodule_fd(dso, name);
709 if (fd < 0) 672 if (fd < 0)
710 return -1; 673 return -1;
674
675 type = dso->symtab_type;
711 } else { 676 } else {
712 fd = open(name, O_RDONLY); 677 fd = open(name, O_RDONLY);
713 if (fd < 0) { 678 if (fd < 0) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8f2b068ff756..e7a98dbd2aed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1562,10 +1562,6 @@ int dso__load(struct dso *dso, struct map *map)
1562 if (!runtime_ss && syms_ss) 1562 if (!runtime_ss && syms_ss)
1563 runtime_ss = syms_ss; 1563 runtime_ss = syms_ss;
1564 1564
1565 if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
1566 if (dso__build_id_is_kmod(dso, name, PATH_MAX))
1567 kmod = true;
1568
1569 if (syms_ss) 1565 if (syms_ss)
1570 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); 1566 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1571 else 1567 else
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 943a06291587..da45c4be5fb3 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -39,6 +39,14 @@ static int __report_module(struct addr_location *al, u64 ip,
39 return 0; 39 return 0;
40 40
41 mod = dwfl_addrmodule(ui->dwfl, ip); 41 mod = dwfl_addrmodule(ui->dwfl, ip);
42 if (mod) {
43 Dwarf_Addr s;
44
45 dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
46 if (s != al->map->start)
47 mod = 0;
48 }
49
42 if (!mod) 50 if (!mod)
43 mod = dwfl_report_elf(ui->dwfl, dso->short_name, 51 mod = dwfl_report_elf(ui->dwfl, dso->short_name,
44 dso->long_name, -1, al->map->start, 52 dso->long_name, -1, al->map->start,
@@ -224,7 +232,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
224 232
225 err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui); 233 err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
226 234
227 if (err && !ui->max_stack) 235 if (err && ui->max_stack != max_stack)
228 err = 0; 236 err = 0;
229 237
230 /* 238 /*