author     Arnaldo Carvalho de Melo <acme@redhat.com>  2017-06-19 14:14:35 -0400
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2017-06-19 14:14:35 -0400
commit     8052bd8243e823a0954bcbafda8f91072598f13d (patch)
tree       e5f1502735c2a75fbe865cf57582d2e8354e6f43
parent     d0fabd1cb8b70073a0f44f1cf8b663b5e7241c74 (diff)
parent     531c221df182af5a7fc2bf7ba7eee89354fe9a10 (diff)

Merge remote-tracking branch 'tip/perf/urgent' into perf/core

Merge branch 'perf/urgent' into perf/core, to pick up fixes

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt9
-rw-r--r--Documentation/devicetree/bindings/net/dsa/marvell.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt1
-rw-r--r--Documentation/networking/dpaa.txt194
-rw-r--r--Documentation/networking/tcp.txt31
-rw-r--r--MAINTAINERS29
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi5
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dts6
-rw-r--r--arch/arm/boot/dts/keystone-k2l-netcp.dtsi4
-rw-r--r--arch/arm/boot/dts/keystone-k2l.dtsi8
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts2
-rw-r--r--arch/arm/include/asm/device.h3
-rw-r--r--arch/arm/kvm/init.S5
-rw-r--r--arch/arm/mach-at91/Kconfig1
-rw-r--r--arch/arm/mach-davinci/pm.c7
-rw-r--r--arch/arm/mm/dma-mapping.c29
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi3
-rw-r--r--arch/arm64/configs/defconfig10
-rw-r--r--arch/arm64/include/asm/sysreg.h4
-rw-r--r--arch/arm64/kvm/hyp-init.S11
-rw-r--r--arch/arm64/kvm/vgic-sys-reg-v3.c10
-rw-r--r--arch/hexagon/mm/uaccess.c5
-rw-r--r--arch/powerpc/Kconfig21
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h2
-rw-r--r--arch/powerpc/include/asm/cputable.h3
-rw-r--r--arch/powerpc/include/asm/processor.h25
-rw-r--r--arch/powerpc/include/asm/topology.h14
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c58
-rw-r--r--arch/powerpc/kernel/process.c3
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c2
-rw-r--r--arch/powerpc/perf/power9-pmu.c4
-rw-r--r--arch/powerpc/platforms/Kconfig11
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c2
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c8
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c2
-rw-r--r--arch/powerpc/sysdev/simple_gpio.c3
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/configs/default_defconfig39
-rw-r--r--arch/s390/configs/gcov_defconfig28
-rw-r--r--arch/s390/configs/performance_defconfig27
-rw-r--r--arch/s390/configs/zfcpdump_defconfig6
-rw-r--r--arch/s390/defconfig8
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/kernel/entry.S19
-rw-r--r--arch/s390/kvm/interrupt.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c2
-rw-r--r--arch/sparc/Kconfig15
-rw-r--r--arch/sparc/include/asm/mmu_64.h2
-rw-r--r--arch/sparc/include/asm/mmu_context_64.h32
-rw-r--r--arch/sparc/include/asm/pil.h1
-rw-r--r--arch/sparc/include/asm/vio.h1
-rw-r--r--arch/sparc/kernel/ds.c2
-rw-r--r--arch/sparc/kernel/irq_64.c17
-rw-r--r--arch/sparc/kernel/kernel.h1
-rw-r--r--arch/sparc/kernel/smp_64.c31
-rw-r--r--arch/sparc/kernel/tsb.S11
-rw-r--r--arch/sparc/kernel/ttable_64.S2
-rw-r--r--arch/sparc/kernel/vio.c68
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/multi3.S35
-rw-r--r--arch/sparc/mm/init_64.c89
-rw-r--r--arch/sparc/mm/tsb.c7
-rw-r--r--arch/sparc/mm/ultra.S5
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/kernel/cpu/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c3
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kvm/cpuid.c20
-rw-r--r--arch/x86/kvm/mmu.c7
-rw-r--r--arch/x86/kvm/mmu.h1
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c3
-rw-r--r--arch/xtensa/include/asm/irq.h3
-rw-r--r--arch/xtensa/kernel/irq.c5
-rw-r--r--arch/xtensa/kernel/setup.c3
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S6
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c3
-rw-r--r--arch/xtensa/platforms/xtfpga/include/platform/hardware.h6
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c10
-rw-r--r--block/bfq-cgroup.c116
-rw-r--r--block/bfq-iosched.c2
-rw-r--r--block/bfq-iosched.h23
-rw-r--r--block/bio-integrity.c3
-rw-r--r--block/blk-mq.c25
-rw-r--r--block/blk-throttle.c22
-rw-r--r--crypto/asymmetric_keys/public_key.c2
-rw-r--r--crypto/asymmetric_keys/verify_pefile.c4
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c1
-rw-r--r--crypto/drbg.c5
-rw-r--r--crypto/gcm.c6
-rw-r--r--drivers/acpi/arm64/iort.c22
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/button.c5
-rw-r--r--drivers/acpi/device_pm.c3
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/acpi/sleep.c28
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/base/power/wakeup.c18
-rw-r--r--drivers/block/loop.c3
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/random.c49
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/dax/super.c9
-rw-r--r--drivers/firmware/efi/efi-bgrt.c26
-rw-r--r--drivers/firmware/google/vpd.c8
-rw-r--r--drivers/gpio/gpio-aspeed.c3
-rw-r--r--drivers/gpio/gpio-crystalcove.c54
-rw-r--r--drivers/gpio/gpio-mvebu.c9
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c11
-rw-r--r--drivers/gpu/drm/drm_drv.c7
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c9
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c106
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c9
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c20
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c19
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c5
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c21
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c20
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c7
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c12
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c115
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c23
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c15
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c13
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c8
-rw-r--r--drivers/iio/adc/max9611.c10
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c38
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/industrialio-trigger.c3
-rw-r--r--drivers/iio/light/ltr501.c4
-rw-r--r--drivers/iio/proximity/as3935.c14
-rw-r--r--drivers/input/mouse/elantech.c16
-rw-r--r--drivers/input/rmi4/rmi_f03.c2
-rw-r--r--drivers/iommu/of_iommu.c7
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c2
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c2
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/isdn/mISDN/stack.c2
-rw-r--r--drivers/md/md.c16
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid1.c2
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5.c3
-rw-r--r--drivers/media/Kconfig6
-rw-r--r--drivers/media/Makefile4
-rw-r--r--drivers/media/cec/Kconfig14
-rw-r--r--drivers/media/cec/Makefile2
-rw-r--r--drivers/media/cec/cec-adap.c2
-rw-r--r--drivers/media/cec/cec-core.c8
-rw-r--r--drivers/media/i2c/Kconfig9
-rw-r--r--drivers/media/platform/Kconfig10
-rw-r--r--drivers/media/platform/vivid/Kconfig3
-rw-r--r--drivers/media/rc/rc-ir-raw.c13
-rw-r--r--drivers/media/usb/pulse8-cec/Kconfig3
-rw-r--r--drivers/media/usb/rainshadow-cec/Kconfig3
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c2
-rw-r--r--drivers/memory/atmel-ebi.c2
-rw-r--r--drivers/misc/cxl/file.c7
-rw-r--r--drivers/misc/cxl/native.c14
-rw-r--r--drivers/misc/mei/bus.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h6
-rw-r--r--drivers/net/ethernet/ethoc.c3
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h26
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c75
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c22
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c7
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/mdio_bus.c13
-rw-r--r--drivers/net/phy/micrel.c42
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/virtio_net.c5
-rw-r--r--drivers/net/vxlan.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c9
-rw-r--r--drivers/nvme/host/core.c21
-rw-r--r--drivers/nvme/host/fc.c20
-rw-r--r--drivers/nvme/host/pci.c13
-rw-r--r--drivers/nvme/host/rdma.c44
-rw-r--r--drivers/of/device.c4
-rw-r--r--drivers/phy/phy-qcom-qmp.c14
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c2
-rw-r--r--drivers/reset/hisilicon/hi6220_reset.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c12
-rw-r--r--drivers/s390/crypto/ap_bus.c38
-rw-r--r--drivers/s390/crypto/ap_card.c9
-rw-r--r--drivers/s390/crypto/ap_queue.c9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c10
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c1
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c29
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h26
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c2
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/staging/ccree/Kconfig2
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c9
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Makefile2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/Makefile2
-rw-r--r--drivers/usb/chipidea/core.c5
-rw-r--r--drivers/usb/chipidea/debug.c3
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c41
-rw-r--r--drivers/usb/dwc2/params.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c13
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c45
-rw-r--r--drivers/usb/musb/musb_dsps.c5
-rw-r--r--drivers/xen/privcmd.c4
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/dir-item.c2
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/extent-tree.c7
-rw-r--r--fs/btrfs/extent_io.c126
-rw-r--r--fs/btrfs/inode.c6
-rw-r--r--fs/ext4/acl.c4
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/extents.c85
-rw-r--r--fs/ext4/file.c54
-rw-r--r--fs/ext4/inline.c5
-rw-r--r--fs/ext4/inode.c30
-rw-r--r--fs/ext4/mballoc.c23
-rw-r--r--fs/ext4/namei.c13
-rw-r--r--fs/ext4/super.c17
-rw-r--r--fs/ext4/xattr.c8
-rw-r--r--fs/jbd2/transaction.c6
-rw-r--r--fs/quota/dquot.c16
-rw-r--r--fs/stat.c1
-rw-r--r--fs/ufs/balloc.c26
-rw-r--r--fs/ufs/inode.c28
-rw-r--r--fs/ufs/super.c18
-rw-r--r--fs/ufs/util.h10
-rw-r--r--include/linux/compiler-clang.h8
-rw-r--r--include/linux/dma-iommu.h1
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqchip/arm-gic.h28
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/mlx4/qp.h1
-rw-r--r--include/linux/quotaops.h6
-rw-r--r--include/linux/srcu.h2
-rw-r--r--include/linux/suspend.h7
-rw-r--r--include/media/cec-notifier.h2
-rw-r--r--include/media/cec.h4
-rw-r--r--include/net/ipv6.h1
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/uapi/linux/keyctl.h4
-rw-r--r--kernel/cpu.c4
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/suspend.c29
-rw-r--r--kernel/printk/printk.c46
-rw-r--r--kernel/rcu/srcu.c5
-rw-r--r--kernel/rcu/srcutiny.c7
-rw-r--r--kernel/rcu/srcutree.c5
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_stp_if.c3
-rw-r--r--net/core/devlink.c8
-rw-r--r--net/core/skbuff.c5
-rw-r--r--net/dsa/dsa.c47
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/legacy.c47
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/ipv4/tcp_cong.c1
-rw-r--r--net/ipv6/calipso.c6
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_tunnel.c3
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/xfrm6_mode_ro.c2
-rw-r--r--net/ipv6/xfrm6_mode_transport.c2
-rw-r--r--net/mac80211/agg-tx.c128
-rw-r--r--net/mac80211/ht.c16
-rw-r--r--net/mac80211/ieee80211_i.h14
-rw-r--r--net/mac80211/iface.c11
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c7
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c9
-rw-r--r--net/netfilter/nf_nat_core.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c22
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--security/keys/Kconfig6
-rw-r--r--security/keys/dh.c300
-rw-r--r--security/keys/encrypted-keys/encrypted.c204
-rw-r--r--security/keys/gc.c4
-rw-r--r--security/keys/key.c16
-rw-r--r--security/keys/keyctl.c16
-rw-r--r--security/keys/keyring.c12
-rw-r--r--security/keys/process_keys.c7
-rw-r--r--security/keys/trusted.c50
-rw-r--r--security/keys/user_defined.c16
-rw-r--r--sound/core/timer.c7
-rw-r--r--sound/pci/hda/patch_realtek.c11
-rw-r--r--sound/soc/atmel/atmel-classd.c9
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/rt286.c7
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c2
-rw-r--r--sound/soc/intel/skylake/skl.c162
-rw-r--r--sound/soc/intel/skylake/skl.h4
-rw-r--r--sound/soc/sh/rcar/adg.c6
-rw-r--r--sound/soc/sh/rcar/cmd.c1
-rw-r--r--sound/soc/sh/rcar/core.c51
-rw-r--r--sound/soc/sh/rcar/gen.c1
-rw-r--r--sound/soc/sh/rcar/rsnd.h2
-rw-r--r--sound/soc/sh/rcar/src.c12
-rw-r--r--sound/soc/sh/rcar/ssi.c18
-rw-r--r--sound/soc/sh/rcar/ssiu.c3
-rw-r--r--sound/soc/soc-core.c5
-rw-r--r--tools/perf/Documentation/perf-probe.txt8
-rw-r--r--tools/perf/Documentation/perf-script-perl.txt2
-rw-r--r--tools/perf/Documentation/perf-script-python.txt23
-rw-r--r--tools/perf/Makefile.config38
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/arch/Build2
-rw-r--r--tools/perf/pmu-events/Build4
-rw-r--r--tools/perf/tests/Build2
-rw-r--r--tools/perf/tests/code-reading.c20
-rw-r--r--tools/perf/tests/task-exit.c2
-rw-r--r--tools/perf/util/annotate.c37
-rw-r--r--tools/perf/util/build-id.c45
-rw-r--r--tools/perf/util/build-id.h1
-rw-r--r--tools/perf/util/dso.c85
-rw-r--r--tools/perf/util/dso.h6
-rw-r--r--tools/perf/util/evsel.c12
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/symbol-elf.c38
-rw-r--r--tools/perf/util/symbol.c4
-rw-r--r--tools/perf/util/unwind-libdw.c8
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c2
-rw-r--r--virt/kvm/arm/mmu.c3
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c16
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c28
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c47
-rw-r--r--virt/kvm/arm/vgic/vgic.h12
423 files changed, 3678 insertions, 2306 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 15f79c27748d..0f5c3b4347c6 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -866,6 +866,15 @@
866 866
867 dscc4.setup= [NET] 867 dscc4.setup= [NET]
868 868
869 dt_cpu_ftrs= [PPC]
870 Format: {"off" | "known"}
871 Control how the dt_cpu_ftrs device-tree binding is
872 used for CPU feature discovery and setup (if it
873 exists).
874 off: Do not use it, fall back to legacy cpu table.
875 known: Do not pass through unknown features to guests
876 or userspace, only those that the kernel is aware of.
877
869 dump_apple_properties [X86] 878 dump_apple_properties [X86]
870 Dump name and content of EFI device properties on 879 Dump name and content of EFI device properties on
871 x86 Macs. Useful for driver authors to determine 880 x86 Macs. Useful for driver authors to determine
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 7ef9dbb08957..1d4d0f49c9d0 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -26,6 +26,10 @@ Optional properties:
26- interrupt-controller : Indicates the switch is itself an interrupt 26- interrupt-controller : Indicates the switch is itself an interrupt
27 controller. This is used for the PHY interrupts. 27 controller. This is used for the PHY interrupts.
28#interrupt-cells = <2> : Controller uses two cells, number and flag 28#interrupt-cells = <2> : Controller uses two cells, number and flag
29- eeprom-length : Set to the length of an EEPROM connected to the
30 switch. Must be set if the switch can not detect
31 the presence and/or size of a connected EEPROM,
32 otherwise optional.
29- mdio : Container of PHY and devices on the switches MDIO 33- mdio : Container of PHY and devices on the switches MDIO
30 bus. 34 bus.
31- mdio? : Container of PHYs and devices on the external MDIO 35- mdio? : Container of PHYs and devices on the external MDIO
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 00bea038639e..fcf199b64d3d 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -10,6 +10,7 @@ Required properties:
10 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc; 10 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
11 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs; 11 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
12 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs; 12 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
13 - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
13 - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs; 14 - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
14 - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs; 15 - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
15 - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs; 16 - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644
index 000000000000..76e016d4d344
--- /dev/null
+++ b/Documentation/networking/dpaa.txt
@@ -0,0 +1,194 @@
1The QorIQ DPAA Ethernet Driver
2==============================
3
4Authors:
5Madalin Bucur <madalin.bucur@nxp.com>
6Camelia Groza <camelia.groza@nxp.com>
7
8Contents
9========
10
11 - DPAA Ethernet Overview
12 - DPAA Ethernet Supported SoCs
13 - Configuring DPAA Ethernet in your kernel
14 - DPAA Ethernet Frame Processing
15 - DPAA Ethernet Features
16 - Debugging
17
18DPAA Ethernet Overview
19======================
20
21DPAA stands for Data Path Acceleration Architecture and it is a
22set of networking acceleration IPs that are available on several
23generations of SoCs, both on PowerPC and ARM64.
24
25The Freescale DPAA architecture consists of a series of hardware blocks
26that support Ethernet connectivity. The Ethernet driver depends upon the
27following drivers in the Linux kernel:
28
29 - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
30 drivers/iommu/fsl_*
31 - Frame Manager (FMan)
32 drivers/net/ethernet/freescale/fman
33 - Queue Manager (QMan), Buffer Manager (BMan)
34 drivers/soc/fsl/qbman
35
36A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
37
38 dpaa_eth /eth0\ ... /ethN\
39 driver | | | |
40 ------------- ---- ----------- ---- -------------
41 -Ports / Tx Rx \ ... / Tx Rx \
42 FMan | | | |
43 -MACs | MAC0 | | MACN |
44 / dtsec0 \ ... / dtsecN \ (or tgec)
45 / \ / \(or memac)
46 --------- -------------- --- -------------- ---------
47 FMan, FMan Port, FMan SP, FMan MURAM drivers
48 ---------------------------------------------------------
49 FMan HW blocks: MURAM, MACs, Ports, SP
50 ---------------------------------------------------------
51
52The dpaa_eth relation to the QMan, BMan and FMan:
53 ________________________________
54 dpaa_eth / eth0 \
55 driver / \
56 --------- -^- -^- -^- --- ---------
57 QMan driver / \ / \ / \ \ / | BMan |
58 |Rx | |Rx | |Tx | |Tx | | driver |
59 --------- |Dfl| |Err| |Cnf| |FQs| | |
60 QMan HW |FQ | |FQ | |FQs| | | | |
61 / \ / \ / \ \ / | |
62 --------- --- --- --- -v- ---------
63 | FMan QMI | |
64 | FMan HW FMan BMI | BMan HW |
65 ----------------------- --------
66
67where the acronyms used above (and in the code) are:
68DPAA = Data Path Acceleration Architecture
69FMan = DPAA Frame Manager
70QMan = DPAA Queue Manager
71BMan = DPAA Buffers Manager
72QMI = QMan interface in FMan
73BMI = BMan interface in FMan
74FMan SP = FMan Storage Profiles
75MURAM = Multi-user RAM in FMan
76FQ = QMan Frame Queue
77Rx Dfl FQ = default reception FQ
78Rx Err FQ = Rx error frames FQ
79Tx Cnf FQ = Tx confirmation FQs
80Tx FQs = transmission frame queues
81dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
82tgec = ten gigabit Ethernet controller (10 Gbps)
83memac = multirate Ethernet MAC (10/100/1000/10000)
84
85DPAA Ethernet Supported SoCs
86============================
87
88The DPAA drivers enable the Ethernet controllers present on the following SoCs:
89
90# PPC
91P1023
92P2041
93P3041
94P4080
95P5020
96P5040
97T1023
98T1024
99T1040
100T1042
101T2080
102T4240
103B4860
104
105# ARM
106LS1043A
107LS1046A
108
109Configuring DPAA Ethernet in your kernel
110========================================
111
112To enable the DPAA Ethernet driver, the following Kconfig options are required:
113
114# common for arch/arm64 and arch/powerpc platforms
115CONFIG_FSL_DPAA=y
116CONFIG_FSL_FMAN=y
117CONFIG_FSL_DPAA_ETH=y
118CONFIG_FSL_XGMAC_MDIO=y
119
120# for arch/powerpc only
121CONFIG_FSL_PAMU=y
122
123# common options needed for the PHYs used on the RDBs
124CONFIG_VITESSE_PHY=y
125CONFIG_REALTEK_PHY=y
126CONFIG_AQUANTIA_PHY=y
127
128DPAA Ethernet Frame Processing
129==============================
130
131On Rx, buffers for the incoming frames are retrieved from one of the three
132existing buffers pools. The driver initializes and seeds these, each with
133buffers of different sizes: 1KB, 2KB and 4KB.
134
135On Tx, all transmitted frames are returned to the driver through Tx
136confirmation frame queues. The driver is then responsible for freeing the
137buffers. In order to do this properly, a backpointer is added to the buffer
138before transmission that points to the skb. When the buffer returns to the
139driver on a confirmation FQ, the skb can be correctly consumed.
140
141DPAA Ethernet Features
142======================
143
144Currently the DPAA Ethernet driver enables the basic features required for
145a Linux Ethernet driver. The support for advanced features will be added
146gradually.
147
148The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
149checksum offload feature is enabled by default and cannot be controlled through
150ethtool.
151
152The driver has support for multiple prioritized Tx traffic classes. Priorities
153range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
154strict priority levels. Each traffic class contains NR_CPU TX queues. By
155default, only one traffic class is enabled and the lowest priority Tx queues
156are used. Higher priority traffic classes can be enabled with the mqprio
157qdisc. For example, all four traffic classes are enabled on an interface with
158the following command. Furthermore, skb priority levels are mapped to traffic
159classes as follows:
160
161 * priorities 0 to 3 - traffic class 0 (low priority)
162 * priorities 4 to 7 - traffic class 1 (medium-low priority)
163 * priorities 8 to 11 - traffic class 2 (medium-high priority)
164 * priorities 12 to 15 - traffic class 3 (high priority)
165
166tc qdisc add dev <int> root handle 1: \
167 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
168
169Debugging
170=========
171
172The following statistics are exported for each interface through ethtool:
173
174 - interrupt count per CPU
175 - Rx packets count per CPU
176 - Tx packets count per CPU
177 - Tx confirmed packets count per CPU
178 - Tx S/G frames count per CPU
179 - Tx error count per CPU
180 - Rx error count per CPU
181 - Rx error count per type
182 - congestion related statistics:
183 - congestion status
184 - time spent in congestion
185 - number of time the device entered congestion
186 - dropped packets count per cause
187
188The driver also exports the following information in sysfs:
189
190 - the FQ IDs for each FQ type
191 /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
192
193 - the IDs of the buffer pools in use
194 /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
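
The Tx confirmation scheme described in the dpaa.txt hunk above can be pictured with a small sketch (illustrative only, not the actual dpaa_eth code; the helper names are made up): the skb pointer is written into the buffer before the frame is handed to the FMan, and read back when the buffer shows up on a Tx confirmation FQ so the skb can be freed.

#include <linux/skbuff.h>

/* Before Tx: stash a backpointer to the owning skb at the start of the buffer. */
static void tx_buf_store_skb(void *buf_start, struct sk_buff *skb)
{
	struct sk_buff **skbh = buf_start;

	*skbh = skb;
}

/* On the Tx confirmation path: recover the skb so it can be consumed. */
static struct sk_buff *tx_buf_get_skb(void *buf_start)
{
	struct sk_buff **skbh = buf_start;

	return *skbh;
}
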
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index bdc4c0db51e1..9c7139d57e57 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -1,7 +1,7 @@
1TCP protocol 1TCP protocol
2============ 2============
3 3
4Last updated: 9 February 2008 4Last updated: 3 June 2017
5 5
6Contents 6Contents
7======== 7========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
29A congestion control mechanism can be registered through functions in 29A congestion control mechanism can be registered through functions in
30tcp_cong.c. The functions used by the congestion control mechanism are 30tcp_cong.c. The functions used by the congestion control mechanism are
31registered via passing a tcp_congestion_ops struct to 31registered via passing a tcp_congestion_ops struct to
32tcp_register_congestion_control. As a minimum name, ssthresh, 32tcp_register_congestion_control. As a minimum, the congestion control
33cong_avoid must be valid. 33mechanism must provide a valid name and must implement either ssthresh,
34cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
34 35
35Private data for a congestion control mechanism is stored in tp->ca_priv. 36Private data for a congestion control mechanism is stored in tp->ca_priv.
36tcp_ca(tp) returns a pointer to this space. This is preallocated space - it 37tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
37is important to check the size of your private data will fit this space, or 38is important to check the size of your private data will fit this space, or
38alternatively space could be allocated elsewhere and a pointer to it could 39alternatively, space could be allocated elsewhere and a pointer to it could
39be stored here. 40be stored here.
40 41
41There are three kinds of congestion control algorithms currently: The 42There are three kinds of congestion control algorithms currently: The
42simplest ones are derived from TCP reno (highspeed, scalable) and just 43simplest ones are derived from TCP reno (highspeed, scalable) and just
43provide an alternative the congestion window calculation. More complex 44provide an alternative congestion window calculation. More complex
44ones like BIC try to look at other events to provide better 45ones like BIC try to look at other events to provide better
45heuristics. There are also round trip time based algorithms like 46heuristics. There are also round trip time based algorithms like
46Vegas and Westwood+. 47Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
49needs to maintain fairness and performance. Please review current 50needs to maintain fairness and performance. Please review current
50research and RFC's before developing new modules. 51research and RFC's before developing new modules.
51 52
52The method that is used to determine which congestion control mechanism is 53The default congestion control mechanism is chosen based on the
53determined by the setting of the sysctl net.ipv4.tcp_congestion_control. 54DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
54The default congestion control will be the last one registered (LIFO); 55value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
55so if you built everything as modules, the default will be reno. If you 56module will be autoloaded if needed and you will get the expected protocol. If
56build with the defaults from Kconfig, then CUBIC will be builtin (not a 57you ask for an unknown congestion method, then the sysctl attempt will fail.
57module) and it will end up the default.
58 58
59If you really want a particular default value then you will need 59If you remove a TCP congestion control module, then you will get the next
60to set it with the sysctl. If you use a sysctl, the module will be autoloaded
61if needed and you will get the expected protocol. If you ask for an
62unknown congestion method, then the sysctl attempt will fail.
63
64If you remove a tcp congestion control module, then you will get the next
65available one. Since reno cannot be built as a module, and cannot be 60available one. Since reno cannot be built as a module, and cannot be
66deleted, it will always be available. 61removed, it will always be available.
67 62
68How the new TCP output machine [nyi] works. 63How the new TCP output machine [nyi] works.
69=========================================== 64===========================================
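
As an aside to the registration contract spelled out in the updated tcp.txt hunk above, here is a minimal sketch of a pluggable congestion control module (a toy Reno-style example, not code from this merge; the "toy" name and helper functions are hypothetical): the tcp_congestion_ops supplies a valid name plus the ssthresh, cong_avoid and undo_cwnd hooks and is registered through tcp_register_congestion_control().

#include <linux/module.h>
#include <net/tcp.h>

static u32 toy_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* halve the congestion window on loss, never below 2 segments */
	return max(tp->snd_cwnd >> 1U, 2U);
}

static void toy_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* additive increase: roughly one segment per cwnd's worth of ACKs */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}

static u32 toy_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* undo a cwnd reduction that turned out to be spurious */
	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
}

static struct tcp_congestion_ops toy_cong __read_mostly = {
	.name		= "toy",
	.owner		= THIS_MODULE,
	.ssthresh	= toy_ssthresh,
	.cong_avoid	= toy_cong_avoid,
	.undo_cwnd	= toy_undo_cwnd,
};

static int __init toy_cong_register(void)
{
	return tcp_register_congestion_control(&toy_cong);
}

static void __exit toy_cong_unregister(void)
{
	tcp_unregister_congestion_control(&toy_cong);
}

module_init(toy_cong_register);
module_exit(toy_cong_unregister);
MODULE_LICENSE("GPL");

Once such a module is loaded, it would be selectable through the net.ipv4.tcp_congestion_control sysctl mentioned in the documentation above.
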
diff --git a/MAINTAINERS b/MAINTAINERS
index 7a28acd7f525..09b5ab6a8a5c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1172,7 +1172,7 @@ N: clps711x
1172 1172
1173ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE 1173ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
1174M: Hartley Sweeten <hsweeten@visionengravers.com> 1174M: Hartley Sweeten <hsweeten@visionengravers.com>
1175M: Ryan Mallon <rmallon@gmail.com> 1175M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
1176L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1176L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1177S: Maintained 1177S: Maintained
1178F: arch/arm/mach-ep93xx/ 1178F: arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@ M: Gregory Clement <gregory.clement@free-electrons.com>
1489M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1489M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
1490L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1490L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1491S: Maintained 1491S: Maintained
1492F: arch/arm/mach-mvebu/
1493F: drivers/rtc/rtc-armada38x.c
1494F: arch/arm/boot/dts/armada* 1492F: arch/arm/boot/dts/armada*
1495F: arch/arm/boot/dts/kirkwood* 1493F: arch/arm/boot/dts/kirkwood*
1494F: arch/arm/configs/mvebu_*_defconfig
1495F: arch/arm/mach-mvebu/
1496F: arch/arm64/boot/dts/marvell/armada* 1496F: arch/arm64/boot/dts/marvell/armada*
1497F: drivers/cpufreq/mvebu-cpufreq.c 1497F: drivers/cpufreq/mvebu-cpufreq.c
1498F: arch/arm/configs/mvebu_*_defconfig 1498F: drivers/irqchip/irq-armada-370-xp.c
1499F: drivers/irqchip/irq-mvebu-*
1500F: drivers/rtc/rtc-armada38x.c
1499 1501
1500ARM/Marvell Berlin SoC support 1502ARM/Marvell Berlin SoC support
1501M: Jisheng Zhang <jszhang@marvell.com> 1503M: Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@ N: rockchip
1721ARM/SAMSUNG EXYNOS ARM ARCHITECTURES 1723ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
1722M: Kukjin Kim <kgene@kernel.org> 1724M: Kukjin Kim <kgene@kernel.org>
1723M: Krzysztof Kozlowski <krzk@kernel.org> 1725M: Krzysztof Kozlowski <krzk@kernel.org>
1724R: Javier Martinez Canillas <javier@osg.samsung.com>
1725L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1726L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1726L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 1727L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
1727Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/ 1728Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@ F: drivers/edac/altera_edac.
1829ARM/STI ARCHITECTURE 1830ARM/STI ARCHITECTURE
1830M: Patrice Chotard <patrice.chotard@st.com> 1831M: Patrice Chotard <patrice.chotard@st.com>
1831L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1832L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1832L: kernel@stlinux.com
1833W: http://www.stlinux.com 1833W: http://www.stlinux.com
1834S: Maintained 1834S: Maintained
1835F: arch/arm/mach-sti/ 1835F: arch/arm/mach-sti/
@@ -5622,7 +5622,7 @@ F: scripts/get_maintainer.pl
5622 5622
5623GENWQE (IBM Generic Workqueue Card) 5623GENWQE (IBM Generic Workqueue Card)
5624M: Frank Haverkamp <haver@linux.vnet.ibm.com> 5624M: Frank Haverkamp <haver@linux.vnet.ibm.com>
5625M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com> 5625M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
5626S: Supported 5626S: Supported
5627F: drivers/misc/genwqe/ 5627F: drivers/misc/genwqe/
5628 5628
@@ -5667,7 +5667,6 @@ F: tools/testing/selftests/gpio/
5667 5667
5668GPIO SUBSYSTEM 5668GPIO SUBSYSTEM
5669M: Linus Walleij <linus.walleij@linaro.org> 5669M: Linus Walleij <linus.walleij@linaro.org>
5670M: Alexandre Courbot <gnurou@gmail.com>
5671L: linux-gpio@vger.kernel.org 5670L: linux-gpio@vger.kernel.org
5672T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git 5671T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
5673S: Maintained 5672S: Maintained
@@ -7707,7 +7706,7 @@ F: drivers/platform/x86/hp_accel.c
7707 7706
7708LIVE PATCHING 7707LIVE PATCHING
7709M: Josh Poimboeuf <jpoimboe@redhat.com> 7708M: Josh Poimboeuf <jpoimboe@redhat.com>
7710M: Jessica Yu <jeyu@redhat.com> 7709M: Jessica Yu <jeyu@kernel.org>
7711M: Jiri Kosina <jikos@kernel.org> 7710M: Jiri Kosina <jikos@kernel.org>
7712M: Miroslav Benes <mbenes@suse.cz> 7711M: Miroslav Benes <mbenes@suse.cz>
7713R: Petr Mladek <pmladek@suse.com> 7712R: Petr Mladek <pmladek@suse.com>
@@ -8508,7 +8507,7 @@ S: Odd Fixes
8508F: drivers/media/radio/radio-miropcm20* 8507F: drivers/media/radio/radio-miropcm20*
8509 8508
8510MELLANOX MLX4 core VPI driver 8509MELLANOX MLX4 core VPI driver
8511M: Yishai Hadas <yishaih@mellanox.com> 8510M: Tariq Toukan <tariqt@mellanox.com>
8512L: netdev@vger.kernel.org 8511L: netdev@vger.kernel.org
8513L: linux-rdma@vger.kernel.org 8512L: linux-rdma@vger.kernel.org
8514W: http://www.mellanox.com 8513W: http://www.mellanox.com
@@ -8516,7 +8515,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8516S: Supported 8515S: Supported
8517F: drivers/net/ethernet/mellanox/mlx4/ 8516F: drivers/net/ethernet/mellanox/mlx4/
8518F: include/linux/mlx4/ 8517F: include/linux/mlx4/
8519F: include/uapi/rdma/mlx4-abi.h
8520 8518
8521MELLANOX MLX4 IB driver 8519MELLANOX MLX4 IB driver
8522M: Yishai Hadas <yishaih@mellanox.com> 8520M: Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8524,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
8526S: Supported 8524S: Supported
8527F: drivers/infiniband/hw/mlx4/ 8525F: drivers/infiniband/hw/mlx4/
8528F: include/linux/mlx4/ 8526F: include/linux/mlx4/
8527F: include/uapi/rdma/mlx4-abi.h
8529 8528
8530MELLANOX MLX5 core VPI driver 8529MELLANOX MLX5 core VPI driver
8531M: Saeed Mahameed <saeedm@mellanox.com> 8530M: Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8537,6 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
8538S: Supported 8537S: Supported
8539F: drivers/net/ethernet/mellanox/mlx5/core/ 8538F: drivers/net/ethernet/mellanox/mlx5/core/
8540F: include/linux/mlx5/ 8539F: include/linux/mlx5/
8541F: include/uapi/rdma/mlx5-abi.h
8542 8540
8543MELLANOX MLX5 IB driver 8541MELLANOX MLX5 IB driver
8544M: Matan Barak <matanb@mellanox.com> 8542M: Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8547,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
8549S: Supported 8547S: Supported
8550F: drivers/infiniband/hw/mlx5/ 8548F: drivers/infiniband/hw/mlx5/
8551F: include/linux/mlx5/ 8549F: include/linux/mlx5/
8550F: include/uapi/rdma/mlx5-abi.h
8552 8551
8553MELEXIS MLX90614 DRIVER 8552MELEXIS MLX90614 DRIVER
8554M: Crt Mori <cmo@melexis.com> 8553M: Crt Mori <cmo@melexis.com>
@@ -8588,7 +8587,7 @@ S: Maintained
8588F: drivers/media/dvb-frontends/mn88473* 8587F: drivers/media/dvb-frontends/mn88473*
8589 8588
8590MODULE SUPPORT 8589MODULE SUPPORT
8591M: Jessica Yu <jeyu@redhat.com> 8590M: Jessica Yu <jeyu@kernel.org>
8592M: Rusty Russell <rusty@rustcorp.com.au> 8591M: Rusty Russell <rusty@rustcorp.com.au>
8593T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next 8592T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
8594S: Maintained 8593S: Maintained
@@ -11268,7 +11267,6 @@ F: drivers/media/rc/serial_ir.c
11268 11267
11269STI CEC DRIVER 11268STI CEC DRIVER
11270M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 11269M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
11271L: kernel@stlinux.com
11272S: Maintained 11270S: Maintained
11273F: drivers/staging/media/st-cec/ 11271F: drivers/staging/media/st-cec/
11274F: Documentation/devicetree/bindings/media/stih-cec.txt 11272F: Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11776,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
11778S: Supported 11776S: Supported
11779F: arch/arm/mach-davinci/ 11777F: arch/arm/mach-davinci/
11780F: drivers/i2c/busses/i2c-davinci.c 11778F: drivers/i2c/busses/i2c-davinci.c
11779F: arch/arm/boot/dts/da850*
11781 11780
11782TI DAVINCI SERIES MEDIA DRIVER 11781TI DAVINCI SERIES MEDIA DRIVER
11783M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com> 11782M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13861,7 +13860,7 @@ S: Odd fixes
13861F: drivers/net/wireless/wl3501* 13860F: drivers/net/wireless/wl3501*
13862 13861
13863WOLFSON MICROELECTRONICS DRIVERS 13862WOLFSON MICROELECTRONICS DRIVERS
13864L: patches@opensource.wolfsonmicro.com 13863L: patches@opensource.cirrus.com
13865T: git https://github.com/CirrusLogic/linux-drivers.git 13864T: git https://github.com/CirrusLogic/linux-drivers.git
13866W: https://github.com/CirrusLogic/linux-drivers/wiki 13865W: https://github.com/CirrusLogic/linux-drivers/wiki
13867S: Supported 13866S: Supported
diff --git a/Makefile b/Makefile
index 853ae9179af9..83f6d9972cab 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 12 2PATCHLEVEL = 12
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc5
5NAME = Fearless Coyote 5NAME = Fearless Coyote
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 561f27d8d922..9444a9a9ba10 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -3,6 +3,11 @@
3#include <dt-bindings/clock/bcm2835-aux.h> 3#include <dt-bindings/clock/bcm2835-aux.h>
4#include <dt-bindings/gpio/gpio.h> 4#include <dt-bindings/gpio/gpio.h>
5 5
6/* firmware-provided startup stubs live here, where the secondary CPUs are
7 * spinning.
8 */
9/memreserve/ 0x00000000 0x00001000;
10
6/* This include file covers the common peripherals and configuration between 11/* This include file covers the common peripherals and configuration between
7 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to 12 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
8 * bcm2835.dtsi and bcm2836.dtsi. 13 * bcm2835.dtsi and bcm2836.dtsi.
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index f18e1f1d0ce2..d2be8aa3370b 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -120,10 +120,16 @@
120 120
121 ethphy0: ethernet-phy@2 { 121 ethphy0: ethernet-phy@2 {
122 reg = <2>; 122 reg = <2>;
123 micrel,led-mode = <1>;
124 clocks = <&clks IMX6UL_CLK_ENET_REF>;
125 clock-names = "rmii-ref";
123 }; 126 };
124 127
125 ethphy1: ethernet-phy@1 { 128 ethphy1: ethernet-phy@1 {
126 reg = <1>; 129 reg = <1>;
130 micrel,led-mode = <1>;
131 clocks = <&clks IMX6UL_CLK_ENET2_REF>;
132 clock-names = "rmii-ref";
127 }; 133 };
128 }; 134 };
129}; 135};
diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
index b6f26824e83a..66f615a74118 100644
--- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
137 /* NetCP address range */ 137 /* NetCP address range */
138 ranges = <0 0x26000000 0x1000000>; 138 ranges = <0 0x26000000 0x1000000>;
139 139
140 clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>; 140 clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
141 clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk"; 141 clock-names = "pa_clk", "ethss_clk", "cpts";
142 dma-coherent; 142 dma-coherent;
143 143
144 ti,navigator-dmas = <&dma_gbe 0>, 144 ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index b58e7ebc0919..148650406cf7 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -232,6 +232,14 @@
232 }; 232 };
233 }; 233 };
234 234
235 osr: sram@70000000 {
236 compatible = "mmio-sram";
237 reg = <0x70000000 0x10000>;
238 #address-cells = <1>;
239 #size-cells = <1>;
240 clocks = <&clkosr>;
241 };
242
235 dspgpio0: keystone_dsp_gpio@02620240 { 243 dspgpio0: keystone_dsp_gpio@02620240 {
236 compatible = "ti,keystone-dsp-gpio"; 244 compatible = "ti,keystone-dsp-gpio";
237 gpio-controller; 245 gpio-controller;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 33a8eb28374e..06e2331f666d 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
1#include <versatile-ab.dts> 1#include "versatile-ab.dts"
2 2
3/ { 3/ {
4 model = "ARM Versatile PB"; 4 model = "ARM Versatile PB";
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 36ec9c8f6e16..3234fe9bba6e 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -19,7 +19,8 @@ struct dev_archdata {
19#ifdef CONFIG_XEN 19#ifdef CONFIG_XEN
20 const struct dma_map_ops *dev_dma_ops; 20 const struct dma_map_ops *dev_dma_ops;
21#endif 21#endif
22 bool dma_coherent; 22 unsigned int dma_coherent:1;
23 unsigned int dma_ops_setup:1;
23}; 24};
24 25
25struct omap_device; 26struct omap_device;
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 570ed4a9c261..5386528665b5 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -104,7 +104,6 @@ __do_hyp_init:
104 @ - Write permission implies XN: disabled 104 @ - Write permission implies XN: disabled
105 @ - Instruction cache: enabled 105 @ - Instruction cache: enabled
106 @ - Data/Unified cache: enabled 106 @ - Data/Unified cache: enabled
107 @ - Memory alignment checks: enabled
108 @ - MMU: enabled (this code must be run from an identity mapping) 107 @ - MMU: enabled (this code must be run from an identity mapping)
109 mrc p15, 4, r0, c1, c0, 0 @ HSCR 108 mrc p15, 4, r0, c1, c0, 0 @ HSCR
110 ldr r2, =HSCTLR_MASK 109 ldr r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
112 mrc p15, 0, r1, c1, c0, 0 @ SCTLR 111 mrc p15, 0, r1, c1, c0, 0 @ SCTLR
113 ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) 112 ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
114 and r1, r1, r2 113 and r1, r1, r2
115 ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) ) 114 ARM( ldr r2, =(HSCTLR_M) )
116 THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) 115 THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
117 orr r1, r1, r2 116 orr r1, r1, r2
118 orr r0, r0, r1 117 orr r0, r0, r1
119 mcr p15, 4, r0, c1, c0, 0 @ HSCR 118 mcr p15, 4, r0, c1, c0, 0 @ HSCR
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 841e924143f9..cbd959b73654 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,6 +1,7 @@
1menuconfig ARCH_AT91 1menuconfig ARCH_AT91
2 bool "Atmel SoCs" 2 bool "Atmel SoCs"
3 depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 3 depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
4 select ARM_CPU_SUSPEND if PM
4 select COMMON_CLK_AT91 5 select COMMON_CLK_AT91
5 select GPIOLIB 6 select GPIOLIB
6 select PINCTRL 7 select PINCTRL
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index efb80354f303..b5cc05dc2cb2 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
153 davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL); 153 davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
154 if (!davinci_sram_suspend) { 154 if (!davinci_sram_suspend) {
155 pr_err("PM: cannot allocate SRAM memory\n"); 155 pr_err("PM: cannot allocate SRAM memory\n");
156 return -ENOMEM; 156 ret = -ENOMEM;
157 goto no_sram_mem;
157 } 158 }
158 159
159 davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend, 160 davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
161 162
162 suspend_set_ops(&davinci_pm_ops); 163 suspend_set_ops(&davinci_pm_ops);
163 164
165 return 0;
166
167no_sram_mem:
168 iounmap(pm_config.ddrpsc_reg_base);
164no_ddrpsc_mem: 169no_ddrpsc_mem:
165 iounmap(pm_config.ddrpll_reg_base); 170 iounmap(pm_config.ddrpll_reg_base);
166no_ddrpll_mem: 171no_ddrpll_mem:
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c742dfd2967b..bd83c531828a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
2311} 2311}
2312EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 2312EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
2313 2313
2314static void __arm_iommu_detach_device(struct device *dev) 2314/**
2315 * arm_iommu_detach_device
2316 * @dev: valid struct device pointer
2317 *
2318 * Detaches the provided device from a previously attached map.
2319 * This voids the dma operations (dma_map_ops pointer)
2320 */
2321void arm_iommu_detach_device(struct device *dev)
2315{ 2322{
2316 struct dma_iommu_mapping *mapping; 2323 struct dma_iommu_mapping *mapping;
2317 2324
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
2324 iommu_detach_device(mapping->domain, dev); 2331 iommu_detach_device(mapping->domain, dev);
2325 kref_put(&mapping->kref, release_iommu_mapping); 2332 kref_put(&mapping->kref, release_iommu_mapping);
2326 to_dma_iommu_mapping(dev) = NULL; 2333 to_dma_iommu_mapping(dev) = NULL;
2334 set_dma_ops(dev, NULL);
2327 2335
2328 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 2336 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
2329} 2337}
2330
2331/**
2332 * arm_iommu_detach_device
2333 * @dev: valid struct device pointer
2334 *
2335 * Detaches the provided device from a previously attached map.
2336 * This voids the dma operations (dma_map_ops pointer)
2337 */
2338void arm_iommu_detach_device(struct device *dev)
2339{
2340 __arm_iommu_detach_device(dev);
2341 set_dma_ops(dev, NULL);
2342}
2343EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 2338EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
2344 2339
2345static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 2340static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
2379 if (!mapping) 2374 if (!mapping)
2380 return; 2375 return;
2381 2376
2382 __arm_iommu_detach_device(dev); 2377 arm_iommu_detach_device(dev);
2383 arm_iommu_release_mapping(mapping); 2378 arm_iommu_release_mapping(mapping);
2384} 2379}
2385 2380
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
2430 dev->dma_ops = xen_dma_ops; 2425 dev->dma_ops = xen_dma_ops;
2431 } 2426 }
2432#endif 2427#endif
2428 dev->archdata.dma_ops_setup = true;
2433} 2429}
2434 2430
2435void arch_teardown_dma_ops(struct device *dev) 2431void arch_teardown_dma_ops(struct device *dev)
2436{ 2432{
2433 if (!dev->archdata.dma_ops_setup)
2434 return;
2435
2437 arm_teardown_iommu_dma_ops(dev); 2436 arm_teardown_iommu_dma_ops(dev);
2438} 2437}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3dcd7ec69bca..b2024db225a9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1084,10 +1084,6 @@ config SYSVIPC_COMPAT
1084 def_bool y 1084 def_bool y
1085 depends on COMPAT && SYSVIPC 1085 depends on COMPAT && SYSVIPC
1086 1086
1087config KEYS_COMPAT
1088 def_bool y
1089 depends on COMPAT && KEYS
1090
1091endmenu 1087endmenu
1092 1088
1093menu "Power management options" 1089menu "Power management options"
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index ac8df5201cd6..b4bc42ece754 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -231,8 +231,7 @@
231 cpm_crypto: crypto@800000 { 231 cpm_crypto: crypto@800000 {
232 compatible = "inside-secure,safexcel-eip197"; 232 compatible = "inside-secure,safexcel-eip197";
233 reg = <0x800000 0x200000>; 233 reg = <0x800000 0x200000>;
234 interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 234 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
235 | IRQ_TYPE_LEVEL_HIGH)>,
236 <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, 235 <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
237 <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, 236 <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
238 <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>, 237 <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 7740a75a8230..6e2058847ddc 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -221,8 +221,7 @@
221 cps_crypto: crypto@800000 { 221 cps_crypto: crypto@800000 {
222 compatible = "inside-secure,safexcel-eip197"; 222 compatible = "inside-secure,safexcel-eip197";
223 reg = <0x800000 0x200000>; 223 reg = <0x800000 0x200000>;
224 interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 224 interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
225 | IRQ_TYPE_LEVEL_HIGH)>,
226 <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>, 225 <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
227 <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>, 226 <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
228 <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>, 227 <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 65cdd878cfbd..97c123e09e45 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 15c142ce991c..b4d13d9267ff 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A (1 << 1)
 #define SCTLR_ELx_M 1
 
+#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
+			(1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			(1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 839425c24b1c..3f9615582377 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi alle2
 	dsb sy
 
-	mrs x4, sctlr_el2
-	and x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr x5, =SCTLR_ELx_FLAGS
-	orr x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr x4, x4, #SCTLR_ELx_EE)
 	msr sctlr_el2, x4
 	isb
 
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 79f37e37d367..6260b69e5622 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
		p->regval = val;
	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
	p->regval = 0;
 
	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
		if (p->is_write) {
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
				    ICC_BPR1_EL1_SHIFT;
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index ec90afdb3ad0..c599eb126c9e 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -37,15 +37,14 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
	long uncleared;
 
	while (count > PAGE_SIZE) {
-		uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
-						   PAGE_SIZE);
+		uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
		if (uncleared)
			return count - (PAGE_SIZE - uncleared);
		count -= PAGE_SIZE;
		dest += PAGE_SIZE;
	}
	if (count)
-		count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+		count = raw_copy_to_user(dest, &empty_zero_page, count);
 
	return count;
 }
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f7c8f9972f61..bf4391d18923 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-	bool "Device-tree based CPU feature discovery & setup"
-	depends on PPC_BOOK3S_64
-	default n
-	help
-	  This enables code to use a new device tree binding for describing CPU
-	  compatibility and features. Saying Y here will attempt to use the new
-	  binding if the firmware provides it. Currently only the skiboot
-	  firmware provides this binding.
-	  If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-	bool "cpufeatures pass through unknown features to guest/userspace"
-	depends on PPC_DT_CPU_FTRS
-	default y
-
 config HIGHMEM
	bool "High memory support"
	depends on PPC32
@@ -1215,11 +1199,6 @@ source "arch/powerpc/Kconfig.debug"
 
 source "security/Kconfig"
 
-config KEYS_COMPAT
-	bool
-	depends on COMPAT && KEYS
-	default y
-
 source "crypto/Kconfig"
 
 config PPC_LIB_RHEAP
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b671ca..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE 9
 #define H_PMD_INDEX_SIZE 7
 #define H_PUD_INDEX_SIZE 9
-#define H_PGD_INDEX_SIZE 12
+#define H_PGD_INDEX_SIZE 9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index c2d509584a98..d02ad93bf708 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a2123f291ab0..bb99b651085a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
 #define TASK_SIZE_USER64 TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB
 #else
 #define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
 #define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
-		TASK_SIZE_USER32 : TASK_SIZE_128TB)
+		TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..329771559cbb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index fcc7588a96d6..4c7656dc4e04 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
	{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
-	{"subcore", feat_enable, CPU_FTR_SUBCORE},
	{"no-execute", feat_enable, 0},
	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
	{"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+	if (!str)
+		return 0;
+
+	if (!strcmp(str, "off"))
+		using_dt_cpu_ftrs = false;
+	else if (!strcmp(str, "known"))
+		enable_unknown = false;
+	else
+		return 1;
+
+	return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
		}
	}
 
-	if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+	if (!known && enable_unknown) {
		if (!feat_try_enable_unknown(f)) {
			pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
				f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
		cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+	unsigned long root, chosen;
+	const char *p;
+
+	root = of_get_flat_dt_root();
+	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+	if (chosen == -FDT_ERR_NOTFOUND)
+		return false;
+
+	p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+	if (!p)
+		return false;
+
+	if (strstr(p, "dt_cpu_ftrs=off"))
+		return true;
+
+	return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
					int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
	return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
	return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+	using_dt_cpu_ftrs = false;
+
	/* Setup and verify the FDT, if it fails we just bail */
	if (!early_init_dt_verify(fdt))
		return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
		return false;
 
+	if (disabled_on_cmdline())
+		return false;
+
	cpufeatures_setup_cpu();
 
	using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+	if (!using_dt_cpu_ftrs)
+		return;
+
	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index baae104b16c7..2ad725ef4368 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 71dcda91755d..857129acf960 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-	init_mm.context.addr_limit = TASK_SIZE_128TB;
+	init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error "context.addr_limit not initialized."
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f35ff9dea4fb..a8c1f99e9607 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index c6dca2ae78ef..a3edf813d455 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
	 * mm->context.addr_limit. Default to max task size so that we copy the
	 * default values to paca which will help us to handle slb miss early.
	 */
-	mm->context.addr_limit = TASK_SIZE_128TB;
+	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
	/*
	 * The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 018f8e90ac35..bb28e1a41257 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
	.name = "POWER9",
	.n_counter = MAX_PMU_COUNTERS,
	.add_fields = ISA207_ADD_FIELDS,
-	.test_adder = ISA207_TEST_ADDER,
+	.test_adder = P9_DD1_TEST_ADDER,
	.compute_mmcr = isa207_compute_mmcr,
	.config_bhrb = power9_config_bhrb,
	.bhrb_filter_map = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
	.name = "POWER9",
	.n_counter = MAX_PMU_COUNTERS,
	.add_fields = ISA207_ADD_FIELDS,
-	.test_adder = P9_DD1_TEST_ADDER,
+	.test_adder = ISA207_TEST_ADDER,
	.compute_mmcr = isa207_compute_mmcr,
	.config_bhrb = power9_config_bhrb,
	.bhrb_filter_map = power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33244e3d9375..4fd64d3f5c44 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
	  In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+	bool "Device-tree based CPU feature discovery & setup"
+	depends on PPC_BOOK3S_64
+	default y
+	help
+	  This enables code to use a new device tree binding for describing CPU
+	  compatibility and features. Saying Y here will attempt to use the new
+	  binding if the firmware provides it. Currently only the skiboot
+	  firmware provides this binding.
+	  If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
	bool "RTAS based debug console"
	depends on PPC_RTAS
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index e5a891ae80ee..84b7ac926ce6 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
	if (!dump_skip(cprm, skip))
		goto Eio;
+
+	rc = 0;
 out:
	free_page((unsigned long)buf);
	return rc;
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef11136f..8c6119280c13 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_SUBCORE))
+	unsigned pvr_ver;
+
+	pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+	if (pvr_ver != PVR_POWER8 &&
+	    pvr_ver != PVR_POWER8E &&
+	    pvr_ver != PVR_POWER8NVL)
		return 0;
 
	/*
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e104c71ea44a..1fb162ba9d1c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}
 
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b470b04..6afddae2fb47 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
	u8_gc->data = in_8(mm_gc->regs);
 }
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e161fafb495b..6967addc6a89 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -363,9 +363,6 @@ config COMPAT
 config SYSVIPC_COMPAT
	def_bool y if COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 config SMP
	def_bool y
	prompt "Symmetric multi-processing support"
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index a5039fa89314..282072206df7 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_XFS_DEBUG=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
 CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 83970b5afb2b..3c6b78189fbc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index fbc6542aaf59..653d72bcc007 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e23d97c13735..afa46a7406ea 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
 # CONFIG_CHECK_STACK is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
 CONFIG_RAW_DRIVER=y
 # CONFIG_SCLP_ASYNC is not set
 # CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_INOTIFY_USER is not set
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 97189dbaf34b..20244a38c886 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 426614a882a9..65d07ac34647 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
541 struct mutex ais_lock; 541 struct mutex ais_lock;
542 u8 simm; 542 u8 simm;
543 u8 nimm; 543 u8 nimm;
544 int ais_enabled;
545}; 544};
546 545
547struct kvm_hw_wp_info_arch { 546struct kvm_hw_wp_info_arch {
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index e408d9cc5b96..6315037335ba 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,17 @@ ENTRY(sie64a)
231 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 231 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
232.Lsie_done: 232.Lsie_done:
233# some program checks are suppressing. C code (e.g. do_protection_exception) 233# some program checks are suppressing. C code (e.g. do_protection_exception)
234# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other 234# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
235# instructions between sie64a and .Lsie_done should not cause program 235# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
236# interrupts. So lets use a nop (47 00 00 00) as a landing pad. 236# Other instructions between sie64a and .Lsie_done should not cause program
237# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
237# See also .Lcleanup_sie 238# See also .Lcleanup_sie
238.Lrewind_pad: 239.Lrewind_pad6:
239 nop 0 240 nopr 7
241.Lrewind_pad4:
242 nopr 7
243.Lrewind_pad2:
244 nopr 7
240 .globl sie_exit 245 .globl sie_exit
241sie_exit: 246sie_exit:
242 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area 247 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -249,7 +254,9 @@ sie_exit:
249 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code 254 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
250 j sie_exit 255 j sie_exit
251 256
252 EX_TABLE(.Lrewind_pad,.Lsie_fault) 257 EX_TABLE(.Lrewind_pad6,.Lsie_fault)
258 EX_TABLE(.Lrewind_pad4,.Lsie_fault)
259 EX_TABLE(.Lrewind_pad2,.Lsie_fault)
253 EX_TABLE(sie_exit,.Lsie_fault) 260 EX_TABLE(sie_exit,.Lsie_fault)
254EXPORT_SYMBOL(sie64a) 261EXPORT_SYMBOL(sie64a)
255EXPORT_SYMBOL(sie_exit) 262EXPORT_SYMBOL(sie_exit)
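
The three landing pads added above exist so that a PSW rewound by any possible instruction-length code (2, 4 or 6 bytes, each nopr being a 2-byte instruction) still hits an address that has its own EX_TABLE entry. A stand-alone model of just that arithmetic, with a made-up sie_exit address and none of the real PSW handling:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative layout only: three 2-byte pads ending right
	 * before sie_exit, mirroring .Lrewind_pad6/4/2 in the hunk.
	 */
	unsigned long sie_exit    = 0x1000;	/* assumed address */
	unsigned long rewind_pad2 = sie_exit - 2;
	unsigned long rewind_pad4 = sie_exit - 4;
	unsigned long rewind_pad6 = sie_exit - 6;

	for (int ilc = 2; ilc <= 6; ilc += 2) {
		unsigned long rewound = sie_exit - ilc;

		/* every possible rewind lands on one of the pads */
		assert(rewound == rewind_pad2 ||
		       rewound == rewind_pad4 ||
		       rewound == rewind_pad6);
		printf("ILC %d -> landing pad at sie_exit - %d\n", ilc, ilc);
	}
	return 0;
}
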
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index caf15c8a8948..2d120fef7d90 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2160 struct kvm_s390_ais_req req; 2160 struct kvm_s390_ais_req req;
2161 int ret = 0; 2161 int ret = 0;
2162 2162
2163 if (!fi->ais_enabled) 2163 if (!test_kvm_facility(kvm, 72))
2164 return -ENOTSUPP; 2164 return -ENOTSUPP;
2165 2165
2166 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) 2166 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
2204 }; 2204 };
2205 int ret = 0; 2205 int ret = 0;
2206 2206
2207 if (!fi->ais_enabled || !adapter->suppressible) 2207 if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2208 return kvm_s390_inject_vm(kvm, &s390int); 2208 return kvm_s390_inject_vm(kvm, &s390int);
2209 2209
2210 mutex_lock(&fi->ais_lock); 2210 mutex_lock(&fi->ais_lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 689ac48361c6..f28e2e776931 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
558 } else { 558 } else {
559 set_kvm_facility(kvm->arch.model.fac_mask, 72); 559 set_kvm_facility(kvm->arch.model.fac_mask, 72);
560 set_kvm_facility(kvm->arch.model.fac_list, 72); 560 set_kvm_facility(kvm->arch.model.fac_list, 72);
561 kvm->arch.float_int.ais_enabled = 1;
562 r = 0; 561 r = 0;
563 } 562 }
564 mutex_unlock(&kvm->lock); 563 mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1533 mutex_init(&kvm->arch.float_int.ais_lock); 1532 mutex_init(&kvm->arch.float_int.ais_lock);
1534 kvm->arch.float_int.simm = 0; 1533 kvm->arch.float_int.simm = 0;
1535 kvm->arch.float_int.nimm = 0; 1534 kvm->arch.float_int.nimm = 0;
1536 kvm->arch.float_int.ais_enabled = 0;
1537 spin_lock_init(&kvm->arch.float_int.lock); 1535 spin_lock_init(&kvm->arch.float_int.lock);
1538 for (i = 0; i < FIRQ_LIST_COUNT; i++) 1536 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1539 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); 1537 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0d21c0..5639c9fe5b55 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@ config NR_CPUS
192 int "Maximum number of CPUs" 192 int "Maximum number of CPUs"
193 depends on SMP 193 depends on SMP
194 range 2 32 if SPARC32 194 range 2 32 if SPARC32
195 range 2 1024 if SPARC64 195 range 2 4096 if SPARC64
196 default 32 if SPARC32 196 default 32 if SPARC32
197 default 64 if SPARC64 197 default 4096 if SPARC64
198 198
199source kernel/Kconfig.hz 199source kernel/Kconfig.hz
200 200
@@ -295,9 +295,13 @@ config NUMA
295 depends on SPARC64 && SMP 295 depends on SPARC64 && SMP
296 296
297config NODES_SHIFT 297config NODES_SHIFT
298 int 298 int "Maximum NUMA Nodes (as a power of 2)"
299 default "4" 299 range 4 5 if SPARC64
300 default "5"
300 depends on NEED_MULTIPLE_NODES 301 depends on NEED_MULTIPLE_NODES
302 help
303 Specify the maximum number of NUMA Nodes available on the target
304 system. Increases memory reserved to accommodate various tables.
301 305
302# Some NUMA nodes have memory ranges that span 306# Some NUMA nodes have memory ranges that span
303# other nodes. Even though a pfn is valid and 307# other nodes. Even though a pfn is valid and
@@ -573,9 +577,6 @@ config SYSVIPC_COMPAT
573 depends on COMPAT && SYSVIPC 577 depends on COMPAT && SYSVIPC
574 default y 578 default y
575 579
576config KEYS_COMPAT
577 def_bool y if COMPAT && KEYS
578
579endmenu 580endmenu
580 581
581source "net/Kconfig" 582source "net/Kconfig"
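
For context on the NODES_SHIFT and NR_CPUS changes above: the generic code derives MAX_NUMNODES as 1 << NODES_SHIFT, so the new range of 4..5 allows up to 32 NUMA nodes, and the NR_CPUS bump to 4096 is what the relaxed BUILD_BUG_ON in init_cpu_send_mondo_info() (further down in this series) has to accommodate. A quick stand-alone check of that arithmetic; the 8 KB sparc64 page size is an assumption here:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 8192;	/* assumed sparc64 base page */
	const unsigned long nr_cpus = 4096;	/* new SPARC64 default */
	const unsigned int nodes_shift = 5;	/* new NODES_SHIFT maximum */

	/* MAX_NUMNODES is 1 << NODES_SHIFT */
	printf("max NUMA nodes: %u\n", 1u << nodes_shift);

	/* one u16 per possible CPU must still fit in a single page,
	 * matching BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE)
	 */
	assert(nr_cpus * sizeof(unsigned short) <= page_size);
	printf("cpu list bytes: %lu of %lu\n",
	       (unsigned long)(nr_cpus * sizeof(unsigned short)), page_size);
	return 0;
}
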
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
52#define CTX_NR_MASK TAG_CONTEXT_BITS 52#define CTX_NR_MASK TAG_CONTEXT_BITS
53#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) 53#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
54 54
55#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL)) 55#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
56#define CTX_VALID(__ctx) \ 56#define CTX_VALID(__ctx) \
57 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) 57 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
58#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK) 58#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6eba11..2cddcda4f85f 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
19extern unsigned long tlb_context_cache; 19extern unsigned long tlb_context_cache;
20extern unsigned long mmu_context_bmap[]; 20extern unsigned long mmu_context_bmap[];
21 21
22DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
22void get_new_mmu_context(struct mm_struct *mm); 23void get_new_mmu_context(struct mm_struct *mm);
23#ifdef CONFIG_SMP
24void smp_new_mmu_context_version(void);
25#else
26#define smp_new_mmu_context_version() do { } while (0)
27#endif
28
29int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 24int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
30void destroy_context(struct mm_struct *mm); 25void destroy_context(struct mm_struct *mm);
31 26
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
76static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 71static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
77{ 72{
78 unsigned long ctx_valid, flags; 73 unsigned long ctx_valid, flags;
79 int cpu; 74 int cpu = smp_processor_id();
80 75
76 per_cpu(per_cpu_secondary_mm, cpu) = mm;
81 if (unlikely(mm == &init_mm)) 77 if (unlikely(mm == &init_mm))
82 return; 78 return;
83 79
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
123 * for the first time, we must flush that context out of the 119 * for the first time, we must flush that context out of the
124 * local TLB. 120 * local TLB.
125 */ 121 */
126 cpu = smp_processor_id();
127 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { 122 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
128 cpumask_set_cpu(cpu, mm_cpumask(mm)); 123 cpumask_set_cpu(cpu, mm_cpumask(mm));
129 __flush_tlb_mm(CTX_HWBITS(mm->context), 124 __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
133} 128}
134 129
135#define deactivate_mm(tsk,mm) do { } while (0) 130#define deactivate_mm(tsk,mm) do { } while (0)
136 131#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
137/* Activate a new MM instance for the current task. */
138static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
139{
140 unsigned long flags;
141 int cpu;
142
143 spin_lock_irqsave(&mm->context.lock, flags);
144 if (!CTX_VALID(mm->context))
145 get_new_mmu_context(mm);
146 cpu = smp_processor_id();
147 if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
148 cpumask_set_cpu(cpu, mm_cpumask(mm));
149
150 load_secondary_context(mm);
151 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
152 tsb_context_switch(mm);
153 spin_unlock_irqrestore(&mm->context.lock, flags);
154}
155
156#endif /* !(__ASSEMBLY__) */ 132#endif /* !(__ASSEMBLY__) */
157 133
158#endif /* !(__SPARC64_MMU_CONTEXT_H) */ 134#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
20#define PIL_SMP_CALL_FUNC 1 20#define PIL_SMP_CALL_FUNC 1
21#define PIL_SMP_RECEIVE_SIGNAL 2 21#define PIL_SMP_RECEIVE_SIGNAL 2
22#define PIL_SMP_CAPTURE 3 22#define PIL_SMP_CAPTURE 3
23#define PIL_SMP_CTX_NEW_VERSION 4
24#define PIL_DEVICE_IRQ 5 23#define PIL_DEVICE_IRQ 5
25#define PIL_SMP_CALL_FUNC_SNGL 6 24#define PIL_SMP_CALL_FUNC_SNGL 6
26#define PIL_DEFERRED_PCR_WORK 7 25#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
327 int compat_len; 327 int compat_len;
328 328
329 u64 dev_no; 329 u64 dev_no;
330 u64 id;
330 331
331 unsigned long channel_id; 332 unsigned long channel_id;
332 333
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7c8d94..f87265afb175 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
909 pbuf.req.handle = cp->handle; 909 pbuf.req.handle = cp->handle;
910 pbuf.req.major = 1; 910 pbuf.req.major = 1;
911 pbuf.req.minor = 0; 911 pbuf.req.minor = 0;
912 strcpy(pbuf.req.svc_id, cp->service_id); 912 strcpy(pbuf.id_buf, cp->service_id);
913 913
914 err = __ds_send(lp, &pbuf, msg_len); 914 err = __ds_send(lp, &pbuf, msg_len);
915 if (err > 0) 915 if (err > 0)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248aa0928..99dd133a029f 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
1034{ 1034{
1035#ifdef CONFIG_SMP 1035#ifdef CONFIG_SMP
1036 unsigned long page; 1036 unsigned long page;
1037 void *mondo, *p;
1037 1038
1038 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); 1039 BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
1040
1041 /* Make sure mondo block is 64byte aligned */
1042 p = kzalloc(127, GFP_KERNEL);
1043 if (!p) {
1044 prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
1045 prom_halt();
1046 }
1047 mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
1048 tb->cpu_mondo_block_pa = __pa(mondo);
1039 1049
1040 page = get_zeroed_page(GFP_KERNEL); 1050 page = get_zeroed_page(GFP_KERNEL);
1041 if (!page) { 1051 if (!page) {
1042 prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); 1052 prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
1043 prom_halt(); 1053 prom_halt();
1044 } 1054 }
1045 1055
1046 tb->cpu_mondo_block_pa = __pa(page); 1056 tb->cpu_list_pa = __pa(page);
1047 tb->cpu_list_pa = __pa(page + 64);
1048#endif 1057#endif
1049} 1058}
1050 1059
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551262c..6ae1e77be0bf 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
37/* smp_64.c */ 37/* smp_64.c */
38void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); 38void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
39void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); 39void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
40void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
41void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); 40void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
42void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); 41void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
43 42
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac757cc..fdf31040a7dc 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
964 preempt_enable(); 964 preempt_enable();
965} 965}
966 966
967void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
968{
969 struct mm_struct *mm;
970 unsigned long flags;
971
972 clear_softint(1 << irq);
973
974 /* See if we need to allocate a new TLB context because
975 * the version of the one we are using is now out of date.
976 */
977 mm = current->active_mm;
978 if (unlikely(!mm || (mm == &init_mm)))
979 return;
980
981 spin_lock_irqsave(&mm->context.lock, flags);
982
983 if (unlikely(!CTX_VALID(mm->context)))
984 get_new_mmu_context(mm);
985
986 spin_unlock_irqrestore(&mm->context.lock, flags);
987
988 load_secondary_context(mm);
989 __flush_tlb_mm(CTX_HWBITS(mm->context),
990 SECONDARY_CONTEXT);
991}
992
993void smp_new_mmu_context_version(void)
994{
995 smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
996}
997
998#ifdef CONFIG_KGDB 967#ifdef CONFIG_KGDB
999void kgdb_roundup_cpus(unsigned long flags) 968void kgdb_roundup_cpus(unsigned long flags)
1000{ 969{
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cfd0ad4..07c0df924960 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@ __tsb_context_switch:
455 .type copy_tsb,#function 455 .type copy_tsb,#function
456copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size 456copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
457 * %o2=new_tsb_base, %o3=new_tsb_size 457 * %o2=new_tsb_base, %o3=new_tsb_size
458 * %o4=page_size_shift
458 */ 459 */
459 sethi %uhi(TSB_PASS_BITS), %g7 460 sethi %uhi(TSB_PASS_BITS), %g7
460 srlx %o3, 4, %o3 461 srlx %o3, 4, %o3
461 add %o0, %o1, %g1 /* end of old tsb */ 462 add %o0, %o1, %o1 /* end of old tsb */
462 sllx %g7, 32, %g7 463 sllx %g7, 32, %g7
463 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ 464 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
464 465
466 mov %o4, %g1 /* page_size_shift */
467
465661: prefetcha [%o0] ASI_N, #one_read 468661: prefetcha [%o0] ASI_N, #one_read
466 .section .tsb_phys_patch, "ax" 469 .section .tsb_phys_patch, "ax"
467 .word 661b 470 .word 661b
@@ -486,9 +489,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
486 /* This can definitely be computed faster... */ 489 /* This can definitely be computed faster... */
487 srlx %o0, 4, %o5 /* Build index */ 490 srlx %o0, 4, %o5 /* Build index */
488 and %o5, 511, %o5 /* Mask index */ 491 and %o5, 511, %o5 /* Mask index */
489 sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ 492 sllx %o5, %g1, %o5 /* Put into vaddr position */
490 or %o4, %o5, %o4 /* Full VADDR. */ 493 or %o4, %o5, %o4 /* Full VADDR. */
491 srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ 494 srlx %o4, %g1, %o4 /* Shift down to create index */
492 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ 495 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
493 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ 496 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
494 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ 497 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
496 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ 499 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
497 500
49880: add %o0, 16, %o0 50180: add %o0, 16, %o0
499 cmp %o0, %g1 502 cmp %o0, %o1
500 bne,pt %xcc, 90b 503 bne,pt %xcc, 90b
501 nop 504 nop
502 505
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f6556352..efe93ab4a9c0 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) 50tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) 51tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) 52tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
53tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) 53tl0_irq4: BTRAP(0x44)
54#else 54#else
55tl0_irq1: BTRAP(0x41) 55tl0_irq1: BTRAP(0x41)
56tl0_irq2: BTRAP(0x42) 56tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857254fc..075d38980dee 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
302 if (!id) { 302 if (!id) {
303 dev_set_name(&vdev->dev, "%s", bus_id_name); 303 dev_set_name(&vdev->dev, "%s", bus_id_name);
304 vdev->dev_no = ~(u64)0; 304 vdev->dev_no = ~(u64)0;
305 vdev->id = ~(u64)0;
305 } else if (!cfg_handle) { 306 } else if (!cfg_handle) {
306 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); 307 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
307 vdev->dev_no = *id; 308 vdev->dev_no = *id;
309 vdev->id = ~(u64)0;
308 } else { 310 } else {
309 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, 311 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
310 *cfg_handle, *id); 312 *cfg_handle, *id);
311 vdev->dev_no = *cfg_handle; 313 vdev->dev_no = *cfg_handle;
314 vdev->id = *id;
312 } 315 }
313 316
314 vdev->dev.parent = parent; 317 vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
351 (void) vio_create_one(hp, node, &root_vdev->dev); 354 (void) vio_create_one(hp, node, &root_vdev->dev);
352} 355}
353 356
357struct vio_md_node_query {
358 const char *type;
359 u64 dev_no;
360 u64 id;
361};
362
354static int vio_md_node_match(struct device *dev, void *arg) 363static int vio_md_node_match(struct device *dev, void *arg)
355{ 364{
365 struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
356 struct vio_dev *vdev = to_vio_dev(dev); 366 struct vio_dev *vdev = to_vio_dev(dev);
357 367
358 if (vdev->mp == (u64) arg) 368 if (vdev->dev_no != query->dev_no)
359 return 1; 369 return 0;
370 if (vdev->id != query->id)
371 return 0;
372 if (strcmp(vdev->type, query->type))
373 return 0;
360 374
361 return 0; 375 return 1;
362} 376}
363 377
364static void vio_remove(struct mdesc_handle *hp, u64 node) 378static void vio_remove(struct mdesc_handle *hp, u64 node)
365{ 379{
380 const char *type;
381 const u64 *id, *cfg_handle;
382 u64 a;
383 struct vio_md_node_query query;
366 struct device *dev; 384 struct device *dev;
367 385
368 dev = device_find_child(&root_vdev->dev, (void *) node, 386 type = mdesc_get_property(hp, node, "device-type", NULL);
387 if (!type) {
388 type = mdesc_get_property(hp, node, "name", NULL);
389 if (!type)
390 type = mdesc_node_name(hp, node);
391 }
392
393 query.type = type;
394
395 id = mdesc_get_property(hp, node, "id", NULL);
396 cfg_handle = NULL;
397 mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
398 u64 target;
399
400 target = mdesc_arc_target(hp, a);
401 cfg_handle = mdesc_get_property(hp, target,
402 "cfg-handle", NULL);
403 if (cfg_handle)
404 break;
405 }
406
407 if (!id) {
408 query.dev_no = ~(u64)0;
409 query.id = ~(u64)0;
410 } else if (!cfg_handle) {
411 query.dev_no = *id;
412 query.id = ~(u64)0;
413 } else {
414 query.dev_no = *cfg_handle;
415 query.id = *id;
416 }
417
418 dev = device_find_child(&root_vdev->dev, &query,
369 vio_md_node_match); 419 vio_md_node_match);
370 if (dev) { 420 if (dev) {
371 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); 421 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
372 422
373 device_unregister(dev); 423 device_unregister(dev);
374 put_device(dev); 424 put_device(dev);
425 } else {
426 if (!id)
427 printk(KERN_ERR "VIO: Removed unknown %s node.\n",
428 type);
429 else if (!cfg_handle)
430 printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
431 type, *id);
432 else
433 printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
434 type, *cfg_handle, *id);
375 } 435 }
376} 436}
377 437
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2f8b54..07c03e72d812 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
15lib-$(CONFIG_SPARC64) += atomic_64.o 15lib-$(CONFIG_SPARC64) += atomic_64.o
16lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o 16lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
17lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o 17lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
18lib-$(CONFIG_SPARC64) += multi3.o
18 19
19lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o 20lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
20lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o 21lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 000000000000..d6b6c97fe3c7
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
1#include <linux/linkage.h>
2#include <asm/export.h>
3
4 .text
5 .align 4
6ENTRY(__multi3) /* %o0 = u, %o1 = v */
7 mov %o1, %g1
8 srl %o3, 0, %g4
9 mulx %g4, %g1, %o1
10 srlx %g1, 0x20, %g3
11 mulx %g3, %g4, %g5
12 sllx %g5, 0x20, %o5
13 srl %g1, 0, %g4
14 sub %o1, %o5, %o5
15 srlx %o5, 0x20, %o5
16 addcc %g5, %o5, %g5
17 srlx %o3, 0x20, %o5
18 mulx %g4, %o5, %g4
19 mulx %g3, %o5, %o5
20 sethi %hi(0x80000000), %g3
21 addcc %g5, %g4, %g5
22 srlx %g5, 0x20, %g5
23 add %g3, %g3, %g3
24 movcc %xcc, %g0, %g3
25 addcc %o5, %g5, %o5
26 sllx %g4, 0x20, %g4
27 add %o1, %g4, %o1
28 add %o5, %g3, %g2
29 mulx %g1, %o2, %g1
30 add %g1, %g2, %g1
31 mulx %o0, %o3, %o0
32 retl
33 add %g1, %o0, %o0
34ENDPROC(__multi3)
35EXPORT_SYMBOL(__multi3)
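
__multi3 is the libgcc helper for a 128-bit (TImode) multiply; since mulx only produces the low 64 bits of a 64x64 product, the assembly above assembles the low 128 bits of the result from 32-bit partial products. A C model of the same decomposition, checked against the compiler's __int128 multiply; this illustrates the math and is not a substitute for the .S file:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* High 64 bits of a 64x64 multiply, built from 32-bit halves, i.e. the
 * piece that a plain low-64-bit multiply instruction cannot give you.
 */
static uint64_t umulh(uint64_t u, uint64_t v)
{
	uint64_t u0 = (uint32_t)u, u1 = u >> 32;
	uint64_t v0 = (uint32_t)v, v1 = v >> 32;
	uint64_t t, w1, w2, k;

	t  = u0 * v0;
	k  = t >> 32;
	t  = u1 * v0 + k;
	w1 = (uint32_t)t;
	w2 = t >> 32;
	t  = u0 * v1 + w1;
	k  = t >> 32;
	return u1 * v1 + w2 + k;
}

/* Low 128 bits of (ah:al) * (bh:bl), which is what __multi3 returns. */
static void multi3(uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl,
		   uint64_t *rh, uint64_t *rl)
{
	*rl = al * bl;
	*rh = umulh(al, bl) + al * bh + ah * bl;	/* mod 2^64 */
}

int main(void)
{
	uint64_t ah = 0x0123456789abcdefULL, al = 0xfedcba9876543210ULL;
	uint64_t bh = 0x0f1e2d3c4b5a6978ULL, bl = 0x8796a5b4c3d2e1f0ULL;
	uint64_t rh, rl;
	unsigned __int128 a = ((unsigned __int128)ah << 64) | al;
	unsigned __int128 b = ((unsigned __int128)bh << 64) | bl;
	unsigned __int128 r = a * b;

	multi3(ah, al, bh, bl, &rh, &rl);
	assert(rl == (uint64_t)r && rh == (uint64_t)(r >> 64));
	printf("ok: %016llx%016llx\n",
	       (unsigned long long)rh, (unsigned long long)rl);
	return 0;
}
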
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
358 } 358 }
359 359
360 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { 360 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
361 pr_warn("hugepagesz=%llu not supported by MMU.\n", 361 hugetlb_bad_size();
362 pr_err("hugepagesz=%llu not supported by MMU.\n",
362 hugepage_size); 363 hugepage_size);
363 goto out; 364 goto out;
364 } 365 }
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
706 707
707/* get_new_mmu_context() uses "cache + 1". */ 708/* get_new_mmu_context() uses "cache + 1". */
708DEFINE_SPINLOCK(ctx_alloc_lock); 709DEFINE_SPINLOCK(ctx_alloc_lock);
709unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 710unsigned long tlb_context_cache = CTX_FIRST_VERSION;
710#define MAX_CTX_NR (1UL << CTX_NR_BITS) 711#define MAX_CTX_NR (1UL << CTX_NR_BITS)
711#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 712#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
712DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 713DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
714DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
715
716static void mmu_context_wrap(void)
717{
718 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
719 unsigned long new_ver, new_ctx, old_ctx;
720 struct mm_struct *mm;
721 int cpu;
722
723 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
724
725 /* Reserve kernel context */
726 set_bit(0, mmu_context_bmap);
727
728 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
729 if (unlikely(new_ver == 0))
730 new_ver = CTX_FIRST_VERSION;
731 tlb_context_cache = new_ver;
732
733 /*
734 * Make sure that any new mm that are added into per_cpu_secondary_mm,
735 * are going to go through get_new_mmu_context() path.
736 */
737 mb();
738
739 /*
740 * Updated versions to current on those CPUs that had valid secondary
741 * contexts
742 */
743 for_each_online_cpu(cpu) {
744 /*
745 * If a new mm is stored after we took this mm from the array,
746 * it will go into get_new_mmu_context() path, because we
747 * already bumped the version in tlb_context_cache.
748 */
749 mm = per_cpu(per_cpu_secondary_mm, cpu);
750
751 if (unlikely(!mm || mm == &init_mm))
752 continue;
753
754 old_ctx = mm->context.sparc64_ctx_val;
755 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
756 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
757 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
758 mm->context.sparc64_ctx_val = new_ctx;
759 }
760 }
761}
713 762
714/* Caller does TLB context flushing on local CPU if necessary. 763/* Caller does TLB context flushing on local CPU if necessary.
715 * The caller also ensures that CTX_VALID(mm->context) is false. 764 * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
725{ 774{
726 unsigned long ctx, new_ctx; 775 unsigned long ctx, new_ctx;
727 unsigned long orig_pgsz_bits; 776 unsigned long orig_pgsz_bits;
728 int new_version;
729 777
730 spin_lock(&ctx_alloc_lock); 778 spin_lock(&ctx_alloc_lock);
779retry:
780 /* wrap might have happened, test again if our context became valid */
781 if (unlikely(CTX_VALID(mm->context)))
782 goto out;
731 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 783 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
732 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 784 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
733 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 785 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
734 new_version = 0;
735 if (new_ctx >= (1 << CTX_NR_BITS)) { 786 if (new_ctx >= (1 << CTX_NR_BITS)) {
736 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 787 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
737 if (new_ctx >= ctx) { 788 if (new_ctx >= ctx) {
738 int i; 789 mmu_context_wrap();
739 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 790 goto retry;
740 CTX_FIRST_VERSION;
741 if (new_ctx == 1)
742 new_ctx = CTX_FIRST_VERSION;
743
744 /* Don't call memset, for 16 entries that's just
745 * plain silly...
746 */
747 mmu_context_bmap[0] = 3;
748 mmu_context_bmap[1] = 0;
749 mmu_context_bmap[2] = 0;
750 mmu_context_bmap[3] = 0;
751 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
752 mmu_context_bmap[i + 0] = 0;
753 mmu_context_bmap[i + 1] = 0;
754 mmu_context_bmap[i + 2] = 0;
755 mmu_context_bmap[i + 3] = 0;
756 }
757 new_version = 1;
758 goto out;
759 } 791 }
760 } 792 }
793 if (mm->context.sparc64_ctx_val)
794 cpumask_clear(mm_cpumask(mm));
761 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 795 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
762 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 796 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
763out:
764 tlb_context_cache = new_ctx; 797 tlb_context_cache = new_ctx;
765 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 798 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
799out:
766 spin_unlock(&ctx_alloc_lock); 800 spin_unlock(&ctx_alloc_lock);
767
768 if (unlikely(new_version))
769 smp_new_mmu_context_version();
770} 801}
771 802
772static int numa_enabled = 1; 803static int numa_enabled = 1;
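
The new wrap handling above rebuilds the context bitmap locally under ctx_alloc_lock and makes get_new_mmu_context() retry, instead of broadcasting a cross call; the version lives in the bits above CTX_VERSION_SHIFT and the context number in CTX_NR_MASK. A deliberately tiny userspace model of that allocate / wrap / retry loop, with only 3 number bits and none of the locking or per-cpu bookkeeping:

#include <stdbool.h>
#include <stdio.h>

#define NR_BITS		3
#define NR_CTX		(1u << NR_BITS)
#define NR_MASK		(NR_CTX - 1)
#define FIRST_VER	NR_CTX		/* version lives above the number bits */

static bool used[NR_CTX] = { [0] = true };	/* context 0 stays reserved */
static unsigned long ctx_cache = FIRST_VER;	/* last value handed out */

static void wrap(void)
{
	for (unsigned int i = 1; i < NR_CTX; i++)
		used[i] = false;
	ctx_cache = (ctx_cache & ~(unsigned long)NR_MASK) + FIRST_VER;
}

static unsigned long get_new_ctx(void)
{
	unsigned long start, nr;

retry:
	start = (ctx_cache + 1) & NR_MASK;
	for (nr = start; nr < NR_CTX; nr++)	/* search [start, end) */
		if (!used[nr])
			goto found;
	for (nr = 1; nr < start; nr++)		/* then [1, start) */
		if (!used[nr])
			goto found;
	wrap();			/* every number taken: bump the version */
	goto retry;
found:
	used[nr] = true;
	ctx_cache = (ctx_cache & ~(unsigned long)NR_MASK) | nr;
	return ctx_cache;
}

int main(void)
{
	for (int i = 0; i < 20; i++) {
		unsigned long ctx = get_new_ctx();

		printf("alloc %2d: version %lu, number %lu\n",
		       i, ctx >> NR_BITS, ctx & NR_MASK);
	}
	return 0;
}
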
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b22a47..0d4b998c7d7b 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ retry_tsb_alloc:
496 extern void copy_tsb(unsigned long old_tsb_base, 496 extern void copy_tsb(unsigned long old_tsb_base,
497 unsigned long old_tsb_size, 497 unsigned long old_tsb_size,
498 unsigned long new_tsb_base, 498 unsigned long new_tsb_base,
499 unsigned long new_tsb_size); 499 unsigned long new_tsb_size,
500 unsigned long page_size_shift);
500 unsigned long old_tsb_base = (unsigned long) old_tsb; 501 unsigned long old_tsb_base = (unsigned long) old_tsb;
501 unsigned long new_tsb_base = (unsigned long) new_tsb; 502 unsigned long new_tsb_base = (unsigned long) new_tsb;
502 503
@@ -504,7 +505,9 @@ retry_tsb_alloc:
504 old_tsb_base = __pa(old_tsb_base); 505 old_tsb_base = __pa(old_tsb_base);
505 new_tsb_base = __pa(new_tsb_base); 506 new_tsb_base = __pa(new_tsb_base);
506 } 507 }
507 copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); 508 copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
509 tsb_index == MM_TSB_BASE ?
510 PAGE_SHIFT : REAL_HPAGE_SHIFT);
508 } 511 }
509 512
510 mm->context.tsb_block[tsb_index].tsb = new_tsb; 513 mm->context.tsb_block[tsb_index].tsb = new_tsb;
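
The extra page_size_shift argument matters because a TSB entry's slot is derived from the virtual address shifted down by the page size that TSB covers: PAGE_SHIFT for the base TSB, REAL_HPAGE_SHIFT for the huge-page TSB, exactly as the caller above now selects. A small sketch of the index computation mirrored from the shift/mask/shift sequence in copy_tsb; the concrete shift values (13 and 22) and the 512-entry size are illustrative assumptions, while the 16-byte entry size comes from the code:

#include <stdio.h>

static unsigned long tsb_entry_offset(unsigned long vaddr,
				      unsigned int page_shift,
				      unsigned long nentries)
{
	unsigned long idx = (vaddr >> page_shift) & (nentries - 1);

	return idx << 4;		/* 16 bytes per entry: tag + TTE */
}

int main(void)
{
	unsigned long vaddr = 0x7f1234568000UL;

	/* the same address hashes to different slots per TSB page size */
	printf("base TSB offset: %#lx\n", tsb_entry_offset(vaddr, 13, 512));
	printf("huge TSB offset: %#lx\n", tsb_entry_offset(vaddr, 22, 512));
	return 0;
}
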
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint 971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
972 retry 972 retry
973 973
974 .globl xcall_new_mmu_context_version
975xcall_new_mmu_context_version:
976 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
977 retry
978
979#ifdef CONFIG_KGDB 974#ifdef CONFIG_KGDB
980 .globl xcall_kgdb_capture 975 .globl xcall_kgdb_capture
981xcall_kgdb_capture: 976xcall_kgdb_capture:
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4ccfacc7232a..0efb4c9497bc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2776,10 +2776,6 @@ config COMPAT_FOR_U64_ALIGNMENT
2776config SYSVIPC_COMPAT 2776config SYSVIPC_COMPAT
2777 def_bool y 2777 def_bool y
2778 depends on SYSVIPC 2778 depends on SYSVIPC
2779
2780config KEYS_COMPAT
2781 def_bool y
2782 depends on KEYS
2783endif 2779endif
2784 2780
2785endmenu 2781endmenu
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index a70fd61095f8..6f077445647a 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
255 break; 255 break;
256 256
257 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ 257 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
258 case 11: /* GX1 with inverted Device ID */
258#ifdef CONFIG_PCI 259#ifdef CONFIG_PCI
259 { 260 {
260 u32 vendor, device; 261 u32 vendor, device;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index afdfd237b59f..f522415bf9e5 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void)
619 619
620 show_saved_mc(); 620 show_saved_mc();
621 621
622 /* initrd is going away, clear patch ptr. */
623 intel_ucode_patch = NULL;
624
622 return 0; 625 return 0;
623} 626}
624 627
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index da5c09789984..43e10d6fdbed 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
161 */ 161 */
162 rcu_irq_exit(); 162 rcu_irq_exit();
163 native_safe_halt(); 163 native_safe_halt();
164 rcu_irq_enter();
165 local_irq_disable(); 164 local_irq_disable();
165 rcu_irq_enter();
166 } 166 }
167 } 167 }
168 if (!n.halted) 168 if (!n.halted)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a181ae76c71c..59ca2eea522c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -780,18 +780,20 @@ out:
780static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) 780static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
781{ 781{
782 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; 782 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
783 int j, nent = vcpu->arch.cpuid_nent; 783 struct kvm_cpuid_entry2 *ej;
784 int j = i;
785 int nent = vcpu->arch.cpuid_nent;
784 786
785 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; 787 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
786 /* when no next entry is found, the current entry[i] is reselected */ 788 /* when no next entry is found, the current entry[i] is reselected */
787 for (j = i + 1; ; j = (j + 1) % nent) { 789 do {
788 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; 790 j = (j + 1) % nent;
789 if (ej->function == e->function) { 791 ej = &vcpu->arch.cpuid_entries[j];
790 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 792 } while (ej->function != e->function);
791 return j; 793
792 } 794 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
793 } 795
794 return 0; /* silence gcc, even though control never reaches here */ 796 return j;
795} 797}
796 798
797/* find an entry with matching function, matching index (if needed), and that 799/* find an entry with matching function, matching index (if needed), and that
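
The rewrite above turns the open-coded for loop (whose trailing return 0 was, per its own comment, unreachable) into a do/while that walks the CPUID entry array circularly from i + 1 until it finds an entry with the same function, falling back to i itself when there is no other one. The scan in isolation:

#include <assert.h>
#include <stdio.h>

struct entry {
	unsigned int function;
};

/* Circular search for the next entry sharing entries[i].function; if
 * nothing else matches, the walk returns to i, i.e. the current entry
 * is reselected.
 */
static int next_same_function(const struct entry *entries, int nent, int i)
{
	int j = i;

	do {
		j = (j + 1) % nent;
	} while (entries[j].function != entries[i].function);

	return j;
}

int main(void)
{
	struct entry e[] = { {4}, {4}, {11}, {4}, {11} };
	int n = sizeof(e) / sizeof(e[0]);

	assert(next_same_function(e, n, 0) == 1);	/* next 0x4 leaf */
	assert(next_same_function(e, n, 3) == 0);	/* wraps around */
	assert(next_same_function(e, n, 2) == 4);	/* next 0xb leaf */
	printf("ok\n");
	return 0;
}
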
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5d3376f67794..cb8225969255 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3698 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); 3698 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3699} 3699}
3700 3700
3701static bool can_do_async_pf(struct kvm_vcpu *vcpu) 3701bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3702{ 3702{
3703 if (unlikely(!lapic_in_kernel(vcpu) || 3703 if (unlikely(!lapic_in_kernel(vcpu) ||
3704 kvm_event_needs_reinjection(vcpu))) 3704 kvm_event_needs_reinjection(vcpu)))
3705 return false; 3705 return false;
3706 3706
3707 if (is_guest_mode(vcpu))
3708 return false;
3709
3707 return kvm_x86_ops->interrupt_allowed(vcpu); 3710 return kvm_x86_ops->interrupt_allowed(vcpu);
3708} 3711}
3709 3712
@@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3719 if (!async) 3722 if (!async)
3720 return false; /* *pfn has correct page already */ 3723 return false; /* *pfn has correct page already */
3721 3724
3722 if (!prefault && can_do_async_pf(vcpu)) { 3725 if (!prefault && kvm_can_do_async_pf(vcpu)) {
3723 trace_kvm_try_async_get_page(gva, gfn); 3726 trace_kvm_try_async_get_page(gva, gfn);
3724 if (kvm_find_async_pf_gfn(vcpu, gfn)) { 3727 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3725 trace_kvm_async_pf_doublefault(gva, gfn); 3728 trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 27975807cc64..330bf3a811fb 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
76void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); 76void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
77void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, 77void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
78 bool accessed_dirty); 78 bool accessed_dirty);
79bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
79 80
80static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) 81static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
81{ 82{
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9b4b5d6dcd34..ca5d2b93385c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2425,7 +2425,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
2425 if (!(vmcs12->exception_bitmap & (1u << nr))) 2425 if (!(vmcs12->exception_bitmap & (1u << nr)))
2426 return 0; 2426 return 0;
2427 2427
2428 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, 2428 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
2429 vmcs_read32(VM_EXIT_INTR_INFO), 2429 vmcs_read32(VM_EXIT_INTR_INFO),
2430 vmcs_readl(EXIT_QUALIFICATION)); 2430 vmcs_readl(EXIT_QUALIFICATION));
2431 return 1; 2431 return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a2cd0997343c..87d3cb901935 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8607,8 +8607,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
8607 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) 8607 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
8608 return true; 8608 return true;
8609 else 8609 else
8610 return !kvm_event_needs_reinjection(vcpu) && 8610 return kvm_can_do_async_pf(vcpu);
8611 kvm_x86_ops->interrupt_allowed(vcpu);
8612} 8611}
8613 8612
8614void kvm_arch_start_assignment(struct kvm *kvm) 8613void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88ea7646..19707db966f1 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
29# define PLATFORM_NR_IRQS 0 29# define PLATFORM_NR_IRQS 0
30#endif 30#endif
31#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS 31#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
32#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) 32#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
33#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
33 34
34#if VARIANT_NR_IRQS == 0 35#if VARIANT_NR_IRQS == 0
35static inline void variant_init_irq(void) { } 36static inline void variant_init_irq(void) { }
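
XTENSA_PIC_LINUX_IRQ just offsets the hardware IRQ number by one (hence the + 1 added to NR_IRQS), presumably so that a valid hardware IRQ 0 is never reported as Linux IRQ 0, which the IRQ core uses to mean "no interrupt"; the xtfpga resources later in this diff use it when handing interrupt numbers to drivers. Spelled out:

#include <stdio.h>

#define XTENSA_PIC_LINUX_IRQ(hwirq)	((hwirq) + 1)	/* same as the hunk above */

int main(void)
{
	for (int hwirq = 0; hwirq < 4; hwirq++)
		printf("hw irq %d -> linux irq %d\n",
		       hwirq, XTENSA_PIC_LINUX_IRQ(hwirq));
	return 0;
}
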
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a265edd6ac37..99341028cc77 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
34{ 34{
35 int irq = irq_find_mapping(NULL, hwirq); 35 int irq = irq_find_mapping(NULL, hwirq);
36 36
37 if (hwirq >= NR_IRQS) {
38 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
39 __func__, hwirq);
40 }
41
42#ifdef CONFIG_DEBUG_STACKOVERFLOW 37#ifdef CONFIG_DEBUG_STACKOVERFLOW
43 /* Debugging check for stack overflow: is there less than 1KB free? */ 38 /* Debugging check for stack overflow: is there less than 1KB free? */
44 { 39 {
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 394ef08300b6..33bfa5270d95 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
593 (ccount_freq/10000) % 100, 593 (ccount_freq/10000) % 100,
594 loops_per_jiffy/(500000/HZ), 594 loops_per_jiffy/(500000/HZ),
595 (loops_per_jiffy/(5000/HZ)) % 100); 595 (loops_per_jiffy/(5000/HZ)) % 100);
596 596 seq_puts(f, "flags\t\t: "
597 seq_printf(f,"flags\t\t: "
598#if XCHAL_HAVE_NMI 597#if XCHAL_HAVE_NMI
599 "nmi " 598 "nmi "
600#endif 599#endif
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 30d9fc21e076..162c77e53ca8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@ SECTIONS
118 SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) 118 SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
119 SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) 119 SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
120 SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) 120 SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
121 SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) 121 SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
122 SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) 122 SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
123#endif 123#endif
124 124
@@ -306,13 +306,13 @@ SECTIONS
306 .UserExceptionVector.literal) 306 .UserExceptionVector.literal)
307 SECTION_VECTOR (_DoubleExceptionVector_literal, 307 SECTION_VECTOR (_DoubleExceptionVector_literal,
308 .DoubleExceptionVector.literal, 308 .DoubleExceptionVector.literal,
309 DOUBLEEXC_VECTOR_VADDR - 48, 309 DOUBLEEXC_VECTOR_VADDR - 20,
310 SIZEOF(.UserExceptionVector.text), 310 SIZEOF(.UserExceptionVector.text),
311 .UserExceptionVector.text) 311 .UserExceptionVector.text)
312 SECTION_VECTOR (_DoubleExceptionVector_text, 312 SECTION_VECTOR (_DoubleExceptionVector_text,
313 .DoubleExceptionVector.text, 313 .DoubleExceptionVector.text,
314 DOUBLEEXC_VECTOR_VADDR, 314 DOUBLEEXC_VECTOR_VADDR,
315 48, 315 20,
316 .DoubleExceptionVector.literal) 316 .DoubleExceptionVector.literal)
317 317
318 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 318 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 02e94bb3ad3e..c45b90bb9339 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -317,8 +317,7 @@ static int __init simdisk_init(void)
317 if (simdisk_count > MAX_SIMDISK_COUNT) 317 if (simdisk_count > MAX_SIMDISK_COUNT)
318 simdisk_count = MAX_SIMDISK_COUNT; 318 simdisk_count = MAX_SIMDISK_COUNT;
319 319
320 sddev = kmalloc(simdisk_count * sizeof(struct simdisk), 320 sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
321 GFP_KERNEL);
322 if (sddev == NULL) 321 if (sddev == NULL)
323 goto out_unregister; 322 goto out_unregister;
324 323
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b440a1..1fda7e20dfcb 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
24 24
25/* Interrupt configuration. */ 25/* Interrupt configuration. */
26 26
27#define PLATFORM_NR_IRQS 10 27#define PLATFORM_NR_IRQS 0
28 28
29/* Default assignment of LX60 devices to external interrupts. */ 29/* Default assignment of LX60 devices to external interrupts. */
30 30
31#ifdef CONFIG_XTENSA_MX 31#ifdef CONFIG_XTENSA_MX
32#define DUART16552_INTNUM XCHAL_EXTINT3_NUM 32#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
33#define OETH_IRQ XCHAL_EXTINT4_NUM 33#define OETH_IRQ XCHAL_EXTINT4_NUM
34#define C67X00_IRQ XCHAL_EXTINT8_NUM
34#else 35#else
35#define DUART16552_INTNUM XCHAL_EXTINT0_NUM 36#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
36#define OETH_IRQ XCHAL_EXTINT1_NUM 37#define OETH_IRQ XCHAL_EXTINT1_NUM
38#define C67X00_IRQ XCHAL_EXTINT5_NUM
37#endif 39#endif
38 40
39/* 41/*
@@ -63,5 +65,5 @@
63 65
64#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) 66#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
65#define C67X00_SIZE 0x10 67#define C67X00_SIZE 0x10
66#define C67X00_IRQ 5 68
67#endif /* __XTENSA_XTAVNET_HARDWARE_H */ 69#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be723eb2b..42285f35d313 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
175 .flags = IORESOURCE_MEM, 175 .flags = IORESOURCE_MEM,
176 }, 176 },
177 [2] = { /* IRQ number */ 177 [2] = { /* IRQ number */
178 .start = OETH_IRQ, 178 .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
179 .end = OETH_IRQ, 179 .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
180 .flags = IORESOURCE_IRQ, 180 .flags = IORESOURCE_IRQ,
181 }, 181 },
182}; 182};
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
213 .flags = IORESOURCE_MEM, 213 .flags = IORESOURCE_MEM,
214 }, 214 },
215 [1] = { /* IRQ number */ 215 [1] = { /* IRQ number */
216 .start = C67X00_IRQ, 216 .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
217 .end = C67X00_IRQ, 217 .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
218 .flags = IORESOURCE_IRQ, 218 .flags = IORESOURCE_IRQ,
219 }, 219 },
220}; 220};
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
247static struct plat_serial8250_port serial_platform_data[] = { 247static struct plat_serial8250_port serial_platform_data[] = {
248 [0] = { 248 [0] = {
249 .mapbase = DUART16552_PADDR, 249 .mapbase = DUART16552_PADDR,
250 .irq = DUART16552_INTNUM, 250 .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
251 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | 251 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
252 UPF_IOREMAP, 252 UPF_IOREMAP,
253 .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32, 253 .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c8a32fb345cf..78b2e0db4fb2 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
52BFQG_FLAG_FNS(empty) 52BFQG_FLAG_FNS(empty)
53#undef BFQG_FLAG_FNS 53#undef BFQG_FLAG_FNS
54 54
55/* This should be called with the queue_lock held. */ 55/* This should be called with the scheduler lock held. */
56static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) 56static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
57{ 57{
58 unsigned long long now; 58 unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
67 bfqg_stats_clear_waiting(stats); 67 bfqg_stats_clear_waiting(stats);
68} 68}
69 69
70/* This should be called with the queue_lock held. */ 70/* This should be called with the scheduler lock held. */
71static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, 71static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
72 struct bfq_group *curr_bfqg) 72 struct bfq_group *curr_bfqg)
73{ 73{
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
81 bfqg_stats_mark_waiting(stats); 81 bfqg_stats_mark_waiting(stats);
82} 82}
83 83
84/* This should be called with the queue_lock held. */ 84/* This should be called with the scheduler lock held. */
85static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) 85static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
86{ 86{
87 unsigned long long now; 87 unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
203 203
204static void bfqg_get(struct bfq_group *bfqg) 204static void bfqg_get(struct bfq_group *bfqg)
205{ 205{
206 return blkg_get(bfqg_to_blkg(bfqg)); 206 bfqg->ref++;
207} 207}
208 208
209void bfqg_put(struct bfq_group *bfqg) 209void bfqg_put(struct bfq_group *bfqg)
210{ 210{
211 return blkg_put(bfqg_to_blkg(bfqg)); 211 bfqg->ref--;
212
213 if (bfqg->ref == 0)
214 kfree(bfqg);
215}
216
217static void bfqg_and_blkg_get(struct bfq_group *bfqg)
218{
219 /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
220 bfqg_get(bfqg);
221
222 blkg_get(bfqg_to_blkg(bfqg));
223}
224
225void bfqg_and_blkg_put(struct bfq_group *bfqg)
226{
227 bfqg_put(bfqg);
228
229 blkg_put(bfqg_to_blkg(bfqg));
212} 230}
213 231
214void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, 232void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
312 if (bfqq) { 330 if (bfqq) {
313 bfqq->ioprio = bfqq->new_ioprio; 331 bfqq->ioprio = bfqq->new_ioprio;
314 bfqq->ioprio_class = bfqq->new_ioprio_class; 332 bfqq->ioprio_class = bfqq->new_ioprio_class;
315 bfqg_get(bfqg); 333 /*
334 * Make sure that bfqg and its associated blkg do not
335 * disappear before entity.
336 */
337 bfqg_and_blkg_get(bfqg);
316 } 338 }
317 entity->parent = bfqg->my_entity; /* NULL for root group */ 339 entity->parent = bfqg->my_entity; /* NULL for root group */
318 entity->sched_data = &bfqg->sched_data; 340 entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
399 return NULL; 421 return NULL;
400 } 422 }
401 423
424 /* see comments in bfq_bic_update_cgroup for why refcounting */
425 bfqg_get(bfqg);
402 return &bfqg->pd; 426 return &bfqg->pd;
403} 427}
404 428
@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
426 struct bfq_group *bfqg = pd_to_bfqg(pd); 450 struct bfq_group *bfqg = pd_to_bfqg(pd);
427 451
428 bfqg_stats_exit(&bfqg->stats); 452 bfqg_stats_exit(&bfqg->stats);
429 return kfree(bfqg); 453 bfqg_put(bfqg);
430} 454}
431 455
432void bfq_pd_reset_stats(struct blkg_policy_data *pd) 456void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
496 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating 520 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
497 * it on the new one. Avoid putting the entity on the old group idle tree. 521 * it on the new one. Avoid putting the entity on the old group idle tree.
498 * 522 *
499 * Must be called under the queue lock; the cgroup owning @bfqg must 523 * Must be called under the scheduler lock, to make sure that the blkg
500 * not disappear (by now this just means that we are called under 524 * owning @bfqg does not disappear (see comments in
501 * rcu_read_lock()). 525 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
526 * objects).
502 */ 527 */
503void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, 528void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
504 struct bfq_group *bfqg) 529 struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
519 bfq_deactivate_bfqq(bfqd, bfqq, false, false); 544 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
520 else if (entity->on_st) 545 else if (entity->on_st)
521 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); 546 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
522 bfqg_put(bfqq_group(bfqq)); 547 bfqg_and_blkg_put(bfqq_group(bfqq));
523 548
524 /*
525 * Here we use a reference to bfqg. We don't need a refcounter
526 * as the cgroup reference will not be dropped, so that its
527 * destroy() callback will not be invoked.
528 */
529 entity->parent = bfqg->my_entity; 549 entity->parent = bfqg->my_entity;
530 entity->sched_data = &bfqg->sched_data; 550 entity->sched_data = &bfqg->sched_data;
531 bfqg_get(bfqg); 551 /* pin down bfqg and its associated blkg */
552 bfqg_and_blkg_get(bfqg);
532 553
533 if (bfq_bfqq_busy(bfqq)) { 554 if (bfq_bfqq_busy(bfqq)) {
534 bfq_pos_tree_add_move(bfqd, bfqq); 555 bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
545 * @bic: the bic to move. 566 * @bic: the bic to move.
546 * @blkcg: the blk-cgroup to move to. 567 * @blkcg: the blk-cgroup to move to.
547 * 568 *
548 * Move bic to blkcg, assuming that bfqd->queue is locked; the caller 569 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
549 * has to make sure that the reference to cgroup is valid across the call. 570 * sure that the reference to cgroup is valid across the call (see
571 * comments in bfq_bic_update_cgroup on this issue)
550 * 572 *
551 * NOTE: an alternative approach might have been to store the current 573 * NOTE: an alternative approach might have been to store the current
552 * cgroup in bfqq and getting a reference to it, reducing the lookup 574 * cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
604 goto out; 626 goto out;
605 627
606 bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); 628 bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
629 /*
630 * Update blkg_path for bfq_log_* functions. We cache this
631 * path, and update it here, for the following
632 * reasons. Operations on blkg objects in blk-cgroup are
633 * protected with the request_queue lock, and not with the
634 * lock that protects the instances of this scheduler
635 * (bfqd->lock). This exposes BFQ to the following sort of
636 * race.
637 *
638 * The blkg_lookup performed in bfq_get_queue, protected
639 * through rcu, may happen to return the address of a copy of
640 * the original blkg. If this is the case, then the
641 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
642 * the blkg, is useless: it does not prevent blk-cgroup code
643 * from destroying both the original blkg and all objects
644 * directly or indirectly referred by the copy of the
645 * blkg.
646 *
647 * On the bright side, destroy operations on a blkg invoke, as
648 * a first step, hooks of the scheduler associated with the
649 * blkg. And these hooks are executed with bfqd->lock held for
650 * BFQ. As a consequence, for any blkg associated with the
651 * request queue this instance of the scheduler is attached
652 * to, we are guaranteed that such a blkg is not destroyed, and
653 * that all the pointers it contains are consistent, while we
654 * are holding bfqd->lock. A blkg_lookup performed with
655 * bfqd->lock held then returns a fully consistent blkg, which
656 * remains consistent until this lock is held.
657 *
658 * Thanks to the last fact, and to the fact that: (1) bfqg has
659 * been obtained through a blkg_lookup in the above
660 * assignment, and (2) bfqd->lock is being held, here we can
661 * safely use the policy data for the involved blkg (i.e., the
662 * field bfqg->pd) to get to the blkg associated with bfqg,
663 * and then we can safely use any field of blkg. After we
664 * release bfqd->lock, even just getting blkg through this
665 * bfqg may cause dangling references to be traversed, as
666 * bfqg->pd may not exist any more.
667 *
668 * In view of the above facts, here we cache, in the bfqg, any
669 * blkg data we may need for this bic, and for its associated
670 * bfq_queue. As of now, we need to cache only the path of the
671 * blkg, which is used in the bfq_log_* functions.
672 *
673 * Finally, note that bfqg itself needs to be protected from
674 * destruction on the blkg_free of the original blkg (which
675 * invokes bfq_pd_free). We use an additional private
676 * refcounter for bfqg, to let it disappear only after no
677 * bfq_queue refers to it any longer.
678 */
679 blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
607 bic->blkcg_serial_nr = serial_nr; 680 bic->blkcg_serial_nr = serial_nr;
608out: 681out:
609 rcu_read_unlock(); 682 rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
640 * @bfqd: the device data structure with the root group. 713 * @bfqd: the device data structure with the root group.
641 * @bfqg: the group to move from. 714 * @bfqg: the group to move from.
642 * @st: the service tree with the entities. 715 * @st: the service tree with the entities.
643 *
644 * Needs queue_lock to be taken and reference to be valid over the call.
645 */ 716 */
646static void bfq_reparent_active_entities(struct bfq_data *bfqd, 717static void bfq_reparent_active_entities(struct bfq_data *bfqd,
647 struct bfq_group *bfqg, 718 struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
692 /* 763 /*
693 * The idle tree may still contain bfq_queues belonging 764 * The idle tree may still contain bfq_queues belonging
694 * to exited task because they never migrated to a different 765 * to exited task because they never migrated to a different
695 * cgroup from the one being destroyed now. No one else 766 * cgroup from the one being destroyed now.
696 * can access them so it's safe to act without any lock.
697 */ 767 */
698 bfq_flush_idle_tree(st); 768 bfq_flush_idle_tree(st);
699 769
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 08ce45096350..ed93da2462ab 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
3665 3665
3666 kmem_cache_free(bfq_pool, bfqq); 3666 kmem_cache_free(bfq_pool, bfqq);
3667#ifdef CONFIG_BFQ_GROUP_IOSCHED 3667#ifdef CONFIG_BFQ_GROUP_IOSCHED
3668 bfqg_put(bfqg); 3668 bfqg_and_blkg_put(bfqg);
3669#endif 3669#endif
3670} 3670}
3671 3671
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index ae783c06dfd9..5c3bf9861492 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -759,6 +759,12 @@ struct bfq_group {
759 /* must be the first member */ 759 /* must be the first member */
760 struct blkg_policy_data pd; 760 struct blkg_policy_data pd;
761 761
762 /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
763 char blkg_path[128];
764
765 /* reference counter (see comments in bfq_bic_update_cgroup) */
766 int ref;
767
762 struct bfq_entity entity; 768 struct bfq_entity entity;
763 struct bfq_sched_data sched_data; 769 struct bfq_sched_data sched_data;
764 770
@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
838struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); 844struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
839struct bfq_group *bfqq_group(struct bfq_queue *bfqq); 845struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
840struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); 846struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
841void bfqg_put(struct bfq_group *bfqg); 847void bfqg_and_blkg_put(struct bfq_group *bfqg);
842 848
843#ifdef CONFIG_BFQ_GROUP_IOSCHED 849#ifdef CONFIG_BFQ_GROUP_IOSCHED
844extern struct cftype bfq_blkcg_legacy_files[]; 850extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
910struct bfq_group *bfqq_group(struct bfq_queue *bfqq); 916struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
911 917
912#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ 918#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
913 char __pbuf[128]; \ 919 blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
914 \
915 blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
916 blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
917 bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ 920 bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
918 __pbuf, ##args); \ 921 bfqq_group(bfqq)->blkg_path, ##args); \
919} while (0) 922} while (0)
920 923
921#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ 924#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
922 char __pbuf[128]; \ 925 blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
923 \
924 blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \
925 blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \
926} while (0)
927 926
928#else /* CONFIG_BFQ_GROUP_IOSCHED */ 927#else /* CONFIG_BFQ_GROUP_IOSCHED */
929 928
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5384713d48bc..b5009a896a7f 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
175 if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE) 175 if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
176 return false; 176 return false;
177 177
178 if (!bio_sectors(bio))
179 return false;
180
178 /* Already protected? */ 181 /* Already protected? */
179 if (bio_integrity(bio)) 182 if (bio_integrity(bio))
180 return false; 183 return false;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1bcccedcc74f..bb66c96850b1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1461,22 +1461,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1461 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); 1461 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1462} 1462}
1463 1463
1464static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, 1464static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1465 bool may_sleep) 1465 struct request *rq,
1466 blk_qc_t *cookie, bool may_sleep)
1466{ 1467{
1467 struct request_queue *q = rq->q; 1468 struct request_queue *q = rq->q;
1468 struct blk_mq_queue_data bd = { 1469 struct blk_mq_queue_data bd = {
1469 .rq = rq, 1470 .rq = rq,
1470 .last = true, 1471 .last = true,
1471 }; 1472 };
1472 struct blk_mq_hw_ctx *hctx;
1473 blk_qc_t new_cookie; 1473 blk_qc_t new_cookie;
1474 int ret; 1474 int ret;
1475 bool run_queue = true;
1476
1477 if (blk_mq_hctx_stopped(hctx)) {
1478 run_queue = false;
1479 goto insert;
1480 }
1475 1481
1476 if (q->elevator) 1482 if (q->elevator)
1477 goto insert; 1483 goto insert;
1478 1484
1479 if (!blk_mq_get_driver_tag(rq, &hctx, false)) 1485 if (!blk_mq_get_driver_tag(rq, NULL, false))
1480 goto insert; 1486 goto insert;
1481 1487
1482 new_cookie = request_to_qc_t(hctx, rq); 1488 new_cookie = request_to_qc_t(hctx, rq);
@@ -1500,7 +1506,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
1500 1506
1501 __blk_mq_requeue_request(rq); 1507 __blk_mq_requeue_request(rq);
1502insert: 1508insert:
1503 blk_mq_sched_insert_request(rq, false, true, false, may_sleep); 1509 blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
1504} 1510}
1505 1511
1506static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1512static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,7 +1514,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1508{ 1514{
1509 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { 1515 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1510 rcu_read_lock(); 1516 rcu_read_lock();
1511 __blk_mq_try_issue_directly(rq, cookie, false); 1517 __blk_mq_try_issue_directly(hctx, rq, cookie, false);
1512 rcu_read_unlock(); 1518 rcu_read_unlock();
1513 } else { 1519 } else {
1514 unsigned int srcu_idx; 1520 unsigned int srcu_idx;
@@ -1516,7 +1522,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1516 might_sleep(); 1522 might_sleep();
1517 1523
1518 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu); 1524 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
1519 __blk_mq_try_issue_directly(rq, cookie, true); 1525 __blk_mq_try_issue_directly(hctx, rq, cookie, true);
1520 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx); 1526 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1521 } 1527 }
1522} 1528}
@@ -1619,9 +1625,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1619 1625
1620 blk_mq_put_ctx(data.ctx); 1626 blk_mq_put_ctx(data.ctx);
1621 1627
1622 if (same_queue_rq) 1628 if (same_queue_rq) {
1629 data.hctx = blk_mq_map_queue(q,
1630 same_queue_rq->mq_ctx->cpu);
1623 blk_mq_try_issue_directly(data.hctx, same_queue_rq, 1631 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1624 &cookie); 1632 &cookie);
1633 }
1625 } else if (q->nr_hw_queues > 1 && is_sync) { 1634 } else if (q->nr_hw_queues > 1 && is_sync) {
1626 blk_mq_put_ctx(data.ctx); 1635 blk_mq_put_ctx(data.ctx);
1627 blk_mq_bio_to_request(rq, bio); 1636 blk_mq_bio_to_request(rq, bio);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fc13dd0c6e39..a7285bf2831c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
27#define MIN_THROTL_IOPS (10) 27#define MIN_THROTL_IOPS (10)
28#define DFL_LATENCY_TARGET (-1L) 28#define DFL_LATENCY_TARGET (-1L)
29#define DFL_IDLE_THRESHOLD (0) 29#define DFL_IDLE_THRESHOLD (0)
30#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
31#define LATENCY_FILTERED_SSD (0)
32/*
 33 * For HD, very small latency comes from sequential IO. Such IO gives no
 34 * help in determining whether the IO is being impacted by others, so we ignore it
35 */
36#define LATENCY_FILTERED_HD (1000L) /* 1ms */
30 37
31#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT) 38#define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
32 39
@@ -212,6 +219,7 @@ struct throtl_data
212 struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE]; 219 struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
213 struct latency_bucket __percpu *latency_buckets; 220 struct latency_bucket __percpu *latency_buckets;
214 unsigned long last_calculate_time; 221 unsigned long last_calculate_time;
222 unsigned long filtered_latency;
215 223
216 bool track_bio_latency; 224 bool track_bio_latency;
217}; 225};
@@ -698,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
698static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, 706static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
699 unsigned long expires) 707 unsigned long expires)
700{ 708{
701 unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice; 709 unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
702 710
703 /* 711 /*
704 * Since we are adjusting the throttle limit dynamically, the sleep 712 * Since we are adjusting the throttle limit dynamically, the sleep
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
2281 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat), 2289 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2282 bio_op(bio), lat); 2290 bio_op(bio), lat);
2283 2291
2284 if (tg->latency_target) { 2292 if (tg->latency_target && lat >= tg->td->filtered_latency) {
2285 int bucket; 2293 int bucket;
2286 unsigned int threshold; 2294 unsigned int threshold;
2287 2295
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
2417void blk_throtl_register_queue(struct request_queue *q) 2425void blk_throtl_register_queue(struct request_queue *q)
2418{ 2426{
2419 struct throtl_data *td; 2427 struct throtl_data *td;
2428 int i;
2420 2429
2421 td = q->td; 2430 td = q->td;
2422 BUG_ON(!td); 2431 BUG_ON(!td);
2423 2432
2424 if (blk_queue_nonrot(q)) 2433 if (blk_queue_nonrot(q)) {
2425 td->throtl_slice = DFL_THROTL_SLICE_SSD; 2434 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2426 else 2435 td->filtered_latency = LATENCY_FILTERED_SSD;
2436 } else {
2427 td->throtl_slice = DFL_THROTL_SLICE_HD; 2437 td->throtl_slice = DFL_THROTL_SLICE_HD;
2438 td->filtered_latency = LATENCY_FILTERED_HD;
2439 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2440 td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
2441 }
2428#ifndef CONFIG_BLK_DEV_THROTTLING_LOW 2442#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2429 /* if no low limit, use previous default */ 2443 /* if no low limit, use previous default */
2430 td->throtl_slice = DFL_THROTL_SLICE_HD; 2444 td->throtl_slice = DFL_THROTL_SLICE_HD;
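
The effect of the two new constants: on rotational disks, completions faster than LATENCY_FILTERED_HD are not fed into the latency-target check, and the per-bucket averages start from the 4 ms DFL_HD_BASELINE_LATENCY rather than zero. A self-contained sketch of the filtering decision (thresholds copied from the macros above; the helper name is made up):

	#include <stdbool.h>

	/* lat and the thresholds are in microseconds, as in the macros above */
	static bool counts_toward_latency_target(bool nonrot, unsigned long lat)
	{
		unsigned long filtered = nonrot ? 0 : 1000;	/* LATENCY_FILTERED_{SSD,HD} */

		return lat >= filtered;		/* sub-threshold completions are skipped */
	}
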
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index d3a989e718f5..3cd6e12cfc46 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
141 * signature and returns that to us. 141 * signature and returns that to us.
142 */ 142 */
143 ret = crypto_akcipher_verify(req); 143 ret = crypto_akcipher_verify(req);
144 if (ret == -EINPROGRESS) { 144 if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
145 wait_for_completion(&compl.completion); 145 wait_for_completion(&compl.completion);
146 ret = compl.err; 146 ret = compl.err;
147 } 147 }
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 672a94c2c3ff..d178650fd524 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -381,7 +381,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
381 } 381 }
382 382
383error: 383error:
384 kfree(desc); 384 kzfree(desc);
385error_no_desc: 385error_no_desc:
386 crypto_free_shash(tfm); 386 crypto_free_shash(tfm);
387 kleave(" = %d", ret); 387 kleave(" = %d", ret);
@@ -450,6 +450,6 @@ int verify_pefile_signature(const void *pebuf, unsigned pelen,
450 ret = pefile_digest_pe(pebuf, pelen, &ctx); 450 ret = pefile_digest_pe(pebuf, pelen, &ctx);
451 451
452error: 452error:
453 kfree(ctx.digest); 453 kzfree(ctx.digest);
454 return ret; 454 return ret;
455} 455}
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index c80765b211cf..dd03fead1ca3 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -102,6 +102,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
102 } 102 }
103 } 103 }
104 104
105 ret = -ENOMEM;
105 cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL); 106 cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
106 if (!cert->pub->key) 107 if (!cert->pub->key)
107 goto error_decode; 108 goto error_decode;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index fa749f470135..cdb27ac4b226 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
1767 break; 1767 break;
1768 case -EINPROGRESS: 1768 case -EINPROGRESS:
1769 case -EBUSY: 1769 case -EBUSY:
1770 ret = wait_for_completion_interruptible( 1770 wait_for_completion(&drbg->ctr_completion);
1771 &drbg->ctr_completion); 1771 if (!drbg->ctr_async_err) {
1772 if (!ret && !drbg->ctr_async_err) {
1773 reinit_completion(&drbg->ctr_completion); 1772 reinit_completion(&drbg->ctr_completion);
1774 break; 1773 break;
1775 } 1774 }
diff --git a/crypto/gcm.c b/crypto/gcm.c
index b7ad808be3d4..3841b5eafa7e 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
152 152
153 err = crypto_skcipher_encrypt(&data->req); 153 err = crypto_skcipher_encrypt(&data->req);
154 if (err == -EINPROGRESS || err == -EBUSY) { 154 if (err == -EINPROGRESS || err == -EBUSY) {
155 err = wait_for_completion_interruptible( 155 wait_for_completion(&data->result.completion);
156 &data->result.completion); 156 err = data->result.err;
157 if (!err)
158 err = data->result.err;
159 } 157 }
160 158
161 if (err) 159 if (err)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index c5fecf97ee2f..797b28dc7b34 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -666,14 +666,6 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
666 int ret = -ENODEV; 666 int ret = -ENODEV;
667 struct fwnode_handle *iort_fwnode; 667 struct fwnode_handle *iort_fwnode;
668 668
669 /*
670 * If we already translated the fwspec there
671 * is nothing left to do, return the iommu_ops.
672 */
673 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
674 if (ops)
675 return ops;
676
677 if (node) { 669 if (node) {
678 iort_fwnode = iort_get_fwnode(node); 670 iort_fwnode = iort_get_fwnode(node);
679 if (!iort_fwnode) 671 if (!iort_fwnode)
@@ -735,6 +727,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
735 u32 streamid = 0; 727 u32 streamid = 0;
736 int err; 728 int err;
737 729
730 /*
731 * If we already translated the fwspec there
732 * is nothing left to do, return the iommu_ops.
733 */
734 ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
735 if (ops)
736 return ops;
737
738 if (dev_is_pci(dev)) { 738 if (dev_is_pci(dev)) {
739 struct pci_bus *bus = to_pci_dev(dev)->bus; 739 struct pci_bus *bus = to_pci_dev(dev)->bus;
740 u32 rid; 740 u32 rid;
@@ -782,6 +782,12 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
782 if (err) 782 if (err)
783 ops = ERR_PTR(err); 783 ops = ERR_PTR(err);
784 784
785 /* Ignore all other errors apart from EPROBE_DEFER */
786 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
787 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
788 ops = NULL;
789 }
790
785 return ops; 791 return ops;
786} 792}
787 793
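
Taken together with the acpi_dma_configure() change further down, the policy is: only -EPROBE_DEFER is propagated to the caller, while any other failure is logged and turned into "no IOMMU" so DMA setup still succeeds. A sketch of that filter, assuming the usual ERR_PTR conventions (the function itself is not real, it just restates the two hunks):

	static const struct iommu_ops *filter_iommu_result(struct device *dev,
							   const struct iommu_ops *ops)
	{
		if (IS_ERR(ops) && PTR_ERR(ops) != -EPROBE_DEFER) {
			dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
			return NULL;			/* continue without an IOMMU */
		}
		return ops;	/* NULL, valid ops, or ERR_PTR(-EPROBE_DEFER) */
	}
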
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index a9a9ab3399d4..d42eeef9d928 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
782 if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || 782 if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
783 (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && 783 (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
784 (battery->capacity_now <= battery->alarm))) 784 (battery->capacity_now <= battery->alarm)))
785 pm_wakeup_hard_event(&battery->device->dev); 785 pm_wakeup_event(&battery->device->dev, 0);
786 786
787 return result; 787 return result;
788} 788}
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 9ad8cdb58743..e19f530f1083 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
217 } 217 }
218 218
219 if (state) 219 if (state)
220 pm_wakeup_hard_event(&device->dev); 220 pm_wakeup_event(&device->dev, 0);
221 221
222 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); 222 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
223 if (ret == NOTIFY_DONE) 223 if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
402 } else { 402 } else {
403 int keycode; 403 int keycode;
404 404
405 pm_wakeup_hard_event(&device->dev); 405 pm_wakeup_event(&device->dev, 0);
406 if (button->suspended) 406 if (button->suspended)
407 break; 407 break;
408 408
@@ -534,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
534 lid_device = device; 534 lid_device = device;
535 } 535 }
536 536
537 device_init_wakeup(&device->dev, true);
538 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); 537 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
539 return 0; 538 return 0;
540 539
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 798d5003a039..993fd31394c8 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -24,7 +24,6 @@
24#include <linux/pm_qos.h> 24#include <linux/pm_qos.h>
25#include <linux/pm_domain.h> 25#include <linux/pm_domain.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/suspend.h>
28 27
29#include "internal.h" 28#include "internal.h"
30 29
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
400 mutex_lock(&acpi_pm_notifier_lock); 399 mutex_lock(&acpi_pm_notifier_lock);
401 400
402 if (adev->wakeup.flags.notifier_present) { 401 if (adev->wakeup.flags.notifier_present) {
403 pm_wakeup_ws_event(adev->wakeup.ws, 0, true); 402 __pm_wakeup_event(adev->wakeup.ws, 0);
404 if (adev->wakeup.context.work.func) 403 if (adev->wakeup.context.work.func)
405 queue_pm_work(&adev->wakeup.context.work); 404 queue_pm_work(&adev->wakeup.context.work);
406 } 405 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e39ec7b7cb67..3a10d7573477 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1371,8 +1371,8 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
1371 iort_set_dma_mask(dev); 1371 iort_set_dma_mask(dev);
1372 1372
1373 iommu = iort_iommu_configure(dev); 1373 iommu = iort_iommu_configure(dev);
1374 if (IS_ERR(iommu)) 1374 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
1375 return PTR_ERR(iommu); 1375 return -EPROBE_DEFER;
1376 1376
1377 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); 1377 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
1378 /* 1378 /*
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a6574d626340..097d630ab886 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
663 acpi_os_wait_events_complete(); 663 acpi_os_wait_events_complete();
664 if (acpi_sci_irq_valid()) 664 if (acpi_sci_irq_valid())
665 enable_irq_wake(acpi_sci_irq); 665 enable_irq_wake(acpi_sci_irq);
666
667 return 0; 666 return 0;
668} 667}
669 668
670static void acpi_freeze_wake(void)
671{
672 /*
673 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
674 * that the SCI has triggered while suspended, so cancel the wakeup in
675 * case it has not been a wakeup event (the GPEs will be checked later).
676 */
677 if (acpi_sci_irq_valid() &&
678 !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
679 pm_system_cancel_wakeup();
680}
681
682static void acpi_freeze_sync(void)
683{
684 /*
685 * Process all pending events in case there are any wakeup ones.
686 *
687 * The EC driver uses the system workqueue, so that one needs to be
688 * flushed too.
689 */
690 acpi_os_wait_events_complete();
691 flush_scheduled_work();
692}
693
694static void acpi_freeze_restore(void) 669static void acpi_freeze_restore(void)
695{ 670{
696 acpi_disable_wakeup_devices(ACPI_STATE_S0); 671 acpi_disable_wakeup_devices(ACPI_STATE_S0);
697 if (acpi_sci_irq_valid()) 672 if (acpi_sci_irq_valid())
698 disable_irq_wake(acpi_sci_irq); 673 disable_irq_wake(acpi_sci_irq);
699
700 acpi_enable_all_runtime_gpes(); 674 acpi_enable_all_runtime_gpes();
701} 675}
702 676
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
708static const struct platform_freeze_ops acpi_freeze_ops = { 682static const struct platform_freeze_ops acpi_freeze_ops = {
709 .begin = acpi_freeze_begin, 683 .begin = acpi_freeze_begin,
710 .prepare = acpi_freeze_prepare, 684 .prepare = acpi_freeze_prepare,
711 .wake = acpi_freeze_wake,
712 .sync = acpi_freeze_sync,
713 .restore = acpi_freeze_restore, 685 .restore = acpi_freeze_restore,
714 .end = acpi_freeze_end, 686 .end = acpi_freeze_end,
715}; 687};
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e987a6f55d36..9faee1c893e5 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
1091 if (async_error) 1091 if (async_error)
1092 goto Complete; 1092 goto Complete;
1093 1093
1094 if (pm_wakeup_pending()) {
1095 async_error = -EBUSY;
1096 goto Complete;
1097 }
1098
1094 if (dev->power.syscore || dev->power.direct_complete) 1099 if (dev->power.syscore || dev->power.direct_complete)
1095 goto Complete; 1100 goto Complete;
1096 1101
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 9c36b27996fc..c313b600d356 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
28/* First wakeup IRQ seen by the kernel in the last cycle. */ 28/* First wakeup IRQ seen by the kernel in the last cycle. */
29unsigned int pm_wakeup_irq __read_mostly; 29unsigned int pm_wakeup_irq __read_mostly;
30 30
31/* If greater than 0 and the system is suspending, terminate the suspend. */ 31/* If set and the system is suspending, terminate the suspend. */
32static atomic_t pm_abort_suspend __read_mostly; 32static bool pm_abort_suspend __read_mostly;
33 33
34/* 34/*
35 * Combined counters of registered wakeup events and wakeup events in progress. 35 * Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void)
855 pm_print_active_wakeup_sources(); 855 pm_print_active_wakeup_sources();
856 } 856 }
857 857
858 return ret || atomic_read(&pm_abort_suspend) > 0; 858 return ret || pm_abort_suspend;
859} 859}
860 860
861void pm_system_wakeup(void) 861void pm_system_wakeup(void)
862{ 862{
863 atomic_inc(&pm_abort_suspend); 863 pm_abort_suspend = true;
864 freeze_wake(); 864 freeze_wake();
865} 865}
866EXPORT_SYMBOL_GPL(pm_system_wakeup); 866EXPORT_SYMBOL_GPL(pm_system_wakeup);
867 867
868void pm_system_cancel_wakeup(void) 868void pm_wakeup_clear(void)
869{
870 atomic_dec(&pm_abort_suspend);
871}
872
873void pm_wakeup_clear(bool reset)
874{ 869{
870 pm_abort_suspend = false;
875 pm_wakeup_irq = 0; 871 pm_wakeup_irq = 0;
876 if (reset)
877 atomic_set(&pm_abort_suspend, 0);
878} 872}
879 873
880void pm_system_irq_wakeup(unsigned int irq_number) 874void pm_system_irq_wakeup(unsigned int irq_number)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 28d932906f24..ebbd0c3fe0ed 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -608,6 +608,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
608 */ 608 */
609static int loop_flush(struct loop_device *lo) 609static int loop_flush(struct loop_device *lo)
610{ 610{
611 /* loop not yet configured, no running thread, nothing to flush */
612 if (lo->lo_state != Lo_bound)
613 return 0;
611 return loop_switch(lo, NULL); 614 return loop_switch(lo, NULL);
612} 615}
613 616
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6e0cbe092220..593a8818aca9 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
343 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; 343 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
344 344
345 /* It's illegal to wrap around the end of the physical address space. */ 345 /* It's illegal to wrap around the end of the physical address space. */
346 if (offset + (phys_addr_t)size < offset) 346 if (offset + (phys_addr_t)size - 1 < offset)
347 return -EINVAL; 347 return -EINVAL;
348 348
349 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) 349 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
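
The off-by-one matters only for a mapping that ends exactly at the top of the physical address space: there offset + size wraps to 0 and the old test rejected a legal request, while a genuine overflow still trips the new test. A small stand-alone illustration with 64-bit values (plain user-space C, not the driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t offset = 0xfffffffffffff000ULL;	/* last 4 KiB page */
		uint64_t size   = 0x1000;

		/* old check: offset + size wraps to 0, so the mapping is rejected */
		printf("old rejects: %d\n", offset + size < offset);	  /* prints 1 */
		/* new check: offset + size - 1 == 0xffffffffffffffff, accepted */
		printf("new rejects: %d\n", offset + size - 1 < offset);  /* prints 0 */
		return 0;
	}
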
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a561f0c2f428..e870f329db88 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
1/* 1/*
2 * random.c -- A strong random number generator 2 * random.c -- A strong random number generator
3 * 3 *
4 * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
5 * Rights Reserved.
6 *
4 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 7 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5 * 8 *
6 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All 9 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
762static struct crng_state **crng_node_pool __read_mostly; 765static struct crng_state **crng_node_pool __read_mostly;
763#endif 766#endif
764 767
768static void invalidate_batched_entropy(void);
769
765static void crng_initialize(struct crng_state *crng) 770static void crng_initialize(struct crng_state *crng)
766{ 771{
767 int i; 772 int i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
799 cp++; crng_init_cnt++; len--; 804 cp++; crng_init_cnt++; len--;
800 } 805 }
801 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { 806 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
807 invalidate_batched_entropy();
802 crng_init = 1; 808 crng_init = 1;
803 wake_up_interruptible(&crng_init_wait); 809 wake_up_interruptible(&crng_init_wait);
804 pr_notice("random: fast init done\n"); 810 pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
836 memzero_explicit(&buf, sizeof(buf)); 842 memzero_explicit(&buf, sizeof(buf));
837 crng->init_time = jiffies; 843 crng->init_time = jiffies;
838 if (crng == &primary_crng && crng_init < 2) { 844 if (crng == &primary_crng && crng_init < 2) {
845 invalidate_batched_entropy();
839 crng_init = 2; 846 crng_init = 2;
840 process_random_ready_list(); 847 process_random_ready_list();
841 wake_up_interruptible(&crng_init_wait); 848 wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@ static void add_interrupt_bench(cycles_t start)
1097static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) 1104static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
1098{ 1105{
1099 __u32 *ptr = (__u32 *) regs; 1106 __u32 *ptr = (__u32 *) regs;
1100 unsigned long flags; 1107 unsigned int idx;
1101 1108
1102 if (regs == NULL) 1109 if (regs == NULL)
1103 return 0; 1110 return 0;
1104 local_irq_save(flags); 1111 idx = READ_ONCE(f->reg_idx);
1105 if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) 1112 if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
1106 f->reg_idx = 0; 1113 idx = 0;
1107 ptr += f->reg_idx++; 1114 ptr += idx++;
1108 local_irq_restore(flags); 1115 WRITE_ONCE(f->reg_idx, idx);
1109 return *ptr; 1116 return *ptr;
1110} 1117}
1111 1118
@@ -2023,6 +2030,7 @@ struct batched_entropy {
2023 }; 2030 };
2024 unsigned int position; 2031 unsigned int position;
2025}; 2032};
2033static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
2026 2034
2027/* 2035/*
2028 * Get a random word for internal kernel use only. The quality of the random 2036 * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
2033u64 get_random_u64(void) 2041u64 get_random_u64(void)
2034{ 2042{
2035 u64 ret; 2043 u64 ret;
2044 bool use_lock = crng_init < 2;
2045 unsigned long flags;
2036 struct batched_entropy *batch; 2046 struct batched_entropy *batch;
2037 2047
2038#if BITS_PER_LONG == 64 2048#if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
2045#endif 2055#endif
2046 2056
2047 batch = &get_cpu_var(batched_entropy_u64); 2057 batch = &get_cpu_var(batched_entropy_u64);
2058 if (use_lock)
2059 read_lock_irqsave(&batched_entropy_reset_lock, flags);
2048 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { 2060 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
2049 extract_crng((u8 *)batch->entropy_u64); 2061 extract_crng((u8 *)batch->entropy_u64);
2050 batch->position = 0; 2062 batch->position = 0;
2051 } 2063 }
2052 ret = batch->entropy_u64[batch->position++]; 2064 ret = batch->entropy_u64[batch->position++];
2065 if (use_lock)
2066 read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2053 put_cpu_var(batched_entropy_u64); 2067 put_cpu_var(batched_entropy_u64);
2054 return ret; 2068 return ret;
2055} 2069}
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
2059u32 get_random_u32(void) 2073u32 get_random_u32(void)
2060{ 2074{
2061 u32 ret; 2075 u32 ret;
2076 bool use_lock = crng_init < 2;
2077 unsigned long flags;
2062 struct batched_entropy *batch; 2078 struct batched_entropy *batch;
2063 2079
2064 if (arch_get_random_int(&ret)) 2080 if (arch_get_random_int(&ret))
2065 return ret; 2081 return ret;
2066 2082
2067 batch = &get_cpu_var(batched_entropy_u32); 2083 batch = &get_cpu_var(batched_entropy_u32);
2084 if (use_lock)
2085 read_lock_irqsave(&batched_entropy_reset_lock, flags);
2068 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { 2086 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
2069 extract_crng((u8 *)batch->entropy_u32); 2087 extract_crng((u8 *)batch->entropy_u32);
2070 batch->position = 0; 2088 batch->position = 0;
2071 } 2089 }
2072 ret = batch->entropy_u32[batch->position++]; 2090 ret = batch->entropy_u32[batch->position++];
2091 if (use_lock)
2092 read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2073 put_cpu_var(batched_entropy_u32); 2093 put_cpu_var(batched_entropy_u32);
2074 return ret; 2094 return ret;
2075} 2095}
2076EXPORT_SYMBOL(get_random_u32); 2096EXPORT_SYMBOL(get_random_u32);
2077 2097
2098/* It's important to invalidate all potential batched entropy that might
2099 * be stored before the crng is initialized, which we can do lazily by
2100 * simply resetting the counter to zero so that it's re-extracted on the
2101 * next usage. */
2102static void invalidate_batched_entropy(void)
2103{
2104 int cpu;
2105 unsigned long flags;
2106
2107 write_lock_irqsave(&batched_entropy_reset_lock, flags);
2108 for_each_possible_cpu (cpu) {
2109 per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
2110 per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
2111 }
2112 write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
2113}
2114
2078/** 2115/**
2079 * randomize_page - Generate a random, page aligned address 2116 * randomize_page - Generate a random, page aligned address
2080 * @start: The smallest acceptable address the caller will take. 2117 * @start: The smallest acceptable address the caller will take.
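
The invalidation is lazy: the writer only zeroes every CPU's position, and the next get_random_u32()/get_random_u64() call sees an "empty" batch and refills it from the now better-seeded crng. A reduced sketch of that consumer path (refill_batch() is a stand-in for extract_crng(), not a real function):

	#include <stddef.h>
	#include <stdint.h>

	void refill_batch(uint32_t *buf, size_t len);	/* stand-in for extract_crng() */

	struct batch_sketch {
		uint32_t words[16];
		unsigned int position;	/* a multiple of 16 (incl. 0) means "refill me" */
	};

	static uint32_t next_random_word(struct batch_sketch *b)
	{
		if (b->position % 16 == 0) {	/* empty, exhausted or invalidated */
			refill_batch(b->words, sizeof(b->words));
			b->position = 0;
		}
		return b->words[b->position++];
	}
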
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7de5bd76a31..eb1158532de3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
571static int min_perf_pct_min(void) 571static int min_perf_pct_min(void)
572{ 572{
573 struct cpudata *cpu = all_cpu_data[0]; 573 struct cpudata *cpu = all_cpu_data[0];
574 int turbo_pstate = cpu->pstate.turbo_pstate;
574 575
575 return DIV_ROUND_UP(cpu->pstate.min_pstate * 100, 576 return turbo_pstate ?
576 cpu->pstate.turbo_pstate); 577 DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
577} 578}
578 579
579static s16 intel_pstate_get_epb(struct cpudata *cpu_data) 580static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
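
Besides avoiding a divide-by-zero while cpu->pstate has not been populated yet, the rounding direction matters: DIV_ROUND_UP keeps the minimum percentage from under-representing min_pstate. A short worked example (numbers are illustrative, not from any specific CPU):

	/* DIV_ROUND_UP(a, b) == (a + b - 1) / b */
	min_pstate = 8, turbo_pstate = 33:
		8 * 100 / 33          = 24    (plain division rounds down)
		DIV_ROUND_UP(800, 33) = 25    (value returned by min_perf_pct_min())
	turbo_pstate = 0 (MSRs not read yet):
		the new guard returns 0 instead of dividing by zero
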
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 6ed32aac8bbe..922d0823f8ec 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -210,9 +210,12 @@ EXPORT_SYMBOL_GPL(kill_dax);
210static struct inode *dax_alloc_inode(struct super_block *sb) 210static struct inode *dax_alloc_inode(struct super_block *sb)
211{ 211{
212 struct dax_device *dax_dev; 212 struct dax_device *dax_dev;
213 struct inode *inode;
213 214
214 dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); 215 dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
215 return &dax_dev->inode; 216 inode = &dax_dev->inode;
217 inode->i_rdev = 0;
218 return inode;
216} 219}
217 220
218static struct dax_device *to_dax_dev(struct inode *inode) 221static struct dax_device *to_dax_dev(struct inode *inode)
@@ -227,7 +230,8 @@ static void dax_i_callback(struct rcu_head *head)
227 230
228 kfree(dax_dev->host); 231 kfree(dax_dev->host);
229 dax_dev->host = NULL; 232 dax_dev->host = NULL;
230 ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); 233 if (inode->i_rdev)
234 ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
231 kmem_cache_free(dax_cache, dax_dev); 235 kmem_cache_free(dax_cache, dax_dev);
232} 236}
233 237
@@ -423,6 +427,7 @@ static void init_once(void *_dax_dev)
423 struct dax_device *dax_dev = _dax_dev; 427 struct dax_device *dax_dev = _dax_dev;
424 struct inode *inode = &dax_dev->inode; 428 struct inode *inode = &dax_dev->inode;
425 429
430 memset(dax_dev, 0, sizeof(*dax_dev));
426 inode_init_once(inode); 431 inode_init_once(inode);
427} 432}
428 433
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index 8bf27323f7a3..b58233e4ed71 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -27,6 +27,26 @@ struct bmp_header {
27 u32 size; 27 u32 size;
28} __packed; 28} __packed;
29 29
30static bool efi_bgrt_addr_valid(u64 addr)
31{
32 efi_memory_desc_t *md;
33
34 for_each_efi_memory_desc(md) {
35 u64 size;
36 u64 end;
37
38 if (md->type != EFI_BOOT_SERVICES_DATA)
39 continue;
40
41 size = md->num_pages << EFI_PAGE_SHIFT;
42 end = md->phys_addr + size;
43 if (addr >= md->phys_addr && addr < end)
44 return true;
45 }
46
47 return false;
48}
49
30void __init efi_bgrt_init(struct acpi_table_header *table) 50void __init efi_bgrt_init(struct acpi_table_header *table)
31{ 51{
32 void *image; 52 void *image;
@@ -36,7 +56,7 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
36 if (acpi_disabled) 56 if (acpi_disabled)
37 return; 57 return;
38 58
39 if (!efi_enabled(EFI_BOOT)) 59 if (!efi_enabled(EFI_MEMMAP))
40 return; 60 return;
41 61
42 if (table->length < sizeof(bgrt_tab)) { 62 if (table->length < sizeof(bgrt_tab)) {
@@ -65,6 +85,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
65 goto out; 85 goto out;
66 } 86 }
67 87
88 if (!efi_bgrt_addr_valid(bgrt->image_address)) {
89 pr_notice("Ignoring BGRT: invalid image address\n");
90 goto out;
91 }
68 image = early_memremap(bgrt->image_address, sizeof(bmp_header)); 92 image = early_memremap(bgrt->image_address, sizeof(bmp_header));
69 if (!image) { 93 if (!image) {
70 pr_notice("Ignoring BGRT: failed to map image header memory\n"); 94 pr_notice("Ignoring BGRT: failed to map image header memory\n");
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1e7860f02f4f..31058d400bda 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -136,12 +136,12 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
136 info->value = value; 136 info->value = value;
137 137
138 INIT_LIST_HEAD(&info->list); 138 INIT_LIST_HEAD(&info->list);
139 list_add_tail(&info->list, &sec->attribs);
140 139
141 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); 140 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
142 if (ret) 141 if (ret)
143 goto free_info_key; 142 goto free_info_key;
144 143
144 list_add_tail(&info->list, &sec->attribs);
145 return 0; 145 return 0;
146 146
147free_info_key: 147free_info_key:
@@ -158,8 +158,8 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
158 struct vpd_attrib_info *temp; 158 struct vpd_attrib_info *temp;
159 159
160 list_for_each_entry_safe(info, temp, &sec->attribs, list) { 160 list_for_each_entry_safe(info, temp, &sec->attribs, list) {
161 kfree(info->key);
162 sysfs_remove_bin_file(sec->kobj, &info->bin_attr); 161 sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
162 kfree(info->key);
163 kfree(info); 163 kfree(info);
164 } 164 }
165} 165}
@@ -244,7 +244,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
244{ 244{
245 if (sec->enabled) { 245 if (sec->enabled) {
246 vpd_section_attrib_destroy(sec); 246 vpd_section_attrib_destroy(sec);
247 kobject_del(sec->kobj); 247 kobject_put(sec->kobj);
248 sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr); 248 sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
249 kfree(sec->raw_name); 249 kfree(sec->raw_name);
250 iounmap(sec->baseaddr); 250 iounmap(sec->baseaddr);
@@ -331,7 +331,7 @@ static void __exit vpd_platform_exit(void)
331{ 331{
332 vpd_section_destroy(&ro_vpd); 332 vpd_section_destroy(&ro_vpd);
333 vpd_section_destroy(&rw_vpd); 333 vpd_section_destroy(&rw_vpd);
334 kobject_del(vpd_kobj); 334 kobject_put(vpd_kobj);
335} 335}
336 336
337module_init(vpd_platform_init); 337module_init(vpd_platform_init);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index ccea609676ee..4ca436e66bdb 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -646,6 +646,9 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
646 int rc; 646 int rc;
647 int i; 647 int i;
648 648
649 if (!gpio->clk)
650 return -EINVAL;
651
649 rc = usecs_to_cycles(gpio, usecs, &requested_cycles); 652 rc = usecs_to_cycles(gpio, usecs, &requested_cycles);
650 if (rc < 0) { 653 if (rc < 0) {
651 dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n", 654 dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n",
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 2197368cc899..e60156ec0c18 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -90,8 +90,18 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
90{ 90{
91 int reg; 91 int reg;
92 92
93 if (gpio == 94) 93 if (gpio >= CRYSTALCOVE_GPIO_NUM) {
94 return GPIOPANELCTL; 94 /*
95 * Virtual GPIO called from ACPI, for now we only support
96 * the panel ctl.
97 */
98 switch (gpio) {
99 case 0x5e:
100 return GPIOPANELCTL;
101 default:
102 return -EOPNOTSUPP;
103 }
104 }
95 105
96 if (reg_type == CTRL_IN) { 106 if (reg_type == CTRL_IN) {
97 if (gpio < 8) 107 if (gpio < 8)
@@ -130,36 +140,36 @@ static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio)
130static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio) 140static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
131{ 141{
132 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 142 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
143 int reg = to_reg(gpio, CTRL_OUT);
133 144
134 if (gpio > CRYSTALCOVE_VGPIO_NUM) 145 if (reg < 0)
135 return 0; 146 return 0;
136 147
137 return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), 148 return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
138 CTLO_INPUT_SET);
139} 149}
140 150
141static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio, 151static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
142 int value) 152 int value)
143{ 153{
144 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 154 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
155 int reg = to_reg(gpio, CTRL_OUT);
145 156
146 if (gpio > CRYSTALCOVE_VGPIO_NUM) 157 if (reg < 0)
147 return 0; 158 return 0;
148 159
149 return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), 160 return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value);
150 CTLO_OUTPUT_SET | value);
151} 161}
152 162
153static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio) 163static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
154{ 164{
155 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 165 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
156 int ret;
157 unsigned int val; 166 unsigned int val;
167 int ret, reg = to_reg(gpio, CTRL_IN);
158 168
159 if (gpio > CRYSTALCOVE_VGPIO_NUM) 169 if (reg < 0)
160 return 0; 170 return 0;
161 171
162 ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val); 172 ret = regmap_read(cg->regmap, reg, &val);
163 if (ret) 173 if (ret)
164 return ret; 174 return ret;
165 175
@@ -170,14 +180,15 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
170 unsigned gpio, int value) 180 unsigned gpio, int value)
171{ 181{
172 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 182 struct crystalcove_gpio *cg = gpiochip_get_data(chip);
183 int reg = to_reg(gpio, CTRL_OUT);
173 184
174 if (gpio > CRYSTALCOVE_VGPIO_NUM) 185 if (reg < 0)
175 return; 186 return;
176 187
177 if (value) 188 if (value)
178 regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1); 189 regmap_update_bits(cg->regmap, reg, 1, 1);
179 else 190 else
180 regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0); 191 regmap_update_bits(cg->regmap, reg, 1, 0);
181} 192}
182 193
183static int crystalcove_irq_type(struct irq_data *data, unsigned type) 194static int crystalcove_irq_type(struct irq_data *data, unsigned type)
@@ -185,6 +196,9 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned type)
185 struct crystalcove_gpio *cg = 196 struct crystalcove_gpio *cg =
186 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 197 gpiochip_get_data(irq_data_get_irq_chip_data(data));
187 198
199 if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
200 return 0;
201
188 switch (type) { 202 switch (type) {
189 case IRQ_TYPE_NONE: 203 case IRQ_TYPE_NONE:
190 cg->intcnt_value = CTLI_INTCNT_DIS; 204 cg->intcnt_value = CTLI_INTCNT_DIS;
@@ -235,8 +249,10 @@ static void crystalcove_irq_unmask(struct irq_data *data)
235 struct crystalcove_gpio *cg = 249 struct crystalcove_gpio *cg =
236 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 250 gpiochip_get_data(irq_data_get_irq_chip_data(data));
237 251
238 cg->set_irq_mask = false; 252 if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
239 cg->update |= UPDATE_IRQ_MASK; 253 cg->set_irq_mask = false;
254 cg->update |= UPDATE_IRQ_MASK;
255 }
240} 256}
241 257
242static void crystalcove_irq_mask(struct irq_data *data) 258static void crystalcove_irq_mask(struct irq_data *data)
@@ -244,8 +260,10 @@ static void crystalcove_irq_mask(struct irq_data *data)
244 struct crystalcove_gpio *cg = 260 struct crystalcove_gpio *cg =
245 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 261 gpiochip_get_data(irq_data_get_irq_chip_data(data));
246 262
247 cg->set_irq_mask = true; 263 if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
248 cg->update |= UPDATE_IRQ_MASK; 264 cg->set_irq_mask = true;
265 cg->update |= UPDATE_IRQ_MASK;
266 }
249} 267}
250 268
251static struct irq_chip crystalcove_irqchip = { 269static struct irq_chip crystalcove_irqchip = {
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 19a92efabbef..5104b6398139 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -747,7 +747,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
747 set = U32_MAX; 747 set = U32_MAX;
748 else 748 else
749 return -EINVAL; 749 return -EINVAL;
750 writel_relaxed(0, mvebu_gpioreg_blink_counter_select(mvchip)); 750 writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip));
751 751
752 mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL); 752 mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
753 if (!mvpwm) 753 if (!mvpwm)
@@ -768,6 +768,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
768 mvpwm->chip.dev = dev; 768 mvpwm->chip.dev = dev;
769 mvpwm->chip.ops = &mvebu_pwm_ops; 769 mvpwm->chip.ops = &mvebu_pwm_ops;
770 mvpwm->chip.npwm = mvchip->chip.ngpio; 770 mvpwm->chip.npwm = mvchip->chip.ngpio;
771 /*
772 * There may already be some PWM allocated, so we can't force
773 * mvpwm->chip.base to a fixed point like mvchip->chip.base.
774 * So, we let pwmchip_add() do the numbering and take the next free
775 * region.
776 */
777 mvpwm->chip.base = -1;
771 778
772 spin_lock_init(&mvpwm->lock); 779 spin_lock_init(&mvpwm->lock);
773 780
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 8be9719284b0..aa885a614e27 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
508 bool has_connectors = 508 bool has_connectors =
509 !!new_crtc_state->connector_mask; 509 !!new_crtc_state->connector_mask;
510 510
511 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
512
511 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { 513 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
512 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", 514 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
513 crtc->base.id, crtc->name); 515 crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
551 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 553 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
552 const struct drm_connector_helper_funcs *funcs = connector->helper_private; 554 const struct drm_connector_helper_funcs *funcs = connector->helper_private;
553 555
556 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
557
554 /* 558 /*
555 * This only sets crtc->connectors_changed for routing changes, 559 * This only sets crtc->connectors_changed for routing changes,
556 * drivers must set crtc->connectors_changed themselves when 560 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
650 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 654 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
651 const struct drm_plane_helper_funcs *funcs; 655 const struct drm_plane_helper_funcs *funcs;
652 656
657 WARN_ON(!drm_modeset_is_locked(&plane->mutex));
658
653 funcs = plane->helper_private; 659 funcs = plane->helper_private;
654 660
655 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane); 661 drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
2663 2669
2664 drm_modeset_acquire_init(&ctx, 0); 2670 drm_modeset_acquire_init(&ctx, 0);
2665 while (1) { 2671 while (1) {
2672 err = drm_modeset_lock_all_ctx(dev, &ctx);
2673 if (err)
2674 goto out;
2675
2666 err = drm_atomic_helper_commit_duplicated_state(state, &ctx); 2676 err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
2677out:
2667 if (err != -EDEADLK) 2678 if (err != -EDEADLK)
2668 break; 2679 break;
2669 2680
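
The hunk restores the usual acquire-context retry idiom: taking the locks inside the loop means an -EDEADLK from either the locking step or the commit backs off and retries with the same context. A condensed sketch of that idiom (cleanup of the surrounding resume path trimmed; the drm_modeset_* helpers are the ones used by the DRM core):

	drm_modeset_acquire_init(&ctx, 0);
	for (;;) {
		err = drm_modeset_lock_all_ctx(dev, &ctx);
		if (!err)
			err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
		if (err != -EDEADLK)
			break;
		drm_modeset_backoff(&ctx);	/* drop and retry on lock contention */
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
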
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b5c6bb46a425..37b8ad3e30d8 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
358void drm_unplug_dev(struct drm_device *dev) 358void drm_unplug_dev(struct drm_device *dev)
359{ 359{
360 /* for a USB device */ 360 /* for a USB device */
361 drm_dev_unregister(dev); 361 if (drm_core_check_feature(dev, DRIVER_MODESET))
362 drm_modeset_unregister_all(dev);
363
364 drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
365 drm_minor_unregister(dev, DRM_MINOR_RENDER);
366 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
362 367
363 mutex_lock(&drm_global_mutex); 368 mutex_lock(&drm_global_mutex);
364 369
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index 5abc69c9630f..f77dcfaade6c 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
760 * Get the endpoint node. In our case, dsi has one output port1 760 * Get the endpoint node. In our case, dsi has one output port1
761 * to which the external HDMI bridge is connected. 761 * to which the external HDMI bridge is connected.
762 */ 762 */
763 ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge); 763 ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
764 if (ret) 764 if (ret)
765 return ret; 765 return ret;
766 766
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c994fe6e65b2..48428672fc6e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1235 goto out_fini; 1235 goto out_fini;
1236 1236
1237 pci_set_drvdata(pdev, &dev_priv->drm); 1237 pci_set_drvdata(pdev, &dev_priv->drm);
1238 /*
1239 * Disable the system suspend direct complete optimization, which can
 1240 * leave the device suspended, skipping the driver's suspend handlers,
 1241 * if the device was already runtime suspended. This is needed due to
 1242 * the difference in our runtime and system suspend sequence and
 1243 * because the HDA driver may require us to enable the audio power
1244 * domain during system suspend.
1245 */
1246 pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
1238 1247
1239 ret = i915_driver_init_early(dev_priv, ent); 1248 ret = i915_driver_init_early(dev_priv, ent);
1240 if (ret < 0) 1249 if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 963f6d4481f7..2c453a4e97d5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2991,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2991 return false; 2991 return false;
2992} 2992}
2993 2993
2994static inline bool
2995intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
2996{
2997#ifdef CONFIG_INTEL_IOMMU
2998 if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
2999 return true;
3000#endif
3001 return false;
3002}
3003
2994int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 3004int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2995 int enable_ppgtt); 3005 int enable_ppgtt);
2996 3006
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b6ac3df18b58..462031cbd77f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3298,6 +3298,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3298{ 3298{
3299 int ret; 3299 int ret;
3300 3300
3301 /* If the device is asleep, we have no requests outstanding */
3302 if (!READ_ONCE(i915->gt.awake))
3303 return 0;
3304
3301 if (flags & I915_WAIT_LOCKED) { 3305 if (flags & I915_WAIT_LOCKED) {
3302 struct i915_gem_timeline *tl; 3306 struct i915_gem_timeline *tl;
3303 3307
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 50b8f1139ff9..f1989b8792dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2191 gen8_set_pte(&gtt_base[i], scratch_pte); 2191 gen8_set_pte(&gtt_base[i], scratch_pte);
2192} 2192}
2193 2193
2194static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2195{
2196 struct drm_i915_private *dev_priv = vm->i915;
2197
2198 /*
2199 * Make sure the internal GAM fifo has been cleared of all GTT
2200 * writes before exiting stop_machine(). This guarantees that
2201 * any aperture accesses waiting to start in another process
2202 * cannot back up behind the GTT writes causing a hang.
2203 * The register can be any arbitrary GAM register.
2204 */
2205 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2206}
2207
2208struct insert_page {
2209 struct i915_address_space *vm;
2210 dma_addr_t addr;
2211 u64 offset;
2212 enum i915_cache_level level;
2213};
2214
2215static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2216{
2217 struct insert_page *arg = _arg;
2218
2219 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2220 bxt_vtd_ggtt_wa(arg->vm);
2221
2222 return 0;
2223}
2224
2225static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2226 dma_addr_t addr,
2227 u64 offset,
2228 enum i915_cache_level level,
2229 u32 unused)
2230{
2231 struct insert_page arg = { vm, addr, offset, level };
2232
2233 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2234}
2235
2236struct insert_entries {
2237 struct i915_address_space *vm;
2238 struct sg_table *st;
2239 u64 start;
2240 enum i915_cache_level level;
2241};
2242
2243static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2244{
2245 struct insert_entries *arg = _arg;
2246
2247 gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
2248 bxt_vtd_ggtt_wa(arg->vm);
2249
2250 return 0;
2251}
2252
2253static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2254 struct sg_table *st,
2255 u64 start,
2256 enum i915_cache_level level,
2257 u32 unused)
2258{
2259 struct insert_entries arg = { vm, st, start, level };
2260
2261 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2262}
2263
2264struct clear_range {
2265 struct i915_address_space *vm;
2266 u64 start;
2267 u64 length;
2268};
2269
2270static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2271{
2272 struct clear_range *arg = _arg;
2273
2274 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2275 bxt_vtd_ggtt_wa(arg->vm);
2276
2277 return 0;
2278}
2279
2280static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2281 u64 start,
2282 u64 length)
2283{
2284 struct clear_range arg = { vm, start, length };
2285
2286 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2287}
2288
2194static void gen6_ggtt_clear_range(struct i915_address_space *vm, 2289static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2195 u64 start, u64 length) 2290 u64 start, u64 length)
2196{ 2291{
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
2785 2880
2786 ggtt->base.insert_entries = gen8_ggtt_insert_entries; 2881 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
2787 2882
2883 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2884 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2885 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2886 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2887 if (ggtt->base.clear_range != nop_clear_range)
2888 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2889 }
2890
2788 ggtt->invalidate = gen6_ggtt_invalidate; 2891 ggtt->invalidate = gen6_ggtt_invalidate;
2789 2892
2790 return ggtt_probe_common(ggtt, size); 2893 return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
2997 3100
2998void i915_ggtt_disable_guc(struct drm_i915_private *i915) 3101void i915_ggtt_disable_guc(struct drm_i915_private *i915)
2999{ 3102{
3000 i915->ggtt.invalidate = gen6_ggtt_invalidate; 3103 if (i915->ggtt.invalidate == guc_ggtt_invalidate)
3104 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3001} 3105}
3002 3106
3003void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) 3107void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
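Editor's note: the hunk above serializes Broxton GGTT updates against concurrent aperture access by running each update inside stop_machine(), followed by a posting read to drain the GAM FIFO. A rough, hedged sketch of the general stop_machine() pattern (illustrative names only, not the driver code itself):

#include <linux/stop_machine.h>
#include <linux/io.h>

struct update_args {			/* hypothetical argument bundle */
	void __iomem *regs;
	u32 value;
};

static int do_update(void *data)
{
	struct update_args *args = data;

	/* Runs with every other CPU spinning with interrupts off. */
	writel(args->value, args->regs);
	readl(args->regs);		/* post the write before CPUs resume */
	return 0;
}

static void update_serialized(struct update_args *args)
{
	/* NULL cpumask: run on any one CPU, stall the rest meanwhile. */
	stop_machine(do_update, args, NULL);
}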
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a0d6d4317a49..fb5231f98c0d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
278 obj->mm.quirked = false; 278 obj->mm.quirked = false;
279 } 279 }
280 if (!i915_gem_object_is_tiled(obj)) { 280 if (!i915_gem_object_is_tiled(obj)) {
281 GEM_BUG_ON(!obj->mm.quirked); 281 GEM_BUG_ON(obj->mm.quirked);
282 __i915_gem_object_pin_pages(obj); 282 __i915_gem_object_pin_pages(obj);
283 obj->mm.quirked = true; 283 obj->mm.quirked = true;
284 } 284 }
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index f87b0c4e564d..1a78363c7f4a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
208static const struct intel_device_info intel_ironlake_m_info = { 208static const struct intel_device_info intel_ironlake_m_info = {
209 GEN5_FEATURES, 209 GEN5_FEATURES,
210 .platform = INTEL_IRONLAKE, 210 .platform = INTEL_IRONLAKE,
211 .is_mobile = 1, 211 .is_mobile = 1, .has_fbc = 1,
212}; 212};
213 213
214#define GEN6_FEATURES \ 214#define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
390 .has_hw_contexts = 1, \ 390 .has_hw_contexts = 1, \
391 .has_logical_ring_contexts = 1, \ 391 .has_logical_ring_contexts = 1, \
392 .has_guc = 1, \ 392 .has_guc = 1, \
393 .has_decoupled_mmio = 1, \
394 .has_aliasing_ppgtt = 1, \ 393 .has_aliasing_ppgtt = 1, \
395 .has_full_ppgtt = 1, \ 394 .has_full_ppgtt = 1, \
396 .has_full_48bit_ppgtt = 1, \ 395 .has_full_48bit_ppgtt = 1, \
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3cabe52a4e3b..569717a12723 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12203,6 +12203,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
12203 * type. For DP ports it behaves like most other platforms, but on HDMI 12203 * type. For DP ports it behaves like most other platforms, but on HDMI
12204 * there's an extra 1 line difference. So we need to add two instead of 12204 * there's an extra 1 line difference. So we need to add two instead of
12205 * one to the value. 12205 * one to the value.
12206 *
12207 * On VLV/CHV DSI the scanline counter would appear to increment
12208 * approx. 1/3 of a scanline before start of vblank. Unfortunately
12209 * that means we can't tell whether we're in vblank or not while
12210 * we're on that particular line. We must still set scanline_offset
12211 * to 1 so that the vblank timestamps come out correct when we query
12212 * the scanline counter from within the vblank interrupt handler.
12213 * However if queried just before the start of vblank we'll get an
12214 * answer that's slightly in the future.
12206 */ 12215 */
12207 if (IS_GEN2(dev_priv)) { 12216 if (IS_GEN2(dev_priv)) {
12208 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 12217 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 854e8e0c836b..f94eacff196c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
1075 return 0; 1075 return 0;
1076} 1076}
1077 1077
1078static bool ring_is_idle(struct intel_engine_cs *engine)
1079{
1080 struct drm_i915_private *dev_priv = engine->i915;
1081 bool idle = true;
1082
1083 intel_runtime_pm_get(dev_priv);
1084
1085 /* No bit for gen2, so assume the CS parser is idle */
1086 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
1087 idle = false;
1088
1089 intel_runtime_pm_put(dev_priv);
1090
1091 return idle;
1092}
1093
1078/** 1094/**
1079 * intel_engine_is_idle() - Report if the engine has finished process all work 1095 * intel_engine_is_idle() - Report if the engine has finished process all work
1080 * @engine: the intel_engine_cs 1096 * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
1084 */ 1100 */
1085bool intel_engine_is_idle(struct intel_engine_cs *engine) 1101bool intel_engine_is_idle(struct intel_engine_cs *engine)
1086{ 1102{
1087 struct drm_i915_private *dev_priv = engine->i915;
1088
1089 /* Any inflight/incomplete requests? */ 1103 /* Any inflight/incomplete requests? */
1090 if (!i915_seqno_passed(intel_engine_get_seqno(engine), 1104 if (!i915_seqno_passed(intel_engine_get_seqno(engine),
1091 intel_engine_last_submit(engine))) 1105 intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
1100 return false; 1114 return false;
1101 1115
1102 /* Ring stopped? */ 1116 /* Ring stopped? */
1103 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) 1117 if (!ring_is_idle(engine))
1104 return false; 1118 return false;
1105 1119
1106 return true; 1120 return true;
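Editor's note: ring_is_idle() above brackets its MODE register read with intel_runtime_pm_get()/intel_runtime_pm_put() so the hardware is guaranteed to be awake for the MMIO access. A minimal sketch of the same idea using the generic runtime-PM API (placeholder register and bit, not the i915 implementation):

#include <linux/pm_runtime.h>
#include <linux/io.h>

static bool device_reports_idle(struct device *dev, void __iomem *status_reg,
				u32 idle_bit)
{
	bool idle;

	pm_runtime_get_sync(dev);	/* power up (or keep powered) for the read */
	idle = readl(status_reg) & idle_bit;
	pm_runtime_put(dev);

	return idle;
}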
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index ded2add18b26..d93c58410bff 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
82static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, 82static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
83 int *width, int *height) 83 int *width, int *height)
84{ 84{
85 int w, h;
86
87 if (drm_rotation_90_or_270(cache->plane.rotation)) {
88 w = cache->plane.src_h;
89 h = cache->plane.src_w;
90 } else {
91 w = cache->plane.src_w;
92 h = cache->plane.src_h;
93 }
94
95 if (width) 85 if (width)
96 *width = w; 86 *width = cache->plane.src_w;
97 if (height) 87 if (height)
98 *height = h; 88 *height = cache->plane.src_h;
99} 89}
100 90
101static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, 91static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
746 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; 736 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
747 737
748 cache->plane.rotation = plane_state->base.rotation; 738 cache->plane.rotation = plane_state->base.rotation;
739 /*
740 * Src coordinates are already rotated by 270 degrees for
741 * the 90/270 degree plane rotation cases (to match the
742 * GTT mapping), hence no need to account for rotation here.
743 */
749 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; 744 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
750 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; 745 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
751 cache->plane.visible = plane_state->base.visible; 746 cache->plane.visible = plane_state->base.visible;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 570bd603f401..2ca481b5aa69 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4335,11 +4335,19 @@ skl_compute_wm(struct drm_atomic_state *state)
4335 struct drm_crtc_state *cstate; 4335 struct drm_crtc_state *cstate;
4336 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4336 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
4337 struct skl_wm_values *results = &intel_state->wm_results; 4337 struct skl_wm_values *results = &intel_state->wm_results;
4338 struct drm_device *dev = state->dev;
4338 struct skl_pipe_wm *pipe_wm; 4339 struct skl_pipe_wm *pipe_wm;
4339 bool changed = false; 4340 bool changed = false;
4340 int ret, i; 4341 int ret, i;
4341 4342
4342 /* 4343 /*
4344 * When we distrust bios wm we always need to recompute to set the
4345 * expected DDB allocations for each CRTC.
4346 */
4347 if (to_i915(dev)->wm.distrust_bios_wm)
4348 changed = true;
4349
4350 /*
4343 * If this transaction isn't actually touching any CRTC's, don't 4351 * If this transaction isn't actually touching any CRTC's, don't
4344 * bother with watermark calculation. Note that if we pass this 4352 * bother with watermark calculation. Note that if we pass this
4345 * test, we're guaranteed to hold at least one CRTC state mutex, 4353 * test, we're guaranteed to hold at least one CRTC state mutex,
@@ -4349,6 +4357,7 @@ skl_compute_wm(struct drm_atomic_state *state)
4349 */ 4357 */
4350 for_each_new_crtc_in_state(state, crtc, cstate, i) 4358 for_each_new_crtc_in_state(state, crtc, cstate, i)
4351 changed = true; 4359 changed = true;
4360
4352 if (!changed) 4361 if (!changed)
4353 return 0; 4362 return 0;
4354 4363
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3780d0d2baf..559f1ab42bfc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
435 } 435 }
436 436
437 /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */ 437 /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
438 if (intel_crtc->config->pipe_src_w > 3200 || 438 if (dev_priv->psr.psr2_support &&
439 intel_crtc->config->pipe_src_h > 2000) { 439 (intel_crtc->config->pipe_src_w > 3200 ||
440 intel_crtc->config->pipe_src_h > 2000)) {
440 dev_priv->psr.psr2_support = false; 441 dev_priv->psr.psr2_support = false;
441 return false; 442 return false;
442 } 443 }
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8c87c717c7cd..e6517edcd16b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
83 */ 83 */
84void intel_pipe_update_start(struct intel_crtc *crtc) 84void intel_pipe_update_start(struct intel_crtc *crtc)
85{ 85{
86 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
86 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 87 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
87 long timeout = msecs_to_jiffies_timeout(1); 88 long timeout = msecs_to_jiffies_timeout(1);
88 int scanline, min, max, vblank_start; 89 int scanline, min, max, vblank_start;
89 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 90 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
91 bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
92 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
90 DEFINE_WAIT(wait); 93 DEFINE_WAIT(wait);
91 94
92 vblank_start = adjusted_mode->crtc_vblank_start; 95 vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
139 142
140 drm_crtc_vblank_put(&crtc->base); 143 drm_crtc_vblank_put(&crtc->base);
141 144
145 /*
146 * On VLV/CHV DSI the scanline counter would appear to
147 * increment approx. 1/3 of a scanline before start of vblank.
148 * The registers still get latched at start of vblank however.
149 * This means we must not write any registers on the first
150 * line of vblank (since not the whole line is actually in
151 * vblank). And unfortunately we can't use the interrupt to
152 * wait here since it will fire too soon. We could use the
153 * frame start interrupt instead since it will fire after the
154 * critical scanline, but that would require more changes
155 * in the interrupt code. So for now we'll just do the nasty
156 * thing and poll for the bad scanline to pass us by.
157 *
158 * FIXME figure out if BXT+ DSI suffers from this as well
159 */
160 while (need_vlv_dsi_wa && scanline == vblank_start)
161 scanline = intel_get_crtc_scanline(crtc);
162
142 crtc->debug.scanline_start = scanline; 163 crtc->debug.scanline_start = scanline;
143 crtc->debug.start_vbl_time = ktime_get(); 164 crtc->debug.start_vbl_time = ktime_get();
144 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); 165 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
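Editor's note: the loop added above simply busy-waits until the scanline counter leaves the ambiguous first line of vblank. A stripped-down sketch of that polling shape, with a hypothetical read_scanline() callback standing in for intel_get_crtc_scanline():

#include <asm/processor.h>	/* cpu_relax() */

static void wait_past_vblank_start(bool need_wa, int vblank_start,
				   int (*read_scanline)(void *ctx), void *ctx)
{
	/* The bad window is well under one scanline, so spinning is cheap. */
	while (need_wa && read_scanline(ctx) == vblank_start)
		cpu_relax();
}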
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 4b7f73aeddac..f84115261ae7 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
59 * available in the work queue (note, the queue is shared, 59 * available in the work queue (note, the queue is shared,
60 * not per-engine). It is OK for this to be nonzero, but 60 * not per-engine). It is OK for this to be nonzero, but
61 * it should not be huge! 61 * it should not be huge!
62 * q_fail: failed to enqueue a work item. This should never happen,
63 * because we check for space beforehand.
64 * b_fail: failed to ring the doorbell. This should never happen, unless 62 * b_fail: failed to ring the doorbell. This should never happen, unless
65 * somehow the hardware misbehaves, or maybe if the GuC firmware 63 * somehow the hardware misbehaves, or maybe if the GuC firmware
66 * crashes? We probably need to reset the GPU to recover. 64 * crashes? We probably need to reset the GPU to recover.
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8fb801fab039..8b05ecb8fdef 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
673 ret = drm_of_find_panel_or_bridge(child, 673 ret = drm_of_find_panel_or_bridge(child,
674 imx_ldb->lvds_mux ? 4 : 2, 0, 674 imx_ldb->lvds_mux ? 4 : 2, 0,
675 &channel->panel, &channel->bridge); 675 &channel->panel, &channel->bridge);
676 if (ret) 676 if (ret && ret != -ENODEV)
677 return ret; 677 return ret;
678 678
679 /* panel ddc only if there is no bridge */ 679 /* panel ddc only if there is no bridge */
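Editor's note: the imx-ldb change treats -ENODEV from the panel/bridge lookup as "nothing attached" rather than as a fatal bind error. A sketch of that optional-endpoint pattern (illustrative wrapper, not the driver code):

#include <drm/drm_of.h>
#include <linux/errno.h>

static int probe_optional_panel(struct device_node *np, int port,
				struct drm_panel **panel,
				struct drm_bridge **bridge)
{
	int ret = drm_of_find_panel_or_bridge(np, port, 0, panel, bridge);

	if (ret == -ENODEV)	/* endpoint absent: continue without it */
		return 0;

	return ret;		/* 0 on success, other errors still abort */
}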
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 808b995a990f..b5cc6e12334c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,6 +19,7 @@
19#include <drm/drm_of.h> 19#include <drm/drm_of.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/component.h> 21#include <linux/component.h>
22#include <linux/iopoll.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/of.h> 24#include <linux/of.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
900 901
901static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) 902static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
902{ 903{
903 u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */ 904 int ret;
904 905 u32 val;
905 while (timeout_ms--) {
906 if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
907 break;
908
909 usleep_range(2, 4);
910 }
911 906
912 if (timeout_ms == 0) { 907 ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
908 4, 2000000);
909 if (ret) {
913 DRM_WARN("polling dsi wait not busy timeout!\n"); 910 DRM_WARN("polling dsi wait not busy timeout!\n");
914 911
915 mtk_dsi_enable(dsi); 912 mtk_dsi_enable(dsi);
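Editor's note: the conversion above replaces an open-coded poll loop with readl_poll_timeout(), which handles the sleep interval and timeout bookkeeping itself. A generic sketch of that helper's use (placeholder register offset and bit):

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define STATUS_REG	0x0c		/* hypothetical register offset */
#define STATUS_BUSY	BIT(31)		/* hypothetical busy bit */

static int wait_not_busy(void __iomem *base)
{
	u32 val;

	/* Poll every ~4us, give up with -ETIMEDOUT after 2 seconds. */
	return readl_poll_timeout(base + STATUS_REG, val,
				  !(val & STATUS_BUSY), 4, 2000000);
}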
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 41a1c03b0347..0a4ffd724146 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
1062 } 1062 }
1063 1063
1064 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); 1064 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
1065 if (err) { 1065 if (err < 0) {
1066 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", 1066 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
1067 err); 1067 err);
1068 return err; 1068 return err;
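Editor's note: hdmi_vendor_infoframe_pack() returns the number of bytes written on success and a negative errno on failure, so only a negative value is an error; the old "if (err)" rejected every successful pack. A minimal sketch of the corrected check (illustrative wrapper):

#include <linux/hdmi.h>

static int pack_vendor_frame(struct hdmi_vendor_infoframe *frame,
			     u8 *buf, size_t len)
{
	ssize_t packed = hdmi_vendor_infoframe_pack(frame, buf, len);

	if (packed < 0)		/* negative errno: packing failed */
		return packed;

	/* 'packed' bytes of infoframe data are now valid in buf. */
	return 0;
}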
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 75382f5f0fce..10b227d83e9a 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
152 .max_register = 0x1000, 152 .max_register = 0x1000,
153}; 153};
154 154
155static int meson_drv_bind(struct device *dev) 155static int meson_drv_bind_master(struct device *dev, bool has_components)
156{ 156{
157 struct platform_device *pdev = to_platform_device(dev); 157 struct platform_device *pdev = to_platform_device(dev);
158 struct meson_drm *priv; 158 struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
233 if (ret) 233 if (ret)
234 goto free_drm; 234 goto free_drm;
235 235
236 ret = component_bind_all(drm->dev, drm); 236 if (has_components) {
237 if (ret) { 237 ret = component_bind_all(drm->dev, drm);
238 dev_err(drm->dev, "Couldn't bind all components\n"); 238 if (ret) {
239 goto free_drm; 239 dev_err(drm->dev, "Couldn't bind all components\n");
240 goto free_drm;
241 }
240 } 242 }
241 243
242 ret = meson_plane_create(priv); 244 ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
276 return ret; 278 return ret;
277} 279}
278 280
281static int meson_drv_bind(struct device *dev)
282{
283 return meson_drv_bind_master(dev, true);
284}
285
279static void meson_drv_unbind(struct device *dev) 286static void meson_drv_unbind(struct device *dev)
280{ 287{
281 struct drm_device *drm = dev_get_drvdata(dev); 288 struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
357 count += meson_probe_remote(pdev, &match, np, remote); 364 count += meson_probe_remote(pdev, &match, np, remote);
358 } 365 }
359 366
367 if (count && !match)
368 return meson_drv_bind_master(&pdev->dev, false);
369
360 /* If some endpoints were found, initialize the nodes */ 370 /* If some endpoints were found, initialize the nodes */
361 if (count) { 371 if (count) {
362 dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count); 372 dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 6a567fe347b3..820a4805916f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
4 4
5struct nvkm_alarm { 5struct nvkm_alarm {
6 struct list_head head; 6 struct list_head head;
7 struct list_head exec;
7 u64 timestamp; 8 u64 timestamp;
8 void (*func)(struct nvkm_alarm *); 9 void (*func)(struct nvkm_alarm *);
9}; 10};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 36268e1802b5..15a13d09d431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
80module_param_named(modeset, nouveau_modeset, int, 0400); 80module_param_named(modeset, nouveau_modeset, int, 0400);
81 81
82MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); 82MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
83int nouveau_runtime_pm = -1; 83static int nouveau_runtime_pm = -1;
84module_param_named(runpm, nouveau_runtime_pm, int, 0400); 84module_param_named(runpm, nouveau_runtime_pm, int, 0400);
85 85
86static struct drm_driver driver_stub; 86static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
495 nouveau_fbcon_init(dev); 495 nouveau_fbcon_init(dev);
496 nouveau_led_init(dev); 496 nouveau_led_init(dev);
497 497
498 if (nouveau_runtime_pm != 0) { 498 if (nouveau_pmops_runtime()) {
499 pm_runtime_use_autosuspend(dev->dev); 499 pm_runtime_use_autosuspend(dev->dev);
500 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 500 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
501 pm_runtime_set_active(dev->dev); 501 pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
527{ 527{
528 struct nouveau_drm *drm = nouveau_drm(dev); 528 struct nouveau_drm *drm = nouveau_drm(dev);
529 529
530 if (nouveau_runtime_pm != 0) { 530 if (nouveau_pmops_runtime()) {
531 pm_runtime_get_sync(dev->dev); 531 pm_runtime_get_sync(dev->dev);
532 pm_runtime_forbid(dev->dev); 532 pm_runtime_forbid(dev->dev);
533 } 533 }
@@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
726 return nouveau_do_resume(drm_dev, false); 726 return nouveau_do_resume(drm_dev, false);
727} 727}
728 728
729bool
730nouveau_pmops_runtime()
731{
732 if (nouveau_runtime_pm == -1)
733 return nouveau_is_optimus() || nouveau_is_v1_dsm();
734 return nouveau_runtime_pm == 1;
735}
736
729static int 737static int
730nouveau_pmops_runtime_suspend(struct device *dev) 738nouveau_pmops_runtime_suspend(struct device *dev)
731{ 739{
@@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
733 struct drm_device *drm_dev = pci_get_drvdata(pdev); 741 struct drm_device *drm_dev = pci_get_drvdata(pdev);
734 int ret; 742 int ret;
735 743
736 if (nouveau_runtime_pm == 0) { 744 if (!nouveau_pmops_runtime()) {
737 pm_runtime_forbid(dev);
738 return -EBUSY;
739 }
740
741 /* are we optimus enabled? */
742 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
743 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
744 pm_runtime_forbid(dev); 745 pm_runtime_forbid(dev);
745 return -EBUSY; 746 return -EBUSY;
746 } 747 }
@@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
765 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; 766 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
766 int ret; 767 int ret;
767 768
768 if (nouveau_runtime_pm == 0) 769 if (!nouveau_pmops_runtime()) {
769 return -EINVAL; 770 pm_runtime_forbid(dev);
771 return -EBUSY;
772 }
770 773
771 pci_set_power_state(pdev, PCI_D0); 774 pci_set_power_state(pdev, PCI_D0);
772 pci_restore_state(pdev); 775 pci_restore_state(pdev);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
796 struct nouveau_drm *drm = nouveau_drm(drm_dev); 799 struct nouveau_drm *drm = nouveau_drm(drm_dev);
797 struct drm_crtc *crtc; 800 struct drm_crtc *crtc;
798 801
799 if (nouveau_runtime_pm == 0) { 802 if (!nouveau_pmops_runtime()) {
800 pm_runtime_forbid(dev);
801 return -EBUSY;
802 }
803
804 /* are we optimus enabled? */
805 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
806 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
807 pm_runtime_forbid(dev); 803 pm_runtime_forbid(dev);
808 return -EBUSY; 804 return -EBUSY;
809 } 805 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index eadec2f49ad3..a11b6aaed325 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
108#include <nvif/object.h> 108#include <nvif/object.h>
109#include <nvif/device.h> 109#include <nvif/device.h>
110 110
111extern int nouveau_runtime_pm;
112
113struct nouveau_drm { 111struct nouveau_drm {
114 struct nouveau_cli client; 112 struct nouveau_cli client;
115 struct drm_device *dev; 113 struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
195 193
196int nouveau_pmops_suspend(struct device *); 194int nouveau_pmops_suspend(struct device *);
197int nouveau_pmops_resume(struct device *); 195int nouveau_pmops_resume(struct device *);
196bool nouveau_pmops_runtime(void);
198 197
199#include <nvkm/core/tegra.h> 198#include <nvkm/core/tegra.h>
200 199
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index a4aacbc0cec8..02fe0efb9e16 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -87,7 +87,7 @@ void
87nouveau_vga_init(struct nouveau_drm *drm) 87nouveau_vga_init(struct nouveau_drm *drm)
88{ 88{
89 struct drm_device *dev = drm->dev; 89 struct drm_device *dev = drm->dev;
90 bool runtime = false; 90 bool runtime = nouveau_pmops_runtime();
91 91
92 /* only relevant for PCI devices */ 92 /* only relevant for PCI devices */
93 if (!dev->pdev) 93 if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
99 if (pci_is_thunderbolt_attached(dev->pdev)) 99 if (pci_is_thunderbolt_attached(dev->pdev))
100 return; 100 return;
101 101
102 if (nouveau_runtime_pm == 1)
103 runtime = true;
104 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
105 runtime = true;
106 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); 102 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
107 103
108 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) 104 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
113nouveau_vga_fini(struct nouveau_drm *drm) 109nouveau_vga_fini(struct nouveau_drm *drm)
114{ 110{
115 struct drm_device *dev = drm->dev; 111 struct drm_device *dev = drm->dev;
116 bool runtime = false; 112 bool runtime = nouveau_pmops_runtime();
117 113
118 vga_client_register(dev->pdev, NULL, NULL, NULL); 114 vga_client_register(dev->pdev, NULL, NULL, NULL);
119 115
120 if (pci_is_thunderbolt_attached(dev->pdev)) 116 if (pci_is_thunderbolt_attached(dev->pdev))
121 return; 117 return;
122 118
123 if (nouveau_runtime_pm == 1)
124 runtime = true;
125 if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
126 runtime = true;
127
128 vga_switcheroo_unregister_client(dev->pdev); 119 vga_switcheroo_unregister_client(dev->pdev);
129 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) 120 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
130 vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); 121 vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a7663249b3ba..06e564a9ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2107 asyc->set.dither = true; 2107 asyc->set.dither = true;
2108 } 2108 }
2109 } else { 2109 } else {
2110 asyc->set.mask = ~0; 2110 if (asyc)
2111 asyc->set.mask = ~0;
2111 asyh->set.mask = ~0; 2112 asyh->set.mask = ~0;
2112 } 2113 }
2113 2114
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index f2a86eae0a0d..2437f7d41ca2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
50 /* Move to completed list. We'll drop the lock before 50 /* Move to completed list. We'll drop the lock before
51 * executing the callback so it can reschedule itself. 51 * executing the callback so it can reschedule itself.
52 */ 52 */
53 list_move_tail(&alarm->head, &exec); 53 list_del_init(&alarm->head);
54 list_add(&alarm->exec, &exec);
54 } 55 }
55 56
56 /* Shut down interrupt if no more pending alarms. */ 57 /* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
59 spin_unlock_irqrestore(&tmr->lock, flags); 60 spin_unlock_irqrestore(&tmr->lock, flags);
60 61
61 /* Execute completed callbacks. */ 62 /* Execute completed callbacks. */
62 list_for_each_entry_safe(alarm, atemp, &exec, head) { 63 list_for_each_entry_safe(alarm, atemp, &exec, exec) {
63 list_del_init(&alarm->head); 64 list_del(&alarm->exec);
64 alarm->func(alarm); 65 alarm->func(alarm);
65 } 66 }
66} 67}
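Editor's note: the timer fix above gives each alarm a second list_head so completed alarms can be collected on a private list and executed after the lock is dropped, while a callback remains free to re-arm itself through the original "head" member. A generic sketch of that two-list pattern (illustrative types; the real code also filters by timestamp):

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head head;	/* membership of the pending list */
	struct list_head exec;	/* membership of a local run list */
	void (*func)(struct work_item *);
};

static void run_pending(struct list_head *pending, spinlock_t *lock)
{
	struct work_item *item, *tmp;
	unsigned long flags;
	LIST_HEAD(exec);

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(item, tmp, pending, head) {
		list_del_init(&item->head);
		list_add(&item->exec, &exec);
	}
	spin_unlock_irqrestore(lock, flags);

	/* Callbacks may requeue themselves via 'head' from here on. */
	list_for_each_entry_safe(item, tmp, &exec, exec) {
		list_del(&item->exec);
		item->func(item);
	}
}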
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d8fa7a9c9240..ce5f2d1f9994 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
245 struct drm_connector_state *conn_state) 245 struct drm_connector_state *conn_state)
246{ 246{
247 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 247 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
248 struct rockchip_dp_device *dp = to_dp(encoder);
249 int ret;
250 248
251 /* 249 /*
252 * The hardware IC designed that VOP must output the RGB10 video 250 * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
258 256
259 s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 257 s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
260 s->output_type = DRM_MODE_CONNECTOR_eDP; 258 s->output_type = DRM_MODE_CONNECTOR_eDP;
261 if (dp->data->chip_type == RK3399_EDP) {
262 /*
263 * For RK3399, VOP Lit must code the out mode to RGB888,
264 * VOP Big must code the out mode to RGB10.
265 */
266 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
267 encoder);
268 if (ret > 0)
269 s->output_mode = ROCKCHIP_OUT_MODE_P888;
270 }
271 259
272 return 0; 260 return 0;
273} 261}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index a2169dd3d26b..14fa1f8351e8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
615{ 615{
616 struct cdn_dp_device *dp = encoder_to_dp(encoder); 616 struct cdn_dp_device *dp = encoder_to_dp(encoder);
617 int ret, val; 617 int ret, val;
618 struct rockchip_crtc_state *state;
619 618
620 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); 619 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
621 if (ret < 0) { 620 if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
625 624
626 DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", 625 DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
627 (ret) ? "LIT" : "BIG"); 626 (ret) ? "LIT" : "BIG");
628 state = to_rockchip_crtc_state(encoder->crtc->state); 627 if (ret)
629 if (ret) {
630 val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); 628 val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
631 state->output_mode = ROCKCHIP_OUT_MODE_P888; 629 else
632 } else {
633 val = DP_SEL_VOP_LIT << 16; 630 val = DP_SEL_VOP_LIT << 16;
634 state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
635 }
636 631
637 ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); 632 ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
638 if (ret) 633 if (ret)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 3f7a82d1e095..45589d6ce65e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
875static void vop_crtc_enable(struct drm_crtc *crtc) 875static void vop_crtc_enable(struct drm_crtc *crtc)
876{ 876{
877 struct vop *vop = to_vop(crtc); 877 struct vop *vop = to_vop(crtc);
878 const struct vop_data *vop_data = vop->data;
878 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); 879 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
879 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 880 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
880 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; 881 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
967 DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", 968 DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
968 s->output_type); 969 s->output_type);
969 } 970 }
971
972 /*
973 * If the VOP does not support RGB10 output, force RGB10 to RGB888.
974 */
975 if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
976 !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
977 s->output_mode = ROCKCHIP_OUT_MODE_P888;
970 VOP_CTRL_SET(vop, out_mode, s->output_mode); 978 VOP_CTRL_SET(vop, out_mode, s->output_mode);
971 979
972 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len); 980 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 5a4faa85dbd2..9979fd0c2282 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -142,6 +142,9 @@ struct vop_data {
142 const struct vop_intr *intr; 142 const struct vop_intr *intr;
143 const struct vop_win_data *win; 143 const struct vop_win_data *win;
144 unsigned int win_size; 144 unsigned int win_size;
145
146#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
147 u64 feature;
145}; 148};
146 149
147/* interrupt define */ 150/* interrupt define */
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 0da44442aab0..bafd698a28b1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
275static const struct vop_data rk3288_vop = { 275static const struct vop_data rk3288_vop = {
276 .init_table = rk3288_init_reg_table, 276 .init_table = rk3288_init_reg_table,
277 .table_size = ARRAY_SIZE(rk3288_init_reg_table), 277 .table_size = ARRAY_SIZE(rk3288_init_reg_table),
278 .feature = VOP_FEATURE_OUTPUT_RGB10,
278 .intr = &rk3288_vop_intr, 279 .intr = &rk3288_vop_intr,
279 .ctrl = &rk3288_ctrl_data, 280 .ctrl = &rk3288_ctrl_data,
280 .win = rk3288_vop_win_data, 281 .win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
343static const struct vop_data rk3399_vop_big = { 344static const struct vop_data rk3399_vop_big = {
344 .init_table = rk3399_init_reg_table, 345 .init_table = rk3399_init_reg_table,
345 .table_size = ARRAY_SIZE(rk3399_init_reg_table), 346 .table_size = ARRAY_SIZE(rk3399_init_reg_table),
347 .feature = VOP_FEATURE_OUTPUT_RGB10,
346 .intr = &rk3399_vop_intr, 348 .intr = &rk3399_vop_intr,
347 .ctrl = &rk3399_ctrl_data, 349 .ctrl = &rk3399_ctrl_data,
348 /* 350 /*
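Editor's note: the VOP change models RGB10 capability as a feature bit in the per-SoC match data and downgrades the output mode at runtime when the bit is absent. A sketch of that capability-flag pattern with illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

#define SOC_FEATURE_RGB10	BIT(0)	/* illustrative flag */

struct soc_data {
	u64 feature;
};

enum out_mode { OUT_MODE_RGB888, OUT_MODE_RGB10 };	/* illustrative */

static enum out_mode pick_output_mode(const struct soc_data *data,
				      enum out_mode requested)
{
	/* Fall back to RGB888 when this instance cannot drive RGB10. */
	if (requested == OUT_MODE_RGB10 &&
	    !(data->feature & SOC_FEATURE_RGB10))
		return OUT_MODE_RGB888;

	return requested;
}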
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 130d51c5ec6a..4b948fba9eec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,9 +41,9 @@
41#include <drm/ttm/ttm_module.h> 41#include <drm/ttm/ttm_module.h>
42#include "vmwgfx_fence.h" 42#include "vmwgfx_fence.h"
43 43
44#define VMWGFX_DRIVER_DATE "20170221" 44#define VMWGFX_DRIVER_DATE "20170607"
45#define VMWGFX_DRIVER_MAJOR 2 45#define VMWGFX_DRIVER_MAJOR 2
46#define VMWGFX_DRIVER_MINOR 12 46#define VMWGFX_DRIVER_MINOR 13
47#define VMWGFX_DRIVER_PATCHLEVEL 0 47#define VMWGFX_DRIVER_PATCHLEVEL 0
48#define VMWGFX_FILE_PAGE_OFFSET 0x00100000 48#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
49#define VMWGFX_FIFO_STATIC_SIZE (1024*1024) 49#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806b06bf..a1c68e6a689e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
368 return fifo_state->static_buffer; 368 return fifo_state->static_buffer;
369 else { 369 else {
370 fifo_state->dynamic_buffer = vmalloc(bytes); 370 fifo_state->dynamic_buffer = vmalloc(bytes);
371 if (!fifo_state->dynamic_buffer)
372 goto out_err;
371 return fifo_state->dynamic_buffer; 373 return fifo_state->dynamic_buffer;
372 } 374 }
373 } 375 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef9f3a2a4030..1d2db5d912b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
274} 274}
275 275
276 276
277
278/**
279 * vmw_du_cursor_plane_update() - Update cursor image and location
280 *
281 * @plane: plane object to update
282 * @crtc: owning CRTC of @plane
283 * @fb: framebuffer to flip onto plane
284 * @crtc_x: x offset of plane on crtc
285 * @crtc_y: y offset of plane on crtc
286 * @crtc_w: width of plane rectangle on crtc
287 * @crtc_h: height of plane rectangle on crtc
288 * @src_x: Not used
289 * @src_y: Not used
290 * @src_w: Not used
291 * @src_h: Not used
292 *
293 *
294 * RETURNS:
295 * Zero on success, error code on failure
296 */
297int vmw_du_cursor_plane_update(struct drm_plane *plane,
298 struct drm_crtc *crtc,
299 struct drm_framebuffer *fb,
300 int crtc_x, int crtc_y,
301 unsigned int crtc_w,
302 unsigned int crtc_h,
303 uint32_t src_x, uint32_t src_y,
304 uint32_t src_w, uint32_t src_h)
305{
306 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
307 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
308 struct vmw_surface *surface = NULL;
309 struct vmw_dma_buffer *dmabuf = NULL;
310 s32 hotspot_x, hotspot_y;
311 int ret;
312
313 hotspot_x = du->hotspot_x + fb->hot_x;
314 hotspot_y = du->hotspot_y + fb->hot_y;
315
316 /* A lot of the code assumes this */
317 if (crtc_w != 64 || crtc_h != 64) {
318 ret = -EINVAL;
319 goto out;
320 }
321
322 if (vmw_framebuffer_to_vfb(fb)->dmabuf)
323 dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
324 else
325 surface = vmw_framebuffer_to_vfbs(fb)->surface;
326
327 if (surface && !surface->snooper.image) {
328 DRM_ERROR("surface not suitable for cursor\n");
329 ret = -EINVAL;
330 goto out;
331 }
332
333 /* setup new image */
334 ret = 0;
335 if (surface) {
336 /* vmw_user_surface_lookup takes one reference */
337 du->cursor_surface = surface;
338
339 du->cursor_age = du->cursor_surface->snooper.age;
340
341 ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
342 64, 64, hotspot_x, hotspot_y);
343 } else if (dmabuf) {
344 /* vmw_user_surface_lookup takes one reference */
345 du->cursor_dmabuf = dmabuf;
346
347 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
348 hotspot_x, hotspot_y);
349 } else {
350 vmw_cursor_update_position(dev_priv, false, 0, 0);
351 goto out;
352 }
353
354 if (!ret) {
355 du->cursor_x = crtc_x + du->set_gui_x;
356 du->cursor_y = crtc_y + du->set_gui_y;
357
358 vmw_cursor_update_position(dev_priv, true,
359 du->cursor_x + hotspot_x,
360 du->cursor_y + hotspot_y);
361 }
362
363out:
364 return ret;
365}
366
367
368int vmw_du_cursor_plane_disable(struct drm_plane *plane)
369{
370 if (plane->fb) {
371 drm_framebuffer_unreference(plane->fb);
372 plane->fb = NULL;
373 }
374
375 return -EINVAL;
376}
377
378
379void vmw_du_cursor_plane_destroy(struct drm_plane *plane) 277void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
380{ 278{
381 vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); 279 vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -473,18 +371,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
473 371
474 372
475void 373void
476vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
477 struct drm_plane_state *old_state)
478{
479 struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
480 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
481
482 drm_atomic_set_fb_for_plane(plane->state, NULL);
483 vmw_cursor_update_position(dev_priv, false, 0, 0);
484}
485
486
487void
488vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, 374vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
489 struct drm_plane_state *old_state) 375 struct drm_plane_state *old_state)
490{ 376{
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1498 */ 1384 */
1499 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && 1385 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
1500 dmabuf && only_2d && 1386 dmabuf && only_2d &&
1387 mode_cmd->width > 64 && /* Don't create a proxy for cursor */
1501 dev_priv->active_display_unit == vmw_du_screen_target) { 1388 dev_priv->active_display_unit == vmw_du_screen_target) {
1502 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, 1389 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
1503 dmabuf, &surface); 1390 dmabuf, &surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 13f2f1d2818a..5f8d678ae675 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
256 u16 *r, u16 *g, u16 *b, 256 u16 *r, u16 *g, u16 *b,
257 uint32_t size, 257 uint32_t size,
258 struct drm_modeset_acquire_ctx *ctx); 258 struct drm_modeset_acquire_ctx *ctx);
259int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
260 uint32_t handle, uint32_t width, uint32_t height,
261 int32_t hot_x, int32_t hot_y);
262int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
263int vmw_du_connector_set_property(struct drm_connector *connector, 259int vmw_du_connector_set_property(struct drm_connector *connector,
264 struct drm_property *property, 260 struct drm_property *property,
265 uint64_t val); 261 uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
339/* Universal Plane Helpers */ 335/* Universal Plane Helpers */
340void vmw_du_primary_plane_destroy(struct drm_plane *plane); 336void vmw_du_primary_plane_destroy(struct drm_plane *plane);
341void vmw_du_cursor_plane_destroy(struct drm_plane *plane); 337void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
342int vmw_du_cursor_plane_disable(struct drm_plane *plane);
343int vmw_du_cursor_plane_update(struct drm_plane *plane,
344 struct drm_crtc *crtc,
345 struct drm_framebuffer *fb,
346 int crtc_x, int crtc_y,
347 unsigned int crtc_w,
348 unsigned int crtc_h,
349 uint32_t src_x, uint32_t src_y,
350 uint32_t src_w, uint32_t src_h);
351 338
352/* Atomic Helpers */ 339/* Atomic Helpers */
353int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, 340int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
356 struct drm_plane_state *state); 343 struct drm_plane_state *state);
357void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, 344void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
358 struct drm_plane_state *old_state); 345 struct drm_plane_state *old_state);
359void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
360 struct drm_plane_state *old_state);
361int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, 346int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
362 struct drm_plane_state *new_state); 347 struct drm_plane_state *new_state);
363void vmw_du_plane_cleanup_fb(struct drm_plane *plane, 348void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index bad31bdf09b6..50be1f034f9e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -56,6 +56,8 @@ enum stdu_content_type {
56 * @right: Right side of bounding box. 56 * @right: Right side of bounding box.
57 * @top: Top side of bounding box. 57 * @top: Top side of bounding box.
58 * @bottom: Bottom side of bounding box. 58 * @bottom: Bottom side of bounding box.
59 * @fb_left: Left side of the framebuffer/content bounding box
60 * @fb_top: Top of the framebuffer/content bounding box
59 * @buf: DMA buffer when DMA-ing between buffer and screen targets. 61 * @buf: DMA buffer when DMA-ing between buffer and screen targets.
60 * @sid: Surface ID when copying between surface and screen targets. 62 * @sid: Surface ID when copying between surface and screen targets.
61 */ 63 */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
63 struct vmw_kms_dirty base; 65 struct vmw_kms_dirty base;
64 SVGA3dTransferType transfer; 66 SVGA3dTransferType transfer;
65 s32 left, right, top, bottom; 67 s32 left, right, top, bottom;
68 s32 fb_left, fb_top;
66 u32 pitch; 69 u32 pitch;
67 union { 70 union {
68 struct vmw_dma_buffer *buf; 71 struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
647 * 650 *
648 * @dirty: The closure structure. 651 * @dirty: The closure structure.
649 * 652 *
650 * This function calculates the bounding box for all the incoming clips 653 * This function calculates the bounding box for all the incoming clips.
651 */ 654 */
652static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) 655static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
653{ 656{
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
656 659
657 dirty->num_hits = 1; 660 dirty->num_hits = 1;
658 661
659 /* Calculate bounding box */ 662 /* Calculate destination bounding box */
660 ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1); 663 ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
661 ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1); 664 ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
662 ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2); 665 ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
663 ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2); 666 ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
667
668 /*
669 * Calculate content bounding box. We only need the top-left
670 * coordinate because width and height will be the same as the
671 * destination bounding box above
672 */
673 ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
674 ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y);
664} 675}
665 676
666 677
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
697 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ 708 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
698 src_pitch = stdu->display_srf->base_size.width * stdu->cpp; 709 src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
699 src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used); 710 src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
700 src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp; 711 src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
701 712
702 dst_pitch = ddirty->pitch; 713 dst_pitch = ddirty->pitch;
703 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used); 714 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
704 dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp; 715 dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
705 716
706 717
707 /* Figure out the real direction */ 718 /* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
760 } 771 }
761 772
762out_cleanup: 773out_cleanup:
763 ddirty->left = ddirty->top = S32_MAX; 774 ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
764 ddirty->right = ddirty->bottom = S32_MIN; 775 ddirty->right = ddirty->bottom = S32_MIN;
765} 776}
766 777
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
812 SVGA3D_READ_HOST_VRAM; 823 SVGA3D_READ_HOST_VRAM;
813 ddirty.left = ddirty.top = S32_MAX; 824 ddirty.left = ddirty.top = S32_MAX;
814 ddirty.right = ddirty.bottom = S32_MIN; 825 ddirty.right = ddirty.bottom = S32_MIN;
826 ddirty.fb_left = ddirty.fb_top = S32_MAX;
815 ddirty.pitch = vfb->base.pitches[0]; 827 ddirty.pitch = vfb->base.pitches[0];
816 ddirty.buf = buf; 828 ddirty.buf = buf;
817 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; 829 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
1355 DRM_ERROR("Failed to bind surface to STDU.\n"); 1367 DRM_ERROR("Failed to bind surface to STDU.\n");
1356 else 1368 else
1357 crtc->primary->fb = plane->state->fb; 1369 crtc->primary->fb = plane->state->fb;
1370
1371 ret = vmw_stdu_update_st(dev_priv, stdu);
1372
1373 if (ret)
1374 DRM_ERROR("Failed to update STDU.\n");
1358} 1375}
1359 1376
1360 1377
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7681341fe32b..6b70bd259953 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1274 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1274 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1275 int ret; 1275 int ret;
1276 uint32_t size; 1276 uint32_t size;
1277 uint32_t backup_handle; 1277 uint32_t backup_handle = 0;
1278 1278
1279 if (req->multisample_count != 0) 1279 if (req->multisample_count != 0)
1280 return -EINVAL; 1280 return -EINVAL;
1281 1281
1282 if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
1283 return -EINVAL;
1284
1282 if (unlikely(vmw_user_surface_size == 0)) 1285 if (unlikely(vmw_user_surface_size == 0))
1283 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + 1286 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1284 128; 1287 128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1314 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1317 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
1315 &res->backup, 1318 &res->backup,
1316 &user_srf->backup_base); 1319 &user_srf->backup_base);
1317 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < 1320 if (ret == 0) {
1318 res->backup_size) { 1321 if (res->backup->base.num_pages * PAGE_SIZE <
1319 DRM_ERROR("Surface backup buffer is too small.\n"); 1322 res->backup_size) {
1320 vmw_dmabuf_unreference(&res->backup); 1323 DRM_ERROR("Surface backup buffer is too small.\n");
1321 ret = -EINVAL; 1324 vmw_dmabuf_unreference(&res->backup);
1322 goto out_unlock; 1325 ret = -EINVAL;
1326 goto out_unlock;
1327 } else {
1328 backup_handle = req->buffer_handle;
1329 }
1323 } 1330 }
1324 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) 1331 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
1325 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 1332 ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1491 dev_priv->stdu_max_height); 1498 dev_priv->stdu_max_height);
1492 1499
1493 if (size.width > max_width || size.height > max_height) { 1500 if (size.width > max_width || size.height > max_height) {
1494 DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u", 1501 DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
1495 size.width, size.height, 1502 size.width, size.height,
1496 max_width, max_height); 1503 max_width, max_height);
1497 return -EINVAL; 1504 return -EINVAL;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 16d556816b5f..2fb5f432a54c 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
725 spin_lock_irqsave(&ipu->lock, flags); 725 spin_lock_irqsave(&ipu->lock, flags);
726 726
727 val = ipu_cm_read(ipu, IPU_CONF); 727 val = ipu_cm_read(ipu, IPU_CONF);
728 if (vdi) { 728 if (vdi)
729 val |= IPU_CONF_IC_INPUT; 729 val |= IPU_CONF_IC_INPUT;
730 } else { 730 else
731 val &= ~IPU_CONF_IC_INPUT; 731 val &= ~IPU_CONF_IC_INPUT;
732 if (csi_id == 1) 732
733 val |= IPU_CONF_CSI_SEL; 733 if (csi_id == 1)
734 else 734 val |= IPU_CONF_CSI_SEL;
735 val &= ~IPU_CONF_CSI_SEL; 735 else
736 } 736 val &= ~IPU_CONF_CSI_SEL;
737
737 ipu_cm_write(ipu, val, IPU_CONF); 738 ipu_cm_write(ipu, val, IPU_CONF);
738 739
739 spin_unlock_irqrestore(&ipu->lock, flags); 740 spin_unlock_irqrestore(&ipu->lock, flags);
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c55563379e2e..c35f74c83065 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
131 if (pre->in_use) 131 if (pre->in_use)
132 return -EBUSY; 132 return -EBUSY;
133 133
134 clk_prepare_enable(pre->clk_axi);
135
136 /* first get the engine out of reset and remove clock gating */ 134 /* first get the engine out of reset and remove clock gating */
137 writel(0, pre->regs + IPU_PRE_CTRL); 135 writel(0, pre->regs + IPU_PRE_CTRL);
138 136
@@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre)
149 147
150void ipu_pre_put(struct ipu_pre *pre) 148void ipu_pre_put(struct ipu_pre *pre)
151{ 149{
152 u32 val; 150 writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
153
154 val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
155 writel(val, pre->regs + IPU_PRE_CTRL);
156
157 clk_disable_unprepare(pre->clk_axi);
158 151
159 pre->in_use = false; 152 pre->in_use = false;
160} 153}
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
249 if (!pre->buffer_virt) 242 if (!pre->buffer_virt)
250 return -ENOMEM; 243 return -ENOMEM;
251 244
245 clk_prepare_enable(pre->clk_axi);
246
252 pre->dev = dev; 247 pre->dev = dev;
253 platform_set_drvdata(pdev, pre); 248 platform_set_drvdata(pdev, pre);
254 mutex_lock(&ipu_pre_list_mutex); 249 mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
268 available_pres--; 263 available_pres--;
269 mutex_unlock(&ipu_pre_list_mutex); 264 mutex_unlock(&ipu_pre_list_mutex);
270 265
266 clk_disable_unprepare(pre->clk_axi);
267
271 if (pre->buffer_virt) 268 if (pre->buffer_virt)
272 gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt, 269 gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
273 IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4); 270 IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
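Editor's note: the ipu-pre change moves the AXI clock enable/disable from the get()/put() path into probe()/remove(), so the clock stays on for the lifetime of the device instead of being toggled per user. A sketch of that lifetime choice with placeholder names:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct clk *axi = devm_clk_get(&pdev->dev, "axi");

	if (IS_ERR(axi))
		return PTR_ERR(axi);

	platform_set_drvdata(pdev, axi);
	return clk_prepare_enable(axi);		/* held until remove() */
}

static int my_remove(struct platform_device *pdev)
{
	clk_disable_unprepare(platform_get_drvdata(pdev));
	return 0;
}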
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 21d38c8af21e..7f4f9c4150e3 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
143 iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); 143 iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
144} 144}
145 145
146static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) 146static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
147{ 147{
148 u32 channel_intr_status; 148 u32 channel_intr_status;
149 u32 intr_status; 149 u32 intr_status;
@@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
167 return IRQ_NONE; 167 return IRQ_NONE;
168} 168}
169 169
170static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) 170static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
171{ 171{
172 irqreturn_t retval = IRQ_NONE; 172 irqreturn_t retval = IRQ_NONE;
173 struct iproc_adc_priv *adc_priv; 173 struct iproc_adc_priv *adc_priv;
@@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
181 adc_priv = iio_priv(indio_dev); 181 adc_priv = iio_priv(indio_dev);
182 182
183 regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); 183 regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
184 dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n", 184 dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
185 intr_status); 185 intr_status);
186 186
187 intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; 187 intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
@@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
566 } 566 }
567 567
568 ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, 568 ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
569 iproc_adc_interrupt_thread,
570 iproc_adc_interrupt_handler, 569 iproc_adc_interrupt_handler,
570 iproc_adc_interrupt_thread,
571 IRQF_SHARED, "iproc-adc", indio_dev); 571 IRQF_SHARED, "iproc-adc", indio_dev);
572 if (ret) { 572 if (ret) {
573 dev_err(&pdev->dev, "request_irq error %d\n", ret); 573 dev_err(&pdev->dev, "request_irq error %d\n", ret);
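Note: the swap in iproc_adc_probe() matters because devm_request_threaded_irq() takes the hard-IRQ (top-half) handler as its third argument and the threaded handler as its fourth; the patch renames the two callbacks and reorders the arguments so each one runs in the context its name suggests. A minimal sketch of the intended split, with hypothetical names:

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t foo_hardirq(int irq, void *data)
{
	/* Hard-IRQ context: must not sleep; check/ack the source, then defer.
	 * A real shared handler would return IRQ_NONE if the line was not ours.
	 */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *data)
{
	/* Thread context: may sleep, e.g. for regmap or I2C access. */
	return IRQ_HANDLED;
}

static int foo_request_irq(struct device *dev, int irq, void *priv)
{
	return devm_request_threaded_irq(dev, irq,
					 foo_hardirq,	/* handler (top half) */
					 foo_thread,	/* thread_fn (bottom half) */
					 IRQF_SHARED, "foo", priv);
}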
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index ec82106480e1..b0526e4b9530 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -438,10 +438,10 @@ static ssize_t max9611_shunt_resistor_show(struct device *dev,
438 struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev)); 438 struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev));
439 unsigned int i, r; 439 unsigned int i, r;
440 440
441 i = max9611->shunt_resistor_uohm / 1000; 441 i = max9611->shunt_resistor_uohm / 1000000;
442 r = max9611->shunt_resistor_uohm % 1000; 442 r = max9611->shunt_resistor_uohm % 1000000;
443 443
444 return sprintf(buf, "%u.%03u\n", i, r); 444 return sprintf(buf, "%u.%06u\n", i, r);
445} 445}
446 446
447static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444, 447static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444,
@@ -536,8 +536,8 @@ static int max9611_probe(struct i2c_client *client,
536 int ret; 536 int ret;
537 537
538 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611)); 538 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611));
539 if (IS_ERR(indio_dev)) 539 if (!indio_dev)
540 return PTR_ERR(indio_dev); 540 return -ENOMEM;
541 541
542 i2c_set_clientdata(client, indio_dev); 542 i2c_set_clientdata(client, indio_dev);
543 543
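Note: two separate fixes in max9611.c. The shunt value is stored in micro-ohms, so whole ohms and the fractional part come from dividing by 1,000,000: a 5 mOhm shunt stored as 5000 uOhm now prints "0.005000" (5000 / 1000000 = 0, 5000 % 1000000 = 5000), whereas the old /1000 arithmetic showed "5.000". And devm_iio_device_alloc() signals failure by returning NULL rather than an ERR_PTR, so the !indio_dev / -ENOMEM check is the correct one.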
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index b23527309088..81d4c39e414a 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -105,6 +105,8 @@ struct sun4i_gpadc_iio {
105 bool no_irq; 105 bool no_irq;
106 /* prevents concurrent reads of temperature and ADC */ 106 /* prevents concurrent reads of temperature and ADC */
107 struct mutex mutex; 107 struct mutex mutex;
108 struct thermal_zone_device *tzd;
109 struct device *sensor_device;
108}; 110};
109 111
110#define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \ 112#define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \
@@ -502,7 +504,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
502{ 504{
503 struct sun4i_gpadc_iio *info = iio_priv(indio_dev); 505 struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
504 const struct of_device_id *of_dev; 506 const struct of_device_id *of_dev;
505 struct thermal_zone_device *tzd;
506 struct resource *mem; 507 struct resource *mem;
507 void __iomem *base; 508 void __iomem *base;
508 int ret; 509 int ret;
@@ -532,13 +533,14 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
532 if (!IS_ENABLED(CONFIG_THERMAL_OF)) 533 if (!IS_ENABLED(CONFIG_THERMAL_OF))
533 return 0; 534 return 0;
534 535
535 tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info, 536 info->sensor_device = &pdev->dev;
536 &sun4i_ts_tz_ops); 537 info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0,
537 if (IS_ERR(tzd)) 538 info, &sun4i_ts_tz_ops);
539 if (IS_ERR(info->tzd))
538 dev_err(&pdev->dev, "could not register thermal sensor: %ld\n", 540 dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
539 PTR_ERR(tzd)); 541 PTR_ERR(info->tzd));
540 542
541 return PTR_ERR_OR_ZERO(tzd); 543 return PTR_ERR_OR_ZERO(info->tzd);
542} 544}
543 545
544static int sun4i_gpadc_probe_mfd(struct platform_device *pdev, 546static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
@@ -584,15 +586,15 @@ static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
584 * of_node, and the device from this driver as third argument to 586 * of_node, and the device from this driver as third argument to
585 * return the temperature. 587 * return the temperature.
586 */ 588 */
587 struct thermal_zone_device *tzd; 589 info->sensor_device = pdev->dev.parent;
588 tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0, 590 info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
589 info, 591 0, info,
590 &sun4i_ts_tz_ops); 592 &sun4i_ts_tz_ops);
591 if (IS_ERR(tzd)) { 593 if (IS_ERR(info->tzd)) {
592 dev_err(&pdev->dev, 594 dev_err(&pdev->dev,
593 "could not register thermal sensor: %ld\n", 595 "could not register thermal sensor: %ld\n",
594 PTR_ERR(tzd)); 596 PTR_ERR(info->tzd));
595 return PTR_ERR(tzd); 597 return PTR_ERR(info->tzd);
596 } 598 }
597 } else { 599 } else {
598 indio_dev->num_channels = 600 indio_dev->num_channels =
@@ -688,7 +690,13 @@ static int sun4i_gpadc_remove(struct platform_device *pdev)
688 690
689 pm_runtime_put(&pdev->dev); 691 pm_runtime_put(&pdev->dev);
690 pm_runtime_disable(&pdev->dev); 692 pm_runtime_disable(&pdev->dev);
691 if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF)) 693
694 if (!IS_ENABLED(CONFIG_THERMAL_OF))
695 return 0;
696
697 thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd);
698
699 if (!info->no_irq)
692 iio_map_array_unregister(indio_dev); 700 iio_map_array_unregister(indio_dev);
693 701
694 return 0; 702 return 0;
@@ -700,6 +708,7 @@ static const struct platform_device_id sun4i_gpadc_id[] = {
700 { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data }, 708 { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data },
701 { /* sentinel */ }, 709 { /* sentinel */ },
702}; 710};
711MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id);
703 712
704static struct platform_driver sun4i_gpadc_driver = { 713static struct platform_driver sun4i_gpadc_driver = {
705 .driver = { 714 .driver = {
@@ -711,6 +720,7 @@ static struct platform_driver sun4i_gpadc_driver = {
711 .probe = sun4i_gpadc_probe, 720 .probe = sun4i_gpadc_probe,
712 .remove = sun4i_gpadc_remove, 721 .remove = sun4i_gpadc_remove,
713}; 722};
723MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id);
714 724
715module_platform_driver(sun4i_gpadc_driver); 725module_platform_driver(sun4i_gpadc_driver);
716 726
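Note: one reading of the sun4i-gpadc change is that the devm-managed registration was unsuitable here because, in the MFD case, the sensor is registered against pdev->dev.parent, tying the automatic cleanup to the parent's lifetime rather than this driver's; keeping both the thermal zone and the device it was registered against in the driver state allows an explicit unregister in remove(). A minimal sketch of that pairing, with hypothetical names:

#include <linux/err.h>
#include <linux/thermal.h>

struct foo_state {			/* hypothetical driver state */
	struct device *sensor_device;	/* device the zone was registered against */
	struct thermal_zone_device *tzd;
};

static int foo_register_sensor(struct foo_state *st, struct device *dev,
			       const struct thermal_zone_of_device_ops *ops)
{
	st->sensor_device = dev;
	st->tzd = thermal_zone_of_sensor_register(dev, 0, st, ops);
	return PTR_ERR_OR_ZERO(st->tzd);
}

static void foo_unregister_sensor(struct foo_state *st)
{
	/* Must be passed the same device that was used at registration time. */
	thermal_zone_of_sensor_unregister(st->sensor_device, st->tzd);
}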
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 4282ceca3d8f..6cbed7eb118a 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -614,7 +614,7 @@ static int tiadc_probe(struct platform_device *pdev)
614 return -EINVAL; 614 return -EINVAL;
615 } 615 }
616 616
617 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev)); 617 indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
618 if (indio_dev == NULL) { 618 if (indio_dev == NULL) {
619 dev_err(&pdev->dev, "failed to allocate iio device\n"); 619 dev_err(&pdev->dev, "failed to allocate iio device\n");
620 return -ENOMEM; 620 return -ENOMEM;
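Note: the size argument to devm_iio_device_alloc() is the amount of per-driver storage that iio_priv() later returns, so sizeof(*adc_dev) (the private struct) is what belongs there; sizeof(*indio_dev) reserved the size of the core struct instead, which need not match the driver's private data and can under-allocate it. A short sketch of the usual pairing, with a hypothetical private struct:

#include <linux/iio/iio.h>

struct foo_adc { int channels; };	/* hypothetical private data */

static struct iio_dev *foo_alloc(struct device *dev)
{
	struct iio_dev *indio_dev;
	struct foo_adc *adc;

	/* Size the allocation by the private struct iio_priv() hands back. */
	indio_dev = devm_iio_device_alloc(dev, sizeof(struct foo_adc));
	if (!indio_dev)
		return NULL;

	adc = iio_priv(indio_dev);
	adc->channels = 8;
	return indio_dev;
}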
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 978e1592c2a3..4061fed93f1f 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
451 return len; 451 return len;
452 452
453out_trigger_put: 453out_trigger_put:
454 iio_trigger_put(trig); 454 if (trig)
455 iio_trigger_put(trig);
455 return ret; 456 return ret;
456} 457}
457 458
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b30e0c1c6cc4..67838edd8b37 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
74static const struct reg_field reg_field_it = 74static const struct reg_field reg_field_it =
75 REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); 75 REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
76static const struct reg_field reg_field_als_intr = 76static const struct reg_field reg_field_als_intr =
77 REG_FIELD(LTR501_INTR, 0, 0);
78static const struct reg_field reg_field_ps_intr =
79 REG_FIELD(LTR501_INTR, 1, 1); 77 REG_FIELD(LTR501_INTR, 1, 1);
78static const struct reg_field reg_field_ps_intr =
79 REG_FIELD(LTR501_INTR, 0, 0);
80static const struct reg_field reg_field_als_rate = 80static const struct reg_field reg_field_als_rate =
81 REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); 81 REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
82static const struct reg_field reg_field_ps_rate = 82static const struct reg_field reg_field_ps_rate =
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index ddf9bee89f77..aa4df0dcc8c9 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -40,9 +40,9 @@
40#define AS3935_AFE_PWR_BIT BIT(0) 40#define AS3935_AFE_PWR_BIT BIT(0)
41 41
42#define AS3935_INT 0x03 42#define AS3935_INT 0x03
43#define AS3935_INT_MASK 0x07 43#define AS3935_INT_MASK 0x0f
44#define AS3935_EVENT_INT BIT(3) 44#define AS3935_EVENT_INT BIT(3)
45#define AS3935_NOISE_INT BIT(1) 45#define AS3935_NOISE_INT BIT(0)
46 46
47#define AS3935_DATA 0x07 47#define AS3935_DATA 0x07
48#define AS3935_DATA_MASK 0x3F 48#define AS3935_DATA_MASK 0x3F
@@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
215 215
216 st->buffer[0] = val & AS3935_DATA_MASK; 216 st->buffer[0] = val & AS3935_DATA_MASK;
217 iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, 217 iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
218 pf->timestamp); 218 iio_get_time_ns(indio_dev));
219err_read: 219err_read:
220 iio_trigger_notify_done(indio_dev->trig); 220 iio_trigger_notify_done(indio_dev->trig);
221 221
@@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work)
244 244
245 switch (val) { 245 switch (val) {
246 case AS3935_EVENT_INT: 246 case AS3935_EVENT_INT:
247 iio_trigger_poll(st->trig); 247 iio_trigger_poll_chained(st->trig);
248 break; 248 break;
249 case AS3935_NOISE_INT: 249 case AS3935_NOISE_INT:
250 dev_warn(&st->spi->dev, "noise level is too high\n"); 250 dev_warn(&st->spi->dev, "noise level is too high\n");
@@ -269,8 +269,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
269 269
270static void calibrate_as3935(struct as3935_state *st) 270static void calibrate_as3935(struct as3935_state *st)
271{ 271{
272 mutex_lock(&st->lock);
273
274 /* mask disturber interrupt bit */ 272 /* mask disturber interrupt bit */
275 as3935_write(st, AS3935_INT, BIT(5)); 273 as3935_write(st, AS3935_INT, BIT(5));
276 274
@@ -280,8 +278,6 @@ static void calibrate_as3935(struct as3935_state *st)
280 278
281 mdelay(2); 279 mdelay(2);
282 as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); 280 as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
283
284 mutex_unlock(&st->lock);
285} 281}
286 282
287#ifdef CONFIG_PM_SLEEP 283#ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@ static int as3935_resume(struct device *dev)
318 val &= ~AS3935_AFE_PWR_BIT; 314 val &= ~AS3935_AFE_PWR_BIT;
319 ret = as3935_write(st, AS3935_AFE_GAIN, val); 315 ret = as3935_write(st, AS3935_AFE_GAIN, val);
320 316
317 calibrate_as3935(st);
318
321err_resume: 319err_resume:
322 mutex_unlock(&st->lock); 320 mutex_unlock(&st->lock);
323 321
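Note: the as3935 hunks are related context fixes. The event work now dispatches the trigger with iio_trigger_poll_chained(), the variant meant for callers that can sleep, and the sample is stamped with iio_get_time_ns() because pf->timestamp is normally filled in only by the hard-IRQ half of the poll function; the mutex moves out of calibrate_as3935() because the new call site in resume already holds st->lock. A minimal sketch of the timestamping pattern, with a hypothetical buffer layout:

#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/interrupt.h>

static irqreturn_t foo_trigger_handler(int irq, void *private)
{
	struct iio_poll_func *pf = private;
	struct iio_dev *indio_dev = pf->indio_dev;
	/* one 32-bit channel plus room for the aligned 64-bit timestamp */
	u32 buffer[4] = { 0 };

	buffer[0] = 0x2a;	/* hypothetical sample value */
	iio_push_to_buffers_with_timestamp(indio_dev, buffer,
					   iio_get_time_ns(indio_dev));

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}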
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e73d968023f7..f1fa1f172107 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad 1118 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
1119 * Avatar AVIU-145A2 0x361f00 ? clickpad 1119 * Avatar AVIU-145A2 0x361f00 ? clickpad
1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1120 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
1121 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons 1122 * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
1122 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1123 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1124 * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
1123 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons 1125 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1124 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1126 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
1125 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons 1127 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
@@ -1525,6 +1527,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1525 }, 1527 },
1526 }, 1528 },
1527 { 1529 {
1530 /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
1531 .matches = {
1532 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1533 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
1534 },
1535 },
1536 {
1528 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ 1537 /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
1529 .matches = { 1538 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1539 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1546,6 +1555,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1546 }, 1555 },
1547 }, 1556 },
1548 { 1557 {
1558 /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
1559 .matches = {
1560 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1561 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
1562 },
1563 },
1564 {
1549 /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ 1565 /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
1550 .matches = { 1566 .matches = {
1551 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 1567 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 77dad045a468..ad71a5e768dc 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -146,7 +146,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
146 if (!serio) 146 if (!serio)
147 return -ENOMEM; 147 return -ENOMEM;
148 148
149 serio->id.type = SERIO_8042; 149 serio->id.type = SERIO_PS_PSTHRU;
150 serio->write = rmi_f03_pt_write; 150 serio->write = rmi_f03_pt_write;
151 serio->port_data = f03; 151 serio->port_data = f03;
152 152
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 9f44ee8ea1bc..19779b88a479 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -118,6 +118,7 @@ static const struct iommu_ops
118 118
119 ops = iommu_ops_from_fwnode(fwnode); 119 ops = iommu_ops_from_fwnode(fwnode);
120 if ((ops && !ops->of_xlate) || 120 if ((ops && !ops->of_xlate) ||
121 !of_device_is_available(iommu_spec->np) ||
121 (!ops && !of_iommu_driver_present(iommu_spec->np))) 122 (!ops && !of_iommu_driver_present(iommu_spec->np)))
122 return NULL; 123 return NULL;
123 124
@@ -236,6 +237,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
236 ops = ERR_PTR(err); 237 ops = ERR_PTR(err);
237 } 238 }
238 239
240 /* Ignore all other errors apart from EPROBE_DEFER */
241 if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
242 dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
243 ops = NULL;
244 }
245
239 return ops; 246 return ops;
240} 247}
241 248
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5fe5846..72a391e01011 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = {
142int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) 142int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
143{ 143{
144 struct irq_domain *root_domain = 144 struct irq_domain *root_domain =
145 irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 145 irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
146 &xtensa_mx_irq_domain_ops, 146 &xtensa_mx_irq_domain_ops,
147 &xtensa_mx_irq_chip); 147 &xtensa_mx_irq_chip);
148 irq_set_default_host(root_domain); 148 irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae1770964..f728755fa292 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = {
89int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) 89int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
90{ 90{
91 struct irq_domain *root_domain = 91 struct irq_domain *root_domain =
92 irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 92 irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
93 &xtensa_irq_domain_ops, &xtensa_irq_chip); 93 &xtensa_irq_domain_ops, &xtensa_irq_chip);
94 irq_set_default_host(root_domain); 94 irq_set_default_host(root_domain);
95 return 0; 95 return 0;
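Note: in irq_domain_add_legacy() the arguments after the node are size, first_irq and first_hwirq, so the new (NR_IRQS - 1, 1, 0) call in both xtensa irqchips maps hardware IRQ n to Linux IRQ n + 1: Linux IRQ 0, which the kernel treats as "no interrupt", is left unused, and the domain shrinks by one entry to match.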
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index d07dd5196ffc..8aa158a09180 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
2364 id); 2364 id);
2365 return NULL; 2365 return NULL;
2366 } else { 2366 } else {
2367 rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); 2367 rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
2368 if (!rs) 2368 if (!rs)
2369 return NULL; 2369 return NULL;
2370 rs->state = CCPResetIdle; 2370 rs->state = CCPResetIdle;
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 8b7faea2ddf8..422dced7c90a 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
75 if (sk->sk_state != MISDN_BOUND) 75 if (sk->sk_state != MISDN_BOUND)
76 continue; 76 continue;
77 if (!cskb) 77 if (!cskb)
78 cskb = skb_copy(skb, GFP_KERNEL); 78 cskb = skb_copy(skb, GFP_ATOMIC);
79 if (!cskb) { 79 if (!cskb) {
80 printk(KERN_WARNING "%s no skb\n", __func__); 80 printk(KERN_WARNING "%s no skb\n", __func__);
81 break; 81 break;
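Note: both ISDN hunks relax the allocation flags to GFP_ATOMIC, which never sleeps; that appears to be the point, since these paths can be reached from atomic context (send_socklist() runs with the socket list lock held, and the CCP reset-state allocation is reachable from the receive path), where a GFP_KERNEL allocation could schedule.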
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 212a6777ff31..87edc342ccb3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws)
5174 5174
5175static void no_op(struct percpu_ref *r) {} 5175static void no_op(struct percpu_ref *r) {}
5176 5176
5177int mddev_init_writes_pending(struct mddev *mddev)
5178{
5179 if (mddev->writes_pending.percpu_count_ptr)
5180 return 0;
5181 if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
5182 return -ENOMEM;
5183 /* We want to start with the refcount at zero */
5184 percpu_ref_put(&mddev->writes_pending);
5185 return 0;
5186}
5187EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5188
5177static int md_alloc(dev_t dev, char *name) 5189static int md_alloc(dev_t dev, char *name)
5178{ 5190{
5179 /* 5191 /*
@@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name)
5239 blk_queue_make_request(mddev->queue, md_make_request); 5251 blk_queue_make_request(mddev->queue, md_make_request);
5240 blk_set_stacking_limits(&mddev->queue->limits); 5252 blk_set_stacking_limits(&mddev->queue->limits);
5241 5253
5242 if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
5243 goto abort;
5244 /* We want to start with the refcount at zero */
5245 percpu_ref_put(&mddev->writes_pending);
5246 disk = alloc_disk(1 << shift); 5254 disk = alloc_disk(1 << shift);
5247 if (!disk) { 5255 if (!disk) {
5248 blk_cleanup_queue(mddev->queue); 5256 blk_cleanup_queue(mddev->queue);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 11f15146ce51..0fa1de42c42b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp);
648extern void md_wakeup_thread(struct md_thread *thread); 648extern void md_wakeup_thread(struct md_thread *thread);
649extern void md_check_recovery(struct mddev *mddev); 649extern void md_check_recovery(struct mddev *mddev);
650extern void md_reap_sync_thread(struct mddev *mddev); 650extern void md_reap_sync_thread(struct mddev *mddev);
651extern int mddev_init_writes_pending(struct mddev *mddev);
651extern void md_write_start(struct mddev *mddev, struct bio *bi); 652extern void md_write_start(struct mddev *mddev, struct bio *bi);
652extern void md_write_inc(struct mddev *mddev, struct bio *bi); 653extern void md_write_inc(struct mddev *mddev, struct bio *bi);
653extern void md_write_end(struct mddev *mddev); 654extern void md_write_end(struct mddev *mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index af5056d56878..e1a7e3d4c5e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3063,6 +3063,8 @@ static int raid1_run(struct mddev *mddev)
3063 mdname(mddev)); 3063 mdname(mddev));
3064 return -EIO; 3064 return -EIO;
3065 } 3065 }
3066 if (mddev_init_writes_pending(mddev) < 0)
3067 return -ENOMEM;
3066 /* 3068 /*
3067 * copy the already verified devices into our private RAID1 3069 * copy the already verified devices into our private RAID1
3068 * bookkeeping area. [whatever we allocate in run(), 3070 * bookkeeping area. [whatever we allocate in run(),
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 4343d7ff9916..797ed60abd5e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3611,6 +3611,9 @@ static int raid10_run(struct mddev *mddev)
3611 int first = 1; 3611 int first = 1;
3612 bool discard_supported = false; 3612 bool discard_supported = false;
3613 3613
3614 if (mddev_init_writes_pending(mddev) < 0)
3615 return -ENOMEM;
3616
3614 if (mddev->private == NULL) { 3617 if (mddev->private == NULL) {
3615 conf = setup_conf(mddev); 3618 conf = setup_conf(mddev);
3616 if (IS_ERR(conf)) 3619 if (IS_ERR(conf))
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 722064689e82..ec0f951ae19f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7118,6 +7118,9 @@ static int raid5_run(struct mddev *mddev)
7118 long long min_offset_diff = 0; 7118 long long min_offset_diff = 0;
7119 int first = 1; 7119 int first = 1;
7120 7120
7121 if (mddev_init_writes_pending(mddev) < 0)
7122 return -ENOMEM;
7123
7121 if (mddev->recovery_cp != MaxSector) 7124 if (mddev->recovery_cp != MaxSector)
7122 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", 7125 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
7123 mdname(mddev)); 7126 mdname(mddev));
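Note: the md changes move the writes_pending percpu refcount out of md_alloc() and into a helper that the RAID1/RAID10/RAID5 run() methods call, so the (potentially failing) percpu allocation is only done for personalities that actually track pending writes; the percpu_count_ptr check makes the helper safe to call on every run, and the initial percpu_ref_put() preserves the existing "start with the count at zero" behaviour.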
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index b72edd27f880..55d9c2b82b7e 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -2,6 +2,12 @@
2# Multimedia device configuration 2# Multimedia device configuration
3# 3#
4 4
5config CEC_CORE
6 tristate
7
8config CEC_NOTIFIER
9 bool
10
5menuconfig MEDIA_SUPPORT 11menuconfig MEDIA_SUPPORT
6 tristate "Multimedia support" 12 tristate "Multimedia support"
7 depends on HAS_IOMEM 13 depends on HAS_IOMEM
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 523fea3648ad..044503aa8801 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -4,8 +4,6 @@
4 4
5media-objs := media-device.o media-devnode.o media-entity.o 5media-objs := media-device.o media-devnode.o media-entity.o
6 6
7obj-$(CONFIG_CEC_CORE) += cec/
8
9# 7#
10# I2C drivers should come before other drivers, otherwise they'll fail 8# I2C drivers should come before other drivers, otherwise they'll fail
11# when compiled as builtin drivers 9# when compiled as builtin drivers
@@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE) += dvb-core/
26# There are both core and drivers at RC subtree - merge before drivers 24# There are both core and drivers at RC subtree - merge before drivers
27obj-y += rc/ 25obj-y += rc/
28 26
27obj-$(CONFIG_CEC_CORE) += cec/
28
29# 29#
30# Finally, merge the drivers that require the core 30# Finally, merge the drivers that require the core
31# 31#
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index f944d93e3167..4e25a950ae6f 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -1,19 +1,5 @@
1config CEC_CORE
2 tristate
3 depends on MEDIA_CEC_SUPPORT
4 default y
5
6config MEDIA_CEC_NOTIFIER
7 bool
8
9config MEDIA_CEC_RC 1config MEDIA_CEC_RC
10 bool "HDMI CEC RC integration" 2 bool "HDMI CEC RC integration"
11 depends on CEC_CORE && RC_CORE 3 depends on CEC_CORE && RC_CORE
12 ---help--- 4 ---help---
13 Pass on CEC remote control messages to the RC framework. 5 Pass on CEC remote control messages to the RC framework.
14
15config MEDIA_CEC_DEBUG
16 bool "HDMI CEC debugfs interface"
17 depends on CEC_CORE && DEBUG_FS
18 ---help---
19 Turns on the DebugFS interface for CEC devices.
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
index 402a6c62a3e8..eaf408e64669 100644
--- a/drivers/media/cec/Makefile
+++ b/drivers/media/cec/Makefile
@@ -1,6 +1,6 @@
1cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o 1cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
2 2
3ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y) 3ifeq ($(CONFIG_CEC_NOTIFIER),y)
4 cec-objs += cec-notifier.o 4 cec-objs += cec-notifier.o
5endif 5endif
6 6
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index f5fe01c9da8a..9dfc79800c71 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
1864 WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); 1864 WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
1865} 1865}
1866 1866
1867#ifdef CONFIG_MEDIA_CEC_DEBUG 1867#ifdef CONFIG_DEBUG_FS
1868/* 1868/*
1869 * Log the current state of the CEC adapter. 1869 * Log the current state of the CEC adapter.
1870 * Very useful for debugging. 1870 * Very useful for debugging.
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index f9ebff90f8eb..2f87748ba4fc 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
187 put_device(&devnode->dev); 187 put_device(&devnode->dev);
188} 188}
189 189
190#ifdef CONFIG_MEDIA_CEC_NOTIFIER 190#ifdef CONFIG_CEC_NOTIFIER
191static void cec_cec_notify(struct cec_adapter *adap, u16 pa) 191static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
192{ 192{
193 cec_s_phys_addr(adap, pa, false); 193 cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap,
323 } 323 }
324 324
325 dev_set_drvdata(&adap->devnode.dev, adap); 325 dev_set_drvdata(&adap->devnode.dev, adap);
326#ifdef CONFIG_MEDIA_CEC_DEBUG 326#ifdef CONFIG_DEBUG_FS
327 if (!top_cec_dir) 327 if (!top_cec_dir)
328 return 0; 328 return 0;
329 329
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
355 adap->rc = NULL; 355 adap->rc = NULL;
356#endif 356#endif
357 debugfs_remove_recursive(adap->cec_dir); 357 debugfs_remove_recursive(adap->cec_dir);
358#ifdef CONFIG_MEDIA_CEC_NOTIFIER 358#ifdef CONFIG_CEC_NOTIFIER
359 if (adap->notifier) 359 if (adap->notifier)
360 cec_notifier_unregister(adap->notifier); 360 cec_notifier_unregister(adap->notifier);
361#endif 361#endif
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void)
395 return ret; 395 return ret;
396 } 396 }
397 397
398#ifdef CONFIG_MEDIA_CEC_DEBUG 398#ifdef CONFIG_DEBUG_FS
399 top_cec_dir = debugfs_create_dir("cec", NULL); 399 top_cec_dir = debugfs_create_dir("cec", NULL);
400 if (IS_ERR_OR_NULL(top_cec_dir)) { 400 if (IS_ERR_OR_NULL(top_cec_dir)) {
401 pr_warn("cec: Failed to create debugfs cec dir\n"); 401 pr_warn("cec: Failed to create debugfs cec dir\n");
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index fd181c99ce11..aaa9471c7d11 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -220,7 +220,8 @@ config VIDEO_ADV7604
220 220
221config VIDEO_ADV7604_CEC 221config VIDEO_ADV7604_CEC
222 bool "Enable Analog Devices ADV7604 CEC support" 222 bool "Enable Analog Devices ADV7604 CEC support"
223 depends on VIDEO_ADV7604 && CEC_CORE 223 depends on VIDEO_ADV7604
224 select CEC_CORE
224 ---help--- 225 ---help---
225 When selected the adv7604 will support the optional 226 When selected the adv7604 will support the optional
226 HDMI CEC feature. 227 HDMI CEC feature.
@@ -240,7 +241,8 @@ config VIDEO_ADV7842
240 241
241config VIDEO_ADV7842_CEC 242config VIDEO_ADV7842_CEC
242 bool "Enable Analog Devices ADV7842 CEC support" 243 bool "Enable Analog Devices ADV7842 CEC support"
243 depends on VIDEO_ADV7842 && CEC_CORE 244 depends on VIDEO_ADV7842
245 select CEC_CORE
244 ---help--- 246 ---help---
245 When selected the adv7842 will support the optional 247 When selected the adv7842 will support the optional
246 HDMI CEC feature. 248 HDMI CEC feature.
@@ -478,7 +480,8 @@ config VIDEO_ADV7511
478 480
479config VIDEO_ADV7511_CEC 481config VIDEO_ADV7511_CEC
480 bool "Enable Analog Devices ADV7511 CEC support" 482 bool "Enable Analog Devices ADV7511 CEC support"
481 depends on VIDEO_ADV7511 && CEC_CORE 483 depends on VIDEO_ADV7511
484 select CEC_CORE
482 ---help--- 485 ---help---
483 When selected the adv7511 will support the optional 486 When selected the adv7511 will support the optional
484 HDMI CEC feature. 487 HDMI CEC feature.
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ac026ee1ca07..041cb80a26b1 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS
501 501
502config VIDEO_SAMSUNG_S5P_CEC 502config VIDEO_SAMSUNG_S5P_CEC
503 tristate "Samsung S5P CEC driver" 503 tristate "Samsung S5P CEC driver"
504 depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) 504 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
505 select MEDIA_CEC_NOTIFIER 505 select CEC_CORE
506 select CEC_NOTIFIER
506 ---help--- 507 ---help---
507 This is a driver for Samsung S5P HDMI CEC interface. It uses the 508 This is a driver for Samsung S5P HDMI CEC interface. It uses the
508 generic CEC framework interface. 509 generic CEC framework interface.
@@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC
511 512
512config VIDEO_STI_HDMI_CEC 513config VIDEO_STI_HDMI_CEC
513 tristate "STMicroelectronics STiH4xx HDMI CEC driver" 514 tristate "STMicroelectronics STiH4xx HDMI CEC driver"
514 depends on CEC_CORE && (ARCH_STI || COMPILE_TEST) 515 depends on ARCH_STI || COMPILE_TEST
515 select MEDIA_CEC_NOTIFIER 516 select CEC_CORE
517 select CEC_NOTIFIER
516 ---help--- 518 ---help---
517 This is a driver for STIH4xx HDMI CEC interface. It uses the 519 This is a driver for STIH4xx HDMI CEC interface. It uses the
518 generic CEC framework interface. 520 generic CEC framework interface.
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index b36ac19dc6e4..154de92dd809 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -26,7 +26,8 @@ config VIDEO_VIVID
26 26
27config VIDEO_VIVID_CEC 27config VIDEO_VIVID_CEC
28 bool "Enable CEC emulation support" 28 bool "Enable CEC emulation support"
29 depends on VIDEO_VIVID && CEC_CORE 29 depends on VIDEO_VIVID
30 select CEC_CORE
30 ---help--- 31 ---help---
31 When selected the vivid module will emulate the optional 32 When selected the vivid module will emulate the optional
32 HDMI CEC feature. 33 HDMI CEC feature.
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index 90f66dc7c0d7..a2fc1a1d58b0 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
211 */ 211 */
212void ir_raw_event_handle(struct rc_dev *dev) 212void ir_raw_event_handle(struct rc_dev *dev)
213{ 213{
214 if (!dev->raw) 214 if (!dev->raw || !dev->raw->thread)
215 return; 215 return;
216 216
217 wake_up_process(dev->raw->thread); 217 wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
490{ 490{
491 int rc; 491 int rc;
492 struct ir_raw_handler *handler; 492 struct ir_raw_handler *handler;
493 struct task_struct *thread;
493 494
494 if (!dev) 495 if (!dev)
495 return -EINVAL; 496 return -EINVAL;
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
507 * because the event is coming from userspace 508 * because the event is coming from userspace
508 */ 509 */
509 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { 510 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
510 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, 511 thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
511 "rc%u", dev->minor); 512 dev->minor);
512 513
513 if (IS_ERR(dev->raw->thread)) { 514 if (IS_ERR(thread)) {
514 rc = PTR_ERR(dev->raw->thread); 515 rc = PTR_ERR(thread);
515 goto out; 516 goto out;
516 } 517 }
518
519 dev->raw->thread = thread;
517 } 520 }
518 521
519 mutex_lock(&ir_raw_handler_lock); 522 mutex_lock(&ir_raw_handler_lock);
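Note: kthread_run() returns an ERR_PTR on failure, so the old code could leave an error value in dev->raw->thread. Keeping the result in a local and publishing it only after the IS_ERR() check, together with the added !dev->raw->thread test, ensures ir_raw_event_handle() never calls wake_up_process() on an invalid pointer.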
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig
index 8937f3986a01..18ead44824ba 100644
--- a/drivers/media/usb/pulse8-cec/Kconfig
+++ b/drivers/media/usb/pulse8-cec/Kconfig
@@ -1,6 +1,7 @@
1config USB_PULSE8_CEC 1config USB_PULSE8_CEC
2 tristate "Pulse Eight HDMI CEC" 2 tristate "Pulse Eight HDMI CEC"
3 depends on USB_ACM && CEC_CORE 3 depends on USB_ACM
4 select CEC_CORE
4 select SERIO 5 select SERIO
5 select SERIO_SERPORT 6 select SERIO_SERPORT
6 ---help--- 7 ---help---
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig
index 3eb86607efb8..030ef01b1ff0 100644
--- a/drivers/media/usb/rainshadow-cec/Kconfig
+++ b/drivers/media/usb/rainshadow-cec/Kconfig
@@ -1,6 +1,7 @@
1config USB_RAINSHADOW_CEC 1config USB_RAINSHADOW_CEC
2 tristate "RainShadow Tech HDMI CEC" 2 tristate "RainShadow Tech HDMI CEC"
3 depends on USB_ACM && CEC_CORE 3 depends on USB_ACM
4 select CEC_CORE
4 select SERIO 5 select SERIO
5 select SERIO_SERPORT 6 select SERIO_SERPORT
6 ---help--- 7 ---help---
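Note: the common thread in the media Kconfig changes above is that CEC_CORE and CEC_NOTIFIER become invisible helper symbols defined at the top of drivers/media/Kconfig, and every driver that needs them now uses "select" instead of "depends on": enabling, say, USB_PULSE8_CEC or VIDEO_VIVID_CEC pulls the CEC core in automatically rather than staying hidden until the user enables the core by hand. The old MEDIA_CEC_DEBUG option is dropped in favour of plain CONFIG_DEBUG_FS checks, and the cec/ subdirectory is now built after the core objects it depends on.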
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index 541ca543f71f..71bd68548c9c 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work)
119 119
120 while (true) { 120 while (true) {
121 unsigned long flags; 121 unsigned long flags;
122 bool exit_loop; 122 bool exit_loop = false;
123 char data; 123 char data;
124 124
125 spin_lock_irqsave(&rain->buf_lock, flags); 125 spin_lock_irqsave(&rain->buf_lock, flags);
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 35910f945bfa..99e644cda4d1 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
581 return of_platform_populate(np, NULL, NULL, dev); 581 return of_platform_populate(np, NULL, NULL, dev);
582} 582}
583 583
584static int atmel_ebi_resume(struct device *dev) 584static __maybe_unused int atmel_ebi_resume(struct device *dev)
585{ 585{
586 struct atmel_ebi *ebi = dev_get_drvdata(dev); 586 struct atmel_ebi *ebi = dev_get_drvdata(dev);
587 struct atmel_ebi_dev *ebid; 587 struct atmel_ebi_dev *ebid;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 17b433f1ce23..0761271d68c5 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
159 159
160 /* Do this outside the status_mutex to avoid a circular dependency with 160 /* Do this outside the status_mutex to avoid a circular dependency with
161 * the locking in cxl_mmap_fault() */ 161 * the locking in cxl_mmap_fault() */
162 if (copy_from_user(&work, uwork, 162 if (copy_from_user(&work, uwork, sizeof(work)))
163 sizeof(struct cxl_ioctl_start_work))) { 163 return -EFAULT;
164 rc = -EFAULT;
165 goto out;
166 }
167 164
168 mutex_lock(&ctx->status_mutex); 165 mutex_lock(&ctx->status_mutex);
169 if (ctx->status != OPENED) { 166 if (ctx->status != OPENED) {
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 871a2f09c718..8d6ea9712dbd 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -1302,13 +1302,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
1302 1302
1303void cxl_native_release_psl_err_irq(struct cxl *adapter) 1303void cxl_native_release_psl_err_irq(struct cxl *adapter)
1304{ 1304{
1305 if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) 1305 if (adapter->native->err_virq == 0 ||
1306 adapter->native->err_virq !=
1307 irq_find_mapping(NULL, adapter->native->err_hwirq))
1306 return; 1308 return;
1307 1309
1308 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); 1310 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
1309 cxl_unmap_irq(adapter->native->err_virq, adapter); 1311 cxl_unmap_irq(adapter->native->err_virq, adapter);
1310 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); 1312 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
1311 kfree(adapter->irq_name); 1313 kfree(adapter->irq_name);
1314 adapter->native->err_virq = 0;
1312} 1315}
1313 1316
1314int cxl_native_register_serr_irq(struct cxl_afu *afu) 1317int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
1346 1349
1347void cxl_native_release_serr_irq(struct cxl_afu *afu) 1350void cxl_native_release_serr_irq(struct cxl_afu *afu)
1348{ 1351{
1349 if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) 1352 if (afu->serr_virq == 0 ||
1353 afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
1350 return; 1354 return;
1351 1355
1352 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); 1356 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
1353 cxl_unmap_irq(afu->serr_virq, afu); 1357 cxl_unmap_irq(afu->serr_virq, afu);
1354 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 1358 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
1355 kfree(afu->err_irq_name); 1359 kfree(afu->err_irq_name);
1360 afu->serr_virq = 0;
1356} 1361}
1357 1362
1358int cxl_native_register_psl_irq(struct cxl_afu *afu) 1363int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
1375 1380
1376void cxl_native_release_psl_irq(struct cxl_afu *afu) 1381void cxl_native_release_psl_irq(struct cxl_afu *afu)
1377{ 1382{
1378 if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) 1383 if (afu->native->psl_virq == 0 ||
1384 afu->native->psl_virq !=
1385 irq_find_mapping(NULL, afu->native->psl_hwirq))
1379 return; 1386 return;
1380 1387
1381 cxl_unmap_irq(afu->native->psl_virq, afu); 1388 cxl_unmap_irq(afu->native->psl_virq, afu);
1382 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); 1389 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
1383 kfree(afu->psl_irq_name); 1390 kfree(afu->psl_irq_name);
1391 afu->native->psl_virq = 0;
1384} 1392}
1385 1393
1386static void recover_psl_err(struct cxl_afu *afu, u64 errstat) 1394static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
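Note: all three cxl release helpers follow the same pattern: bail out if the virq was never set up (or was already torn down) and clear it after unmapping, so a release called twice, or called for an adapter/AFU whose IRQ registration failed earlier, no longer unmaps or frees an interrupt it does not own.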
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index d1928fdd0f43..07aad8576334 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -763,8 +763,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
763{ 763{
764 struct mei_cl_device *cldev = to_mei_cl_device(dev); 764 struct mei_cl_device *cldev = to_mei_cl_device(dev);
765 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 765 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
766 u8 version = mei_me_cl_ver(cldev->me_cl);
766 767
767 return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); 768 return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
769 cldev->name, uuid, version);
768} 770}
769static DEVICE_ATTR_RO(modalias); 771static DEVICE_ATTR_RO(modalias);
770 772
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 96046bb12ca1..14c0be98e0a4 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
114 return -EOPNOTSUPP; 114 return -EOPNOTSUPP;
115} 115}
116 116
117int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, 117static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
118 int src_port, u16 data) 118 int src_dev, int src_port, u16 data)
119{ 119{
120 return -EOPNOTSUPP; 120 return -EOPNOTSUPP;
121} 121}
122 122
123int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) 123static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
124{ 124{
125 return -EOPNOTSUPP; 125 return -EOPNOTSUPP;
126} 126}
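Note: without "static inline", these fallback stubs in global2.h are ordinary external definitions, so every .c file including the header with the option disabled would emit its own copy and the link would fail with multiple-definition errors; marking them static inline matches the other stubs in the same block, such as mv88e6xxx_g2_set_eeprom16() above.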
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index b3bc87fe3764..0a98c369df20 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
324 struct xgbe_ring *ring, 324 struct xgbe_ring *ring,
325 struct xgbe_ring_data *rdata) 325 struct xgbe_ring_data *rdata)
326{ 326{
327 int order, ret; 327 int ret;
328 328
329 if (!ring->rx_hdr_pa.pages) { 329 if (!ring->rx_hdr_pa.pages) {
330 ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); 330 ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
333 } 333 }
334 334
335 if (!ring->rx_buf_pa.pages) { 335 if (!ring->rx_buf_pa.pages) {
336 order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
337 ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, 336 ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
338 order); 337 PAGE_ALLOC_COSTLY_ORDER);
339 if (ret) 338 if (ret)
340 return ret; 339 return ret;
341 } 340 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 099b374c1b17..5274501428e4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
2026 priv->num_rx_desc_words = params->num_rx_desc_words; 2026 priv->num_rx_desc_words = params->num_rx_desc_words;
2027 2027
2028 priv->irq0 = platform_get_irq(pdev, 0); 2028 priv->irq0 = platform_get_irq(pdev, 0);
2029 if (!priv->is_lite) 2029 if (!priv->is_lite) {
2030 priv->irq1 = platform_get_irq(pdev, 1); 2030 priv->irq1 = platform_get_irq(pdev, 1);
2031 priv->wol_irq = platform_get_irq(pdev, 2); 2031 priv->wol_irq = platform_get_irq(pdev, 2);
2032 } else {
2033 priv->wol_irq = platform_get_irq(pdev, 1);
2034 }
2032 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { 2035 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
2033 dev_err(&pdev->dev, "invalid interrupts\n"); 2036 dev_err(&pdev->dev, "invalid interrupts\n");
2034 ret = -EINVAL; 2037 ret = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index eccb3d1b6abb..5f49334dcad5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1926 } 1926 }
1927 1927
1928 /* select a non-FCoE queue */ 1928 /* select a non-FCoE queue */
1929 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1929 return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1930} 1930}
1931 1931
1932void bnx2x_set_num_queues(struct bnx2x *bp) 1932void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38a5c6764bb5..77ed2f628f9c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2196,10 +2196,14 @@ static int cxgb_up(struct adapter *adap)
2196 if (err) 2196 if (err)
2197 goto irq_err; 2197 goto irq_err;
2198 } 2198 }
2199
2200 mutex_lock(&uld_mutex);
2199 enable_rx(adap); 2201 enable_rx(adap);
2200 t4_sge_start(adap); 2202 t4_sge_start(adap);
2201 t4_intr_enable(adap); 2203 t4_intr_enable(adap);
2202 adap->flags |= FULL_INIT_DONE; 2204 adap->flags |= FULL_INIT_DONE;
2205 mutex_unlock(&uld_mutex);
2206
2203 notify_ulds(adap, CXGB4_STATE_UP); 2207 notify_ulds(adap, CXGB4_STATE_UP);
2204#if IS_ENABLED(CONFIG_IPV6) 2208#if IS_ENABLED(CONFIG_IPV6)
2205 update_clip(adap); 2209 update_clip(adap);
@@ -2771,6 +2775,9 @@ void t4_fatal_err(struct adapter *adap)
2771{ 2775{
2772 int port; 2776 int port;
2773 2777
2778 if (pci_channel_offline(adap->pdev))
2779 return;
2780
2774 /* Disable the SGE since ULDs are going to free resources that 2781 /* Disable the SGE since ULDs are going to free resources that
2775 * could be exposed to the adapter. RDMA MWs for example... 2782 * could be exposed to the adapter. RDMA MWs for example...
2776 */ 2783 */
@@ -3882,9 +3889,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3882 spin_lock(&adap->stats_lock); 3889 spin_lock(&adap->stats_lock);
3883 for_each_port(adap, i) { 3890 for_each_port(adap, i) {
3884 struct net_device *dev = adap->port[i]; 3891 struct net_device *dev = adap->port[i];
3885 3892 if (dev) {
3886 netif_device_detach(dev); 3893 netif_device_detach(dev);
3887 netif_carrier_off(dev); 3894 netif_carrier_off(dev);
3895 }
3888 } 3896 }
3889 spin_unlock(&adap->stats_lock); 3897 spin_unlock(&adap->stats_lock);
3890 disable_interrupts(adap); 3898 disable_interrupts(adap);
@@ -3963,12 +3971,13 @@ static void eeh_resume(struct pci_dev *pdev)
3963 rtnl_lock(); 3971 rtnl_lock();
3964 for_each_port(adap, i) { 3972 for_each_port(adap, i) {
3965 struct net_device *dev = adap->port[i]; 3973 struct net_device *dev = adap->port[i];
3966 3974 if (dev) {
3967 if (netif_running(dev)) { 3975 if (netif_running(dev)) {
3968 link_start(dev); 3976 link_start(dev);
3969 cxgb_set_rxmode(dev); 3977 cxgb_set_rxmode(dev);
3978 }
3979 netif_device_attach(dev);
3970 } 3980 }
3971 netif_device_attach(dev);
3972 } 3981 }
3973 rtnl_unlock(); 3982 rtnl_unlock();
3974} 3983}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index aded42b96f6d..3a34aa629f7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
4557 */ 4557 */
4558void t4_intr_disable(struct adapter *adapter) 4558void t4_intr_disable(struct adapter *adapter)
4559{ 4559{
4560 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A); 4560 u32 whoami, pf;
4561 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? 4561
4562 if (pci_channel_offline(adapter->pdev))
4563 return;
4564
4565 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4566 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4562 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); 4567 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4563 4568
4564 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); 4569 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
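Note: the pci_channel_offline() guards here and in t4_fatal_err() above keep the cxgb4 driver from touching adapter registers once EEH has isolated the device, where MMIO reads return all-ones and writes are discarded; the NULL-port checks in the EEH callbacks presumably cover adapters whose net_devices were never fully registered, and the new uld_mutex section in cxgb_up() makes the FULL_INIT_DONE transition atomic with respect to the upper-layer drivers.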
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 3549d3876278..f2d623a7aee0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -37,7 +37,7 @@
37 37
38#define T4FW_VERSION_MAJOR 0x01 38#define T4FW_VERSION_MAJOR 0x01
39#define T4FW_VERSION_MINOR 0x10 39#define T4FW_VERSION_MINOR 0x10
40#define T4FW_VERSION_MICRO 0x2B 40#define T4FW_VERSION_MICRO 0x2D
41#define T4FW_VERSION_BUILD 0x00 41#define T4FW_VERSION_BUILD 0x00
42 42
43#define T4FW_MIN_VERSION_MAJOR 0x01 43#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
46 46
47#define T5FW_VERSION_MAJOR 0x01 47#define T5FW_VERSION_MAJOR 0x01
48#define T5FW_VERSION_MINOR 0x10 48#define T5FW_VERSION_MINOR 0x10
49#define T5FW_VERSION_MICRO 0x2B 49#define T5FW_VERSION_MICRO 0x2D
50#define T5FW_VERSION_BUILD 0x00 50#define T5FW_VERSION_BUILD 0x00
51 51
52#define T5FW_MIN_VERSION_MAJOR 0x00 52#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
55 55
56#define T6FW_VERSION_MAJOR 0x01 56#define T6FW_VERSION_MAJOR 0x01
57#define T6FW_VERSION_MINOR 0x10 57#define T6FW_VERSION_MINOR 0x10
58#define T6FW_VERSION_MICRO 0x2B 58#define T6FW_VERSION_MICRO 0x2D
59#define T6FW_VERSION_BUILD 0x00 59#define T6FW_VERSION_BUILD 0x00
60 60
61#define T6FW_MIN_VERSION_MAJOR 0x00 61#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index e863ba74d005..8bb0db990c8f 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
739 if (ret) 739 if (ret)
740 return ret; 740 return ret;
741 741
742 napi_enable(&priv->napi);
743
742 ethoc_init_ring(priv, dev->mem_start); 744 ethoc_init_ring(priv, dev->mem_start);
743 ethoc_reset(priv); 745 ethoc_reset(priv);
744 746
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
754 priv->old_duplex = -1; 756 priv->old_duplex = -1;
755 757
756 phy_start(dev->phydev); 758 phy_start(dev->phydev);
757 napi_enable(&priv->napi);
758 759
759 if (netif_msg_ifup(priv)) { 760 if (netif_msg_ifup(priv)) {
760 dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", 761 dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 446c7b374ff5..a10de1e9c157 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
381{ 381{
382 const struct of_device_id *id = 382 const struct of_device_id *id =
383 of_match_device(fsl_pq_mdio_match, &pdev->dev); 383 of_match_device(fsl_pq_mdio_match, &pdev->dev);
384 const struct fsl_pq_mdio_data *data = id->data; 384 const struct fsl_pq_mdio_data *data;
385 struct device_node *np = pdev->dev.of_node; 385 struct device_node *np = pdev->dev.of_node;
386 struct resource res; 386 struct resource res;
387 struct device_node *tbi; 387 struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
389 struct mii_bus *new_bus; 389 struct mii_bus *new_bus;
390 int err; 390 int err;
391 391
392 if (!id) {
393 dev_err(&pdev->dev, "Failed to match device\n");
394 return -ENODEV;
395 }
396
397 data = id->data;
398
392 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); 399 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
393 400
394 new_bus = mdiobus_alloc_size(sizeof(*priv)); 401 new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f2d329dba99..a93757c255f7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -81,7 +81,7 @@
81static const char ibmvnic_driver_name[] = "ibmvnic"; 81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; 82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83 83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); 84MODULE_AUTHOR("Santiago Leon");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); 85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL"); 86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION); 87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d5c9c9e06ff5..150caf6ca2b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
295 **/ 295 **/
296void i40e_service_event_schedule(struct i40e_pf *pf) 296void i40e_service_event_schedule(struct i40e_pf *pf)
297{ 297{
298 if (!test_bit(__I40E_VSI_DOWN, pf->state) && 298 if (!test_bit(__I40E_DOWN, pf->state) &&
299 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 299 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
300 queue_work(i40e_wq, &pf->service_task); 300 queue_work(i40e_wq, &pf->service_task);
301} 301}
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
3611 * this is not a performance path and napi_schedule() 3611 * this is not a performance path and napi_schedule()
3612 * can deal with rescheduling. 3612 * can deal with rescheduling.
3613 */ 3613 */
3614 if (!test_bit(__I40E_VSI_DOWN, pf->state)) 3614 if (!test_bit(__I40E_DOWN, pf->state))
3615 napi_schedule_irqoff(&q_vector->napi); 3615 napi_schedule_irqoff(&q_vector->napi);
3616 } 3616 }
3617 3617
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
3687enable_intr: 3687enable_intr:
3688 /* re-enable interrupt causes */ 3688 /* re-enable interrupt causes */
3689 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3689 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3690 if (!test_bit(__I40E_VSI_DOWN, pf->state)) { 3690 if (!test_bit(__I40E_DOWN, pf->state)) {
3691 i40e_service_event_schedule(pf); 3691 i40e_service_event_schedule(pf);
3692 i40e_irq_dynamic_enable_icr0(pf, false); 3692 i40e_irq_dynamic_enable_icr0(pf, false);
3693 } 3693 }
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
6203{ 6203{
6204 6204
6205 /* if interface is down do nothing */ 6205 /* if interface is down do nothing */
6206 if (test_bit(__I40E_VSI_DOWN, pf->state)) 6206 if (test_bit(__I40E_DOWN, pf->state))
6207 return; 6207 return;
6208 6208
6209 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) 6209 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
6344 int i; 6344 int i;
6345 6345
6346 /* if interface is down do nothing */ 6346 /* if interface is down do nothing */
6347 if (test_bit(__I40E_VSI_DOWN, pf->state) || 6347 if (test_bit(__I40E_DOWN, pf->state) ||
6348 test_bit(__I40E_CONFIG_BUSY, pf->state)) 6348 test_bit(__I40E_CONFIG_BUSY, pf->state))
6349 return; 6349 return;
6350 6350
@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
6399 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 6399 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
6400 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); 6400 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
6401 } 6401 }
6402 if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { 6402 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
6403 reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); 6403 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
6404 clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); 6404 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
6405 } 6405 }
6406 6406
6407 /* If there's a recovery already waiting, it takes 6407 /* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
6415 6415
6416 /* If we're already down or resetting, just bail */ 6416 /* If we're already down or resetting, just bail */
6417 if (reset_flags && 6417 if (reset_flags &&
6418 !test_bit(__I40E_VSI_DOWN, pf->state) && 6418 !test_bit(__I40E_DOWN, pf->state) &&
6419 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { 6419 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
6420 rtnl_lock(); 6420 rtnl_lock();
6421 i40e_do_reset(pf, reset_flags, true); 6421 i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
7002 u32 val; 7002 u32 val;
7003 int v; 7003 int v;
7004 7004
7005 if (test_bit(__I40E_VSI_DOWN, pf->state)) 7005 if (test_bit(__I40E_DOWN, pf->state))
7006 goto clear_recovery; 7006 goto clear_recovery;
7007 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); 7007 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
7008 7008
@@ -9767,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
9767 return -ENODEV; 9767 return -ENODEV;
9768 } 9768 }
9769 if (vsi == pf->vsi[pf->lan_vsi] && 9769 if (vsi == pf->vsi[pf->lan_vsi] &&
9770 !test_bit(__I40E_VSI_DOWN, pf->state)) { 9770 !test_bit(__I40E_DOWN, pf->state)) {
9771 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9771 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
9772 return -ENODEV; 9772 return -ENODEV;
9773 } 9773 }
@@ -11003,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11003 } 11003 }
11004 pf->next_vsi = 0; 11004 pf->next_vsi = 0;
11005 pf->pdev = pdev; 11005 pf->pdev = pdev;
11006 set_bit(__I40E_VSI_DOWN, pf->state); 11006 set_bit(__I40E_DOWN, pf->state);
11007 11007
11008 hw = &pf->hw; 11008 hw = &pf->hw;
11009 hw->back = pf; 11009 hw->back = pf;
@@ -11293,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11293 * before setting up the misc vector or we get a race and the vector 11293 * before setting up the misc vector or we get a race and the vector
11294 * ends up disabled forever. 11294 * ends up disabled forever.
11295 */ 11295 */
11296 clear_bit(__I40E_VSI_DOWN, pf->state); 11296 clear_bit(__I40E_DOWN, pf->state);
11297 11297
11298 /* In case of MSIX we are going to setup the misc vector right here 11298 /* In case of MSIX we are going to setup the misc vector right here
11299 * to handle admin queue events etc. In case of legacy and MSI 11299 * to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11448 11448
11449 /* Unwind what we've done if something failed in the setup */ 11449 /* Unwind what we've done if something failed in the setup */
11450err_vsis: 11450err_vsis:
11451 set_bit(__I40E_VSI_DOWN, pf->state); 11451 set_bit(__I40E_DOWN, pf->state);
11452 i40e_clear_interrupt_scheme(pf); 11452 i40e_clear_interrupt_scheme(pf);
11453 kfree(pf->vsi); 11453 kfree(pf->vsi);
11454err_switch_setup: 11454err_switch_setup:
@@ -11500,7 +11500,7 @@ static void i40e_remove(struct pci_dev *pdev)
11500 11500
11501 /* no more scheduling of any task */ 11501 /* no more scheduling of any task */
11502 set_bit(__I40E_SUSPENDED, pf->state); 11502 set_bit(__I40E_SUSPENDED, pf->state);
11503 set_bit(__I40E_VSI_DOWN, pf->state); 11503 set_bit(__I40E_DOWN, pf->state);
11504 if (pf->service_timer.data) 11504 if (pf->service_timer.data)
11505 del_timer_sync(&pf->service_timer); 11505 del_timer_sync(&pf->service_timer);
11506 if (pf->service_task.func) 11506 if (pf->service_task.func)
@@ -11740,7 +11740,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
11740 struct i40e_hw *hw = &pf->hw; 11740 struct i40e_hw *hw = &pf->hw;
11741 11741
11742 set_bit(__I40E_SUSPENDED, pf->state); 11742 set_bit(__I40E_SUSPENDED, pf->state);
11743 set_bit(__I40E_VSI_DOWN, pf->state); 11743 set_bit(__I40E_DOWN, pf->state);
11744 rtnl_lock(); 11744 rtnl_lock();
11745 i40e_prep_for_reset(pf, true); 11745 i40e_prep_for_reset(pf, true);
11746 rtnl_unlock(); 11746 rtnl_unlock();
@@ -11789,7 +11789,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11789 int retval = 0; 11789 int retval = 0;
11790 11790
11791 set_bit(__I40E_SUSPENDED, pf->state); 11791 set_bit(__I40E_SUSPENDED, pf->state);
11792 set_bit(__I40E_VSI_DOWN, pf->state); 11792 set_bit(__I40E_DOWN, pf->state);
11793 11793
11794 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) 11794 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
11795 i40e_enable_mc_magic_wake(pf); 11795 i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11841,7 @@ static int i40e_resume(struct pci_dev *pdev)
11841 11841
11842 /* handling the reset will rebuild the device state */ 11842 /* handling the reset will rebuild the device state */
11843 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { 11843 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
11844 clear_bit(__I40E_VSI_DOWN, pf->state); 11844 clear_bit(__I40E_DOWN, pf->state);
11845 rtnl_lock(); 11845 rtnl_lock();
11846 i40e_reset_and_rebuild(pf, false, true); 11846 i40e_reset_and_rebuild(pf, false, true);
11847 rtnl_unlock(); 11847 rtnl_unlock();
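
The i40e hunks above move the pf->state checks back from __I40E_VSI_DOWN to the PF-level __I40E_DOWN bit; every site goes through the kernel's bitops (test_bit/set_bit/clear_bit) on the state bitmap. Below is a minimal userspace sketch of that bit-flag pattern, with non-atomic toy helpers standing in for <linux/bitops.h> and made-up bit names; it is an illustration, not driver code.

/* Toy, non-atomic stand-ins for the test_bit()/set_bit()/clear_bit() pattern
 * used on pf->state above; the real driver uses the atomic helpers from
 * <linux/bitops.h>. Bit names are illustrative only. */
#include <stdio.h>

enum toy_pf_state { TOY_DOWN, TOY_SUSPENDED, TOY_CONFIG_BUSY };

static unsigned long state;				/* one word is enough here */

static void toy_set_bit(int nr, unsigned long *p)	{ *p |=  (1UL << nr); }
static void toy_clear_bit(int nr, unsigned long *p)	{ *p &= ~(1UL << nr); }
static int  toy_test_bit(int nr, const unsigned long *p) { return (*p >> nr) & 1UL; }

int main(void)
{
	toy_set_bit(TOY_DOWN, &state);			/* probe: start marked DOWN */

	if (!toy_test_bit(TOY_DOWN, &state))		/* mirrors the guards above */
		printf("would schedule service task\n");

	toy_clear_bit(TOY_DOWN, &state);		/* open: clear DOWN, allow work */
	if (!toy_test_bit(TOY_DOWN, &state) &&
	    !toy_test_bit(TOY_CONFIG_BUSY, &state))
		printf("watchdog would run\n");

	return 0;
}
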
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 29321a6167a6..cd894f4023b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1854#if (PAGE_SIZE < 8192) 1854#if (PAGE_SIZE < 8192)
1855 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1855 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1856#else 1856#else
1857 unsigned int truesize = SKB_DATA_ALIGN(size); 1857 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1858 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1858#endif 1859#endif
1859 struct sk_buff *skb; 1860 struct sk_buff *skb;
1860 1861
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index dfe241a12ad0..12b02e530503 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1190#if (PAGE_SIZE < 8192) 1190#if (PAGE_SIZE < 8192)
1191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1192#else 1192#else
1193 unsigned int truesize = SKB_DATA_ALIGN(size); 1193 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1194 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1194#endif 1195#endif
1195 struct sk_buff *skb; 1196 struct sk_buff *skb;
1196 1197
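
The two truesize hunks above (PF and VF datapath) make the receive-buffer accounting cover the skb_shared_info tail and the driver headroom (I40E_SKB_PAD) instead of aligning the payload alone. The userspace model below reproduces that arithmetic; the cache-line size, pad, and shared-info size are assumptions chosen only to make the numbers concrete.

/* Rough model of the truesize fix: the buffer really holds pad + payload plus
 * a cache-aligned skb_shared_info, so both parts must be counted. */
#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SMP_CACHE_BYTES		64u		/* assumed cache-line size */
#define SKB_DATA_ALIGN(x)	ALIGN_UP((x), SMP_CACHE_BYTES)
#define I40E_SKB_PAD		192u		/* assumed headroom */
#define SHARED_INFO_SIZE	320u		/* assumed sizeof(struct skb_shared_info) */

int main(void)
{
	unsigned int size = 1500;		/* received frame length */
	unsigned int old_truesize = SKB_DATA_ALIGN(size);
	unsigned int new_truesize = SKB_DATA_ALIGN(SHARED_INFO_SIZE) +
				    SKB_DATA_ALIGN(I40E_SKB_PAD + size);

	/* the old accounting under-reports what the buffer actually occupies */
	printf("old truesize=%u, new truesize=%u\n", old_truesize, new_truesize);
	return 0;
}
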
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ae5fdc2df654..ffbcb27c05e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
1562 qpn = priv->drop_qp.qpn; 1562 qpn = priv->drop_qp.qpn;
1563 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { 1563 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
1564 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 1564 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
1565 if (qpn < priv->rss_map.base_qpn ||
1566 qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
1567 en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
1568 return -EINVAL;
1569 }
1570 } else { 1565 } else {
1571 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 1566 if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
1572 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", 1567 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1a670b681555..0710b3677464 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36 36
37#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
38#include <linux/mlx4/qp.h>
38#include <linux/export.h> 39#include <linux/export.h>
39 40
40#include "mlx4.h" 41#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
985 if (IS_ERR(mailbox)) 986 if (IS_ERR(mailbox))
986 return PTR_ERR(mailbox); 987 return PTR_ERR(mailbox);
987 988
989 if (!mlx4_qp_lookup(dev, rule->qpn)) {
990 mlx4_err_rule(dev, "QP doesn't exist\n", rule);
991 ret = -EINVAL;
992 goto out;
993 }
994
988 trans_rule_ctrl_to_hw(rule, mailbox->buf); 995 trans_rule_ctrl_to_hw(rule, mailbox->buf);
989 996
990 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 997 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
991 998
992 list_for_each_entry(cur, &rule->list, list) { 999 list_for_each_entry(cur, &rule->list, list) {
993 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 1000 ret = parse_trans_rule(dev, cur, mailbox->buf + size);
994 if (ret < 0) { 1001 if (ret < 0)
995 mlx4_free_cmd_mailbox(dev, mailbox); 1002 goto out;
996 return ret; 1003
997 }
998 size += ret; 1004 size += ret;
999 } 1005 }
1000 1006
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
1021 } 1027 }
1022 } 1028 }
1023 1029
1030out:
1024 mlx4_free_cmd_mailbox(dev, mailbox); 1031 mlx4_free_cmd_mailbox(dev, mailbox);
1025 1032
1026 return ret; 1033 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2d6abd4662b1..5a310d313e94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
384 __mlx4_qp_free_icm(dev, qpn); 384 __mlx4_qp_free_icm(dev, qpn);
385} 385}
386 386
387struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
388{
389 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
390 struct mlx4_qp *qp;
391
392 spin_lock(&qp_table->lock);
393
394 qp = __mlx4_qp_lookup(dev, qpn);
395
396 spin_unlock(&qp_table->lock);
397 return qp;
398}
399
387int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) 400int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
388{ 401{
389 struct mlx4_priv *priv = mlx4_priv(dev); 402 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
471 } 484 }
472 485
473 if (attr & MLX4_UPDATE_QP_QOS_VPORT) { 486 if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
487 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
488 mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
489 err = -EOPNOTSUPP;
490 goto out;
491 }
492
474 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; 493 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
475 cmd->qp_context.qos_vport = params->qos_vport; 494 cmd->qp_context.qos_vport = params->qos_vport;
476 } 495 }
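
The mlx4 hunks above add mlx4_qp_lookup(), a lookup taken under the QP table spinlock, and use it in mlx4_flow_attach() to reject steering rules that reference a QP that does not exist, replacing the cruder range check dropped from mlx4_en_flow_replace(). Below is a toy userspace analogue of that validate-before-attach pattern; the flat table and pthread mutex are stand-ins for the driver's radix tree and spinlock.

/* Minimal model of the new guard: look the QP number up under a lock and
 * refuse to attach a flow rule when it is missing. */
#include <pthread.h>
#include <stdio.h>
#include <errno.h>

#define TOY_TABLE_SIZE 8

struct toy_qp { unsigned int qpn; int valid; };

static struct toy_qp qp_table[TOY_TABLE_SIZE];
static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

static struct toy_qp *toy_qp_lookup(unsigned int qpn)
{
	struct toy_qp *qp = NULL;
	int i;

	pthread_mutex_lock(&qp_lock);
	for (i = 0; i < TOY_TABLE_SIZE; i++)
		if (qp_table[i].valid && qp_table[i].qpn == qpn)
			qp = &qp_table[i];
	pthread_mutex_unlock(&qp_lock);
	return qp;
}

static int toy_flow_attach(unsigned int qpn)
{
	if (!toy_qp_lookup(qpn))
		return -EINVAL;		/* mirrors the new "QP doesn't exist" path */
	return 0;			/* would build and send the rule here */
}

int main(void)
{
	qp_table[0] = (struct toy_qp){ .qpn = 0x41, .valid = 1 };
	printf("attach 0x41 -> %d\n", toy_flow_attach(0x41));
	printf("attach 0x99 -> %d\n", toy_flow_attach(0x99));
	return 0;
}
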
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 07516545474f..812783865205 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5255 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 5255 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5256} 5256}
5257 5257
5258static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5259 struct mlx4_vf_immed_vlan_work *work)
5260{
5261 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5262 ctx->qp_context.qos_vport = work->qos_vport;
5263}
5264
5258void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) 5265void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5259{ 5266{
5260 struct mlx4_vf_immed_vlan_work *work = 5267 struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5369 qp->sched_queue & 0xC7; 5376 qp->sched_queue & 0xC7;
5370 upd_context->qp_context.pri_path.sched_queue |= 5377 upd_context->qp_context.pri_path.sched_queue |=
5371 ((work->qos & 0x7) << 3); 5378 ((work->qos & 0x7) << 3);
5372 upd_context->qp_mask |= 5379
5373 cpu_to_be64(1ULL << 5380 if (dev->caps.flags2 &
5374 MLX4_UPD_QP_MASK_QOS_VPP); 5381 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5375 upd_context->qp_context.qos_vport = 5382 update_qos_vpp(upd_context, work);
5376 work->qos_vport;
5377 } 5383 }
5378 5384
5379 err = mlx4_cmd(dev, mailbox->dma, 5385 err = mlx4_cmd(dev, mailbox->dma,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index fe5546bb4153..af945edfee19 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -621,10 +621,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
621 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), 621 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
622 priv->irq_info[i].mask); 622 priv->irq_info[i].mask);
623 623
624#ifdef CONFIG_SMP 624 if (IS_ENABLED(CONFIG_SMP) &&
625 if (irq_set_affinity_hint(irq, priv->irq_info[i].mask)) 625 irq_set_affinity_hint(irq, priv->irq_info[i].mask))
626 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); 626 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
627#endif
628 627
629 return 0; 628 return 0;
630} 629}
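
The mlx5 hunk above replaces an #ifdef CONFIG_SMP block around irq_set_affinity_hint() with IS_ENABLED(CONFIG_SMP), so the call is always compiled and type-checked and the optimizer drops it when SMP is off. A userspace approximation of that idiom follows; the real IS_ENABLED() inspects CONFIG_* symbols, the version here is deliberately simplified.

/* Simplified stand-in for the IS_ENABLED() idiom: the guard is an ordinary
 * C expression, so the compiler still sees (and type-checks) the call even
 * when the "config option" is 0. */
#include <stdio.h>

#define CONFIG_SMP_ENABLED	1		/* pretend .config value */
#define IS_ENABLED(opt)		(opt)		/* real macro expands CONFIG_* */

static int set_affinity_hint(int irq)		/* stand-in for irq_set_affinity_hint() */
{
	return irq < 0 ? -1 : 0;
}

int main(void)
{
	int irq = 42;

	if (IS_ENABLED(CONFIG_SMP_ENABLED) && set_affinity_hint(irq))
		fprintf(stderr, "irq_set_affinity_hint failed, irq 0x%.4x\n", irq);
	else
		printf("affinity hint set for irq 0x%.4x\n", irq);
	return 0;
}
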
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 537d1236a4fe..715b3aaf83ac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
1730 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 1730 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
1731 break; 1731 break;
1732 default: 1732 default:
1733 DP_ERR(cdev, "Invalid protocol type = %d\n", type); 1733 DP_VERBOSE(cdev, QED_MSG_SP,
1734 "Invalid protocol type = %d\n", type);
1734 return; 1735 return;
1735 } 1736 }
1736} 1737}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7245b1072518..81312924df14 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
1824 u32 (*get_cap_size)(void *, int); 1824 u32 (*get_cap_size)(void *, int);
1825 void (*set_sys_info)(void *, int, u32); 1825 void (*set_sys_info)(void *, int, u32);
1826 void (*store_cap_mask)(void *, u32); 1826 void (*store_cap_mask)(void *, u32);
1827 bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
1828 bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
1827}; 1829};
1828 1830
1829extern struct qlcnic_nic_template qlcnic_vf_ops; 1831extern struct qlcnic_nic_template qlcnic_vf_ops;
1830 1832
1831static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) 1833static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
1832{ 1834{
1833 return adapter->ahw->extra_capability[0] & 1835 return adapter->ahw->extra_capability[0] &
1834 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; 1836 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
1835} 1837}
1836 1838
1837static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) 1839static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
1838{ 1840{
1839 return adapter->ahw->extra_capability[0] & 1841 return adapter->ahw->extra_capability[0] &
1840 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; 1842 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
1841} 1843}
1842 1844
1845static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
1846{
1847 return false;
1848}
1849
1850static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
1851{
1852 return false;
1853}
1854
1855static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
1856{
1857 return adapter->ahw->hw_ops->encap_rx_offload(adapter);
1858}
1859
1860static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
1861{
1862 return adapter->ahw->hw_ops->encap_tx_offload(adapter);
1863}
1864
1843static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) 1865static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1844{ 1866{
1845 return adapter->nic_ops->start_firmware(adapter); 1867 return adapter->nic_ops->start_firmware(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 4fb68797630e..f7080d0ab874 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
242 .get_cap_size = qlcnic_83xx_get_cap_size, 242 .get_cap_size = qlcnic_83xx_get_cap_size,
243 .set_sys_info = qlcnic_83xx_set_sys_info, 243 .set_sys_info = qlcnic_83xx_set_sys_info,
244 .store_cap_mask = qlcnic_83xx_store_cap_mask, 244 .store_cap_mask = qlcnic_83xx_store_cap_mask,
245 .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
246 .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
245}; 247};
246 248
247static struct qlcnic_nic_template qlcnic_83xx_ops = { 249static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 838cc0ceafd8..7848cf04b29a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
341 } 341 }
342 return -EIO; 342 return -EIO;
343 } 343 }
344 usleep_range(1000, 1500); 344 udelay(1200);
345 } 345 }
346 346
347 if (id_reg) 347 if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index b6628aaa6e4a..1b5f7d57b6f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
632 .get_cap_size = qlcnic_82xx_get_cap_size, 632 .get_cap_size = qlcnic_82xx_get_cap_size,
633 .set_sys_info = qlcnic_82xx_set_sys_info, 633 .set_sys_info = qlcnic_82xx_set_sys_info,
634 .store_cap_mask = qlcnic_82xx_store_cap_mask, 634 .store_cap_mask = qlcnic_82xx_store_cap_mask,
635 .encap_rx_offload = qlcnic_82xx_encap_rx_offload,
636 .encap_tx_offload = qlcnic_82xx_encap_tx_offload,
635}; 637};
636 638
637static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) 639static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 2f656f395f39..c58180f40844 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
77 .free_mac_list = qlcnic_sriov_vf_free_mac_list, 77 .free_mac_list = qlcnic_sriov_vf_free_mac_list,
78 .enable_sds_intr = qlcnic_83xx_enable_sds_intr, 78 .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
79 .disable_sds_intr = qlcnic_83xx_disable_sds_intr, 79 .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
80 .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
81 .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
80}; 82};
81 83
82static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { 84static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
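
The qlcnic hunks above stop assuming 83xx capability bits in the generic encap-offload helpers and instead dispatch through per-generation hw_ops callbacks, with the 82xx variants always reporting false. The compact userspace model below shows that ops-table dispatch; the structures and names are illustrative, not the driver's real layout.

/* Generic code queries offloads only through the ops table; each hardware
 * generation supplies its own answer. */
#include <stdio.h>
#include <stdbool.h>

struct toy_adapter;

struct toy_hw_ops {
	bool (*encap_rx_offload)(struct toy_adapter *a);
	bool (*encap_tx_offload)(struct toy_adapter *a);
};

struct toy_adapter {
	const struct toy_hw_ops *hw_ops;
	unsigned int extra_capability;		/* 83xx-style capability word */
};

#define TOY_CAPAB_ENCAP_RX 0x1
#define TOY_CAPAB_ENCAP_TX 0x2

static bool toy_83xx_rx(struct toy_adapter *a) { return a->extra_capability & TOY_CAPAB_ENCAP_RX; }
static bool toy_83xx_tx(struct toy_adapter *a) { return a->extra_capability & TOY_CAPAB_ENCAP_TX; }
static bool toy_82xx_rx(struct toy_adapter *a) { (void)a; return false; }	/* 82xx: never */
static bool toy_82xx_tx(struct toy_adapter *a) { (void)a; return false; }

static const struct toy_hw_ops ops_83xx = { toy_83xx_rx, toy_83xx_tx };
static const struct toy_hw_ops ops_82xx = { toy_82xx_rx, toy_82xx_tx };

int main(void)
{
	struct toy_adapter a82 = { .hw_ops = &ops_82xx, .extra_capability = 0x3 };
	struct toy_adapter a83 = { .hw_ops = &ops_83xx, .extra_capability = 0x3 };

	printf("82xx rx offload: %d\n", a82.hw_ops->encap_rx_offload(&a82));
	printf("83xx rx offload: %d\n", a83.hw_ops->encap_rx_offload(&a83));
	return 0;
}
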
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index cc065ffbe4b5..bcd4708b3745 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
931 emac_mac_config(adpt); 931 emac_mac_config(adpt);
932 emac_mac_rx_descs_refill(adpt, &adpt->rx_q); 932 emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
933 933
934 adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 934 adpt->phydev->irq = PHY_POLL;
935 ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, 935 ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
936 PHY_INTERFACE_MODE_SGMII); 936 PHY_INTERFACE_MODE_SGMII);
937 if (ret) { 937 if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 441c19366489..18461fcb9815 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -13,15 +13,11 @@
13/* Qualcomm Technologies, Inc. EMAC PHY Controller driver. 13/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
14 */ 14 */
15 15
16#include <linux/module.h>
17#include <linux/of.h>
18#include <linux/of_net.h>
19#include <linux/of_mdio.h> 16#include <linux/of_mdio.h>
20#include <linux/phy.h> 17#include <linux/phy.h>
21#include <linux/iopoll.h> 18#include <linux/iopoll.h>
22#include <linux/acpi.h> 19#include <linux/acpi.h>
23#include "emac.h" 20#include "emac.h"
24#include "emac-mac.h"
25 21
26/* EMAC base register offsets */ 22/* EMAC base register offsets */
27#define EMAC_MDIO_CTRL 0x001414 23#define EMAC_MDIO_CTRL 0x001414
@@ -52,62 +48,10 @@
52 48
53#define MDIO_WAIT_TIMES 1000 49#define MDIO_WAIT_TIMES 1000
54 50
55#define EMAC_LINK_SPEED_DEFAULT (\
56 EMAC_LINK_SPEED_10_HALF |\
57 EMAC_LINK_SPEED_10_FULL |\
58 EMAC_LINK_SPEED_100_HALF |\
59 EMAC_LINK_SPEED_100_FULL |\
60 EMAC_LINK_SPEED_1GB_FULL)
61
62/**
63 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
64 * @adpt: the emac adapter
65 *
66 * The autopoll feature takes over the MDIO bus. In order for
67 * the PHY driver to be able to talk to the PHY over the MDIO
68 * bus, we need to temporarily disable the autopoll feature.
69 */
70static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
71{
72 u32 val;
73
74 /* disable autopoll */
75 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
76
77 /* wait for any mdio polling to complete */
78 if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
79 !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
80 return 0;
81
82 /* failed to disable; ensure it is enabled before returning */
83 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
84
85 return -EBUSY;
86}
87
88/**
89 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
90 * @adpt: the emac adapter
91 *
92 * The EMAC has the ability to poll the external PHY on the MDIO
93 * bus for link state changes. This eliminates the need for the
94 * driver to poll the phy. If if the link state does change,
95 * the EMAC issues an interrupt on behalf of the PHY.
96 */
97static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
98{
99 emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
100}
101
102static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 51static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
103{ 52{
104 struct emac_adapter *adpt = bus->priv; 53 struct emac_adapter *adpt = bus->priv;
105 u32 reg; 54 u32 reg;
106 int ret;
107
108 ret = emac_phy_mdio_autopoll_disable(adpt);
109 if (ret)
110 return ret;
111 55
112 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 56 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
113 (addr << PHY_ADDR_SHFT)); 57 (addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
122 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 66 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
123 !(reg & (MDIO_START | MDIO_BUSY)), 67 !(reg & (MDIO_START | MDIO_BUSY)),
124 100, MDIO_WAIT_TIMES * 100)) 68 100, MDIO_WAIT_TIMES * 100))
125 ret = -EIO; 69 return -EIO;
126 else
127 ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
128 70
129 emac_phy_mdio_autopoll_enable(adpt); 71 return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
130
131 return ret;
132} 72}
133 73
134static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) 74static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
135{ 75{
136 struct emac_adapter *adpt = bus->priv; 76 struct emac_adapter *adpt = bus->priv;
137 u32 reg; 77 u32 reg;
138 int ret;
139
140 ret = emac_phy_mdio_autopoll_disable(adpt);
141 if (ret)
142 return ret;
143 78
144 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 79 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
145 (addr << PHY_ADDR_SHFT)); 80 (addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
155 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 90 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
156 !(reg & (MDIO_START | MDIO_BUSY)), 100, 91 !(reg & (MDIO_START | MDIO_BUSY)), 100,
157 MDIO_WAIT_TIMES * 100)) 92 MDIO_WAIT_TIMES * 100))
158 ret = -EIO; 93 return -EIO;
159 94
160 emac_phy_mdio_autopoll_enable(adpt); 95 return 0;
161
162 return ret;
163} 96}
164 97
165/* Configure the MDIO bus and connect the external PHY */ 98/* Configure the MDIO bus and connect the external PHY */
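
With MDIO autopolling removed, the emac accessors above reduce to "kick the transaction, then wait for MDIO_START/MDIO_BUSY to clear via readl_poll_timeout()". The sketch below shows that poll-until-clear-or-timeout idiom in userspace; the fake register and usleep() stand in for an MMIO read and the kernel's delay helpers.

/* Poll a (simulated) register until the busy bits clear or a timeout expires. */
#include <stdio.h>
#include <unistd.h>

#define MDIO_BUSY	(1u << 0)
#define MDIO_START	(1u << 1)

static unsigned int fake_mdio_ctrl = MDIO_START | MDIO_BUSY;

static unsigned int read_reg(void)
{
	static int polls;
	if (++polls >= 3)			/* pretend hardware finishes after a few polls */
		fake_mdio_ctrl = 0;
	return fake_mdio_ctrl;
}

/* returns 0 when the condition was met, -1 on timeout */
static int poll_timeout(unsigned int *val, unsigned int delay_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	while (((*val = read_reg()) & (MDIO_START | MDIO_BUSY)) != 0) {
		if (waited >= timeout_us)
			return -1;
		usleep(delay_us);
		waited += delay_us;
	}
	return 0;
}

int main(void)
{
	unsigned int reg;

	if (poll_timeout(&reg, 100, 100 * 1000))
		fprintf(stderr, "MDIO access timed out\n");
	else
		printf("MDIO idle, ctrl=0x%x\n", reg);
	return 0;
}
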
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 28a8cdc36485..98a326faea29 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -50,19 +50,7 @@
50#define DMAR_DLY_CNT_DEF 15 50#define DMAR_DLY_CNT_DEF 15
51#define DMAW_DLY_CNT_DEF 4 51#define DMAW_DLY_CNT_DEF 4
52 52
53#define IMR_NORMAL_MASK (\ 53#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
54 ISR_ERROR |\
55 ISR_GPHY_LINK |\
56 ISR_TX_PKT |\
57 GPHY_WAKEUP_INT)
58
59#define IMR_EXTENDED_MASK (\
60 SW_MAN_INT |\
61 ISR_OVER |\
62 ISR_ERROR |\
63 ISR_GPHY_LINK |\
64 ISR_TX_PKT |\
65 GPHY_WAKEUP_INT)
66 54
67#define ISR_TX_PKT (\ 55#define ISR_TX_PKT (\
68 TX_PKT_INT |\ 56 TX_PKT_INT |\
@@ -70,10 +58,6 @@
70 TX_PKT_INT2 |\ 58 TX_PKT_INT2 |\
71 TX_PKT_INT3) 59 TX_PKT_INT3)
72 60
73#define ISR_GPHY_LINK (\
74 GPHY_LINK_UP_INT |\
75 GPHY_LINK_DOWN_INT)
76
77#define ISR_OVER (\ 61#define ISR_OVER (\
78 RFD0_UR_INT |\ 62 RFD0_UR_INT |\
79 RFD1_UR_INT |\ 63 RFD1_UR_INT |\
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
187 if (status & ISR_OVER) 171 if (status & ISR_OVER)
188 net_warn_ratelimited("warning: TX/RX overflow\n"); 172 net_warn_ratelimited("warning: TX/RX overflow\n");
189 173
190 /* link event */
191 if (status & ISR_GPHY_LINK)
192 phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
193
194exit: 174exit:
195 /* enable the interrupt */ 175 /* enable the interrupt */
196 writel(irq->mask, adpt->base + EMAC_INT_MASK); 176 writel(irq->mask, adpt->base + EMAC_INT_MASK);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 3cd7989c007d..784782da3a85 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
230 int ring_size; 230 int ring_size;
231 int i; 231 int i;
232 232
233 /* Free RX skb ringbuffer */
234 if (priv->rx_skb[q]) {
235 for (i = 0; i < priv->num_rx_ring[q]; i++)
236 dev_kfree_skb(priv->rx_skb[q][i]);
237 }
238 kfree(priv->rx_skb[q]);
239 priv->rx_skb[q] = NULL;
240
241 /* Free aligned TX buffers */
242 kfree(priv->tx_align[q]);
243 priv->tx_align[q] = NULL;
244
245 if (priv->rx_ring[q]) { 233 if (priv->rx_ring[q]) {
246 for (i = 0; i < priv->num_rx_ring[q]; i++) { 234 for (i = 0; i < priv->num_rx_ring[q]; i++) {
247 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; 235 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
270 priv->tx_ring[q] = NULL; 258 priv->tx_ring[q] = NULL;
271 } 259 }
272 260
261 /* Free RX skb ringbuffer */
262 if (priv->rx_skb[q]) {
263 for (i = 0; i < priv->num_rx_ring[q]; i++)
264 dev_kfree_skb(priv->rx_skb[q][i]);
265 }
266 kfree(priv->rx_skb[q]);
267 priv->rx_skb[q] = NULL;
268
269 /* Free aligned TX buffers */
270 kfree(priv->tx_align[q]);
271 priv->tx_align[q] = NULL;
272
273 /* Free TX skb ringbuffer. 273 /* Free TX skb ringbuffer.
274 * SKBs are freed by ravb_tx_free() call above. 274 * SKBs are freed by ravb_tx_free() call above.
275 */ 275 */
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 489ef146201e..6a9c954492f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -37,6 +37,7 @@
37#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) 37#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
38#define TSE_PCS_CONTROL_REG 0x00 38#define TSE_PCS_CONTROL_REG 0x00
39#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) 39#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
40#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
40#define TSE_PCS_IF_MODE_REG 0x28 41#define TSE_PCS_IF_MODE_REG 0x28
41#define TSE_PCS_LINK_TIMER_0_REG 0x24 42#define TSE_PCS_LINK_TIMER_0_REG 0x24
42#define TSE_PCS_LINK_TIMER_1_REG 0x26 43#define TSE_PCS_LINK_TIMER_1_REG 0x26
@@ -65,6 +66,7 @@
65#define TSE_PCS_SW_RESET_TIMEOUT 100 66#define TSE_PCS_SW_RESET_TIMEOUT 100
66#define TSE_PCS_USE_SGMII_AN_MASK BIT(1) 67#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
67#define TSE_PCS_USE_SGMII_ENA BIT(0) 68#define TSE_PCS_USE_SGMII_ENA BIT(0)
69#define TSE_PCS_IF_USE_SGMII 0x03
68 70
69#define SGMII_ADAPTER_CTRL_REG 0x00 71#define SGMII_ADAPTER_CTRL_REG 0x00
70#define SGMII_ADAPTER_DISABLE 0x0001 72#define SGMII_ADAPTER_DISABLE 0x0001
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
101{ 103{
102 int ret = 0; 104 int ret = 0;
103 105
104 writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); 106 writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
107
108 writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
105 109
106 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); 110 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
107 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); 111 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a74c481401c4..12236daf7bb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1208,7 +1208,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1208 u32 rx_count = priv->plat->rx_queues_to_use; 1208 u32 rx_count = priv->plat->rx_queues_to_use;
1209 unsigned int bfsize = 0; 1209 unsigned int bfsize = 0;
1210 int ret = -ENOMEM; 1210 int ret = -ENOMEM;
1211 u32 queue; 1211 int queue;
1212 int i; 1212 int i;
1213 1213
1214 if (priv->hw->mode->set_16kib_bfsize) 1214 if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2724,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2724 2724
2725 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 2725 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2726 0, 1, 2726 0, 1,
2727 (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), 2727 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2728 0, 0); 2728 0, 0);
2729 2729
2730 tmp_len -= TSO_MAX_BUFF_SIZE; 2730 tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2947,7 +2947,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2947 int i, csum_insertion = 0, is_jumbo = 0; 2947 int i, csum_insertion = 0, is_jumbo = 0;
2948 u32 queue = skb_get_queue_mapping(skb); 2948 u32 queue = skb_get_queue_mapping(skb);
2949 int nfrags = skb_shinfo(skb)->nr_frags; 2949 int nfrags = skb_shinfo(skb)->nr_frags;
2950 unsigned int entry, first_entry; 2950 int entry;
2951 unsigned int first_entry;
2951 struct dma_desc *desc, *first; 2952 struct dma_desc *desc, *first;
2952 struct stmmac_tx_queue *tx_q; 2953 struct stmmac_tx_queue *tx_q;
2953 unsigned int enh_desc; 2954 unsigned int enh_desc;
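
The stmmac TSO hunk above marks the last descriptor when the remaining length fits in one buffer (tmp_len <= TSO_MAX_BUFF_SIZE) rather than when the chunk happens to be smaller than the maximum, which missed payloads that are an exact multiple of the buffer size. A standalone model of that chunking loop, with an assumed buffer-size constant:

/* Chunk a TSO payload into descriptors and flag the last one. The old test
 * (buff_size < TSO_MAX_BUFF_SIZE) never fires when the payload is an exact
 * multiple of the buffer size; the new test on the remaining length does. */
#include <stdio.h>

#define TSO_MAX_BUFF_SIZE 16384			/* assumed per-descriptor limit */

static void build_descriptors(int total_len, int last_segment)
{
	int tmp_len = total_len;

	while (tmp_len > 0) {
		int buff_size = tmp_len > TSO_MAX_BUFF_SIZE ? TSO_MAX_BUFF_SIZE : tmp_len;
		int is_last = last_segment && (tmp_len <= TSO_MAX_BUFF_SIZE);

		printf("desc: size=%d last=%d\n", buff_size, is_last);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

int main(void)
{
	build_descriptors(2 * TSO_MAX_BUFF_SIZE, 1);	/* exact-multiple case */
	return 0;
}
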
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 959fd12d2e67..6ebb0f559a42 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
1133 1133
1134 /* make enough headroom for basic scenario */ 1134 /* make enough headroom for basic scenario */
1135 encap_len = GENEVE_BASE_HLEN + ETH_HLEN; 1135 encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
1136 if (ip_tunnel_info_af(info) == AF_INET) { 1136 if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
1137 encap_len += sizeof(struct iphdr); 1137 encap_len += sizeof(struct iphdr);
1138 dev->max_mtu -= sizeof(struct iphdr); 1138 dev->max_mtu -= sizeof(struct iphdr);
1139 } else { 1139 } else {
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 8c3633c1d078..97e3bc60c3e7 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
576 case HDLCDRVCTL_CALIBRATE: 576 case HDLCDRVCTL_CALIBRATE:
577 if(!capable(CAP_SYS_RAWIO)) 577 if(!capable(CAP_SYS_RAWIO))
578 return -EPERM; 578 return -EPERM;
579 if (s->par.bitrate <= 0)
580 return -EINVAL;
579 if (bi.data.calibrate > INT_MAX / s->par.bitrate) 581 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
580 return -EINVAL; 582 return -EINVAL;
581 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 583 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
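
The hdlcdrv hunk above adds a bitrate sanity check in front of the existing overflow guard: with bitrate <= 0 the expression INT_MAX / s->par.bitrate divides by zero (or accepts nonsense), so the ioctl now rejects it first. A standalone version of the two checks:

/* Reject a non-positive bitrate before using it as a divisor, then reject
 * calibrate values whose product would overflow int. */
#include <limits.h>
#include <stdio.h>
#include <errno.h>

static int set_calibrate(int calibrate, int bitrate, int *out)
{
	if (bitrate <= 0)
		return -EINVAL;			/* new check: no divide-by-zero */
	if (calibrate > INT_MAX / bitrate)
		return -EINVAL;			/* existing check: no overflow */
	*out = calibrate * bitrate / 16;
	return 0;
}

int main(void)
{
	int val;

	printf("ok:   %d\n", set_calibrate(10, 9600, &val) ? -1 : val);
	printf("zero: %d\n", set_calibrate(10, 0, &val));
	printf("huge: %d\n", set_calibrate(INT_MAX, 9600, &val));
	return 0;
}
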
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 9097e42bec2e..57297ba23987 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1127,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
1127 if (adv < 0) 1127 if (adv < 0)
1128 return adv; 1128 return adv;
1129 1129
1130 lpa &= adv;
1131
1132 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) 1130 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
1133 phydev->duplex = DUPLEX_FULL; 1131 phydev->duplex = DUPLEX_FULL;
1134 else 1132 else
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8e73f5f36e71..f99c21f78b63 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
658 return 0; 658 return 0;
659} 659}
660 660
661static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
662{
663 int rc;
664
665 /* Some devices have extra OF data and an OF-style MODALIAS */
666 rc = of_device_uevent_modalias(dev, env);
667 if (rc != -ENODEV)
668 return rc;
669
670 return 0;
671}
672
661#ifdef CONFIG_PM 673#ifdef CONFIG_PM
662static int mdio_bus_suspend(struct device *dev) 674static int mdio_bus_suspend(struct device *dev)
663{ 675{
@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
708struct bus_type mdio_bus_type = { 720struct bus_type mdio_bus_type = {
709 .name = "mdio_bus", 721 .name = "mdio_bus",
710 .match = mdio_bus_match, 722 .match = mdio_bus_match,
723 .uevent = mdio_uevent,
711 .pm = MDIO_BUS_PM_OPS, 724 .pm = MDIO_BUS_PM_OPS,
712}; 725};
713EXPORT_SYMBOL(mdio_bus_type); 726EXPORT_SYMBOL(mdio_bus_type);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 6a5fd18f062c..b9252b8d81ff 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,23 +268,12 @@ out:
268 return ret; 268 return ret;
269} 269}
270 270
271static int kszphy_config_init(struct phy_device *phydev) 271/* Some config bits need to be set again on resume, handle them here. */
272static int kszphy_config_reset(struct phy_device *phydev)
272{ 273{
273 struct kszphy_priv *priv = phydev->priv; 274 struct kszphy_priv *priv = phydev->priv;
274 const struct kszphy_type *type;
275 int ret; 275 int ret;
276 276
277 if (!priv)
278 return 0;
279
280 type = priv->type;
281
282 if (type->has_broadcast_disable)
283 kszphy_broadcast_disable(phydev);
284
285 if (type->has_nand_tree_disable)
286 kszphy_nand_tree_disable(phydev);
287
288 if (priv->rmii_ref_clk_sel) { 277 if (priv->rmii_ref_clk_sel) {
289 ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); 278 ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
290 if (ret) { 279 if (ret) {
@@ -295,11 +284,30 @@ static int kszphy_config_init(struct phy_device *phydev)
295 } 284 }
296 285
297 if (priv->led_mode >= 0) 286 if (priv->led_mode >= 0)
298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 287 kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
299 288
300 return 0; 289 return 0;
301} 290}
302 291
292static int kszphy_config_init(struct phy_device *phydev)
293{
294 struct kszphy_priv *priv = phydev->priv;
295 const struct kszphy_type *type;
296
297 if (!priv)
298 return 0;
299
300 type = priv->type;
301
302 if (type->has_broadcast_disable)
303 kszphy_broadcast_disable(phydev);
304
305 if (type->has_nand_tree_disable)
306 kszphy_nand_tree_disable(phydev);
307
308 return kszphy_config_reset(phydev);
309}
310
303static int ksz8041_config_init(struct phy_device *phydev) 311static int ksz8041_config_init(struct phy_device *phydev)
304{ 312{
305 struct device_node *of_node = phydev->mdio.dev.of_node; 313 struct device_node *of_node = phydev->mdio.dev.of_node;
@@ -700,8 +708,14 @@ static int kszphy_suspend(struct phy_device *phydev)
700 708
701static int kszphy_resume(struct phy_device *phydev) 709static int kszphy_resume(struct phy_device *phydev)
702{ 710{
711 int ret;
712
703 genphy_resume(phydev); 713 genphy_resume(phydev);
704 714
715 ret = kszphy_config_reset(phydev);
716 if (ret)
717 return ret;
718
705 /* Enable PHY Interrupts */ 719 /* Enable PHY Interrupts */
706 if (phy_interrupt_is_valid(phydev)) { 720 if (phy_interrupt_is_valid(phydev)) {
707 phydev->interrupts = PHY_INTERRUPT_ENABLED; 721 phydev->interrupts = PHY_INTERRUPT_ENABLED;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 82ab8fb82587..7524caa0f29d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -241,7 +241,7 @@ static const struct phy_setting settings[] = {
241 * phy_lookup_setting - lookup a PHY setting 241 * phy_lookup_setting - lookup a PHY setting
242 * @speed: speed to match 242 * @speed: speed to match
243 * @duplex: duplex to match 243 * @duplex: duplex to match
244 * @feature: allowed link modes 244 * @features: allowed link modes
245 * @exact: an exact match is required 245 * @exact: an exact match is required
246 * 246 *
247 * Search the settings array for a setting that matches the speed and 247 * Search the settings array for a setting that matches the speed and
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3e9246cc49c3..a871f45ecc79 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
869 unsigned int len; 869 unsigned int len;
870 870
871 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 871 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
872 rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); 872 rq->min_buf_len, PAGE_SIZE - hdr_len);
873 return ALIGN(len, L1_CACHE_BYTES); 873 return ALIGN(len, L1_CACHE_BYTES);
874} 874}
875 875
@@ -2144,7 +2144,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
2144 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 2144 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2145 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 2145 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2146 2146
2147 return max(min_buf_len, hdr_len); 2147 return max(max(min_buf_len, hdr_len) - hdr_len,
2148 (unsigned int)GOOD_PACKET_LEN);
2148} 2149}
2149 2150
2150static int virtnet_find_vqs(struct virtnet_info *vi) 2151static int virtnet_find_vqs(struct virtnet_info *vi)
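
The virtio_net hunks above store the mergeable minimum buffer length with the header excluded and floored at a good default packet length, and adjust the clamp in get_mergeable_buf_len() to match. The arithmetic below walks through both expressions; the header size, MTU, ring size and floor are assumptions picked purely to make the numbers concrete, not the driver's actual values.

/* Worked example of the mergeable-buffer length computation after the fix. */
#include <stdio.h>

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define CLAMP(v, lo, hi)	((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

#define PAGE_SIZE		4096u
#define L1_CACHE_BYTES		64u
#define GOOD_PACKET_LEN		1500u		/* assumed floor, Ethernet-sized */

int main(void)
{
	unsigned int hdr_len = 12;		/* assumed virtio-net header size */
	unsigned int buf_len = hdr_len + 14 + 4 + 1500;	/* hdr + ETH + VLAN + MTU */
	unsigned int rq_size = 256;		/* descriptors in the receive queue */
	unsigned int div_up = (buf_len + rq_size - 1) / rq_size;

	/* new mergeable_min_buf_len(): header excluded, floored at GOOD_PACKET_LEN */
	unsigned int min_buf_len = div_up > hdr_len ? div_up - hdr_len : 0;
	if (min_buf_len < GOOD_PACKET_LEN)
		min_buf_len = GOOD_PACKET_LEN;

	/* new get_mergeable_buf_len(): clamp no longer subtracts hdr_len again */
	unsigned int avg_pkt_len = 800;		/* pretend EWMA estimate */
	unsigned int len = hdr_len + CLAMP(avg_pkt_len, min_buf_len, PAGE_SIZE - hdr_len);

	printf("min_buf_len=%u buffer len=%u\n", min_buf_len, ALIGN_UP(len, L1_CACHE_BYTES));
	return 0;
}
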
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 328b4712683c..a6b5052c1d36 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
59 59
60static int vxlan_sock_add(struct vxlan_dev *vxlan); 60static int vxlan_sock_add(struct vxlan_dev *vxlan);
61 61
62static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
63
62/* per-network namespace private data for this module */ 64/* per-network namespace private data for this module */
63struct vxlan_net { 65struct vxlan_net {
64 struct list_head vxlan_list; 66 struct list_head vxlan_list;
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
740 call_rcu(&f->rcu, vxlan_fdb_free); 742 call_rcu(&f->rcu, vxlan_fdb_free);
741} 743}
742 744
745static void vxlan_dst_free(struct rcu_head *head)
746{
747 struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
748
749 dst_cache_destroy(&rd->dst_cache);
750 kfree(rd);
751}
752
753static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
754 struct vxlan_rdst *rd)
755{
756 list_del_rcu(&rd->list);
757 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
758 call_rcu(&rd->rcu, vxlan_dst_free);
759}
760
743static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 761static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
744 union vxlan_addr *ip, __be16 *port, __be32 *src_vni, 762 union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
745 __be32 *vni, u32 *ifindex) 763 __be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
864 * otherwise destroy the fdb entry 882 * otherwise destroy the fdb entry
865 */ 883 */
866 if (rd && !list_is_singular(&f->remotes)) { 884 if (rd && !list_is_singular(&f->remotes)) {
867 list_del_rcu(&rd->list); 885 vxlan_fdb_dst_destroy(vxlan, f, rd);
868 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
869 kfree_rcu(rd, rcu);
870 goto out; 886 goto out;
871 } 887 }
872 888
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
1067 rcu_assign_pointer(vxlan->vn4_sock, NULL); 1083 rcu_assign_pointer(vxlan->vn4_sock, NULL);
1068 synchronize_net(); 1084 synchronize_net();
1069 1085
1086 vxlan_vs_del_dev(vxlan);
1087
1070 if (__vxlan_sock_release_prep(sock4)) { 1088 if (__vxlan_sock_release_prep(sock4)) {
1071 udp_tunnel_sock_release(sock4->sock); 1089 udp_tunnel_sock_release(sock4->sock);
1072 kfree(sock4); 1090 kfree(sock4);
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
2342 mod_timer(&vxlan->age_timer, next_timer); 2360 mod_timer(&vxlan->age_timer, next_timer);
2343} 2361}
2344 2362
2363static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
2364{
2365 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
2366
2367 spin_lock(&vn->sock_lock);
2368 hlist_del_init_rcu(&vxlan->hlist);
2369 spin_unlock(&vn->sock_lock);
2370}
2371
2345static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) 2372static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
2346{ 2373{
2347 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2374 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3286static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3313static void vxlan_dellink(struct net_device *dev, struct list_head *head)
3287{ 3314{
3288 struct vxlan_dev *vxlan = netdev_priv(dev); 3315 struct vxlan_dev *vxlan = netdev_priv(dev);
3289 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
3290 3316
3291 vxlan_flush(vxlan, true); 3317 vxlan_flush(vxlan, true);
3292 3318
3293 spin_lock(&vn->sock_lock);
3294 if (!hlist_unhashed(&vxlan->hlist))
3295 hlist_del_rcu(&vxlan->hlist);
3296 spin_unlock(&vn->sock_lock);
3297
3298 gro_cells_destroy(&vxlan->gro_cells); 3319 gro_cells_destroy(&vxlan->gro_cells);
3299 list_del(&vxlan->next); 3320 list_del(&vxlan->next);
3300 unregister_netdevice_queue(dev, head); 3321 unregister_netdevice_queue(dev, head);
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index d5e993dc9b23..517a315e259b 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
1271 qcom_smem_state_put(wcn->tx_enable_state); 1271 qcom_smem_state_put(wcn->tx_enable_state);
1272 qcom_smem_state_put(wcn->tx_rings_empty_state); 1272 qcom_smem_state_put(wcn->tx_rings_empty_state);
1273 1273
1274 rpmsg_destroy_ept(wcn->smd_channel);
1275
1274 iounmap(wcn->dxe_base); 1276 iounmap(wcn->dxe_base);
1275 iounmap(wcn->ccu_base); 1277 iounmap(wcn->ccu_base);
1276 1278
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fc64b8913aa6..e03450059b06 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
3422 /* otherwise, set txglomalign */ 3422 /* otherwise, set txglomalign */
3423 value = sdiodev->settings->bus.sdio.sd_sgentry_align; 3423 value = sdiodev->settings->bus.sdio.sd_sgentry_align;
3424 /* SDIO ADMA requires at least 32 bit alignment */ 3424 /* SDIO ADMA requires at least 32 bit alignment */
3425 value = max_t(u32, value, 4); 3425 value = max_t(u32, value, ALIGNMENT);
3426 err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, 3426 err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
3427 sizeof(u32)); 3427 sizeof(u32));
3428 } 3428 }
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 3b3e076571d6..45e2efc70d19 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -79,8 +79,8 @@
79/* Lowest firmware API version supported */ 79/* Lowest firmware API version supported */
80#define IWL7260_UCODE_API_MIN 17 80#define IWL7260_UCODE_API_MIN 17
81#define IWL7265_UCODE_API_MIN 17 81#define IWL7265_UCODE_API_MIN 17
82#define IWL7265D_UCODE_API_MIN 17 82#define IWL7265D_UCODE_API_MIN 22
83#define IWL3168_UCODE_API_MIN 20 83#define IWL3168_UCODE_API_MIN 22
84 84
85/* NVM versions */ 85/* NVM versions */
86#define IWL7260_NVM_VERSION 0x0a1d 86#define IWL7260_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index b9718c0cf174..89137717c1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -74,8 +74,8 @@
74#define IWL8265_UCODE_API_MAX 30 74#define IWL8265_UCODE_API_MAX 30
75 75
76/* Lowest firmware API version supported */ 76/* Lowest firmware API version supported */
77#define IWL8000_UCODE_API_MIN 17 77#define IWL8000_UCODE_API_MIN 22
78#define IWL8265_UCODE_API_MIN 20 78#define IWL8265_UCODE_API_MIN 22
79 79
80/* NVM versions */ 80/* NVM versions */
81#define IWL8000_NVM_VERSION 0x0a1d 81#define IWL8000_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 306bc967742e..77efbb78e867 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -370,6 +370,7 @@
370#define MON_DMARB_RD_DATA_ADDR (0xa03c5c) 370#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
371 371
372#define DBGC_IN_SAMPLE (0xa03c00) 372#define DBGC_IN_SAMPLE (0xa03c00)
373#define DBGC_OUT_CTRL (0xa03c0c)
373 374
374/* enable the ID buf for read */ 375/* enable the ID buf for read */
375#define WFPM_PS_CTL_CLR 0xA0300C 376#define WFPM_PS_CTL_CLR 0xA0300C
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
index 1b7d265ffb0a..a10c6aae9ab9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
@@ -307,6 +307,11 @@ enum {
307/* Bit 1-3: LQ command color. Used to match responses to LQ commands */ 307/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
308#define LQ_FLAG_COLOR_POS 1 308#define LQ_FLAG_COLOR_POS 1
309#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) 309#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS)
310#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\
311 LQ_FLAG_COLOR_POS)
312#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
313 LQ_FLAG_COLOR_MSK)
314#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
310 315
311/* Bit 4-5: Tx RTS BW Signalling 316/* Bit 4-5: Tx RTS BW Signalling
312 * (0) No RTS BW signalling 317 * (0) No RTS BW signalling
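
The fw-api-rs.h hunk above introduces a 3-bit "color" counter packed into bits 1-3 of the LQ command flags; rs.c later uses it to match tx responses to the LQ command that produced them. The program below copies the same three macros and shows the counter wrapping after eight increments.

/* The LQ color is a modulo-8 counter living in bits 1-3 of the flags word. */
#include <stdio.h>

#define LQ_FLAG_COLOR_POS	1
#define LQ_FLAG_COLOR_MSK	(7 << LQ_FLAG_COLOR_POS)
#define LQ_FLAG_COLOR_GET(_f)		(((_f) & LQ_FLAG_COLOR_MSK) >> LQ_FLAG_COLOR_POS)
#define LQ_FLAGS_COLOR_INC(_c)		((((_c) + 1) << LQ_FLAG_COLOR_POS) & LQ_FLAG_COLOR_MSK)
#define LQ_FLAG_COLOR_SET(_f, _c)	((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))

int main(void)
{
	unsigned int flags = 0;
	int i;

	for (i = 0; i < 10; i++) {
		unsigned int color = LQ_FLAG_COLOR_GET(flags);

		/* increment the color and fold it back into the flags word */
		flags = LQ_FLAG_COLOR_SET(flags, LQ_FLAGS_COLOR_INC(color));
		printf("step %d: color=%u flags=0x%x\n", i, LQ_FLAG_COLOR_GET(flags), flags);
	}
	return 0;
}
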
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 81b98915b1a4..1360ebfdc51b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -519,8 +519,11 @@ struct agg_tx_status {
519 * bit-7 invalid rate indication 519 * bit-7 invalid rate indication
520 */ 520 */
521#define TX_RES_INIT_RATE_INDEX_MSK 0x0f 521#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
522#define TX_RES_RATE_TABLE_COLOR_POS 4
522#define TX_RES_RATE_TABLE_COLOR_MSK 0x70 523#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
523#define TX_RES_INV_RATE_INDEX_MSK 0x80 524#define TX_RES_INV_RATE_INDEX_MSK 0x80
525#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
526 TX_RES_RATE_TABLE_COLOR_POS)
524 527
525#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) 528#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
526#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) 529#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 7b86a4f1b574..c8712e6eea74 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
1002 return 0; 1002 return 0;
1003} 1003}
1004 1004
1005static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
1006{
1007 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
1008 iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
1009 else
1010 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
1011}
1012
1013int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) 1005int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
1014{ 1006{
1015 u8 *ptr; 1007 u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
1023 /* EARLY START - firmware's configuration is hard coded */ 1015 /* EARLY START - firmware's configuration is hard coded */
1024 if ((!mvm->fw->dbg_conf_tlv[conf_id] || 1016 if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
1025 !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && 1017 !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
1026 conf_id == FW_DBG_START_FROM_ALIVE) { 1018 conf_id == FW_DBG_START_FROM_ALIVE)
1027 iwl_mvm_restart_early_start(mvm);
1028 return 0; 1019 return 0;
1029 }
1030 1020
1031 if (!mvm->fw->dbg_conf_tlv[conf_id]) 1021 if (!mvm->fw->dbg_conf_tlv[conf_id])
1032 return -EINVAL; 1022 return -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 0f1831b41915..fd2fc46e2fe5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
1040 struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; 1040 struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
1041 struct iwl_mac_beacon_cmd_v7 beacon_cmd; 1041 struct iwl_mac_beacon_cmd_v7 beacon_cmd;
1042 } u = {}; 1042 } u = {};
1043 struct iwl_mac_beacon_cmd beacon_cmd; 1043 struct iwl_mac_beacon_cmd beacon_cmd = {};
1044 struct ieee80211_tx_info *info; 1044 struct ieee80211_tx_info *info;
1045 u32 beacon_skb_len; 1045 u32 beacon_skb_len;
1046 u32 rate, tx_flags; 1046 u32 rate, tx_flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 4e74a6b90e70..52f8d7a6a7dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
1730 */ 1730 */
1731static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) 1731static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
1732{ 1732{
1733 u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
1734 IWL_MVM_CMD_QUEUE;
1735
1733 return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & 1736 return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
1734 ~BIT(IWL_MVM_CMD_QUEUE)); 1737 ~BIT(cmd_queue));
1735} 1738}
1736 1739
1737static inline 1740static inline
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1753 if (!iwl_mvm_has_new_tx_api(mvm)) 1756 if (!iwl_mvm_has_new_tx_api(mvm))
1754 iwl_free_fw_paging(mvm); 1757 iwl_free_fw_paging(mvm);
1755 mvm->ucode_loaded = false; 1758 mvm->ucode_loaded = false;
1759 mvm->fw_dbg_conf = FW_DBG_INVALID;
1756 iwl_trans_stop_device(mvm->trans); 1760 iwl_trans_stop_device(mvm->trans);
1757} 1761}
1758 1762
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 9ffff6ed8133..3da5ec40aaea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
1149 1149
1150 mutex_lock(&mvm->mutex); 1150 mutex_lock(&mvm->mutex);
1151 1151
1152 /* stop recording */
1153 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1152 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1153 /* stop recording */
1154 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); 1154 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
1155
1156 iwl_mvm_fw_error_dump(mvm);
1157
1158 /* start recording again if the firmware is not crashed */
1159 if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
1160 mvm->fw->dbg_dest_tlv)
1161 iwl_clear_bits_prph(mvm->trans,
1162 MON_BUFF_SAMPLE_CTL, 0x100);
1155 } else { 1163 } else {
1164 u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
1165 u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
1166
1167 /* stop recording */
1156 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); 1168 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
1157 /* wait before we collect the data till the DBGC stop */
1158 udelay(100); 1169 udelay(100);
1159 } 1170 iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
1171 /* wait before we collect the data till the DBGC stop */
1172 udelay(500);
1160 1173
1161 iwl_mvm_fw_error_dump(mvm); 1174 iwl_mvm_fw_error_dump(mvm);
1162 1175
1163 /* start recording again if the firmware is not crashed */ 1176 /* start recording again if the firmware is not crashed */
1164 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && 1177 if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
1165 mvm->fw->dbg_dest_tlv && 1178 mvm->fw->dbg_dest_tlv) {
1166 iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); 1179 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
1180 iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
1181 }
1182 }
1167 1183
1168 mutex_unlock(&mvm->mutex); 1184 mutex_unlock(&mvm->mutex);
1169 1185
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 7788eefcd2bd..aa785cf3cf68 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2,7 +2,7 @@
2 * 2 *
3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 Intel Deutschland GmbH 5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as 8 * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
1083 rs_get_lower_rate_in_column(lq_sta, rate); 1083 rs_get_lower_rate_in_column(lq_sta, rate);
1084} 1084}
1085 1085
1086/* Check if both rates are identical
1087 * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
1088 * with a rate indicating STBC/BFER and ANT_AB.
1089 */
1090static inline bool rs_rate_equal(struct rs_rate *a,
1091 struct rs_rate *b,
1092 bool allow_ant_mismatch)
1093
1094{
1095 bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
1096 (a->bfer == b->bfer);
1097
1098 if (allow_ant_mismatch) {
1099 if (a->stbc || a->bfer) {
1100 WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
1101 a->stbc, a->bfer, a->ant);
1102 ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
1103 } else if (b->stbc || b->bfer) {
1104 WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
1105 b->stbc, b->bfer, b->ant);
1106 ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
1107 }
1108 }
1109
1110 return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
1111 (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
1112}
1113
1114/* Check if both rates share the same column */ 1086/* Check if both rates share the same column */
1115static inline bool rs_rate_column_match(struct rs_rate *a, 1087static inline bool rs_rate_column_match(struct rs_rate *a,
1116 struct rs_rate *b) 1088 struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1182 u32 lq_hwrate; 1154 u32 lq_hwrate;
1183 struct rs_rate lq_rate, tx_resp_rate; 1155 struct rs_rate lq_rate, tx_resp_rate;
1184 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 1156 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
1185 u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; 1157 u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
1158 u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
1159 u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
1186 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; 1160 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
1187 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1161 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1188 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; 1162 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
1189 bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
1190 IWL_UCODE_TLV_API_LQ_SS_PARAMS);
1191 1163
1192 /* Treat uninitialized rate scaling data same as non-existing. */ 1164 /* Treat uninitialized rate scaling data same as non-existing. */
1193 if (!lq_sta) { 1165 if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1262 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); 1234 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
1263 1235
1264 /* Here we actually compare this rate to the latest LQ command */ 1236 /* Here we actually compare this rate to the latest LQ command */
1265 if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { 1237 if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
1266 IWL_DEBUG_RATE(mvm, 1238 IWL_DEBUG_RATE(mvm,
1267 "initial tx resp rate 0x%x does not match 0x%x\n", 1239 "tx resp color 0x%x does not match 0x%x\n",
1268 tx_resp_hwrate, lq_hwrate); 1240 lq_color, LQ_FLAG_COLOR_GET(table->flags));
1269 1241
1270 /* 1242 /*
1271 * Since rates mis-match, the last LQ command may have failed. 1243 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
3326 u8 valid_tx_ant = 0; 3298 u8 valid_tx_ant = 0;
3327 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; 3299 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
3328 bool toggle_ant = false; 3300 bool toggle_ant = false;
3301 u32 color;
3329 3302
3330 memcpy(&rate, initial_rate, sizeof(rate)); 3303 memcpy(&rate, initial_rate, sizeof(rate));
3331 3304
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
3380 num_rates, num_retries, valid_tx_ant, 3353 num_rates, num_retries, valid_tx_ant,
3381 toggle_ant); 3354 toggle_ant);
3382 3355
3356 /* update the color of the LQ command (as a counter at bits 1-3) */
3357 color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
3358 lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
3383} 3359}
3384 3360
3385struct rs_bfer_active_iter_data { 3361struct rs_bfer_active_iter_data {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index ee207f2c0a90..3abde1cb0303 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2017 Intel Deutschland GmbH
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License as 8 * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
357 } pers; 358 } pers;
358}; 359};
359 360
361/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
362 * Note: this is the iwlmvm <-> mac80211 interface.
363 * bits 0-7: reduced tx power
364 * bits 8-10: LQ command's color
365 */
366#define RS_DRV_DATA_TXP_MSK 0xff
367#define RS_DRV_DATA_LQ_COLOR_POS 8
368#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
369#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
370 RS_DRV_DATA_LQ_COLOR_POS)
371#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
372 (((uintptr_t)_p) |\
373 ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
374
360/* Initialize station's rate scaling information after adding station */ 375/* Initialize station's rate scaling information after adding station */
361void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 376void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
362 enum nl80211_band band, bool init); 377 enum nl80211_band band, bool init);
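The RS_DRV_DATA_* macros added above define how the reduced tx power (bits 0-7) and the LQ command's color (bits 8-10) share the single status_driver_data[0] pointer slot. A minimal sketch of that packing scheme, compiled as plain C outside the driver with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Same layout as the driver: bits 0-7 reduced tx power, bits 8-10 LQ color. */
#define RS_DRV_DATA_TXP_MSK		0xff
#define RS_DRV_DATA_LQ_COLOR_POS	8
#define RS_DRV_DATA_LQ_COLOR_MSK	(7 << RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_LQ_COLOR_GET(_f)	(((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >> \
					 RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_PACK(_c, _p)	((void *)(uintptr_t) \
					 (((uintptr_t)(_p)) | \
					  ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))

int main(void)
{
	uint8_t reduced_txp = 0x2a;	/* sample value for bits 0-7 */
	uint8_t lq_color = 5;		/* sample value for bits 8-10 */
	void *driver_data = RS_DRV_DATA_PACK(lq_color, reduced_txp);
	uint32_t packed = (uint32_t)(uintptr_t)driver_data;

	printf("txp=0x%x color=%u\n",
	       packed & RS_DRV_DATA_TXP_MSK,
	       RS_DRV_DATA_LQ_COLOR_GET(packed));
	return 0;
}

Packing is done on the tx.c side (and again in iwl_mvm_tx_reclaim for the BA path), while rs.c unpacks with RS_DRV_DATA_TXP_MSK and RS_DRV_DATA_LQ_COLOR_GET() in iwl_mvm_rs_tx_status(), as the hunks above show.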
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f5c786ddc526..614d67810d05 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2120 if (!iwl_mvm_is_dqa_supported(mvm)) 2120 if (!iwl_mvm_is_dqa_supported(mvm))
2121 return 0; 2121 return 0;
2122 2122
2123 if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) 2123 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2124 vif->type != NL80211_IFTYPE_ADHOC))
2124 return -ENOTSUPP; 2125 return -ENOTSUPP;
2125 2126
2126 /* 2127 /*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2155 mvmvif->cab_queue = queue; 2156 mvmvif->cab_queue = queue;
2156 } else if (!fw_has_api(&mvm->fw->ucode_capa, 2157 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2157 IWL_UCODE_TLV_API_STA_TYPE)) { 2158 IWL_UCODE_TLV_API_STA_TYPE)) {
2159 /*
2160 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2161 * invalid, so make sure we use the queue we want.
2162 * Note that this is done here as we want to avoid making DQA
2163 * changes in mac80211 layer.
2164 */
2165 if (vif->type == NL80211_IFTYPE_ADHOC) {
2166 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2167 mvmvif->cab_queue = vif->cab_queue;
2168 }
2158 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, 2169 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2159 &cfg, timeout); 2170 &cfg, timeout);
2160 } 2171 }
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3321 3332
3322 /* Get the station from the mvm local station table */ 3333 /* Get the station from the mvm local station table */
3323 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3334 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3324 if (!mvm_sta) { 3335 if (mvm_sta)
3325 IWL_ERR(mvm, "Failed to find station\n"); 3336 sta_id = mvm_sta->sta_id;
3326 return -EINVAL;
3327 }
3328 sta_id = mvm_sta->sta_id;
3329 3337
3330 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3338 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3331 keyconf->keyidx, sta_id); 3339 keyconf->keyidx, sta_id);
3332 3340
3333 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3341 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3334 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3342 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3335 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3343 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
3336 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3344 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3337 3345
3338 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { 3346 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 2716cb5483bf..ad62b67dceb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
313 * This is basically (last acked packet++). 313 * This is basically (last acked packet++).
314 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the 314 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
315 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 315 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
316 * @lq_color: the color of the LQ command as it appears in tx response.
316 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. 317 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
317 * @state: state of the BA agreement establishment / tear down. 318 * @state: state of the BA agreement establishment / tear down.
318 * @txq_id: Tx queue used by the BA session / DQA 319 * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
331 u16 next_reclaimed; 332 u16 next_reclaimed;
332 /* The rest is Tx AGG related */ 333 /* The rest is Tx AGG related */
333 u32 rate_n_flags; 334 u32 rate_n_flags;
335 u8 lq_color;
334 bool amsdu_in_ampdu_allowed; 336 bool amsdu_in_ampdu_allowed;
335 enum iwl_mvm_agg_state state; 337 enum iwl_mvm_agg_state state;
336 u16 txq_id; 338 u16 txq_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f9cbd197246f..506d58104e1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
790 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); 790 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
791 int ret; 791 int ret;
792 792
793 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
794 return -EIO;
795
796 mutex_lock(&mvm->mutex); 793 mutex_lock(&mvm->mutex);
797 794
795 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
796 ret = -EIO;
797 goto unlock;
798 }
799
798 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { 800 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
799 ret = -EINVAL; 801 ret = -EINVAL;
800 goto unlock; 802 goto unlock;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bcaceb64a6e8..f21901cd4a4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1323 struct iwl_mvm_sta *mvmsta; 1323 struct iwl_mvm_sta *mvmsta;
1324 struct sk_buff_head skbs; 1324 struct sk_buff_head skbs;
1325 u8 skb_freed = 0; 1325 u8 skb_freed = 0;
1326 u8 lq_color;
1326 u16 next_reclaimed, seq_ctl; 1327 u16 next_reclaimed, seq_ctl;
1327 bool is_ndp = false; 1328 bool is_ndp = false;
1328 1329
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1405 info->status.tx_time = 1406 info->status.tx_time =
1406 le16_to_cpu(tx_resp->wireless_media_time); 1407 le16_to_cpu(tx_resp->wireless_media_time);
1407 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); 1408 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
1409 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1408 info->status.status_driver_data[0] = 1410 info->status.status_driver_data[0] =
1409 (void *)(uintptr_t)tx_resp->reduced_tpc; 1411 RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1410 1412
1411 ieee80211_tx_status(mvm->hw, skb); 1413 ieee80211_tx_status(mvm->hw, skb);
1412 } 1414 }
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1638 le32_to_cpu(tx_resp->initial_rate); 1640 le32_to_cpu(tx_resp->initial_rate);
1639 mvmsta->tid_data[tid].tx_time = 1641 mvmsta->tid_data[tid].tx_time =
1640 le16_to_cpu(tx_resp->wireless_media_time); 1642 le16_to_cpu(tx_resp->wireless_media_time);
1643 mvmsta->tid_data[tid].lq_color =
1644 (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
1645 TX_RES_RATE_TABLE_COLOR_POS;
1641 } 1646 }
1642 1647
1643 rcu_read_unlock(); 1648 rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
1707 iwl_mvm_check_ratid_empty(mvm, sta, tid); 1712 iwl_mvm_check_ratid_empty(mvm, sta, tid);
1708 1713
1709 freed = 0; 1714 freed = 0;
1715
1716 /* pack the lq color from tid_data along with the reduced txp */
1717 ba_info->status.status_driver_data[0] =
1718 RS_DRV_DATA_PACK(tid_data->lq_color,
1719 ba_info->status.status_driver_data[0]);
1710 ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; 1720 ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
1711 1721
1712 skb_queue_walk(&reclaimed_skbs, skb) { 1722 skb_queue_walk(&reclaimed_skbs, skb) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 70acf850a9f1..93cbc7a69bcd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
2803#ifdef CONFIG_PM_SLEEP 2803#ifdef CONFIG_PM_SLEEP
2804static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 2804static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
2805{ 2805{
2806 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2806 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
2807 (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
2807 return iwl_pci_fw_enter_d0i3(trans); 2808 return iwl_pci_fw_enter_d0i3(trans);
2808 2809
2809 return 0; 2810 return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
2811 2812
2812static void iwl_trans_pcie_resume(struct iwl_trans *trans) 2813static void iwl_trans_pcie_resume(struct iwl_trans *trans)
2813{ 2814{
2814 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2815 if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
2816 (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
2815 iwl_pci_fw_exit_d0i3(trans); 2817 iwl_pci_fw_exit_d0i3(trans);
2816} 2818}
2817#endif /* CONFIG_PM_SLEEP */ 2819#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 9fb46a6f47cf..9c9bfbbabdf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
906 906
907 if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { 907 if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
908 ret = -EINVAL; 908 ret = -EINVAL;
909 goto error; 909 goto error_free_resp;
910 } 910 }
911 911
912 rsp = (void *)hcmd.resp_pkt->data; 912 rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
915 if (qid > ARRAY_SIZE(trans_pcie->txq)) { 915 if (qid > ARRAY_SIZE(trans_pcie->txq)) {
916 WARN_ONCE(1, "queue index %d unsupported", qid); 916 WARN_ONCE(1, "queue index %d unsupported", qid);
917 ret = -EIO; 917 ret = -EIO;
918 goto error; 918 goto error_free_resp;
919 } 919 }
920 920
921 if (test_and_set_bit(qid, trans_pcie->queue_used)) { 921 if (test_and_set_bit(qid, trans_pcie->queue_used)) {
922 WARN_ONCE(1, "queue %d already used", qid); 922 WARN_ONCE(1, "queue %d already used", qid);
923 ret = -EIO; 923 ret = -EIO;
924 goto error; 924 goto error_free_resp;
925 } 925 }
926 926
927 txq->id = qid; 927 txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
934 (txq->write_ptr) | (qid << 16)); 934 (txq->write_ptr) | (qid << 16));
935 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); 935 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
936 936
937 iwl_free_resp(&hcmd);
937 return qid; 938 return qid;
938 939
940error_free_resp:
941 iwl_free_resp(&hcmd);
939error: 942error:
940 iwl_pcie_gen2_txq_free_memory(trans, txq); 943 iwl_pcie_gen2_txq_free_memory(trans, txq);
941 return ret; 944 return ret;
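The tx-gen2.c hunk above adds a dedicated error_free_resp label so the host-command response is released on every exit path that actually acquired it, while the original error label keeps only the queue cleanup. A generic, self-contained sketch of that staged goto-cleanup pattern; the resources and helpers below are hypothetical stand-ins, not the driver's types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resources standing in for the txq memory and the command response. */
static int do_alloc_queue(void **q) { *q = malloc(64); return *q ? 0 : -1; }
static int do_send_cmd(void **resp) { *resp = malloc(32); return *resp ? 0 : -1; }
static int do_validate(void *resp)  { return resp ? 0 : -1; }

static int create_queue(void)
{
	void *queue, *resp;
	int ret;

	ret = do_alloc_queue(&queue);
	if (ret)
		return ret;

	ret = do_send_cmd(&resp);
	if (ret)
		goto error;		/* nothing beyond the queue to undo */

	ret = do_validate(resp);
	if (ret)
		goto error_free_resp;	/* response acquired: free it first */

	free(resp);			/* success path also releases the response */
	return 0;

error_free_resp:
	free(resp);
error:
	free(queue);
	return ret;
}

int main(void)
{
	printf("create_queue() -> %d\n", create_queue());
	return 0;
}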
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a60926410438..903d5813023a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
56static int nvme_char_major; 56static int nvme_char_major;
57module_param(nvme_char_major, int, 0); 57module_param(nvme_char_major, int, 0);
58 58
59static unsigned long default_ps_max_latency_us = 25000; 59static unsigned long default_ps_max_latency_us = 100000;
60module_param(default_ps_max_latency_us, ulong, 0644); 60module_param(default_ps_max_latency_us, ulong, 0644);
61MODULE_PARM_DESC(default_ps_max_latency_us, 61MODULE_PARM_DESC(default_ps_max_latency_us,
62 "max power saving latency for new devices; use PM QOS to change per device"); 62 "max power saving latency for new devices; use PM QOS to change per device");
@@ -1342,7 +1342,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1342 * transitioning between power states. Therefore, when running 1342 * transitioning between power states. Therefore, when running
1343 * in any given state, we will enter the next lower-power 1343 * in any given state, we will enter the next lower-power
1344 * non-operational state after waiting 50 * (enlat + exlat) 1344 * non-operational state after waiting 50 * (enlat + exlat)
1345 * microseconds, as long as that state's total latency is under 1345 * microseconds, as long as that state's exit latency is under
1346 * the requested maximum latency. 1346 * the requested maximum latency.
1347 * 1347 *
1348 * We will not autonomously enter any non-operational state for 1348 * We will not autonomously enter any non-operational state for
@@ -1387,7 +1387,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1387 * lowest-power state, not the number of states. 1387 * lowest-power state, not the number of states.
1388 */ 1388 */
1389 for (state = (int)ctrl->npss; state >= 0; state--) { 1389 for (state = (int)ctrl->npss; state >= 0; state--) {
1390 u64 total_latency_us, transition_ms; 1390 u64 total_latency_us, exit_latency_us, transition_ms;
1391 1391
1392 if (target) 1392 if (target)
1393 table->entries[state] = target; 1393 table->entries[state] = target;
@@ -1408,12 +1408,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
1408 NVME_PS_FLAGS_NON_OP_STATE)) 1408 NVME_PS_FLAGS_NON_OP_STATE))
1409 continue; 1409 continue;
1410 1410
1411 total_latency_us = 1411 exit_latency_us =
1412 (u64)le32_to_cpu(ctrl->psd[state].entry_lat) + 1412 (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
1413 + le32_to_cpu(ctrl->psd[state].exit_lat); 1413 if (exit_latency_us > ctrl->ps_max_latency_us)
1414 if (total_latency_us > ctrl->ps_max_latency_us)
1415 continue; 1414 continue;
1416 1415
1416 total_latency_us =
1417 exit_latency_us +
1418 le32_to_cpu(ctrl->psd[state].entry_lat);
1419
1417 /* 1420 /*
1418 * This state is good. Use it as the APST idle 1421 * This state is good. Use it as the APST idle
1419 * target for higher power states. 1422 * target for higher power states.
@@ -2438,6 +2441,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
2438 struct nvme_ns *ns; 2441 struct nvme_ns *ns;
2439 2442
2440 mutex_lock(&ctrl->namespaces_mutex); 2443 mutex_lock(&ctrl->namespaces_mutex);
2444
2445 /* Forcibly start all queues to avoid having stuck requests */
2446 blk_mq_start_hw_queues(ctrl->admin_q);
2447
2441 list_for_each_entry(ns, &ctrl->namespaces, list) { 2448 list_for_each_entry(ns, &ctrl->namespaces, list) {
2442 /* 2449 /*
2443 * Revalidating a dead namespace sets capacity to 0. This will 2450 * Revalidating a dead namespace sets capacity to 0. This will
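The nvme_configure_apst() hunk above changes the gate for autonomous power-state transitions: a state is now rejected when its exit latency alone exceeds ps_max_latency_us, and the summed entry+exit latency is only used to compute the idle time before the transition. A simplified, standalone sketch of just that gating logic; it picks a single state rather than building the full APST table the driver programs:

#include <stdint.h>
#include <stdio.h>

struct ps_state {
	uint32_t entry_lat_us;
	uint32_t exit_lat_us;
	int non_operational;
};

/*
 * Walk from the deepest state upward and accept the first non-operational
 * state whose *exit* latency stays under max_latency_us; entry+exit latency
 * only feeds the 50 * (enlat + exlat) idle-time calculation.
 */
static int pick_apst_state(const struct ps_state *psd, int npss,
			   uint64_t max_latency_us, uint64_t *transition_ms)
{
	for (int state = npss; state >= 0; state--) {
		uint64_t exit_latency_us, total_latency_us;

		if (!psd[state].non_operational)
			continue;

		exit_latency_us = psd[state].exit_lat_us;
		if (exit_latency_us > max_latency_us)
			continue;

		total_latency_us = exit_latency_us + psd[state].entry_lat_us;
		*transition_ms = total_latency_us * 50 / 1000;
		return state;
	}
	return -1;
}

int main(void)
{
	struct ps_state psd[] = {
		{ .entry_lat_us = 5,    .exit_lat_us = 5,     .non_operational = 0 },
		{ .entry_lat_us = 1000, .exit_lat_us = 2000,  .non_operational = 1 },
		{ .entry_lat_us = 5000, .exit_lat_us = 50000, .non_operational = 1 },
	};
	uint64_t ms = 0;
	int s = pick_apst_state(psd, 2, 25000, &ms);

	printf("chosen state %d, transition after %llu ms\n",
	       s, (unsigned long long)ms);
	return 0;
}

With the old total-latency check, state 1 in this example would also have been rejected once its entry latency pushed the sum over the limit, which is the behavior the hunk (together with the raised default_ps_max_latency_us) relaxes.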
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5b14cbefb724..92964cef0f4b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1139,6 +1139,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1139/* *********************** NVME Ctrl Routines **************************** */ 1139/* *********************** NVME Ctrl Routines **************************** */
1140 1140
1141static void __nvme_fc_final_op_cleanup(struct request *rq); 1141static void __nvme_fc_final_op_cleanup(struct request *rq);
1142static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1142 1143
1143static int 1144static int
1144nvme_fc_reinit_request(void *data, struct request *rq) 1145nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1265,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1265 struct nvme_command *sqe = &op->cmd_iu.sqe; 1266 struct nvme_command *sqe = &op->cmd_iu.sqe;
1266 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1267 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
1267 union nvme_result result; 1268 union nvme_result result;
1268 bool complete_rq; 1269 bool complete_rq, terminate_assoc = true;
1269 1270
1270 /* 1271 /*
1271 * WARNING: 1272 * WARNING:
@@ -1294,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1294 * fabricate a CQE, the following fields will not be set as they 1295 * fabricate a CQE, the following fields will not be set as they
1295 * are not referenced: 1296 * are not referenced:
1296 * cqe.sqid, cqe.sqhd, cqe.command_id 1297 * cqe.sqid, cqe.sqhd, cqe.command_id
1298 *
1299 * Failure or error of an individual i/o, in a transport
1300 * detected fashion unrelated to the nvme completion status,
1301 * can potentially cause the initiator and target sides to get out
1302 * of sync on SQ head/tail (aka outstanding io count allowed).
1303 * Per FC-NVME spec, failure of an individual command requires
1304 * the connection to be terminated, which in turn requires the
1305 * association to be terminated.
1297 */ 1306 */
1298 1307
1299 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, 1308 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1359,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1359 goto done; 1368 goto done;
1360 } 1369 }
1361 1370
1371 terminate_assoc = false;
1372
1362done: 1373done:
1363 if (op->flags & FCOP_FLAGS_AEN) { 1374 if (op->flags & FCOP_FLAGS_AEN) {
1364 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 1375 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1366,7 +1377,7 @@ done:
1366 atomic_set(&op->state, FCPOP_STATE_IDLE); 1377 atomic_set(&op->state, FCPOP_STATE_IDLE);
1367 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 1378 op->flags = FCOP_FLAGS_AEN; /* clear other flags */
1368 nvme_fc_ctrl_put(ctrl); 1379 nvme_fc_ctrl_put(ctrl);
1369 return; 1380 goto check_error;
1370 } 1381 }
1371 1382
1372 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); 1383 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
@@ -1379,6 +1390,10 @@ done:
1379 nvme_end_request(rq, status, result); 1390 nvme_end_request(rq, status, result);
1380 } else 1391 } else
1381 __nvme_fc_final_op_cleanup(rq); 1392 __nvme_fc_final_op_cleanup(rq);
1393
1394check_error:
1395 if (terminate_assoc)
1396 nvme_fc_error_recovery(ctrl, "transport detected io error");
1382} 1397}
1383 1398
1384static int 1399static int
@@ -2791,6 +2806,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2791 ctrl->ctrl.opts = NULL; 2806 ctrl->ctrl.opts = NULL;
2792 /* initiate nvme ctrl ref counting teardown */ 2807 /* initiate nvme ctrl ref counting teardown */
2793 nvme_uninit_ctrl(&ctrl->ctrl); 2808 nvme_uninit_ctrl(&ctrl->ctrl);
2809 nvme_put_ctrl(&ctrl->ctrl);
2794 2810
2795 /* as we're past the point where we transition to the ref 2811 /* as we're past the point where we transition to the ref
2796 * counting teardown path, if we return a bad pointer here, 2812 * counting teardown path, if we return a bad pointer here,
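The fc.c completion-path hunks above make association teardown the default outcome: terminate_assoc starts out true and is cleared only once the response has been fully validated, so any early exit falls through to nvme_fc_error_recovery(). A minimal sketch of that assume-failure flag pattern, with a hypothetical complete_one_io() standing in for nvme_fc_fcpio_done():

#include <stdbool.h>
#include <stdio.h>

/* transport_status != 0 stands in for a transport-detected problem */
static int complete_one_io(int transport_status)
{
	bool terminate_assoc = true;	/* assume the worst until proven otherwise */
	int status = -1;

	if (transport_status != 0)
		goto done;		/* early exit: the flag stays set */

	/* ... response parsed and found consistent ... */
	printf("io completed cleanly\n");
	status = 0;
	terminate_assoc = false;	/* the only place the flag is cleared */

done:
	if (terminate_assoc)
		printf("transport error -> tear down the association\n");
	return status;
}

int main(void)
{
	complete_one_io(0);
	complete_one_io(1);
	return 0;
}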
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d52701df7245..951042a375d6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1367,7 +1367,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1367 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1367 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1368 1368
1369 /* If there is a reset ongoing, we shouldn't reset again. */ 1369 /* If there is a reset ongoing, we shouldn't reset again. */
1370 if (work_busy(&dev->reset_work)) 1370 if (dev->ctrl.state == NVME_CTRL_RESETTING)
1371 return false; 1371 return false;
1372 1372
1373 /* We shouldn't reset unless the controller is on fatal error state 1373 /* We shouldn't reset unless the controller is on fatal error state
@@ -1903,7 +1903,7 @@ static void nvme_reset_work(struct work_struct *work)
1903 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 1903 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
1904 int result = -ENODEV; 1904 int result = -ENODEV;
1905 1905
1906 if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) 1906 if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
1907 goto out; 1907 goto out;
1908 1908
1909 /* 1909 /*
@@ -1913,9 +1913,6 @@ static void nvme_reset_work(struct work_struct *work)
1913 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 1913 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1914 nvme_dev_disable(dev, false); 1914 nvme_dev_disable(dev, false);
1915 1915
1916 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
1917 goto out;
1918
1919 result = nvme_pci_enable(dev); 1916 result = nvme_pci_enable(dev);
1920 if (result) 1917 if (result)
1921 goto out; 1918 goto out;
@@ -2009,8 +2006,8 @@ static int nvme_reset(struct nvme_dev *dev)
2009{ 2006{
2010 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 2007 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
2011 return -ENODEV; 2008 return -ENODEV;
2012 if (work_busy(&dev->reset_work)) 2009 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
2013 return -ENODEV; 2010 return -EBUSY;
2014 if (!queue_work(nvme_workq, &dev->reset_work)) 2011 if (!queue_work(nvme_workq, &dev->reset_work))
2015 return -EBUSY; 2012 return -EBUSY;
2016 return 0; 2013 return 0;
@@ -2136,6 +2133,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2136 if (result) 2133 if (result)
2137 goto release_pools; 2134 goto release_pools;
2138 2135
2136 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
2139 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2137 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2140 2138
2141 queue_work(nvme_workq, &dev->reset_work); 2139 queue_work(nvme_workq, &dev->reset_work);
@@ -2179,6 +2177,7 @@ static void nvme_remove(struct pci_dev *pdev)
2179 2177
2180 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2178 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2181 2179
2180 cancel_work_sync(&dev->reset_work);
2182 pci_set_drvdata(pdev, NULL); 2181 pci_set_drvdata(pdev, NULL);
2183 2182
2184 if (!pci_device_is_present(pdev)) { 2183 if (!pci_device_is_present(pdev)) {
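The pci.c hunks above replace the work_busy() check with the controller state machine itself as the reset gate: nvme_reset() must win the transition to NVME_CTRL_RESETTING before it may queue the work, and nvme_reset_work() now warns if it ever runs in any other state. A rough sketch of why a single atomic state transition closes the window that separate work_busy()/queue_work() calls leave open; it uses C11 atomics for brevity, not the driver's lock-protected nvme_change_ctrl_state():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_NEW, CTRL_LIVE, CTRL_RESETTING, CTRL_DELETING };

static _Atomic int ctrl_state = CTRL_LIVE;

/* Only the caller that wins the LIVE -> RESETTING transition queues the work. */
static bool try_start_reset(void)
{
	int expected = CTRL_LIVE;

	return atomic_compare_exchange_strong(&ctrl_state, &expected,
					      CTRL_RESETTING);
}

static void reset_work(void)
{
	/* the handler can now insist it only ever runs in RESETTING */
	if (atomic_load(&ctrl_state) != CTRL_RESETTING) {
		fprintf(stderr, "unexpected state\n");
		return;
	}
	/* ... re-initialize the controller ... */
	atomic_store(&ctrl_state, CTRL_LIVE);
}

int main(void)
{
	printf("first reset request:  %s\n", try_start_reset() ? "queued" : "busy");
	printf("second reset request: %s\n", try_start_reset() ? "queued" : "busy");
	reset_work();
	printf("third reset request:  %s\n", try_start_reset() ? "queued" : "busy");
	return 0;
}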
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 28bd255c144d..24397d306d53 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
753 if (ret) 753 if (ret)
754 goto requeue; 754 goto requeue;
755 755
756 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
757
758 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 756 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
759 if (ret) 757 if (ret)
760 goto stop_admin_q; 758 goto requeue;
761 759
762 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 760 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
763 761
764 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 762 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
765 if (ret) 763 if (ret)
766 goto stop_admin_q; 764 goto requeue;
767 765
768 nvme_start_keep_alive(&ctrl->ctrl); 766 nvme_start_keep_alive(&ctrl->ctrl);
769 767
770 if (ctrl->queue_count > 1) { 768 if (ctrl->queue_count > 1) {
771 ret = nvme_rdma_init_io_queues(ctrl); 769 ret = nvme_rdma_init_io_queues(ctrl);
772 if (ret) 770 if (ret)
773 goto stop_admin_q; 771 goto requeue;
774 772
775 ret = nvme_rdma_connect_io_queues(ctrl); 773 ret = nvme_rdma_connect_io_queues(ctrl);
776 if (ret) 774 if (ret)
777 goto stop_admin_q; 775 goto requeue;
778 } 776 }
779 777
780 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 778 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
782 ctrl->ctrl.opts->nr_reconnects = 0; 780 ctrl->ctrl.opts->nr_reconnects = 0;
783 781
784 if (ctrl->queue_count > 1) { 782 if (ctrl->queue_count > 1) {
785 nvme_start_queues(&ctrl->ctrl);
786 nvme_queue_scan(&ctrl->ctrl); 783 nvme_queue_scan(&ctrl->ctrl);
787 nvme_queue_async_events(&ctrl->ctrl); 784 nvme_queue_async_events(&ctrl->ctrl);
788 } 785 }
@@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
791 788
792 return; 789 return;
793 790
794stop_admin_q:
795 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
796requeue: 791requeue:
797 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", 792 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
798 ctrl->ctrl.opts->nr_reconnects); 793 ctrl->ctrl.opts->nr_reconnects);
@@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
823 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 818 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
824 nvme_cancel_request, &ctrl->ctrl); 819 nvme_cancel_request, &ctrl->ctrl);
825 820
821 /*
822 * queues are not alive anymore, so restart the queues to fail fast
823 * new IO
824 */
825 blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
826 nvme_start_queues(&ctrl->ctrl);
827
826 nvme_rdma_reconnect_or_remove(ctrl); 828 nvme_rdma_reconnect_or_remove(ctrl);
827} 829}
828 830
@@ -1433,7 +1435,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
1433/* 1435/*
1434 * We cannot accept any other command until the Connect command has completed. 1436 * We cannot accept any other command until the Connect command has completed.
1435 */ 1437 */
1436static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, 1438static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1437 struct request *rq) 1439 struct request *rq)
1438{ 1440{
1439 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { 1441 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
@@ -1441,11 +1443,22 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1441 1443
1442 if (!blk_rq_is_passthrough(rq) || 1444 if (!blk_rq_is_passthrough(rq) ||
1443 cmd->common.opcode != nvme_fabrics_command || 1445 cmd->common.opcode != nvme_fabrics_command ||
1444 cmd->fabrics.fctype != nvme_fabrics_type_connect) 1446 cmd->fabrics.fctype != nvme_fabrics_type_connect) {
1445 return false; 1447 /*
1448 * reconnecting state means transport disruption, which
1449 * can take a long time and even might fail permanently,
1450 * so we can't let incoming I/O be requeued forever.
1451 * fail it fast to allow upper layers a chance to
1452 * failover.
1453 */
1454 if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
1455 return -EIO;
1456 else
1457 return -EAGAIN;
1458 }
1446 } 1459 }
1447 1460
1448 return true; 1461 return 0;
1449} 1462}
1450 1463
1451static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1464static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1463,8 +1476,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1463 1476
1464 WARN_ON_ONCE(rq->tag < 0); 1477 WARN_ON_ONCE(rq->tag < 0);
1465 1478
1466 if (!nvme_rdma_queue_is_ready(queue, rq)) 1479 ret = nvme_rdma_queue_is_ready(queue, rq);
1467 return BLK_MQ_RQ_QUEUE_BUSY; 1480 if (unlikely(ret))
1481 goto err;
1468 1482
1469 dev = queue->device->dev; 1483 dev = queue->device->dev;
1470 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1484 ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 9416d052cb89..28c38c756f92 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -144,8 +144,8 @@ int of_dma_configure(struct device *dev, struct device_node *np)
144 coherent ? " " : " not "); 144 coherent ? " " : " not ");
145 145
146 iommu = of_iommu_configure(dev, np); 146 iommu = of_iommu_configure(dev, np);
147 if (IS_ERR(iommu)) 147 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
148 return PTR_ERR(iommu); 148 return -EPROBE_DEFER;
149 149
150 dev_dbg(dev, "device is%sbehind an iommu\n", 150 dev_dbg(dev, "device is%sbehind an iommu\n",
151 iommu ? " " : " not "); 151 iommu ? " " : " not ");
diff --git a/drivers/phy/phy-qcom-qmp.c b/drivers/phy/phy-qcom-qmp.c
index 727e23be7cac..78ca62897784 100644
--- a/drivers/phy/phy-qcom-qmp.c
+++ b/drivers/phy/phy-qcom-qmp.c
@@ -844,7 +844,7 @@ static int qcom_qmp_phy_vreg_init(struct device *dev)
844 int num = qmp->cfg->num_vregs; 844 int num = qmp->cfg->num_vregs;
845 int i; 845 int i;
846 846
847 qmp->vregs = devm_kcalloc(dev, num, sizeof(qmp->vregs), GFP_KERNEL); 847 qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
848 if (!qmp->vregs) 848 if (!qmp->vregs)
849 return -ENOMEM; 849 return -ENOMEM;
850 850
@@ -983,16 +983,16 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
983 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. 983 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
984 */ 984 */
985 qphy->tx = of_iomap(np, 0); 985 qphy->tx = of_iomap(np, 0);
986 if (IS_ERR(qphy->tx)) 986 if (!qphy->tx)
987 return PTR_ERR(qphy->tx); 987 return -ENOMEM;
988 988
989 qphy->rx = of_iomap(np, 1); 989 qphy->rx = of_iomap(np, 1);
990 if (IS_ERR(qphy->rx)) 990 if (!qphy->rx)
991 return PTR_ERR(qphy->rx); 991 return -ENOMEM;
992 992
993 qphy->pcs = of_iomap(np, 2); 993 qphy->pcs = of_iomap(np, 2);
994 if (IS_ERR(qphy->pcs)) 994 if (!qphy->pcs)
995 return PTR_ERR(qphy->pcs); 995 return -ENOMEM;
996 996
997 /* 997 /*
998 * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3 998 * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2de1e603bd2b..5f3672153b12 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -704,7 +704,7 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
704 /* Reallocate the array */ 704 /* Reallocate the array */
705 u32 new_capacity = 2 * dev->pipes_capacity; 705 u32 new_capacity = 2 * dev->pipes_capacity;
706 struct goldfish_pipe **pipes = 706 struct goldfish_pipe **pipes =
707 kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL); 707 kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
708 if (!pipes) 708 if (!pipes)
709 return -ENOMEM; 709 return -ENOMEM;
710 memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity); 710 memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c
index 35ce53edabf9..d5e5229308f2 100644
--- a/drivers/reset/hisilicon/hi6220_reset.c
+++ b/drivers/reset/hisilicon/hi6220_reset.c
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
155} 155}
156 156
157postcore_initcall(hi6220_reset_init); 157postcore_initcall(hi6220_reset_init);
158
159MODULE_LICENSE("GPL v2");
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index e72abbc18ee3..a66a317f3e4f 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -70,14 +70,14 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
70{ 70{
71 return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); 71 return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
72} 72}
73MDEV_TYPE_ATTR_RO(name); 73static MDEV_TYPE_ATTR_RO(name);
74 74
75static ssize_t device_api_show(struct kobject *kobj, struct device *dev, 75static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
76 char *buf) 76 char *buf)
77{ 77{
78 return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); 78 return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
79} 79}
80MDEV_TYPE_ATTR_RO(device_api); 80static MDEV_TYPE_ATTR_RO(device_api);
81 81
82static ssize_t available_instances_show(struct kobject *kobj, 82static ssize_t available_instances_show(struct kobject *kobj,
83 struct device *dev, char *buf) 83 struct device *dev, char *buf)
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj,
86 86
87 return sprintf(buf, "%d\n", atomic_read(&private->avail)); 87 return sprintf(buf, "%d\n", atomic_read(&private->avail));
88} 88}
89MDEV_TYPE_ATTR_RO(available_instances); 89static MDEV_TYPE_ATTR_RO(available_instances);
90 90
91static struct attribute *mdev_types_attrs[] = { 91static struct attribute *mdev_types_attrs[] = {
92 &mdev_type_attr_name.attr, 92 &mdev_type_attr_name.attr,
@@ -100,7 +100,7 @@ static struct attribute_group mdev_type_group = {
100 .attrs = mdev_types_attrs, 100 .attrs = mdev_types_attrs,
101}; 101};
102 102
103struct attribute_group *mdev_type_groups[] = { 103static struct attribute_group *mdev_type_groups[] = {
104 &mdev_type_group, 104 &mdev_type_group,
105 NULL, 105 NULL,
106}; 106};
@@ -152,7 +152,7 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
152 &events, &private->nb); 152 &events, &private->nb);
153} 153}
154 154
155void vfio_ccw_mdev_release(struct mdev_device *mdev) 155static void vfio_ccw_mdev_release(struct mdev_device *mdev)
156{ 156{
157 struct vfio_ccw_private *private = 157 struct vfio_ccw_private *private =
158 dev_get_drvdata(mdev_parent_dev(mdev)); 158 dev_get_drvdata(mdev_parent_dev(mdev));
@@ -233,7 +233,7 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
233 } 233 }
234} 234}
235 235
236int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) 236static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
237{ 237{
238 if (info->index != VFIO_CCW_IO_IRQ_INDEX) 238 if (info->index != VFIO_CCW_IO_IRQ_INDEX)
239 return -EINVAL; 239 return -EINVAL;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9be4596d8a08..ea099910b4e9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -668,10 +668,28 @@ static int ap_device_probe(struct device *dev)
668 struct ap_driver *ap_drv = to_ap_drv(dev->driver); 668 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
669 int rc; 669 int rc;
670 670
671 /* Add queue/card to list of active queues/cards */
672 spin_lock_bh(&ap_list_lock);
673 if (is_card_dev(dev))
674 list_add(&to_ap_card(dev)->list, &ap_card_list);
675 else
676 list_add(&to_ap_queue(dev)->list,
677 &to_ap_queue(dev)->card->queues);
678 spin_unlock_bh(&ap_list_lock);
679
671 ap_dev->drv = ap_drv; 680 ap_dev->drv = ap_drv;
672 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 681 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
673 if (rc) 682
683 if (rc) {
684 spin_lock_bh(&ap_list_lock);
685 if (is_card_dev(dev))
686 list_del_init(&to_ap_card(dev)->list);
687 else
688 list_del_init(&to_ap_queue(dev)->list);
689 spin_unlock_bh(&ap_list_lock);
674 ap_dev->drv = NULL; 690 ap_dev->drv = NULL;
691 }
692
675 return rc; 693 return rc;
676} 694}
677 695
@@ -680,14 +698,17 @@ static int ap_device_remove(struct device *dev)
680 struct ap_device *ap_dev = to_ap_dev(dev); 698 struct ap_device *ap_dev = to_ap_dev(dev);
681 struct ap_driver *ap_drv = ap_dev->drv; 699 struct ap_driver *ap_drv = ap_dev->drv;
682 700
701 if (ap_drv->remove)
702 ap_drv->remove(ap_dev);
703
704 /* Remove queue/card from list of active queues/cards */
683 spin_lock_bh(&ap_list_lock); 705 spin_lock_bh(&ap_list_lock);
684 if (is_card_dev(dev)) 706 if (is_card_dev(dev))
685 list_del_init(&to_ap_card(dev)->list); 707 list_del_init(&to_ap_card(dev)->list);
686 else 708 else
687 list_del_init(&to_ap_queue(dev)->list); 709 list_del_init(&to_ap_queue(dev)->list);
688 spin_unlock_bh(&ap_list_lock); 710 spin_unlock_bh(&ap_list_lock);
689 if (ap_drv->remove) 711
690 ap_drv->remove(ap_dev);
691 return 0; 712 return 0;
692} 713}
693 714
@@ -1056,10 +1077,6 @@ static void ap_scan_bus(struct work_struct *unused)
1056 } 1077 }
1057 /* get it and thus adjust reference counter */ 1078 /* get it and thus adjust reference counter */
1058 get_device(&ac->ap_dev.device); 1079 get_device(&ac->ap_dev.device);
1059 /* Add card device to card list */
1060 spin_lock_bh(&ap_list_lock);
1061 list_add(&ac->list, &ap_card_list);
1062 spin_unlock_bh(&ap_list_lock);
1063 } 1080 }
1064 /* now create the new queue device */ 1081 /* now create the new queue device */
1065 aq = ap_queue_create(qid, type); 1082 aq = ap_queue_create(qid, type);
@@ -1070,10 +1087,6 @@ static void ap_scan_bus(struct work_struct *unused)
1070 aq->ap_dev.device.parent = &ac->ap_dev.device; 1087 aq->ap_dev.device.parent = &ac->ap_dev.device;
1071 dev_set_name(&aq->ap_dev.device, 1088 dev_set_name(&aq->ap_dev.device,
1072 "%02x.%04x", id, dom); 1089 "%02x.%04x", id, dom);
1073 /* Add queue device to card queue list */
1074 spin_lock_bh(&ap_list_lock);
1075 list_add(&aq->list, &ac->queues);
1076 spin_unlock_bh(&ap_list_lock);
1077 /* Start with a device reset */ 1090 /* Start with a device reset */
1078 spin_lock_bh(&aq->lock); 1091 spin_lock_bh(&aq->lock);
1079 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 1092 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
@@ -1081,9 +1094,6 @@ static void ap_scan_bus(struct work_struct *unused)
1081 /* Register device */ 1094 /* Register device */
1082 rc = device_register(&aq->ap_dev.device); 1095 rc = device_register(&aq->ap_dev.device);
1083 if (rc) { 1096 if (rc) {
1084 spin_lock_bh(&ap_list_lock);
1085 list_del_init(&aq->list);
1086 spin_unlock_bh(&ap_list_lock);
1087 put_device(&aq->ap_dev.device); 1097 put_device(&aq->ap_dev.device);
1088 continue; 1098 continue;
1089 } 1099 }
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index cfa161ccc74e..836efac96813 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -160,7 +160,14 @@ static struct device_type ap_card_type = {
160 160
161static void ap_card_device_release(struct device *dev) 161static void ap_card_device_release(struct device *dev)
162{ 162{
163 kfree(to_ap_card(dev)); 163 struct ap_card *ac = to_ap_card(dev);
164
165 if (!list_empty(&ac->list)) {
166 spin_lock_bh(&ap_list_lock);
167 list_del_init(&ac->list);
168 spin_unlock_bh(&ap_list_lock);
169 }
170 kfree(ac);
164} 171}
165 172
166struct ap_card *ap_card_create(int id, int queue_depth, int device_type, 173struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 480c58a63769..0f1a5d02acb0 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -584,7 +584,14 @@ static struct device_type ap_queue_type = {
584 584
585static void ap_queue_device_release(struct device *dev) 585static void ap_queue_device_release(struct device *dev)
586{ 586{
587 kfree(to_ap_queue(dev)); 587 struct ap_queue *aq = to_ap_queue(dev);
588
589 if (!list_empty(&aq->list)) {
590 spin_lock_bh(&ap_list_lock);
591 list_del_init(&aq->list);
592 spin_unlock_bh(&ap_list_lock);
593 }
594 kfree(aq);
588} 595}
589 596
590struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) 597struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 4fc8ed5fe067..1f424e40afdf 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
191 struct bnx2fc_cmd_mgr *cmd_mgr; 191 struct bnx2fc_cmd_mgr *cmd_mgr;
192 spinlock_t hba_lock; 192 spinlock_t hba_lock;
193 struct mutex hba_mutex; 193 struct mutex hba_mutex;
194 struct mutex hba_stats_mutex;
194 unsigned long adapter_state; 195 unsigned long adapter_state;
195 #define ADAPTER_STATE_UP 0 196 #define ADAPTER_STATE_UP 0
196 #define ADAPTER_STATE_GOING_DOWN 1 197 #define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 93b5a0012417..902722dc4ce3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -663,15 +663,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
663 if (!fw_stats) 663 if (!fw_stats)
664 return NULL; 664 return NULL;
665 665
666 mutex_lock(&hba->hba_stats_mutex);
667
666 bnx2fc_stats = fc_get_host_stats(shost); 668 bnx2fc_stats = fc_get_host_stats(shost);
667 669
668 init_completion(&hba->stat_req_done); 670 init_completion(&hba->stat_req_done);
669 if (bnx2fc_send_stat_req(hba)) 671 if (bnx2fc_send_stat_req(hba))
670 return bnx2fc_stats; 672 goto unlock_stats_mutex;
671 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); 673 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
672 if (!rc) { 674 if (!rc) {
673 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); 675 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
674 return bnx2fc_stats; 676 goto unlock_stats_mutex;
675 } 677 }
676 BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); 678 BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
677 bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; 679 bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -693,6 +695,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
693 695
694 memcpy(&hba->prev_stats, hba->stats_buffer, 696 memcpy(&hba->prev_stats, hba->stats_buffer,
695 sizeof(struct fcoe_statistics_params)); 697 sizeof(struct fcoe_statistics_params));
698
699unlock_stats_mutex:
700 mutex_unlock(&hba->hba_stats_mutex);
696 return bnx2fc_stats; 701 return bnx2fc_stats;
697} 702}
698 703
@@ -1340,6 +1345,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
1340 } 1345 }
1341 spin_lock_init(&hba->hba_lock); 1346 spin_lock_init(&hba->hba_lock);
1342 mutex_init(&hba->hba_mutex); 1347 mutex_init(&hba->hba_mutex);
1348 mutex_init(&hba->hba_stats_mutex);
1343 1349
1344 hba->cnic = cnic; 1350 hba->cnic = cnic;
1345 1351
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 1076c1578322..0aae094ab91c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1595,7 +1595,6 @@ static void release_offload_resources(struct cxgbi_sock *csk)
1595 cxgbi_sock_put(csk); 1595 cxgbi_sock_put(csk);
1596 } 1596 }
1597 csk->dst = NULL; 1597 csk->dst = NULL;
1598 csk->cdev = NULL;
1599} 1598}
1600 1599
1601static int init_act_open(struct cxgbi_sock *csk) 1600static int init_act_open(struct cxgbi_sock *csk)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index fb06974c88c1..e4c83b7c96a8 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -867,7 +867,8 @@ static void need_active_close(struct cxgbi_sock *csk)
867 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 867 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
868 csk, (csk)->state, (csk)->flags, (csk)->tid); 868 csk, (csk)->state, (csk)->flags, (csk)->tid);
869 spin_lock_bh(&csk->lock); 869 spin_lock_bh(&csk->lock);
870 dst_confirm(csk->dst); 870 if (csk->dst)
871 dst_confirm(csk->dst);
871 data_lost = skb_queue_len(&csk->receive_queue); 872 data_lost = skb_queue_len(&csk->receive_queue);
872 __skb_queue_purge(&csk->receive_queue); 873 __skb_queue_purge(&csk->receive_queue);
873 874
@@ -882,7 +883,8 @@ static void need_active_close(struct cxgbi_sock *csk)
882 } 883 }
883 884
884 if (close_req) { 885 if (close_req) {
885 if (data_lost) 886 if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
887 data_lost)
886 csk->cdev->csk_send_abort_req(csk); 888 csk->cdev->csk_send_abort_req(csk);
887 else 889 else
888 csk->cdev->csk_send_close_req(csk); 890 csk->cdev->csk_send_close_req(csk);
@@ -1186,9 +1188,10 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
1186 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 1188 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
1187 skb = next; 1189 skb = next;
1188 } 1190 }
1189done: 1191
1190 if (likely(skb_queue_len(&csk->write_queue))) 1192 if (likely(skb_queue_len(&csk->write_queue)))
1191 cdev->csk_push_tx_frames(csk, 1); 1193 cdev->csk_push_tx_frames(csk, 1);
1194done:
1192 spin_unlock_bh(&csk->lock); 1195 spin_unlock_bh(&csk->lock);
1193 return copied; 1196 return copied;
1194 1197
@@ -1568,9 +1571,12 @@ static inline int read_pdu_skb(struct iscsi_conn *conn,
1568 } 1571 }
1569} 1572}
1570 1573
1571static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) 1574static int
1575skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
1576 struct sk_buff *skb)
1572{ 1577{
1573 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1578 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1579 int err;
1574 1580
1575 log_debug(1 << CXGBI_DBG_PDU_RX, 1581 log_debug(1 << CXGBI_DBG_PDU_RX,
1576 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", 1582 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
@@ -1608,7 +1614,16 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
1608 } 1614 }
1609 } 1615 }
1610 1616
1611 return read_pdu_skb(conn, skb, 0, 0); 1617 err = read_pdu_skb(conn, skb, 0, 0);
1618 if (likely(err >= 0)) {
1619 struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
1620 u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
1621
1622 if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
1623 cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
1624 }
1625
1626 return err;
1612} 1627}
1613 1628
1614static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, 1629static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
@@ -1713,7 +1728,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1713 cxgbi_skcb_rx_pdulen(skb)); 1728 cxgbi_skcb_rx_pdulen(skb));
1714 1729
1715 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1730 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
1716 err = skb_read_pdu_bhs(conn, skb); 1731 err = skb_read_pdu_bhs(csk, conn, skb);
1717 if (err < 0) { 1732 if (err < 0) {
1718 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1733 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
1719 "f 0x%lx, plen %u.\n", 1734 "f 0x%lx, plen %u.\n",
@@ -1731,7 +1746,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
1731 cxgbi_skcb_flags(skb), 1746 cxgbi_skcb_flags(skb),
1732 cxgbi_skcb_rx_pdulen(skb)); 1747 cxgbi_skcb_rx_pdulen(skb));
1733 } else { 1748 } else {
1734 err = skb_read_pdu_bhs(conn, skb); 1749 err = skb_read_pdu_bhs(csk, conn, skb);
1735 if (err < 0) { 1750 if (err < 0) {
1736 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1751 pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
1737 "f 0x%lx, plen %u.\n", 1752 "f 0x%lx, plen %u.\n",
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 239462a75760..37f07aaab1e4 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -187,6 +187,7 @@ enum cxgbi_sock_flags {
187 CTPF_HAS_ATID, /* reserved atid */ 187 CTPF_HAS_ATID, /* reserved atid */
188 CTPF_HAS_TID, /* reserved hw tid */ 188 CTPF_HAS_TID, /* reserved hw tid */
189 CTPF_OFFLOAD_DOWN, /* offload function off */ 189 CTPF_OFFLOAD_DOWN, /* offload function off */
190 CTPF_LOGOUT_RSP_RCVD, /* received logout response */
190}; 191};
191 192
192struct cxgbi_skb_rx_cb { 193struct cxgbi_skb_rx_cb {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8912767e7bc8..da669dce12fe 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,7 +127,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
127void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); 127void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
128int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 128int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
129 struct serv_parm *, uint32_t, int); 129 struct serv_parm *, uint32_t, int);
130int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 130void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
131void lpfc_more_plogi(struct lpfc_vport *); 131void lpfc_more_plogi(struct lpfc_vport *);
132void lpfc_more_adisc(struct lpfc_vport *); 132void lpfc_more_adisc(struct lpfc_vport *);
133void lpfc_end_rscn(struct lpfc_vport *); 133void lpfc_end_rscn(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f2cd19c6c2df..24ce96dcc94d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -978,9 +978,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
978 ndlp, did, ndlp->nlp_fc4_type, 978 ndlp, did, ndlp->nlp_fc4_type,
979 FC_TYPE_FCP, FC_TYPE_NVME); 979 FC_TYPE_FCP, FC_TYPE_NVME);
980 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 980 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
981
982 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
983 lpfc_issue_els_prli(vport, ndlp, 0);
981 } 984 }
982 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
983 lpfc_issue_els_prli(vport, ndlp, 0);
984 } else 985 } else
985 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 986 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
986 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); 987 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bff3de053df4..f74cb0142fd4 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -206,7 +206,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
206 * associated with a LPFC_NODELIST entry. This 206 * associated with a LPFC_NODELIST entry. This
207 * routine effectively results in a "software abort". 207 * routine effectively results in a "software abort".
208 */ 208 */
209int 209void
210lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 210lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
211{ 211{
212 LIST_HEAD(abort_list); 212 LIST_HEAD(abort_list);
@@ -215,6 +215,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
215 215
216 pring = lpfc_phba_elsring(phba); 216 pring = lpfc_phba_elsring(phba);
217 217
218 /* In case of error recovery path, we might have a NULL pring here */
219 if (!pring)
220 return;
221
218 /* Abort outstanding I/O on NPort <nlp_DID> */ 222 /* Abort outstanding I/O on NPort <nlp_DID> */
219 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, 223 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
220 "2819 Abort outstanding I/O on NPort x%x " 224 "2819 Abort outstanding I/O on NPort x%x "
@@ -273,7 +277,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
273 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 277 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
274 278
275 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 279 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
276 return 0;
277} 280}
278 281
279static int 282static int
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 074a6b5e7763..518b15e6f222 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -799,8 +799,8 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
799 } 799 }
800 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 800 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
801 801
802 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, 802 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
803 ctxp->state, 0); 803 ctxp->state, aborting);
804 804
805 atomic_inc(&lpfc_nvmep->xmt_fcp_release); 805 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
806 806
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 16d1cd50feed..ca3420de5a01 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
730 return -EIO; 730 return -EIO;
731 } 731 }
732 732
733 memset(&elreq, 0, sizeof(elreq));
734
733 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, 735 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
734 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 736 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
735 DMA_TO_DEVICE); 737 DMA_TO_DEVICE);
@@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
795 797
796 if (atomic_read(&vha->loop_state) == LOOP_READY && 798 if (atomic_read(&vha->loop_state) == LOOP_READY &&
797 (ha->current_topology == ISP_CFG_F || 799 (ha->current_topology == ISP_CFG_F ||
798 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && 800 (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
799 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 801 req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
800 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 802 elreq.options == EXTERNAL_LOOPBACK) {
801 elreq.options == EXTERNAL_LOOPBACK) {
802 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 803 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
803 ql_dbg(ql_dbg_user, vha, 0x701e, 804 ql_dbg(ql_dbg_user, vha, 0x701e,
804 "BSG request type: %s.\n", type); 805 "BSG request type: %s.\n", type);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 51b4179469d1..88748a6ab73f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1131 1131
1132 /* Mailbox registers. */ 1132 /* Mailbox registers. */
1133 mbx_reg = &reg->mailbox0; 1133 mbx_reg = &reg->mailbox0;
1134 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) 1134 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1135 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); 1135 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1136 1136
1137 /* Transfer sequence registers. */ 1137 /* Transfer sequence registers. */
@@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2090 2090
2091 /* Mailbox registers. */ 2091 /* Mailbox registers. */
2092 mbx_reg = &reg->mailbox0; 2092 mbx_reg = &reg->mailbox0;
2093 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) 2093 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2094 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); 2094 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2095 2095
2096 /* Transfer sequence registers. */ 2096 /* Transfer sequence registers. */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ae119018dfaa..eddbc1218a39 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3425,6 +3425,7 @@ struct qla_hw_data {
3425 uint8_t max_req_queues; 3425 uint8_t max_req_queues;
3426 uint8_t max_rsp_queues; 3426 uint8_t max_rsp_queues;
3427 uint8_t max_qpairs; 3427 uint8_t max_qpairs;
3428 uint8_t num_qpairs;
3428 struct qla_qpair *base_qpair; 3429 struct qla_qpair *base_qpair;
3429 struct qla_npiv_entry *npiv_info; 3430 struct qla_npiv_entry *npiv_info;
3430 uint16_t nvram_npiv_size; 3431 uint16_t nvram_npiv_size;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 034743309ada..0391fc317003 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
7543 /* Assign available que pair id */ 7543 /* Assign available que pair id */
7544 mutex_lock(&ha->mq_lock); 7544 mutex_lock(&ha->mq_lock);
7545 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); 7545 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
7546 if (qpair_id >= ha->max_qpairs) { 7546 if (ha->num_qpairs >= ha->max_qpairs) {
7547 mutex_unlock(&ha->mq_lock); 7547 mutex_unlock(&ha->mq_lock);
7548 ql_log(ql_log_warn, vha, 0x0183, 7548 ql_log(ql_log_warn, vha, 0x0183,
7549 "No resources to create additional q pair.\n"); 7549 "No resources to create additional q pair.\n");
7550 goto fail_qid_map; 7550 goto fail_qid_map;
7551 } 7551 }
7552 ha->num_qpairs++;
7552 set_bit(qpair_id, ha->qpair_qid_map); 7553 set_bit(qpair_id, ha->qpair_qid_map);
7553 ha->queue_pair_map[qpair_id] = qpair; 7554 ha->queue_pair_map[qpair_id] = qpair;
7554 qpair->id = qpair_id; 7555 qpair->id = qpair_id;
@@ -7635,6 +7636,7 @@ fail_rsp:
7635fail_msix: 7636fail_msix:
7636 ha->queue_pair_map[qpair_id] = NULL; 7637 ha->queue_pair_map[qpair_id] = NULL;
7637 clear_bit(qpair_id, ha->qpair_qid_map); 7638 clear_bit(qpair_id, ha->qpair_qid_map);
7639 ha->num_qpairs--;
7638 mutex_unlock(&ha->mq_lock); 7640 mutex_unlock(&ha->mq_lock);
7639fail_qid_map: 7641fail_qid_map:
7640 kfree(qpair); 7642 kfree(qpair);
@@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
7660 mutex_lock(&ha->mq_lock); 7662 mutex_lock(&ha->mq_lock);
7661 ha->queue_pair_map[qpair->id] = NULL; 7663 ha->queue_pair_map[qpair->id] = NULL;
7662 clear_bit(qpair->id, ha->qpair_qid_map); 7664 clear_bit(qpair->id, ha->qpair_qid_map);
7665 ha->num_qpairs--;
7663 list_del(&qpair->qp_list_elem); 7666 list_del(&qpair->qp_list_elem);
7664 if (list_empty(&vha->qp_list)) 7667 if (list_empty(&vha->qp_list))
7665 vha->flags.qpairs_available = 0; 7668 vha->flags.qpairs_available = 0;
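Note: the qla_init.c hunk above adds a num_qpairs counter next to the qpair_qid_map bitmap and adjusts it under mq_lock on create, on the failure unwind, and on delete, so the "no resources" check reflects how many queue pairs actually exist. A minimal userspace sketch of that pattern, with invented names and a pthread mutex standing in for the driver's mutex:

#include <pthread.h>
#include <stdbool.h>

#define MAX_QPAIRS 32

static pthread_mutex_t mq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool qid_map[MAX_QPAIRS];	/* which ids are taken */
static int num_qpairs;			/* how many queue pairs actually exist */

static int create_qpair(void)
{
	int id;

	pthread_mutex_lock(&mq_lock);
	if (num_qpairs >= MAX_QPAIRS) {	/* check the count, not just a free bit */
		pthread_mutex_unlock(&mq_lock);
		return -1;
	}
	for (id = 0; qid_map[id]; id++)
		;			/* a free id must exist at this point */
	qid_map[id] = true;
	num_qpairs++;
	pthread_mutex_unlock(&mq_lock);
	return id;
}

static void delete_qpair(int id)
{
	pthread_mutex_lock(&mq_lock);
	qid_map[id] = false;
	num_qpairs--;			/* keep the counter in step with the map */
	pthread_mutex_unlock(&mq_lock);
}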
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 66df6cec59da..c61a6a871c8e 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -129,28 +129,16 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
129} 129}
130 130
131static inline void 131static inline void
132qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, 132qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
133 struct qla_tgt_cmd *tc)
134{ 133{
135 struct dsd_dma *dsd_ptr, *tdsd_ptr; 134 struct dsd_dma *dsd, *tdsd;
136 struct crc_context *ctx;
137
138 if (sp)
139 ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
140 else if (tc)
141 ctx = (struct crc_context *)tc->ctx;
142 else {
143 BUG();
144 return;
145 }
146 135
147 /* clean up allocated prev pool */ 136 /* clean up allocated prev pool */
148 list_for_each_entry_safe(dsd_ptr, tdsd_ptr, 137 list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
149 &ctx->dsd_list, list) { 138 dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
150 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, 139 dsd->dsd_list_dma);
151 dsd_ptr->dsd_list_dma); 140 list_del(&dsd->list);
152 list_del(&dsd_ptr->list); 141 kfree(dsd);
153 kfree(dsd_ptr);
154 } 142 }
155 INIT_LIST_HEAD(&ctx->dsd_list); 143 INIT_LIST_HEAD(&ctx->dsd_list);
156} 144}
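Note: the qla_inline.h change above has qla2x00_clean_dsd_pool() take the crc_context directly and walk its DSD list with the safe-iteration idiom, freeing each node as it goes. A standalone sketch of that teardown idiom, using an invented singly linked list rather than the kernel's list_head:

#include <stdlib.h>

struct dsd {
	struct dsd *next;
	void *buf;
};

static void clean_dsd_list(struct dsd **head)
{
	struct dsd *dsd = *head, *tdsd;

	while (dsd) {
		tdsd = dsd->next;	/* saved before dsd is freed */
		free(dsd->buf);
		free(dsd);
		dsd = tdsd;
	}
	*head = NULL;			/* leave an empty, reusable list */
}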
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index aac03504d9a3..2572121b765b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3282,7 +3282,7 @@ msix_register_fail:
3282 } 3282 }
3283 3283
3284 /* Enable MSI-X vector for response queue update for queue 0 */ 3284 /* Enable MSI-X vector for response queue update for queue 0 */
3285 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3285 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3286 if (ha->msixbase && ha->mqiobase && 3286 if (ha->msixbase && ha->mqiobase &&
3287 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3287 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3288 ql2xmqsupport)) 3288 ql2xmqsupport))
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index a113ab3592a7..cba1fc5e8be9 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3676 qlt_update_host_map(vha, id); 3676 qlt_update_host_map(vha, id);
3677 } 3677 }
3678 3678
3679 fc_host_port_name(vha->host) =
3680 wwn_to_u64(vha->port_name);
3681
3682 if (qla_ini_mode_enabled(vha))
3683 ql_dbg(ql_dbg_mbx, vha, 0x1018,
3684 "FA-WWN portname %016llx (%x)\n",
3685 fc_host_port_name(vha->host),
3686 rptid_entry->vp_status);
3687
3688 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3679 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3689 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3680 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3690 } else { 3681 } else {
@@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4821 4812
4822 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4813 memset(mcp->mb, 0 , sizeof(mcp->mb));
4823 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4814 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4824 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ 4815 /* BIT_6 specifies 64bit address */
4816 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
4825 if (IS_CNA_CAPABLE(ha)) { 4817 if (IS_CNA_CAPABLE(ha)) {
4826 mcp->mb[1] |= BIT_15;
4827 mcp->mb[2] = vha->fcoe_fcf_idx; 4818 mcp->mb[2] = vha->fcoe_fcf_idx;
4828 } 4819 }
4829 mcp->mb[16] = LSW(mreq->rcv_dma); 4820 mcp->mb[16] = LSW(mreq->rcv_dma);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1c7957903283..79f050256c55 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -630,29 +630,34 @@ qla2x00_sp_free_dma(void *ptr)
630 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 630 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
631 } 631 }
632 632
633 if (!ctx)
634 goto end;
635
633 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 636 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
634 /* List assured to be having elements */ 637 /* List assured to be having elements */
635 qla2x00_clean_dsd_pool(ha, sp, NULL); 638 qla2x00_clean_dsd_pool(ha, ctx);
636 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 639 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
637 } 640 }
638 641
639 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 642 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
640 dma_pool_free(ha->dl_dma_pool, ctx, 643 struct crc_context *ctx0 = ctx;
641 ((struct crc_context *)ctx)->crc_ctx_dma); 644
645 dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
642 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 646 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
643 } 647 }
644 648
645 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 649 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
646 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 650 struct ct6_dsd *ctx1 = ctx;
647 651
648 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 652 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
649 ctx1->fcp_cmnd_dma); 653 ctx1->fcp_cmnd_dma);
650 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 654 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
651 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 655 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
652 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 656 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
653 mempool_free(ctx1, ha->ctx_mempool); 657 mempool_free(ctx1, ha->ctx_mempool);
654 } 658 }
655 659
660end:
656 CMD_SP(cmd) = NULL; 661 CMD_SP(cmd) = NULL;
657 qla2x00_rel_sp(sp); 662 qla2x00_rel_sp(sp);
658} 663}
@@ -699,21 +704,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
699 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 704 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
700 } 705 }
701 706
707 if (!ctx)
708 goto end;
709
702 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 710 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
703 /* List assured to be having elements */ 711 /* List assured to be having elements */
704 qla2x00_clean_dsd_pool(ha, sp, NULL); 712 qla2x00_clean_dsd_pool(ha, ctx);
705 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 713 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
706 } 714 }
707 715
708 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 716 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
709 dma_pool_free(ha->dl_dma_pool, ctx, 717 struct crc_context *ctx0 = ctx;
710 ((struct crc_context *)ctx)->crc_ctx_dma); 718
719 dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
711 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 720 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
712 } 721 }
713 722
714 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 723 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
715 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 724 struct ct6_dsd *ctx1 = ctx;
716
717 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 725 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
718 ctx1->fcp_cmnd_dma); 726 ctx1->fcp_cmnd_dma);
719 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 727 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
@@ -721,7 +729,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
721 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 729 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
722 mempool_free(ctx1, ha->ctx_mempool); 730 mempool_free(ctx1, ha->ctx_mempool);
723 } 731 }
724 732end:
725 CMD_SP(cmd) = NULL; 733 CMD_SP(cmd) = NULL;
726 qla2xxx_rel_qpair_sp(sp->qpair, sp); 734 qla2xxx_rel_qpair_sp(sp->qpair, sp);
727} 735}
@@ -1632,7 +1640,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1632void 1640void
1633qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1641qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1634{ 1642{
1635 int que, cnt; 1643 int que, cnt, status;
1636 unsigned long flags; 1644 unsigned long flags;
1637 srb_t *sp; 1645 srb_t *sp;
1638 struct qla_hw_data *ha = vha->hw; 1646 struct qla_hw_data *ha = vha->hw;
@@ -1662,8 +1670,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1662 */ 1670 */
1663 sp_get(sp); 1671 sp_get(sp);
1664 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1672 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1665 qla2xxx_eh_abort(GET_CMD_SP(sp)); 1673 status = qla2xxx_eh_abort(GET_CMD_SP(sp));
1666 spin_lock_irqsave(&ha->hardware_lock, flags); 1674 spin_lock_irqsave(&ha->hardware_lock, flags);
1675 /* Get rid of extra reference if immediate exit
 1676 * from qla2xxx_eh_abort */
1677 if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
1678 atomic_dec(&sp->ref_count);
1667 } 1679 }
1668 req->outstanding_cmds[cnt] = NULL; 1680 req->outstanding_cmds[cnt] = NULL;
1669 sp->done(sp, res); 1681 sp->done(sp, res);
@@ -2623,10 +2635,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2623 2635
2624 if (mem_only) { 2636 if (mem_only) {
2625 if (pci_enable_device_mem(pdev)) 2637 if (pci_enable_device_mem(pdev))
2626 goto probe_out; 2638 return ret;
2627 } else { 2639 } else {
2628 if (pci_enable_device(pdev)) 2640 if (pci_enable_device(pdev))
2629 goto probe_out; 2641 return ret;
2630 } 2642 }
2631 2643
2632 /* This may fail but that's ok */ 2644 /* This may fail but that's ok */
@@ -2636,7 +2648,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2636 if (!ha) { 2648 if (!ha) {
2637 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2649 ql_log_pci(ql_log_fatal, pdev, 0x0009,
2638 "Unable to allocate memory for ha.\n"); 2650 "Unable to allocate memory for ha.\n");
2639 goto probe_out; 2651 goto disable_device;
2640 } 2652 }
2641 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2653 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2642 "Memory allocated for ha=%p.\n", ha); 2654 "Memory allocated for ha=%p.\n", ha);
@@ -3254,7 +3266,7 @@ iospace_config_failed:
3254 pci_release_selected_regions(ha->pdev, ha->bars); 3266 pci_release_selected_regions(ha->pdev, ha->bars);
3255 kfree(ha); 3267 kfree(ha);
3256 3268
3257probe_out: 3269disable_device:
3258 pci_disable_device(pdev); 3270 pci_disable_device(pdev);
3259 return ret; 3271 return ret;
3260} 3272}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0e03ca2ab3e5..e766d8412384 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2245,11 +2245,13 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2245 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, 2245 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
2246 cmd->dma_data_direction); 2246 cmd->dma_data_direction);
2247 2247
2248 if (!cmd->ctx)
2249 return;
2250
2248 if (cmd->ctx_dsd_alloced) 2251 if (cmd->ctx_dsd_alloced)
2249 qla2x00_clean_dsd_pool(ha, NULL, cmd); 2252 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2250 2253
2251 if (cmd->ctx) 2254 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2252 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2253} 2255}
2254 2256
2255static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, 2257static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 8a58ef3adab4..c197972a3e2d 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
371 goto done; 371 goto done;
372 } 372 }
373 373
374 if (end <= start || start == 0 || end == 0) { 374 if (end < start || start == 0 || end == 0) {
375 ql_dbg(ql_dbg_misc, vha, 0xd023, 375 ql_dbg(ql_dbg_misc, vha, 0xd023,
376 "%s: unusable range (start=%x end=%x)\n", __func__, 376 "%s: unusable range (start=%x end=%x)\n", __func__,
377 ent->t262.end_addr, ent->t262.start_addr); 377 ent->t262.end_addr, ent->t262.start_addr);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 17249c3650fe..dc095a292c61 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1404,7 +1404,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1404 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 1404 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1405 arr[5] = (int)have_dif_prot; /* PROTECT bit */ 1405 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1406 if (sdebug_vpd_use_hostno == 0) 1406 if (sdebug_vpd_use_hostno == 0)
1407 arr[5] = 0x10; /* claim: implicit TGPS */ 1407 arr[5] |= 0x10; /* claim: implicit TPGS */
1408 arr[6] = 0x10; /* claim: MultiP */ 1408 arr[6] = 0x10; /* claim: MultiP */
1409 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ 1409 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1410 arr[7] = 0xa; /* claim: LINKED + CMDQUE */ 1410 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
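Note: the scsi_debug fix above switches arr[5] from assignment to OR, so claiming implicit TPGS no longer wipes out the PROTECT bit stored in the same byte a few lines earlier. A tiny illustration of the difference, with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned char byte = 0x01;	/* e.g. PROTECT already claimed */

	byte |= 0x10;			/* add TPGS without losing PROTECT */
	printf("0x%02x\n", byte);	/* prints 0x11, not 0x10 */
	return 0;
}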
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index ae627049c499..4be87f503e3b 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -1,6 +1,6 @@
1config CRYPTO_DEV_CCREE 1config CRYPTO_DEV_CCREE
2 tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators" 2 tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
3 depends on CRYPTO_HW && OF && HAS_DMA 3 depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
4 default n 4 default n
5 select CRYPTO_HASH 5 select CRYPTO_HASH
6 select CRYPTO_BLKCIPHER 6 select CRYPTO_BLKCIPHER
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 038e2ff5e545..6471d3d2d375 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -216,7 +216,8 @@ void ssi_buffer_mgr_copy_scatterlist_portion(
216 uint32_t nents, lbytes; 216 uint32_t nents, lbytes;
217 217
218 nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL); 218 nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
219 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF)); 219 sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
220 (direct == SSI_SG_TO_BUF));
220} 221}
221 222
222static inline int ssi_buffer_mgr_render_buff_to_mlli( 223static inline int ssi_buffer_mgr_render_buff_to_mlli(
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 2e1bd47337fd..e6727cefde05 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
293 size_t lmmk_size; 293 size_t lmmk_size;
294 size_t lum_size; 294 size_t lum_size;
295 int rc; 295 int rc;
296 mm_segment_t seg;
297 296
298 if (!lsm) 297 if (!lsm)
299 return -ENODATA; 298 return -ENODATA;
300 299
301 /*
302 * "Switch to kernel segment" to allow copying from kernel space by
303 * copy_{to,from}_user().
304 */
305 seg = get_fs();
306 set_fs(KERNEL_DS);
307
308 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { 300 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
309 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", 301 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
310 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3); 302 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
@@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
406out_free: 398out_free:
407 kvfree(lmmk); 399 kvfree(lmmk);
408out: 400out:
409 set_fs(seg);
410 return rc; 401 return rc;
411} 402}
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 8ea01904c0ea..466517c7c8e6 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
19 19
20obj-$(CONFIG_VIDEO_LM3554) += lm3554.o 20obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
21 21
22ccflags-y += -Werror
23
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
index 1d7f7ab94cac..6b13a3a66e49 100644
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ b/drivers/staging/media/atomisp/i2c/imx/Makefile
@@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o
4 4
5ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o 5ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
6obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o 6obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
7
8ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index fceb9e9b881b..c9c0e1245858 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1,3 +1 @@
1obj-$(CONFIG_VIDEO_OV5693) += ov5693.o obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
2
3ccflags-y += -Werror
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 3fa7c1c1479f..f126a89a08e9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
351DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 351DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
352DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 352DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
353 353
354ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror 354ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
355 355
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 9e217b1361ea..fe4fe2440729 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -843,7 +843,10 @@ static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr,
843{ 843{
844 struct ci_hdrc *ci = dev_get_drvdata(dev); 844 struct ci_hdrc *ci = dev_get_drvdata(dev);
845 845
846 return sprintf(buf, "%s\n", ci_role(ci)->name); 846 if (ci->role != CI_ROLE_END)
847 return sprintf(buf, "%s\n", ci_role(ci)->name);
848
849 return 0;
847} 850}
848 851
849static ssize_t ci_role_store(struct device *dev, 852static ssize_t ci_role_store(struct device *dev,
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 6d23eede4d8c..1c31e8a08810 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data)
294{ 294{
295 struct ci_hdrc *ci = s->private; 295 struct ci_hdrc *ci = s->private;
296 296
297 seq_printf(s, "%s\n", ci_role(ci)->name); 297 if (ci->role != CI_ROLE_END)
298 seq_printf(s, "%s\n", ci_role(ci)->name);
298 299
299 return 0; 300 return 0;
300} 301}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 56d2d3213076..d68b125796f9 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1993,6 +1993,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
1993int ci_hdrc_gadget_init(struct ci_hdrc *ci) 1993int ci_hdrc_gadget_init(struct ci_hdrc *ci)
1994{ 1994{
1995 struct ci_role_driver *rdrv; 1995 struct ci_role_driver *rdrv;
1996 int ret;
1996 1997
1997 if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC)) 1998 if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
1998 return -ENXIO; 1999 return -ENXIO;
@@ -2005,7 +2006,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
2005 rdrv->stop = udc_id_switch_for_host; 2006 rdrv->stop = udc_id_switch_for_host;
2006 rdrv->irq = udc_irq; 2007 rdrv->irq = udc_irq;
2007 rdrv->name = "gadget"; 2008 rdrv->name = "gadget";
2008 ci->roles[CI_ROLE_GADGET] = rdrv;
2009 2009
2010 return udc_start(ci); 2010 ret = udc_start(ci);
2011 if (!ret)
2012 ci->roles[CI_ROLE_GADGET] = rdrv;
2013
2014 return ret;
2011} 2015}
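Note: the chipidea change above only stores rdrv in ci->roles[CI_ROLE_GADGET] after udc_start() succeeds, so a failed gadget init never leaves a registered but unusable role behind. A hedged sketch of that publish-after-success ordering, with invented names and a stub start routine:

struct role { const char *name; };

static struct role *registered_role;	/* what later lookups would see */

/* stand-in for udc_start(); pretend it can fail */
static int start_role(struct role *r)
{
	(void)r;
	return 0;
}

static int init_role(struct role *r)
{
	int ret = start_role(r);

	if (!ret)
		registered_role = r;	/* publish only after a successful start */

	return ret;
}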
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index e77a4ed4f021..9f4a0185dd60 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -108,6 +108,8 @@ struct imx_usbmisc {
108 const struct usbmisc_ops *ops; 108 const struct usbmisc_ops *ops;
109}; 109};
110 110
111static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
112
111static int usbmisc_imx25_init(struct imx_usbmisc_data *data) 113static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
112{ 114{
113 struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); 115 struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
@@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
242 val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN 244 val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
243 | MX53_USB_UHx_CTRL_ULPI_INT_EN; 245 | MX53_USB_UHx_CTRL_ULPI_INT_EN;
244 writel(val, reg); 246 writel(val, reg);
245 /* Disable internal 60Mhz clock */ 247 if (is_imx53_usbmisc(data)) {
246 reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; 248 /* Disable internal 60Mhz clock */
247 val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; 249 reg = usbmisc->base +
248 writel(val, reg); 250 MX53_USB_CLKONOFF_CTRL_OFFSET;
251 val = readl(reg) |
252 MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
253 writel(val, reg);
254 }
255
249 } 256 }
250 if (data->disable_oc) { 257 if (data->disable_oc) {
251 reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET; 258 reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
@@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
267 val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN 274 val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
268 | MX53_USB_UHx_CTRL_ULPI_INT_EN; 275 | MX53_USB_UHx_CTRL_ULPI_INT_EN;
269 writel(val, reg); 276 writel(val, reg);
270 /* Disable internal 60Mhz clock */ 277
271 reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; 278 if (is_imx53_usbmisc(data)) {
272 val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; 279 /* Disable internal 60Mhz clock */
273 writel(val, reg); 280 reg = usbmisc->base +
281 MX53_USB_CLKONOFF_CTRL_OFFSET;
282 val = readl(reg) |
283 MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
284 writel(val, reg);
285 }
274 } 286 }
275 if (data->disable_oc) { 287 if (data->disable_oc) {
276 reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET; 288 reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
@@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_usbmisc_ops = {
456 .init = usbmisc_imx27_init, 468 .init = usbmisc_imx27_init,
457}; 469};
458 470
471static const struct usbmisc_ops imx51_usbmisc_ops = {
472 .init = usbmisc_imx53_init,
473};
474
459static const struct usbmisc_ops imx53_usbmisc_ops = { 475static const struct usbmisc_ops imx53_usbmisc_ops = {
460 .init = usbmisc_imx53_init, 476 .init = usbmisc_imx53_init,
461}; 477};
@@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = {
479 .set_wakeup = usbmisc_imx7d_set_wakeup, 495 .set_wakeup = usbmisc_imx7d_set_wakeup,
480}; 496};
481 497
498static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
499{
500 struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
501
502 return usbmisc->ops == &imx53_usbmisc_ops;
503}
504
482int imx_usbmisc_init(struct imx_usbmisc_data *data) 505int imx_usbmisc_init(struct imx_usbmisc_data *data)
483{ 506{
484 struct imx_usbmisc *usbmisc; 507 struct imx_usbmisc *usbmisc;
@@ -536,7 +559,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
536 }, 559 },
537 { 560 {
538 .compatible = "fsl,imx51-usbmisc", 561 .compatible = "fsl,imx51-usbmisc",
539 .data = &imx53_usbmisc_ops, 562 .data = &imx51_usbmisc_ops,
540 }, 563 },
541 { 564 {
542 .compatible = "fsl,imx53-usbmisc", 565 .compatible = "fsl,imx53-usbmisc",
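Note: the usbmisc hunk above gives i.MX51 its own ops table and has the shared init routine tell the variants apart by which ops table the device was bound to (is_imx53_usbmisc() compares against &imx53_usbmisc_ops). A userspace sketch of that dispatch-by-ops-pointer check, with stand-in types and names:

#include <stdbool.h>
#include <stdio.h>

struct ops { int (*init)(void *dev); };

struct dev { const struct ops *ops; };

static int common_init(void *dev);

static const struct ops imx51_ops = { .init = common_init };
static const struct ops imx53_ops = { .init = common_init };

static bool is_imx53(const struct dev *d)
{
	return d->ops == &imx53_ops;	/* identity of the bound ops table */
}

static int common_init(void *dev)
{
	struct dev *d = dev;

	if (is_imx53(d))
		puts("imx53-only step");	/* e.g. gate the 60 MHz clock tweak */
	return 0;
}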
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 9cd8722f24f6..a3ffe97170ff 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -144,6 +144,8 @@ const struct of_device_id dwc2_of_match_table[] = {
144 { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, 144 { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
145 { .compatible = "snps,dwc2" }, 145 { .compatible = "snps,dwc2" },
146 { .compatible = "samsung,s3c6400-hsotg" }, 146 { .compatible = "samsung,s3c6400-hsotg" },
147 { .compatible = "amlogic,meson8-usb",
148 .data = dwc2_set_amlogic_params },
147 { .compatible = "amlogic,meson8b-usb", 149 { .compatible = "amlogic,meson8b-usb",
148 .data = dwc2_set_amlogic_params }, 150 .data = dwc2_set_amlogic_params },
149 { .compatible = "amlogic,meson-gxbb-usb", 151 { .compatible = "amlogic,meson-gxbb-usb",
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 4c8aacc232c0..74d57d6994da 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
396/* Caller must hold fsg->lock */ 396/* Caller must hold fsg->lock */
397static void wakeup_thread(struct fsg_common *common) 397static void wakeup_thread(struct fsg_common *common)
398{ 398{
399 smp_wmb(); /* ensure the write of bh->state is complete */ 399 /*
400 * Ensure the reading of thread_wakeup_needed
401 * and the writing of bh->state are completed
402 */
403 smp_mb();
400 /* Tell the main thread that something has happened */ 404 /* Tell the main thread that something has happened */
401 common->thread_wakeup_needed = 1; 405 common->thread_wakeup_needed = 1;
402 if (common->thread_task) 406 if (common->thread_task)
@@ -627,7 +631,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
627 } 631 }
628 __set_current_state(TASK_RUNNING); 632 __set_current_state(TASK_RUNNING);
629 common->thread_wakeup_needed = 0; 633 common->thread_wakeup_needed = 0;
630 smp_rmb(); /* ensure the latest bh->state is visible */ 634
635 /*
636 * Ensure the writing of thread_wakeup_needed
637 * and the reading of bh->state are completed
638 */
639 smp_mb();
631 return rc; 640 return rc;
632} 641}
633 642
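Note: the f_mass_storage change above upgrades the wake/sleep barriers from smp_wmb()/smp_rmb() to smp_mb() on both sides, since the sleeper clears thread_wakeup_needed (a store) and then reads bh->state (a load), an ordering the weaker barriers do not provide. A loose userspace analogue using C11 fences, not a rendering of the kernel primitives:

#include <stdatomic.h>

static atomic_int state;		/* stands in for bh->state */
static atomic_int wakeup_needed;	/* stands in for thread_wakeup_needed */

static void waker(void)
{
	atomic_store_explicit(&state, 1, memory_order_relaxed);
	/* full barrier: make the state update visible before the flag is set */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_store_explicit(&wakeup_needed, 1, memory_order_relaxed);
}

static int sleeper(void)
{
	atomic_store_explicit(&wakeup_needed, 0, memory_order_relaxed);
	/* full barrier: order the flag clear (store) before the state read (load) */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&state, memory_order_relaxed);
}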
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 5a2d845fb1a6..cd4c88529721 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -623,7 +623,6 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
623{ 623{
624 usb3_disconnect(usb3); 624 usb3_disconnect(usb3);
625 usb3_write(usb3, 0, USB3_P0_INT_ENA); 625 usb3_write(usb3, 0, USB3_P0_INT_ENA);
626 usb3_write(usb3, 0, USB3_PN_INT_ENA);
627 usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA); 626 usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
628 usb3_write(usb3, 0, USB3_USB_INT_ENA_1); 627 usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
629 usb3_write(usb3, 0, USB3_USB_INT_ENA_2); 628 usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
@@ -1475,7 +1474,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3,
1475 struct renesas_usb3_request *usb3_req, 1474 struct renesas_usb3_request *usb3_req,
1476 int status) 1475 int status)
1477{ 1476{
1478 usb3_pn_stop(usb3); 1477 unsigned long flags;
1478
1479 spin_lock_irqsave(&usb3->lock, flags);
1480 if (usb3_pn_change(usb3, usb3_ep->num))
1481 usb3_pn_stop(usb3);
1482 spin_unlock_irqrestore(&usb3->lock, flags);
1483
1479 usb3_disable_pipe_irq(usb3, usb3_ep->num); 1484 usb3_disable_pipe_irq(usb3, usb3_ep->num);
1480 usb3_request_done(usb3_ep, usb3_req, status); 1485 usb3_request_done(usb3_ep, usb3_req, status);
1481 1486
@@ -1504,30 +1509,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num)
1504{ 1509{
1505 struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); 1510 struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
1506 struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); 1511 struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
1512 bool done = false;
1507 1513
1508 if (!usb3_req) 1514 if (!usb3_req)
1509 return; 1515 return;
1510 1516
1517 spin_lock(&usb3->lock);
1518 if (usb3_pn_change(usb3, num))
1519 goto out;
1520
1511 if (usb3_ep->dir_in) { 1521 if (usb3_ep->dir_in) {
1512 /* Do not stop the IN pipe here to detect LSTTR interrupt */ 1522 /* Do not stop the IN pipe here to detect LSTTR interrupt */
1513 if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE)) 1523 if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
1514 usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); 1524 usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
1515 } else { 1525 } else {
1516 if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) 1526 if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
1517 usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); 1527 done = true;
1518 } 1528 }
1529
1530out:
1531 /* need to unlock because usb3_request_done_pipen() locks it */
1532 spin_unlock(&usb3->lock);
1533
1534 if (done)
1535 usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
1519} 1536}
1520 1537
1521static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) 1538static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
1522{ 1539{
1523 u32 pn_int_sta; 1540 u32 pn_int_sta;
1524 1541
1525 if (usb3_pn_change(usb3, num) < 0) 1542 spin_lock(&usb3->lock);
1543 if (usb3_pn_change(usb3, num) < 0) {
1544 spin_unlock(&usb3->lock);
1526 return; 1545 return;
1546 }
1527 1547
1528 pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA); 1548 pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
1529 pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); 1549 pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
1530 usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); 1550 usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
1551 spin_unlock(&usb3->lock);
1531 if (pn_int_sta & PN_INT_LSTTR) 1552 if (pn_int_sta & PN_INT_LSTTR)
1532 usb3_irq_epc_pipen_lsttr(usb3, num); 1553 usb3_irq_epc_pipen_lsttr(usb3, num);
1533 if (pn_int_sta & PN_INT_BFRDY) 1554 if (pn_int_sta & PN_INT_BFRDY)
@@ -1660,6 +1681,7 @@ static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep)
1660 1681
1661 spin_lock_irqsave(&usb3->lock, flags); 1682 spin_lock_irqsave(&usb3->lock, flags);
1662 if (!usb3_pn_change(usb3, usb3_ep->num)) { 1683 if (!usb3_pn_change(usb3, usb3_ep->num)) {
1684 usb3_write(usb3, 0, USB3_PN_INT_ENA);
1663 usb3_write(usb3, 0, USB3_PN_RAMMAP); 1685 usb3_write(usb3, 0, USB3_PN_RAMMAP);
1664 usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON); 1686 usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON);
1665 } 1687 }
@@ -1799,6 +1821,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
1799 /* hook up the driver */ 1821 /* hook up the driver */
1800 usb3->driver = driver; 1822 usb3->driver = driver;
1801 1823
1824 pm_runtime_enable(usb3_to_dev(usb3));
1825 pm_runtime_get_sync(usb3_to_dev(usb3));
1826
1802 renesas_usb3_init_controller(usb3); 1827 renesas_usb3_init_controller(usb3);
1803 1828
1804 return 0; 1829 return 0;
@@ -1807,14 +1832,14 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
1807static int renesas_usb3_stop(struct usb_gadget *gadget) 1832static int renesas_usb3_stop(struct usb_gadget *gadget)
1808{ 1833{
1809 struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); 1834 struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
1810 unsigned long flags;
1811 1835
1812 spin_lock_irqsave(&usb3->lock, flags);
1813 usb3->softconnect = false; 1836 usb3->softconnect = false;
1814 usb3->gadget.speed = USB_SPEED_UNKNOWN; 1837 usb3->gadget.speed = USB_SPEED_UNKNOWN;
1815 usb3->driver = NULL; 1838 usb3->driver = NULL;
1816 renesas_usb3_stop_controller(usb3); 1839 renesas_usb3_stop_controller(usb3);
1817 spin_unlock_irqrestore(&usb3->lock, flags); 1840
1841 pm_runtime_put(usb3_to_dev(usb3));
1842 pm_runtime_disable(usb3_to_dev(usb3));
1818 1843
1819 return 0; 1844 return 0;
1820} 1845}
@@ -1891,9 +1916,6 @@ static int renesas_usb3_remove(struct platform_device *pdev)
1891 1916
1892 device_remove_file(&pdev->dev, &dev_attr_role); 1917 device_remove_file(&pdev->dev, &dev_attr_role);
1893 1918
1894 pm_runtime_put(&pdev->dev);
1895 pm_runtime_disable(&pdev->dev);
1896
1897 usb_del_gadget_udc(&usb3->gadget); 1919 usb_del_gadget_udc(&usb3->gadget);
1898 1920
1899 __renesas_usb3_ep_free_request(usb3->ep0_req); 1921 __renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2099,9 +2121,6 @@ static int renesas_usb3_probe(struct platform_device *pdev)
2099 2121
2100 usb3->workaround_for_vbus = priv->workaround_for_vbus; 2122 usb3->workaround_for_vbus = priv->workaround_for_vbus;
2101 2123
2102 pm_runtime_enable(&pdev->dev);
2103 pm_runtime_get_sync(&pdev->dev);
2104
2105 dev_info(&pdev->dev, "probed\n"); 2124 dev_info(&pdev->dev, "probed\n");
2106 2125
2107 return 0; 2126 return 0;
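Note: in the renesas_usb3 hunk above, usb3_irq_epc_pipen_bfrdy() now only records that the request finished while holding usb3->lock and calls usb3_request_done_pipen() after unlocking, because that routine takes the same lock itself. A userspace sketch of the defer-the-callback pattern, with invented names and a stub helper:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the read-pipe helper reporting that the transfer finished */
static bool read_pipe_done(void)
{
	return true;
}

static void request_done(void)
{
	pthread_mutex_lock(&lock);	/* would deadlock if the caller still held it */
	/* ... complete the request ... */
	pthread_mutex_unlock(&lock);
}

static void irq_bfrdy(void)
{
	bool done = false;

	pthread_mutex_lock(&lock);
	if (read_pipe_done())
		done = true;		/* only record the decision under the lock */
	pthread_mutex_unlock(&lock);

	if (done)
		request_done();		/* safe: the lock is no longer held */
}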
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9c7ee26ef388..bc6a9be2ccc5 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -245,6 +245,11 @@ static int dsps_check_status(struct musb *musb, void *unused)
245 dsps_mod_timer_optional(glue); 245 dsps_mod_timer_optional(glue);
246 break; 246 break;
247 case OTG_STATE_A_WAIT_BCON: 247 case OTG_STATE_A_WAIT_BCON:
248 /* keep VBUS on for host-only mode */
249 if (musb->port_mode == MUSB_PORT_MODE_HOST) {
250 dsps_mod_timer_optional(glue);
251 break;
252 }
248 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 253 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
249 skip_session = 1; 254 skip_session = 1;
250 /* fall */ 255 /* fall */
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 7a92a5e1d40c..feca75b07fdd 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
362 st->global_error = 1; 362 st->global_error = 1;
363 } 363 }
364 } 364 }
365 st->va += PAGE_SIZE * nr; 365 st->va += XEN_PAGE_SIZE * nr;
366 st->index += nr; 366 st->index += nr / XEN_PFN_PER_PAGE;
367 367
368 return 0; 368 return 0;
369} 369}
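Note: the privcmd fix above advances st->va by XEN_PAGE_SIZE per frame and st->index by nr / XEN_PFN_PER_PAGE, which matters when the CPU page (e.g. 64 KiB on arm64) spans several 4 KiB Xen frames. A small arithmetic sketch with illustrative constants:

#include <stdio.h>

#define CPU_PAGE_SIZE	(64 * 1024)
#define XEN_PAGE_SIZE	(4 * 1024)
#define XEN_PFN_PER_PAGE (CPU_PAGE_SIZE / XEN_PAGE_SIZE)	/* 16 */

int main(void)
{
	unsigned long va = 0, index = 0;
	int nr = 32;					/* Xen frames in this batch */

	va += (unsigned long)XEN_PAGE_SIZE * nr;	/* 32 * 4 KiB of VA */
	index += nr / XEN_PFN_PER_PAGE;			/* 2 CPU pages */

	printf("va advance: %lu bytes, index advance: %lu pages\n", va, index);
	return 0;
}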
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 643c70d2b2e6..4f8f75d9e839 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2563,7 +2563,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
2563static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, 2563static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
2564 unsigned num_items) 2564 unsigned num_items)
2565{ 2565{
2566 return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 2566 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
2567} 2567}
2568 2568
2569/* 2569/*
@@ -2573,7 +2573,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
2573static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, 2573static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
2574 unsigned num_items) 2574 unsigned num_items)
2575{ 2575{
2576 return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; 2576 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
2577} 2577}
2578 2578
2579int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, 2579int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
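Note: the ctree.h hunk above casts fs_info->nodesize to u64 before the multiplication; without it the product of 32-bit operands wraps before being widened to the u64 return type. A short demonstration with illustrative numbers (a 64 KiB nodesize and a large item count):

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL 8

static uint64_t calc_wrong(uint32_t nodesize, unsigned num_items)
{
	return nodesize * MAX_LEVEL * 2 * num_items;	/* 32-bit multiply, wraps */
}

static uint64_t calc_fixed(uint32_t nodesize, unsigned num_items)
{
	return (uint64_t)nodesize * MAX_LEVEL * 2 * num_items;
}

int main(void)
{
	printf("%llu vs %llu\n",
	       (unsigned long long)calc_wrong(65536, 70000),
	       (unsigned long long)calc_fixed(65536, 70000));
	return 0;
}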
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 60a750678a82..c24d615e3d7f 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -468,7 +468,7 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
468 468
469 if (btrfs_dir_name_len(leaf, dir_item) > namelen) { 469 if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
470 btrfs_crit(fs_info, "invalid dir item name len: %u", 470 btrfs_crit(fs_info, "invalid dir item name len: %u",
471 (unsigned)btrfs_dir_data_len(leaf, dir_item)); 471 (unsigned)btrfs_dir_name_len(leaf, dir_item));
472 return 1; 472 return 1;
473 } 473 }
474 474
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8685d67185d0..5f678dcb20e6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3467,10 +3467,12 @@ static int write_dev_supers(struct btrfs_device *device,
3467 * we fua the first super. The others we allow 3467 * we fua the first super. The others we allow
3468 * to go down lazy. 3468 * to go down lazy.
3469 */ 3469 */
3470 if (i == 0) 3470 if (i == 0) {
3471 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh); 3471 ret = btrfsic_submit_bh(REQ_OP_WRITE,
3472 else 3472 REQ_SYNC | REQ_FUA, bh);
3473 } else {
3473 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); 3474 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
3475 }
3474 if (ret) 3476 if (ret)
3475 errors++; 3477 errors++;
3476 } 3478 }
@@ -3535,7 +3537,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
3535 3537
3536 bio->bi_end_io = btrfs_end_empty_barrier; 3538 bio->bi_end_io = btrfs_end_empty_barrier;
3537 bio->bi_bdev = device->bdev; 3539 bio->bi_bdev = device->bdev;
3538 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 3540 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
3539 init_completion(&device->flush_wait); 3541 init_completion(&device->flush_wait);
3540 bio->bi_private = &device->flush_wait; 3542 bio->bi_private = &device->flush_wait;
3541 device->flush_bio = bio; 3543 device->flush_bio = bio;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e390451c72e6..33d979e9ea2a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3993,6 +3993,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3993 info->space_info_kobj, "%s", 3993 info->space_info_kobj, "%s",
3994 alloc_name(found->flags)); 3994 alloc_name(found->flags));
3995 if (ret) { 3995 if (ret) {
3996 percpu_counter_destroy(&found->total_bytes_pinned);
3996 kfree(found); 3997 kfree(found);
3997 return ret; 3998 return ret;
3998 } 3999 }
@@ -4844,7 +4845,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
4844 spin_unlock(&delayed_rsv->lock); 4845 spin_unlock(&delayed_rsv->lock);
4845 4846
4846commit: 4847commit:
4847 trans = btrfs_join_transaction(fs_info->fs_root); 4848 trans = btrfs_join_transaction(fs_info->extent_root);
4848 if (IS_ERR(trans)) 4849 if (IS_ERR(trans))
4849 return -ENOSPC; 4850 return -ENOSPC;
4850 4851
@@ -4862,7 +4863,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
4862 struct btrfs_space_info *space_info, u64 num_bytes, 4863 struct btrfs_space_info *space_info, u64 num_bytes,
4863 u64 orig_bytes, int state) 4864 u64 orig_bytes, int state)
4864{ 4865{
4865 struct btrfs_root *root = fs_info->fs_root; 4866 struct btrfs_root *root = fs_info->extent_root;
4866 struct btrfs_trans_handle *trans; 4867 struct btrfs_trans_handle *trans;
4867 int nr; 4868 int nr;
4868 int ret = 0; 4869 int ret = 0;
@@ -5062,7 +5063,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5062 int flush_state = FLUSH_DELAYED_ITEMS_NR; 5063 int flush_state = FLUSH_DELAYED_ITEMS_NR;
5063 5064
5064 spin_lock(&space_info->lock); 5065 spin_lock(&space_info->lock);
5065 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root, 5066 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
5066 space_info); 5067 space_info);
5067 if (!to_reclaim) { 5068 if (!to_reclaim) {
5068 spin_unlock(&space_info->lock); 5069 spin_unlock(&space_info->lock);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d8da3edf2ac3..d3619e010005 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2458,7 +2458,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2458 if (!uptodate) { 2458 if (!uptodate) {
2459 ClearPageUptodate(page); 2459 ClearPageUptodate(page);
2460 SetPageError(page); 2460 SetPageError(page);
2461 ret = ret < 0 ? ret : -EIO; 2461 ret = err < 0 ? err : -EIO;
2462 mapping_set_error(page->mapping, ret); 2462 mapping_set_error(page->mapping, ret);
2463 } 2463 }
2464} 2464}
@@ -4377,6 +4377,123 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
4377 return NULL; 4377 return NULL;
4378} 4378}
4379 4379
4380/*
4381 * To cache previous fiemap extent
4382 *
 4383 * Will be used for merging fiemap extents
4384 */
4385struct fiemap_cache {
4386 u64 offset;
4387 u64 phys;
4388 u64 len;
4389 u32 flags;
4390 bool cached;
4391};
4392
4393/*
4394 * Helper to submit fiemap extent.
4395 *
4396 * Will try to merge current fiemap extent specified by @offset, @phys,
4397 * @len and @flags with cached one.
4398 * And only when we fails to merge, cached one will be submitted as
4399 * fiemap extent.
4400 *
4401 * Return value is the same as fiemap_fill_next_extent().
4402 */
4403static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
4404 struct fiemap_cache *cache,
4405 u64 offset, u64 phys, u64 len, u32 flags)
4406{
4407 int ret = 0;
4408
4409 if (!cache->cached)
4410 goto assign;
4411
4412 /*
4413 * Sanity check, extent_fiemap() should have ensured that new
 4414 * fiemap extent won't overlap with cached one.
4415 * Not recoverable.
4416 *
4417 * NOTE: Physical address can overlap, due to compression
4418 */
4419 if (cache->offset + cache->len > offset) {
4420 WARN_ON(1);
4421 return -EINVAL;
4422 }
4423
4424 /*
4425 * Only merges fiemap extents if
4426 * 1) Their logical addresses are continuous
4427 *
4428 * 2) Their physical addresses are continuous
4429 * So truly compressed (physical size smaller than logical size)
4430 * extents won't get merged with each other
4431 *
 4432 * 3) They share the same flags, except FIEMAP_EXTENT_LAST
 4433 * So a regular extent won't get merged with a prealloc extent
4434 */
4435 if (cache->offset + cache->len == offset &&
4436 cache->phys + cache->len == phys &&
4437 (cache->flags & ~FIEMAP_EXTENT_LAST) ==
4438 (flags & ~FIEMAP_EXTENT_LAST)) {
4439 cache->len += len;
4440 cache->flags |= flags;
4441 goto try_submit_last;
4442 }
4443
4444 /* Not mergeable, need to submit cached one */
4445 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4446 cache->len, cache->flags);
4447 cache->cached = false;
4448 if (ret)
4449 return ret;
4450assign:
4451 cache->cached = true;
4452 cache->offset = offset;
4453 cache->phys = phys;
4454 cache->len = len;
4455 cache->flags = flags;
4456try_submit_last:
4457 if (cache->flags & FIEMAP_EXTENT_LAST) {
4458 ret = fiemap_fill_next_extent(fieinfo, cache->offset,
4459 cache->phys, cache->len, cache->flags);
4460 cache->cached = false;
4461 }
4462 return ret;
4463}
4464
4465/*
4466 * Sanity check for fiemap cache
4467 *
 4468 * All cached fiemap extents should be submitted by emit_fiemap_extent().
 4469 * Iteration should be terminated either by the last fiemap extent or by
 4470 * fieinfo->fi_extents_max.
 4471 * So no cached fiemap extent should remain here.
4472 */
4473static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
4474 struct fiemap_extent_info *fieinfo,
4475 struct fiemap_cache *cache)
4476{
4477 int ret;
4478
4479 if (!cache->cached)
4480 return 0;
4481
 4482 /* Small and recoverable problem, only to inform the developer */
4483#ifdef CONFIG_BTRFS_DEBUG
4484 WARN_ON(1);
4485#endif
4486 btrfs_warn(fs_info,
4487 "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
4488 cache->offset, cache->phys, cache->len, cache->flags);
4489 ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
4490 cache->len, cache->flags);
4491 cache->cached = false;
4492 if (ret > 0)
4493 ret = 0;
4494 return ret;
4495}
4496
4380int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 4497int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4381 __u64 start, __u64 len, get_extent_t *get_extent) 4498 __u64 start, __u64 len, get_extent_t *get_extent)
4382{ 4499{
@@ -4394,6 +4511,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4394 struct extent_state *cached_state = NULL; 4511 struct extent_state *cached_state = NULL;
4395 struct btrfs_path *path; 4512 struct btrfs_path *path;
4396 struct btrfs_root *root = BTRFS_I(inode)->root; 4513 struct btrfs_root *root = BTRFS_I(inode)->root;
4514 struct fiemap_cache cache = { 0 };
4397 int end = 0; 4515 int end = 0;
4398 u64 em_start = 0; 4516 u64 em_start = 0;
4399 u64 em_len = 0; 4517 u64 em_len = 0;
@@ -4573,8 +4691,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4573 flags |= FIEMAP_EXTENT_LAST; 4691 flags |= FIEMAP_EXTENT_LAST;
4574 end = 1; 4692 end = 1;
4575 } 4693 }
4576 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 4694 ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
4577 em_len, flags); 4695 em_len, flags);
4578 if (ret) { 4696 if (ret) {
4579 if (ret == 1) 4697 if (ret == 1)
4580 ret = 0; 4698 ret = 0;
@@ -4582,6 +4700,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4582 } 4700 }
4583 } 4701 }
4584out_free: 4702out_free:
4703 if (!ret)
4704 ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
4585 free_extent_map(em); 4705 free_extent_map(em);
4586out: 4706out:
4587 btrfs_free_path(path); 4707 btrfs_free_path(path);
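Note: the extent_io.c addition above caches the previous fiemap extent and only emits it once the next extent cannot be merged (contiguity in both logical and physical address, same flags). A simplified userspace sketch of that merge rule, with an invented emit() standing in for fiemap_fill_next_extent() and the FIEMAP_EXTENT_LAST handling omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cache {
	uint64_t offset, phys, len;
	unsigned flags;
	bool cached;
};

static void emit(const struct cache *c)
{
	printf("extent: off=%llu phys=%llu len=%llu flags=0x%x\n",
	       (unsigned long long)c->offset, (unsigned long long)c->phys,
	       (unsigned long long)c->len, c->flags);
}

static void add_extent(struct cache *c, uint64_t offset, uint64_t phys,
		       uint64_t len, unsigned flags)
{
	if (c->cached && c->offset + c->len == offset &&
	    c->phys + c->len == phys && c->flags == flags) {
		c->len += len;		/* mergeable: just extend the cache */
		return;
	}
	if (c->cached)
		emit(c);		/* not mergeable: flush the previous extent */
	c->offset = offset;
	c->phys = phys;
	c->len = len;
	c->flags = flags;
	c->cached = true;
}

int main(void)
{
	struct cache c = { 0 };

	add_extent(&c, 0, 4096, 4096, 0);
	add_extent(&c, 4096, 8192, 4096, 0);	/* merges with the first */
	add_extent(&c, 16384, 32768, 4096, 0);	/* not contiguous: flushes */
	if (c.cached)
		emit(&c);			/* flush the tail */
	return 0;
}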
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 17cbe9306faf..ef3c98c527c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2952,7 +2952,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2952 2952
2953 ret = test_range_bit(io_tree, ordered_extent->file_offset, 2953 ret = test_range_bit(io_tree, ordered_extent->file_offset,
2954 ordered_extent->file_offset + ordered_extent->len - 1, 2954 ordered_extent->file_offset + ordered_extent->len - 1,
2955 EXTENT_DEFRAG, 1, cached_state); 2955 EXTENT_DEFRAG, 0, cached_state);
2956 if (ret) { 2956 if (ret) {
2957 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2957 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2958 if (0 && last_snapshot >= BTRFS_I(inode)->generation) 2958 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
@@ -7483,8 +7483,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7483 int found = false; 7483 int found = false;
7484 void **pagep = NULL; 7484 void **pagep = NULL;
7485 struct page *page = NULL; 7485 struct page *page = NULL;
7486 int start_idx; 7486 unsigned long start_idx;
7487 int end_idx; 7487 unsigned long end_idx;
7488 7488
7489 start_idx = start >> PAGE_SHIFT; 7489 start_idx = start >> PAGE_SHIFT;
7490 7490
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index fd389935ecd1..3ec0e46de95f 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> 4 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
5 */ 5 */
6 6
7#include <linux/quotaops.h>
7#include "ext4_jbd2.h" 8#include "ext4_jbd2.h"
8#include "ext4.h" 9#include "ext4.h"
9#include "xattr.h" 10#include "xattr.h"
@@ -232,6 +233,9 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
232 handle_t *handle; 233 handle_t *handle;
233 int error, retries = 0; 234 int error, retries = 0;
234 235
236 error = dquot_initialize(inode);
237 if (error)
238 return error;
235retry: 239retry:
236 handle = ext4_journal_start(inode, EXT4_HT_XATTR, 240 handle = ext4_journal_start(inode, EXT4_HT_XATTR,
237 ext4_jbd2_credits_xattr(inode)); 241 ext4_jbd2_credits_xattr(inode));
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8e8046104f4d..32191548abed 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2523,7 +2523,6 @@ extern int ext4_search_dir(struct buffer_head *bh,
2523 int buf_size, 2523 int buf_size,
2524 struct inode *dir, 2524 struct inode *dir,
2525 struct ext4_filename *fname, 2525 struct ext4_filename *fname,
2526 const struct qstr *d_name,
2527 unsigned int offset, 2526 unsigned int offset,
2528 struct ext4_dir_entry_2 **res_dir); 2527 struct ext4_dir_entry_2 **res_dir);
2529extern int ext4_generic_delete_entry(handle_t *handle, 2528extern int ext4_generic_delete_entry(handle_t *handle,
@@ -3007,7 +3006,6 @@ extern int htree_inlinedir_to_tree(struct file *dir_file,
3007 int *has_inline_data); 3006 int *has_inline_data);
3008extern struct buffer_head *ext4_find_inline_entry(struct inode *dir, 3007extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
3009 struct ext4_filename *fname, 3008 struct ext4_filename *fname,
3010 const struct qstr *d_name,
3011 struct ext4_dir_entry_2 **res_dir, 3009 struct ext4_dir_entry_2 **res_dir,
3012 int *has_inline_data); 3010 int *has_inline_data);
3013extern int ext4_delete_inline_entry(handle_t *handle, 3011extern int ext4_delete_inline_entry(handle_t *handle,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2a97dff87b96..3e36508610b7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3413 struct ext4_sb_info *sbi; 3413 struct ext4_sb_info *sbi;
3414 struct ext4_extent_header *eh; 3414 struct ext4_extent_header *eh;
3415 struct ext4_map_blocks split_map; 3415 struct ext4_map_blocks split_map;
3416 struct ext4_extent zero_ex; 3416 struct ext4_extent zero_ex1, zero_ex2;
3417 struct ext4_extent *ex, *abut_ex; 3417 struct ext4_extent *ex, *abut_ex;
3418 ext4_lblk_t ee_block, eof_block; 3418 ext4_lblk_t ee_block, eof_block;
3419 unsigned int ee_len, depth, map_len = map->m_len; 3419 unsigned int ee_len, depth, map_len = map->m_len;
3420 int allocated = 0, max_zeroout = 0; 3420 int allocated = 0, max_zeroout = 0;
3421 int err = 0; 3421 int err = 0;
3422 int split_flag = 0; 3422 int split_flag = EXT4_EXT_DATA_VALID2;
3423 3423
3424 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 3424 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
3425 "block %llu, max_blocks %u\n", inode->i_ino, 3425 "block %llu, max_blocks %u\n", inode->i_ino,
@@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3436 ex = path[depth].p_ext; 3436 ex = path[depth].p_ext;
3437 ee_block = le32_to_cpu(ex->ee_block); 3437 ee_block = le32_to_cpu(ex->ee_block);
3438 ee_len = ext4_ext_get_actual_len(ex); 3438 ee_len = ext4_ext_get_actual_len(ex);
3439 zero_ex.ee_len = 0; 3439 zero_ex1.ee_len = 0;
3440 zero_ex2.ee_len = 0;
3440 3441
3441 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3442 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3442 3443
@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3576 if (ext4_encrypted_inode(inode)) 3577 if (ext4_encrypted_inode(inode))
3577 max_zeroout = 0; 3578 max_zeroout = 0;
3578 3579
3579 /* If extent is less than s_max_zeroout_kb, zeroout directly */
3580 if (max_zeroout && (ee_len <= max_zeroout)) {
3581 err = ext4_ext_zeroout(inode, ex);
3582 if (err)
3583 goto out;
3584 zero_ex.ee_block = ex->ee_block;
3585 zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3586 ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
3587
3588 err = ext4_ext_get_access(handle, inode, path + depth);
3589 if (err)
3590 goto out;
3591 ext4_ext_mark_initialized(ex);
3592 ext4_ext_try_to_merge(handle, inode, path, ex);
3593 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3594 goto out;
3595 }
3596
3597 /* 3580 /*
3598 * four cases: 3581 * five cases:
3599 * 1. split the extent into three extents. 3582 * 1. split the extent into three extents.
3600 * 2. split the extent into two extents, zeroout the first half. 3583 * 2. split the extent into two extents, zeroout the head of the first
3601 * 3. split the extent into two extents, zeroout the second half. 3584 * extent.
3585 * 3. split the extent into two extents, zeroout the tail of the second
3586 * extent.
3602 * 4. split the extent into two extents with out zeroout. 3587 * 4. split the extent into two extents with out zeroout.
3588 * 5. no splitting needed, just possibly zeroout the head and / or the
3589 * tail of the extent.
3603 */ 3590 */
3604 split_map.m_lblk = map->m_lblk; 3591 split_map.m_lblk = map->m_lblk;
3605 split_map.m_len = map->m_len; 3592 split_map.m_len = map->m_len;
3606 3593
3607 if (max_zeroout && (allocated > map->m_len)) { 3594 if (max_zeroout && (allocated > split_map.m_len)) {
3608 if (allocated <= max_zeroout) { 3595 if (allocated <= max_zeroout) {
3609 /* case 3 */ 3596 /* case 3 or 5 */
3610 zero_ex.ee_block = 3597 zero_ex1.ee_block =
3611 cpu_to_le32(map->m_lblk); 3598 cpu_to_le32(split_map.m_lblk +
3612 zero_ex.ee_len = cpu_to_le16(allocated); 3599 split_map.m_len);
3613 ext4_ext_store_pblock(&zero_ex, 3600 zero_ex1.ee_len =
3614 ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3601 cpu_to_le16(allocated - split_map.m_len);
3615 err = ext4_ext_zeroout(inode, &zero_ex); 3602 ext4_ext_store_pblock(&zero_ex1,
3603 ext4_ext_pblock(ex) + split_map.m_lblk +
3604 split_map.m_len - ee_block);
3605 err = ext4_ext_zeroout(inode, &zero_ex1);
3616 if (err) 3606 if (err)
3617 goto out; 3607 goto out;
3618 split_map.m_lblk = map->m_lblk;
3619 split_map.m_len = allocated; 3608 split_map.m_len = allocated;
3620 } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3609 }
3621 /* case 2 */ 3610 if (split_map.m_lblk - ee_block + split_map.m_len <
3622 if (map->m_lblk != ee_block) { 3611 max_zeroout) {
3623 zero_ex.ee_block = ex->ee_block; 3612 /* case 2 or 5 */
3624 zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3613 if (split_map.m_lblk != ee_block) {
3614 zero_ex2.ee_block = ex->ee_block;
3615 zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
3625 ee_block); 3616 ee_block);
3626 ext4_ext_store_pblock(&zero_ex, 3617 ext4_ext_store_pblock(&zero_ex2,
3627 ext4_ext_pblock(ex)); 3618 ext4_ext_pblock(ex));
3628 err = ext4_ext_zeroout(inode, &zero_ex); 3619 err = ext4_ext_zeroout(inode, &zero_ex2);
3629 if (err) 3620 if (err)
3630 goto out; 3621 goto out;
3631 } 3622 }
3632 3623
3624 split_map.m_len += split_map.m_lblk - ee_block;
3633 split_map.m_lblk = ee_block; 3625 split_map.m_lblk = ee_block;
3634 split_map.m_len = map->m_lblk - ee_block + map->m_len;
3635 allocated = map->m_len; 3626 allocated = map->m_len;
3636 } 3627 }
3637 } 3628 }
@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3642 err = 0; 3633 err = 0;
3643out: 3634out:
3644 /* If we have gotten a failure, don't zero out status tree */ 3635 /* If we have gotten a failure, don't zero out status tree */
3645 if (!err) 3636 if (!err) {
3646 err = ext4_zeroout_es(inode, &zero_ex); 3637 err = ext4_zeroout_es(inode, &zero_ex1);
3638 if (!err)
3639 err = ext4_zeroout_es(inode, &zero_ex2);
3640 }
3647 return err ? err : allocated; 3641 return err ? err : allocated;
3648} 3642}
3649 3643
@@ -4883,6 +4877,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4883 4877
4884 /* Zero out partial block at the edges of the range */ 4878 /* Zero out partial block at the edges of the range */
4885 ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4879 ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4880 if (ret >= 0)
4881 ext4_update_inode_fsync_trans(handle, inode, 1);
4886 4882
4887 if (file->f_flags & O_SYNC) 4883 if (file->f_flags & O_SYNC)
4888 ext4_handle_sync(handle); 4884 ext4_handle_sync(handle);
@@ -5569,6 +5565,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
5569 ext4_handle_sync(handle); 5565 ext4_handle_sync(handle);
5570 inode->i_mtime = inode->i_ctime = current_time(inode); 5566 inode->i_mtime = inode->i_ctime = current_time(inode);
5571 ext4_mark_inode_dirty(handle, inode); 5567 ext4_mark_inode_dirty(handle, inode);
5568 ext4_update_inode_fsync_trans(handle, inode, 1);
5572 5569
5573out_stop: 5570out_stop:
5574 ext4_journal_stop(handle); 5571 ext4_journal_stop(handle);
@@ -5742,6 +5739,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
5742 up_write(&EXT4_I(inode)->i_data_sem); 5739 up_write(&EXT4_I(inode)->i_data_sem);
5743 if (IS_SYNC(inode)) 5740 if (IS_SYNC(inode))
5744 ext4_handle_sync(handle); 5741 ext4_handle_sync(handle);
5742 if (ret >= 0)
5743 ext4_update_inode_fsync_trans(handle, inode, 1);
5745 5744
5746out_stop: 5745out_stop:
5747 ext4_journal_stop(handle); 5746 ext4_journal_stop(handle);
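
Editor's note on the zero_ex1/zero_ex2 split above: the tail of the extent (case 3/5) and its head (case 2/5) are now zeroed and tracked independently instead of reusing one scratch extent. A minimal, standalone sketch of the arithmetic with made-up numbers (userspace names, not the kernel's struct ext4_extent handling):

#include <stdio.h>

int main(void)
{
	/* unwritten extent [100, 112), write hits [104, 108) */
	unsigned int ee_block = 100, ee_len = 12;
	unsigned int m_lblk = 104, m_len = 4;
	unsigned int allocated = ee_len - (m_lblk - ee_block);	/* 8 */

	unsigned int tail = allocated - m_len;	/* zero_ex1: case 3/5 */
	unsigned int head = m_lblk - ee_block;	/* zero_ex2: case 2/5 */

	printf("zero tail: %u blocks at %u\n", tail, m_lblk + m_len);
	printf("zero head: %u blocks at %u\n", head, ee_block);
	/* head + write + tail == ee_len, so nothing is split: "case 5" */
	return 0;
}
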
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 831fd6beebf0..02ce7e7bbdf5 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -474,57 +474,37 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
474 endoff = (loff_t)end_blk << blkbits; 474 endoff = (loff_t)end_blk << blkbits;
475 475
476 index = startoff >> PAGE_SHIFT; 476 index = startoff >> PAGE_SHIFT;
477 end = endoff >> PAGE_SHIFT; 477 end = (endoff - 1) >> PAGE_SHIFT;
478 478
479 pagevec_init(&pvec, 0); 479 pagevec_init(&pvec, 0);
480 do { 480 do {
481 int i, num; 481 int i, num;
482 unsigned long nr_pages; 482 unsigned long nr_pages;
483 483
484 num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); 484 num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
485 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, 485 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
486 (pgoff_t)num); 486 (pgoff_t)num);
487 if (nr_pages == 0) { 487 if (nr_pages == 0)
488 if (whence == SEEK_DATA)
489 break;
490
491 BUG_ON(whence != SEEK_HOLE);
492 /*
493 * If this is the first time to go into the loop and
494 * offset is not beyond the end offset, it will be a
495 * hole at this offset
496 */
497 if (lastoff == startoff || lastoff < endoff)
498 found = 1;
499 break;
500 }
501
502 /*
503 * If this is the first time to go into the loop and
504 * offset is smaller than the first page offset, it will be a
505 * hole at this offset.
506 */
507 if (lastoff == startoff && whence == SEEK_HOLE &&
508 lastoff < page_offset(pvec.pages[0])) {
509 found = 1;
510 break; 488 break;
511 }
512 489
513 for (i = 0; i < nr_pages; i++) { 490 for (i = 0; i < nr_pages; i++) {
514 struct page *page = pvec.pages[i]; 491 struct page *page = pvec.pages[i];
515 struct buffer_head *bh, *head; 492 struct buffer_head *bh, *head;
516 493
517 /* 494 /*
518 * If the current offset is not beyond the end of given 495 * If current offset is smaller than the page offset,
519 * range, it will be a hole. 496 * there is a hole at this offset.
520 */ 497 */
521 if (lastoff < endoff && whence == SEEK_HOLE && 498 if (whence == SEEK_HOLE && lastoff < endoff &&
522 page->index > end) { 499 lastoff < page_offset(pvec.pages[i])) {
523 found = 1; 500 found = 1;
524 *offset = lastoff; 501 *offset = lastoff;
525 goto out; 502 goto out;
526 } 503 }
527 504
505 if (page->index > end)
506 goto out;
507
528 lock_page(page); 508 lock_page(page);
529 509
530 if (unlikely(page->mapping != inode->i_mapping)) { 510 if (unlikely(page->mapping != inode->i_mapping)) {
@@ -564,20 +544,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
564 unlock_page(page); 544 unlock_page(page);
565 } 545 }
566 546
567 /* 547 /* The no. of pages is less than our desired, we are done. */
568 * The no. of pages is less than our desired, that would be a 548 if (nr_pages < num)
569 * hole in there.
570 */
571 if (nr_pages < num && whence == SEEK_HOLE) {
572 found = 1;
573 *offset = lastoff;
574 break; 549 break;
575 }
576 550
577 index = pvec.pages[i - 1]->index + 1; 551 index = pvec.pages[i - 1]->index + 1;
578 pagevec_release(&pvec); 552 pagevec_release(&pvec);
579 } while (index <= end); 553 } while (index <= end);
580 554
555 if (whence == SEEK_HOLE && lastoff < endoff) {
556 found = 1;
557 *offset = lastoff;
558 }
581out: 559out:
582 pagevec_release(&pvec); 560 pagevec_release(&pvec);
583 return found; 561 return found;
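
Editor's note on the pagevec lookup fix above: with end now inclusive ((endoff - 1) >> PAGE_SHIFT), the number of pages to request is end - index + 1, capped at PAGEVEC_SIZE, hence the min_t(..., PAGEVEC_SIZE - 1) + 1 form. A small check of that arithmetic (PAGEVEC_SIZE hard-coded here purely for illustration):

#include <stdio.h>

#define PAGEVEC_SIZE 14		/* illustrative value, not read from this tree */

static unsigned long nr_to_request(unsigned long index, unsigned long end)
{
	unsigned long span = end - index;	/* end is inclusive */

	return (span < PAGEVEC_SIZE - 1 ? span : PAGEVEC_SIZE - 1) + 1;
}

int main(void)
{
	printf("%lu\n", nr_to_request(5, 5));	/* one-page range -> 1 */
	printf("%lu\n", nr_to_request(0, 3));	/* four pages -> 4 */
	printf("%lu\n", nr_to_request(0, 99));	/* capped -> PAGEVEC_SIZE */
	return 0;
}
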
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index d5dea4c293ef..8d141c0c8ff9 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1627,7 +1627,6 @@ out:
1627 1627
1628struct buffer_head *ext4_find_inline_entry(struct inode *dir, 1628struct buffer_head *ext4_find_inline_entry(struct inode *dir,
1629 struct ext4_filename *fname, 1629 struct ext4_filename *fname,
1630 const struct qstr *d_name,
1631 struct ext4_dir_entry_2 **res_dir, 1630 struct ext4_dir_entry_2 **res_dir,
1632 int *has_inline_data) 1631 int *has_inline_data)
1633{ 1632{
@@ -1649,7 +1648,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
1649 EXT4_INLINE_DOTDOT_SIZE; 1648 EXT4_INLINE_DOTDOT_SIZE;
1650 inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE; 1649 inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
1651 ret = ext4_search_dir(iloc.bh, inline_start, inline_size, 1650 ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
1652 dir, fname, d_name, 0, res_dir); 1651 dir, fname, 0, res_dir);
1653 if (ret == 1) 1652 if (ret == 1)
1654 goto out_find; 1653 goto out_find;
1655 if (ret < 0) 1654 if (ret < 0)
@@ -1662,7 +1661,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
1662 inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE; 1661 inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
1663 1662
1664 ret = ext4_search_dir(iloc.bh, inline_start, inline_size, 1663 ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
1665 dir, fname, d_name, 0, res_dir); 1664 dir, fname, 0, res_dir);
1666 if (ret == 1) 1665 if (ret == 1)
1667 goto out_find; 1666 goto out_find;
1668 1667
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1bd0bfa547f6..5cf82d03968c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2124,15 +2124,29 @@ static int ext4_writepage(struct page *page,
2124static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 2124static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2125{ 2125{
2126 int len; 2126 int len;
2127 loff_t size = i_size_read(mpd->inode); 2127 loff_t size;
2128 int err; 2128 int err;
2129 2129
2130 BUG_ON(page->index != mpd->first_page); 2130 BUG_ON(page->index != mpd->first_page);
2131 clear_page_dirty_for_io(page);
2132 /*
2133 * We have to be very careful here! Nothing protects writeback path
2134 * against i_size changes and the page can be writeably mapped into
2135 * page tables. So an application can be growing i_size and writing
2136 * data through mmap while writeback runs. clear_page_dirty_for_io()
2137 * write-protects our page in page tables and the page cannot get
2138 * written to again until we release page lock. So only after
2139 * clear_page_dirty_for_io() we are safe to sample i_size for
2140 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2141 * on the barrier provided by TestClearPageDirty in
2142 * clear_page_dirty_for_io() to make sure i_size is really sampled only
2143 * after page tables are updated.
2144 */
2145 size = i_size_read(mpd->inode);
2131 if (page->index == size >> PAGE_SHIFT) 2146 if (page->index == size >> PAGE_SHIFT)
2132 len = size & ~PAGE_MASK; 2147 len = size & ~PAGE_MASK;
2133 else 2148 else
2134 len = PAGE_SIZE; 2149 len = PAGE_SIZE;
2135 clear_page_dirty_for_io(page);
2136 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2150 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2137 if (!err) 2151 if (!err)
2138 mpd->wbc->nr_to_write--; 2152 mpd->wbc->nr_to_write--;
@@ -3629,9 +3643,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
3629 get_block_func = ext4_dio_get_block_unwritten_async; 3643 get_block_func = ext4_dio_get_block_unwritten_async;
3630 dio_flags = DIO_LOCKING; 3644 dio_flags = DIO_LOCKING;
3631 } 3645 }
3632#ifdef CONFIG_EXT4_FS_ENCRYPTION
3633 BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
3634#endif
3635 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, 3646 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
3636 get_block_func, ext4_end_io_dio, NULL, 3647 get_block_func, ext4_end_io_dio, NULL,
3637 dio_flags); 3648 dio_flags);
@@ -3713,7 +3724,7 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
3713 */ 3724 */
3714 inode_lock_shared(inode); 3725 inode_lock_shared(inode);
3715 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, 3726 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
3716 iocb->ki_pos + count); 3727 iocb->ki_pos + count - 1);
3717 if (ret) 3728 if (ret)
3718 goto out_unlock; 3729 goto out_unlock;
3719 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, 3730 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -4207,6 +4218,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4207 4218
4208 inode->i_mtime = inode->i_ctime = current_time(inode); 4219 inode->i_mtime = inode->i_ctime = current_time(inode);
4209 ext4_mark_inode_dirty(handle, inode); 4220 ext4_mark_inode_dirty(handle, inode);
4221 if (ret >= 0)
4222 ext4_update_inode_fsync_trans(handle, inode, 1);
4210out_stop: 4223out_stop:
4211 ext4_journal_stop(handle); 4224 ext4_journal_stop(handle);
4212out_dio: 4225out_dio:
@@ -5637,8 +5650,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
5637 /* No extended attributes present */ 5650 /* No extended attributes present */
5638 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 5651 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5639 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 5652 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5640 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 5653 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5641 new_extra_isize); 5654 EXT4_I(inode)->i_extra_isize, 0,
5655 new_extra_isize - EXT4_I(inode)->i_extra_isize);
5642 EXT4_I(inode)->i_extra_isize = new_extra_isize; 5656 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5643 return 0; 5657 return 0;
5644 } 5658 }
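
Editor's note on the ext4_expand_extra_isize memset above: the new bounds clear only the bytes between the current i_extra_isize and the requested size, so fields already stored in the existing extra space survive. A toy with invented sizes (the real base is raw_inode + EXT4_GOOD_OLD_INODE_SIZE):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char extra[64] = "already-used";	/* stands in for the in-inode area */
	unsigned int cur_isize = 16, new_isize = 24;

	/* old (buggy) analogue would have been: memset(extra, 0, new_isize); */
	memset(extra + cur_isize, 0, new_isize - cur_isize);

	printf("preserved: %s\n", extra);	/* first cur_isize bytes intact */
	return 0;
}
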
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5083bce20ac4..b7928cddd539 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3887,7 +3887,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
3887 3887
3888 err = ext4_mb_load_buddy(sb, group, &e4b); 3888 err = ext4_mb_load_buddy(sb, group, &e4b);
3889 if (err) { 3889 if (err) {
3890 ext4_error(sb, "Error loading buddy information for %u", group); 3890 ext4_warning(sb, "Error %d loading buddy information for %u",
3891 err, group);
3891 put_bh(bitmap_bh); 3892 put_bh(bitmap_bh);
3892 return 0; 3893 return 0;
3893 } 3894 }
@@ -4044,10 +4045,11 @@ repeat:
4044 BUG_ON(pa->pa_type != MB_INODE_PA); 4045 BUG_ON(pa->pa_type != MB_INODE_PA);
4045 group = ext4_get_group_number(sb, pa->pa_pstart); 4046 group = ext4_get_group_number(sb, pa->pa_pstart);
4046 4047
4047 err = ext4_mb_load_buddy(sb, group, &e4b); 4048 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4049 GFP_NOFS|__GFP_NOFAIL);
4048 if (err) { 4050 if (err) {
4049 ext4_error(sb, "Error loading buddy information for %u", 4051 ext4_error(sb, "Error %d loading buddy information for %u",
4050 group); 4052 err, group);
4051 continue; 4053 continue;
4052 } 4054 }
4053 4055
@@ -4303,11 +4305,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
4303 spin_unlock(&lg->lg_prealloc_lock); 4305 spin_unlock(&lg->lg_prealloc_lock);
4304 4306
4305 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 4307 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4308 int err;
4306 4309
4307 group = ext4_get_group_number(sb, pa->pa_pstart); 4310 group = ext4_get_group_number(sb, pa->pa_pstart);
4308 if (ext4_mb_load_buddy(sb, group, &e4b)) { 4311 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4309 ext4_error(sb, "Error loading buddy information for %u", 4312 GFP_NOFS|__GFP_NOFAIL);
4310 group); 4313 if (err) {
4314 ext4_error(sb, "Error %d loading buddy information for %u",
4315 err, group);
4311 continue; 4316 continue;
4312 } 4317 }
4313 ext4_lock_group(sb, group); 4318 ext4_lock_group(sb, group);
@@ -5127,8 +5132,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5127 5132
5128 ret = ext4_mb_load_buddy(sb, group, &e4b); 5133 ret = ext4_mb_load_buddy(sb, group, &e4b);
5129 if (ret) { 5134 if (ret) {
5130 ext4_error(sb, "Error in loading buddy " 5135 ext4_warning(sb, "Error %d loading buddy information for %u",
5131 "information for %u", group); 5136 ret, group);
5132 return ret; 5137 return ret;
5133 } 5138 }
5134 bitmap = e4b.bd_bitmap; 5139 bitmap = e4b.bd_bitmap;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b81f7d46f344..404256caf9cf 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1155,12 +1155,11 @@ errout:
1155static inline int search_dirblock(struct buffer_head *bh, 1155static inline int search_dirblock(struct buffer_head *bh,
1156 struct inode *dir, 1156 struct inode *dir,
1157 struct ext4_filename *fname, 1157 struct ext4_filename *fname,
1158 const struct qstr *d_name,
1159 unsigned int offset, 1158 unsigned int offset,
1160 struct ext4_dir_entry_2 **res_dir) 1159 struct ext4_dir_entry_2 **res_dir)
1161{ 1160{
1162 return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir, 1161 return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
1163 fname, d_name, offset, res_dir); 1162 fname, offset, res_dir);
1164} 1163}
1165 1164
1166/* 1165/*
@@ -1262,7 +1261,6 @@ static inline bool ext4_match(const struct ext4_filename *fname,
1262 */ 1261 */
1263int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, 1262int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1264 struct inode *dir, struct ext4_filename *fname, 1263 struct inode *dir, struct ext4_filename *fname,
1265 const struct qstr *d_name,
1266 unsigned int offset, struct ext4_dir_entry_2 **res_dir) 1264 unsigned int offset, struct ext4_dir_entry_2 **res_dir)
1267{ 1265{
1268 struct ext4_dir_entry_2 * de; 1266 struct ext4_dir_entry_2 * de;
@@ -1355,7 +1353,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1355 1353
1356 if (ext4_has_inline_data(dir)) { 1354 if (ext4_has_inline_data(dir)) {
1357 int has_inline_data = 1; 1355 int has_inline_data = 1;
1358 ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir, 1356 ret = ext4_find_inline_entry(dir, &fname, res_dir,
1359 &has_inline_data); 1357 &has_inline_data);
1360 if (has_inline_data) { 1358 if (has_inline_data) {
1361 if (inlined) 1359 if (inlined)
@@ -1447,7 +1445,7 @@ restart:
1447 goto next; 1445 goto next;
1448 } 1446 }
1449 set_buffer_verified(bh); 1447 set_buffer_verified(bh);
1450 i = search_dirblock(bh, dir, &fname, d_name, 1448 i = search_dirblock(bh, dir, &fname,
1451 block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); 1449 block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
1452 if (i == 1) { 1450 if (i == 1) {
1453 EXT4_I(dir)->i_dir_start_lookup = block; 1451 EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1488,7 +1486,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1488{ 1486{
1489 struct super_block * sb = dir->i_sb; 1487 struct super_block * sb = dir->i_sb;
1490 struct dx_frame frames[2], *frame; 1488 struct dx_frame frames[2], *frame;
1491 const struct qstr *d_name = fname->usr_fname;
1492 struct buffer_head *bh; 1489 struct buffer_head *bh;
1493 ext4_lblk_t block; 1490 ext4_lblk_t block;
1494 int retval; 1491 int retval;
@@ -1505,7 +1502,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1505 if (IS_ERR(bh)) 1502 if (IS_ERR(bh))
1506 goto errout; 1503 goto errout;
1507 1504
1508 retval = search_dirblock(bh, dir, fname, d_name, 1505 retval = search_dirblock(bh, dir, fname,
1509 block << EXT4_BLOCK_SIZE_BITS(sb), 1506 block << EXT4_BLOCK_SIZE_BITS(sb),
1510 res_dir); 1507 res_dir);
1511 if (retval == 1) 1508 if (retval == 1)
@@ -1530,7 +1527,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
1530 1527
1531 bh = NULL; 1528 bh = NULL;
1532errout: 1529errout:
1533 dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name)); 1530 dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name));
1534success: 1531success:
1535 dx_release(frames); 1532 dx_release(frames);
1536 return bh; 1533 return bh;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0b177da9ea82..d37c81f327e7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -848,14 +848,9 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
848{ 848{
849 int type; 849 int type;
850 850
851 if (ext4_has_feature_quota(sb)) { 851 /* Use our quota_off function to clear inode flags etc. */
852 dquot_disable(sb, -1, 852 for (type = 0; type < EXT4_MAXQUOTAS; type++)
853 DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 853 ext4_quota_off(sb, type);
854 } else {
855 /* Use our quota_off function to clear inode flags etc. */
856 for (type = 0; type < EXT4_MAXQUOTAS; type++)
857 ext4_quota_off(sb, type);
858 }
859} 854}
860#else 855#else
861static inline void ext4_quota_off_umount(struct super_block *sb) 856static inline void ext4_quota_off_umount(struct super_block *sb)
@@ -1179,6 +1174,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
1179 return res; 1174 return res;
1180 } 1175 }
1181 1176
1177 res = dquot_initialize(inode);
1178 if (res)
1179 return res;
1182retry: 1180retry:
1183 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1181 handle = ext4_journal_start(inode, EXT4_HT_MISC,
1184 ext4_jbd2_credits_xattr(inode)); 1182 ext4_jbd2_credits_xattr(inode));
@@ -5485,7 +5483,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
5485 goto out; 5483 goto out;
5486 5484
5487 err = dquot_quota_off(sb, type); 5485 err = dquot_quota_off(sb, type);
5488 if (err) 5486 if (err || ext4_has_feature_quota(sb))
5489 goto out_put; 5487 goto out_put;
5490 5488
5491 inode_lock(inode); 5489 inode_lock(inode);
@@ -5505,6 +5503,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
5505out_unlock: 5503out_unlock:
5506 inode_unlock(inode); 5504 inode_unlock(inode);
5507out_put: 5505out_put:
5506 lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
5508 iput(inode); 5507 iput(inode);
5509 return err; 5508 return err;
5510out: 5509out:
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 8fb7ce14e6eb..5d3c2536641c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -888,6 +888,8 @@ inserted:
888 else { 888 else {
889 u32 ref; 889 u32 ref;
890 890
891 WARN_ON_ONCE(dquot_initialize_needed(inode));
892
891 /* The old block is released after updating 893 /* The old block is released after updating
892 the inode. */ 894 the inode. */
893 error = dquot_alloc_block(inode, 895 error = dquot_alloc_block(inode,
@@ -954,6 +956,8 @@ inserted:
954 /* We need to allocate a new block */ 956 /* We need to allocate a new block */
955 ext4_fsblk_t goal, block; 957 ext4_fsblk_t goal, block;
956 958
959 WARN_ON_ONCE(dquot_initialize_needed(inode));
960
957 goal = ext4_group_first_block_no(sb, 961 goal = ext4_group_first_block_no(sb,
958 EXT4_I(inode)->i_block_group); 962 EXT4_I(inode)->i_block_group);
959 963
@@ -1166,6 +1170,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1166 return -EINVAL; 1170 return -EINVAL;
1167 if (strlen(name) > 255) 1171 if (strlen(name) > 255)
1168 return -ERANGE; 1172 return -ERANGE;
1173
1169 ext4_write_lock_xattr(inode, &no_expand); 1174 ext4_write_lock_xattr(inode, &no_expand);
1170 1175
1171 error = ext4_reserve_inode_write(handle, inode, &is.iloc); 1176 error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -1267,6 +1272,9 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
1267 int error, retries = 0; 1272 int error, retries = 0;
1268 int credits = ext4_jbd2_credits_xattr(inode); 1273 int credits = ext4_jbd2_credits_xattr(inode);
1269 1274
1275 error = dquot_initialize(inode);
1276 if (error)
1277 return error;
1270retry: 1278retry:
1271 handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); 1279 handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
1272 if (IS_ERR(handle)) { 1280 if (IS_ERR(handle)) {
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 9ee4832b6f8b..2d30a6da7013 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -680,6 +680,12 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
680 680
681 rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); 681 rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
682 handle->h_buffer_credits = nblocks; 682 handle->h_buffer_credits = nblocks;
683 /*
684 * Restore the original nofs context because the journal restart
685 * is basically the same thing as journal stop and start.
686 * start_this_handle will start a new nofs context.
687 */
688 memalloc_nofs_restore(handle->saved_alloc_context);
683 ret = start_this_handle(journal, handle, gfp_mask); 689 ret = start_this_handle(journal, handle, gfp_mask);
684 return ret; 690 return ret;
685} 691}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ebf80c7739e1..48813aeaab80 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1512,6 +1512,22 @@ int dquot_initialize(struct inode *inode)
1512} 1512}
1513EXPORT_SYMBOL(dquot_initialize); 1513EXPORT_SYMBOL(dquot_initialize);
1514 1514
1515bool dquot_initialize_needed(struct inode *inode)
1516{
1517 struct dquot **dquots;
1518 int i;
1519
1520 if (!dquot_active(inode))
1521 return false;
1522
1523 dquots = i_dquot(inode);
1524 for (i = 0; i < MAXQUOTAS; i++)
1525 if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1526 return true;
1527 return false;
1528}
1529EXPORT_SYMBOL(dquot_initialize_needed);
1530
1515/* 1531/*
1516 * Release all quotas referenced by inode. 1532 * Release all quotas referenced by inode.
1517 * 1533 *
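
Editor's note on dquot_initialize_needed() above: it reports whether any quota type active on the superblock still lacks an attached dquot on the inode; ext4 uses it purely as a WARN_ON_ONCE() assertion before allocating xattr blocks. A userspace mock of the same loop, with invented types standing in for dquot_active()/i_dquot()/sb_has_quota_active():

#include <stdbool.h>
#include <stdio.h>

#define MAXQUOTAS 3	/* usr, grp, prj */

struct mock_inode {
	bool quota_active;		/* dquot_active(inode) */
	void *dquots[MAXQUOTAS];	/* i_dquot(inode) */
	bool sb_type_active[MAXQUOTAS];	/* sb_has_quota_active(sb, type) */
};

static bool mock_initialize_needed(const struct mock_inode *inode)
{
	int i;

	if (!inode->quota_active)
		return false;
	for (i = 0; i < MAXQUOTAS; i++)
		if (!inode->dquots[i] && inode->sb_type_active[i])
			return true;
	return false;
}

int main(void)
{
	struct mock_inode ino = {
		.quota_active = true,
		.sb_type_active = { true, true, false },
	};

	printf("%d\n", mock_initialize_needed(&ino));	/* 1: usr/grp missing */
	return 0;
}
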
diff --git a/fs/stat.c b/fs/stat.c
index f494b182c7c7..c35610845ab1 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -672,6 +672,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
672 inode->i_bytes -= 512; 672 inode->i_bytes -= 512;
673 } 673 }
674} 674}
675EXPORT_SYMBOL(__inode_add_bytes);
675 676
676void inode_add_bytes(struct inode *inode, loff_t bytes) 677void inode_add_bytes(struct inode *inode, loff_t bytes)
677{ 678{
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index a0376a2c1c29..d642cc0a8271 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -82,7 +82,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
82 ufs_error (sb, "ufs_free_fragments", 82 ufs_error (sb, "ufs_free_fragments",
83 "bit already cleared for fragment %u", i); 83 "bit already cleared for fragment %u", i);
84 } 84 }
85 85
86 inode_sub_bytes(inode, count << uspi->s_fshift);
86 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 87 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
87 uspi->cs_total.cs_nffree += count; 88 uspi->cs_total.cs_nffree += count;
88 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); 89 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -184,6 +185,7 @@ do_more:
184 ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); 185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
185 } 186 }
186 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 187 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
188 inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
187 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 189 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
188 ufs_clusteracct (sb, ucpi, blkno, 1); 190 ufs_clusteracct (sb, ucpi, blkno, 1);
189 191
@@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
494 return 0; 496 return 0;
495} 497}
496 498
499static bool try_add_frags(struct inode *inode, unsigned frags)
500{
501 unsigned size = frags * i_blocksize(inode);
502 spin_lock(&inode->i_lock);
503 __inode_add_bytes(inode, size);
504 if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
505 __inode_sub_bytes(inode, size);
506 spin_unlock(&inode->i_lock);
507 return false;
508 }
509 spin_unlock(&inode->i_lock);
510 return true;
511}
512
497static u64 ufs_add_fragments(struct inode *inode, u64 fragment, 513static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
498 unsigned oldcount, unsigned newcount) 514 unsigned oldcount, unsigned newcount)
499{ 515{
@@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
530 for (i = oldcount; i < newcount; i++) 546 for (i = oldcount; i < newcount; i++)
531 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) 547 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
532 return 0; 548 return 0;
549
550 if (!try_add_frags(inode, count))
551 return 0;
533 /* 552 /*
534 * Block can be extended 553 * Block can be extended
535 */ 554 */
@@ -647,6 +666,7 @@ cg_found:
647 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 666 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
648 i = uspi->s_fpb - count; 667 i = uspi->s_fpb - count;
649 668
669 inode_sub_bytes(inode, i << uspi->s_fshift);
650 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 670 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
651 uspi->cs_total.cs_nffree += i; 671 uspi->cs_total.cs_nffree += i;
652 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); 672 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +677,8 @@ cg_found:
657 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 677 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
658 if (result == INVBLOCK) 678 if (result == INVBLOCK)
659 return 0; 679 return 0;
680 if (!try_add_frags(inode, count))
681 return 0;
660 for (i = 0; i < count; i++) 682 for (i = 0; i < count; i++)
661 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); 683 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
662 684
@@ -716,6 +738,8 @@ norot:
716 return INVBLOCK; 738 return INVBLOCK;
717 ucpi->c_rotor = result; 739 ucpi->c_rotor = result;
718gotit: 740gotit:
741 if (!try_add_frags(inode, uspi->s_fpb))
742 return 0;
719 blkno = ufs_fragstoblks(result); 743 blkno = ufs_fragstoblks(result);
720 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 744 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
721 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 745 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
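
Editor's note on try_add_frags() above: the new fragments are charged to i_bytes/i_blocks up front and backed out again if the resulting block count no longer fits in 32 bits; the (u32)inode->i_blocks != inode->i_blocks test is a plain truncation check. A standalone illustration of that check:

#include <stdint.h>
#include <stdio.h>

static int fits_in_u32(uint64_t blocks)
{
	return (uint32_t)blocks == blocks;	/* false once bit 32+ is set */
}

int main(void)
{
	uint64_t blocks = 0xffffffffULL;	/* largest 32-bit block count */

	printf("%d\n", fits_in_u32(blocks));		/* 1 */
	printf("%d\n", fits_in_u32(blocks + 1));	/* 0: would be truncated */
	return 0;
}
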
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7e41aee7b69a..da553ffec85b 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
235 235
236 p = ufs_get_direct_data_ptr(uspi, ufsi, block); 236 p = ufs_get_direct_data_ptr(uspi, ufsi, block);
237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), 237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
238 new_size, err, locked_page); 238 new_size - (lastfrag & uspi->s_fpbmask), err,
239 locked_page);
239 return tmp != 0; 240 return tmp != 0;
240} 241}
241 242
@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
284 goal += uspi->s_fpb; 285 goal += uspi->s_fpb;
285 } 286 }
286 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), 287 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
287 goal, uspi->s_fpb, err, locked_page); 288 goal, nfrags, err, locked_page);
288 289
289 if (!tmp) { 290 if (!tmp) {
290 *err = -ENOSPC; 291 *err = -ENOSPC;
@@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
402 403
403 if (!create) { 404 if (!create) {
404 phys64 = ufs_frag_map(inode, offsets, depth); 405 phys64 = ufs_frag_map(inode, offsets, depth);
405 goto out; 406 if (phys64)
407 map_bh(bh_result, sb, phys64 + frag);
408 return 0;
406 } 409 }
407 410
408 /* This code entered only while writing ....? */ 411 /* This code entered only while writing ....? */
@@ -841,8 +844,11 @@ void ufs_evict_inode(struct inode * inode)
841 truncate_inode_pages_final(&inode->i_data); 844 truncate_inode_pages_final(&inode->i_data);
842 if (want_delete) { 845 if (want_delete) {
843 inode->i_size = 0; 846 inode->i_size = 0;
844 if (inode->i_blocks) 847 if (inode->i_blocks &&
848 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
849 S_ISLNK(inode->i_mode)))
845 ufs_truncate_blocks(inode); 850 ufs_truncate_blocks(inode);
851 ufs_update_inode(inode, inode_needs_sync(inode));
846 } 852 }
847 853
848 invalidate_inode_buffers(inode); 854 invalidate_inode_buffers(inode);
@@ -1100,7 +1106,7 @@ out:
1100 return err; 1106 return err;
1101} 1107}
1102 1108
1103static void __ufs_truncate_blocks(struct inode *inode) 1109static void ufs_truncate_blocks(struct inode *inode)
1104{ 1110{
1105 struct ufs_inode_info *ufsi = UFS_I(inode); 1111 struct ufs_inode_info *ufsi = UFS_I(inode);
1106 struct super_block *sb = inode->i_sb; 1112 struct super_block *sb = inode->i_sb;
@@ -1183,7 +1189,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
1183 1189
1184 truncate_setsize(inode, size); 1190 truncate_setsize(inode, size);
1185 1191
1186 __ufs_truncate_blocks(inode); 1192 ufs_truncate_blocks(inode);
1187 inode->i_mtime = inode->i_ctime = current_time(inode); 1193 inode->i_mtime = inode->i_ctime = current_time(inode);
1188 mark_inode_dirty(inode); 1194 mark_inode_dirty(inode);
1189out: 1195out:
@@ -1191,16 +1197,6 @@ out:
1191 return err; 1197 return err;
1192} 1198}
1193 1199
1194static void ufs_truncate_blocks(struct inode *inode)
1195{
1196 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1197 S_ISLNK(inode->i_mode)))
1198 return;
1199 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1200 return;
1201 __ufs_truncate_blocks(inode);
1202}
1203
1204int ufs_setattr(struct dentry *dentry, struct iattr *attr) 1200int ufs_setattr(struct dentry *dentry, struct iattr *attr)
1205{ 1201{
1206 struct inode *inode = d_inode(dentry); 1202 struct inode *inode = d_inode(dentry);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 29ecaf739449..878cc6264f1a 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb)
746 return; 746 return;
747} 747}
748 748
749static u64 ufs_max_bytes(struct super_block *sb)
750{
751 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
752 int bits = uspi->s_apbshift;
753 u64 res;
754
755 if (bits > 21)
756 res = ~0ULL;
757 else
758 res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
759 (1LL << (3*bits));
760
761 if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
762 return MAX_LFS_FILESIZE;
763 return res << uspi->s_bshift;
764}
765
749static int ufs_fill_super(struct super_block *sb, void *data, int silent) 766static int ufs_fill_super(struct super_block *sb, void *data, int silent)
750{ 767{
751 struct ufs_sb_info * sbi; 768 struct ufs_sb_info * sbi;
@@ -1211,6 +1228,7 @@ magic_found:
1211 "fast symlink size (%u)\n", uspi->s_maxsymlinklen); 1228 "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
1212 uspi->s_maxsymlinklen = maxsymlen; 1229 uspi->s_maxsymlinklen = maxsymlen;
1213 } 1230 }
1231 sb->s_maxbytes = ufs_max_bytes(sb);
1214 sb->s_max_links = UFS_LINK_MAX; 1232 sb->s_max_links = UFS_LINK_MAX;
1215 1233
1216 inode = ufs_iget(sb, UFS_ROOTINO); 1234 inode = ufs_iget(sb, UFS_ROOTINO);
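
Editor's note on ufs_max_bytes() above: s_maxbytes is capped at what the direct plus single/double/triple indirect pointers can address, i.e. UFS_NDADDR direct blocks plus 2^b + 2^(2b) + 2^(3b) indirect ones, where b is s_apbshift (log2 of addresses per block), clamped to MAX_LFS_FILESIZE. A quick calculation for one plausible geometry; the constants below are assumptions for the sketch, not values read from a superblock:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed geometry: 8 KiB blocks, 4-byte addresses -> 2048 per block */
	unsigned int ndaddr = 12;	/* UFS_NDADDR */
	unsigned int bshift = 13;	/* log2(block size) */
	unsigned int bits = 11;		/* s_apbshift = log2(8192 / 4) */

	uint64_t blocks = ndaddr + (1ULL << bits) + (1ULL << (2 * bits)) +
			  (1ULL << (3 * bits));

	printf("max file size: %llu bytes\n",
	       (unsigned long long)(blocks << bshift));
	return 0;
}
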
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index b7fbf53dbc81..398019fb1448 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
473static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi, 473static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
474 struct ufs_buffer_head * ubh, unsigned begin, unsigned block) 474 struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
475{ 475{
476 u8 mask;
476 switch (uspi->s_fpb) { 477 switch (uspi->s_fpb) {
477 case 8: 478 case 8:
478 return (*ubh_get_addr (ubh, begin + block) == 0xff); 479 return (*ubh_get_addr (ubh, begin + block) == 0xff);
479 case 4: 480 case 4:
480 return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2))); 481 mask = 0x0f << ((block & 0x01) << 2);
482 return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
481 case 2: 483 case 2:
482 return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1))); 484 mask = 0x03 << ((block & 0x03) << 1);
485 return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
483 case 1: 486 case 1:
484 return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07))); 487 mask = 0x01 << (block & 0x07);
488 return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
485 } 489 }
486 return 0; 490 return 0;
487} 491}
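
Editor's note on the _ubh_isblockset_ fix above: the old comparisons required the whole bitmap byte to equal the block's mask, so a fully set block was missed whenever a neighbouring block's fragment bit in the same byte was also set; the fix masks off the unrelated bits first. A minimal reproduction for the 4-fragments-per-block case (two blocks share one byte):

#include <stdio.h>

int main(void)
{
	unsigned char byte = 0xf1;	/* block 1 fully set, plus a stray bit */
	unsigned char mask = 0x0f << ((1 & 0x01) << 2);	/* 0xf0 for block 1 */

	printf("old test: %d\n", byte == mask);			/* 0: wrong */
	printf("new test: %d\n", (byte & mask) == mask);	/* 1: block set */
	return 0;
}
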
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index de179993e039..d614c5ea1b5e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,3 +15,11 @@
15 * with any version that can compile the kernel 15 * with any version that can compile the kernel
16 */ 16 */
17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) 17#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
18
19/*
20 * GCC does not warn about unused static inline functions for
21 * -Wunused-function. This turns out to avoid the need for complex #ifdef
22 * directives. Suppress the warning in clang as well.
23 */
24#undef inline
25#define inline inline __attribute__((unused)) notrace
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 4eac2670bfa1..92f20832fd28 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
78 78
79struct iommu_domain; 79struct iommu_domain;
80struct msi_msg; 80struct msi_msg;
81struct device;
81 82
82static inline int iommu_dma_init(void) 83static inline int iommu_dma_init(void)
83{ 84{
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 9ec5e22846e0..0e306c5a86d6 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -153,7 +153,7 @@ struct elevator_type
153#endif 153#endif
154 154
155 /* managed by elevator core */ 155 /* managed by elevator core */
156 char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ 156 char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
157 struct list_head list; 157 struct list_head list;
158}; 158};
159 159
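
Editor's note on the icq_cache_name bump above: the worst-case name is an elevator name of ELV_NAME_MAX - 1 characters plus the six-character "_io_cq" suffix plus the terminating NUL, i.e. ELV_NAME_MAX + 6 bytes; the old + 5 left snprintf() one byte short. A quick sizing check (ELV_NAME_MAX stubbed in here for illustration):

#include <stdio.h>
#include <string.h>

#define ELV_NAME_MAX 16		/* assumed to match char name[ELV_NAME_MAX] */

int main(void)
{
	const char *name = "fifteen_chars15";	/* longest NUL-terminated name */
	char cache_name[ELV_NAME_MAX + 6];	/* with + 5, the 'q' is cut off */

	snprintf(cache_name, sizeof(cache_name), "%s_io_cq", name);
	printf("%s (%zu chars in a %zu-byte buffer)\n",
	       cache_name, strlen(cache_name), sizeof(cache_name));
	return 0;
}
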
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index fffb91202bc9..1fa293a37f4a 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -417,6 +417,10 @@
417#define ICH_HCR_EN (1 << 0) 417#define ICH_HCR_EN (1 << 0)
418#define ICH_HCR_UIE (1 << 1) 418#define ICH_HCR_UIE (1 << 1)
419 419
420#define ICH_VMCR_ACK_CTL_SHIFT 2
421#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
422#define ICH_VMCR_FIQ_EN_SHIFT 3
423#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
420#define ICH_VMCR_CBPR_SHIFT 4 424#define ICH_VMCR_CBPR_SHIFT 4
421#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) 425#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
422#define ICH_VMCR_EOIM_SHIFT 9 426#define ICH_VMCR_EOIM_SHIFT 9
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index dc30f3d057eb..d3453ee072fc 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -25,7 +25,18 @@
25#define GICC_ENABLE 0x1 25#define GICC_ENABLE 0x1
26#define GICC_INT_PRI_THRESHOLD 0xf0 26#define GICC_INT_PRI_THRESHOLD 0xf0
27 27
28#define GIC_CPU_CTRL_EOImodeNS (1 << 9) 28#define GIC_CPU_CTRL_EnableGrp0_SHIFT 0
29#define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
30#define GIC_CPU_CTRL_EnableGrp1_SHIFT 1
31#define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
32#define GIC_CPU_CTRL_AckCtl_SHIFT 2
33#define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT)
34#define GIC_CPU_CTRL_FIQEn_SHIFT 3
35#define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT)
36#define GIC_CPU_CTRL_CBPR_SHIFT 4
37#define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT)
38#define GIC_CPU_CTRL_EOImodeNS_SHIFT 9
39#define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
29 40
30#define GICC_IAR_INT_ID_MASK 0x3ff 41#define GICC_IAR_INT_ID_MASK 0x3ff
31#define GICC_INT_SPURIOUS 1023 42#define GICC_INT_SPURIOUS 1023
@@ -84,8 +95,19 @@
84#define GICH_LR_EOI (1 << 19) 95#define GICH_LR_EOI (1 << 19)
85#define GICH_LR_HW (1 << 31) 96#define GICH_LR_HW (1 << 31)
86 97
87#define GICH_VMCR_CTRL_SHIFT 0 98#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
88#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) 99#define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
100#define GICH_VMCR_ENABLE_GRP1_SHIFT 1
101#define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
102#define GICH_VMCR_ACK_CTL_SHIFT 2
103#define GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT)
104#define GICH_VMCR_FIQ_EN_SHIFT 3
105#define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT)
106#define GICH_VMCR_CBPR_SHIFT 4
107#define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT)
108#define GICH_VMCR_EOI_MODE_SHIFT 9
109#define GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT)
110
89#define GICH_VMCR_PRIMASK_SHIFT 27 111#define GICH_VMCR_PRIMASK_SHIFT 27
90#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) 112#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT)
91#define GICH_VMCR_BINPOINT_SHIFT 21 113#define GICH_VMCR_BINPOINT_SHIFT 21
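
Editor's note on the new GIC defines above: each field is added as a _SHIFT/_MASK pair so callers can pack and extract VMCR/CPU_CTRL bits without magic numbers. A small illustration of the pattern, reusing two of the values introduced in the hunk:

#include <stdio.h>
#include <stdint.h>

#define GICH_VMCR_FIQ_EN_SHIFT	3
#define GICH_VMCR_FIQ_EN_MASK	(1 << GICH_VMCR_FIQ_EN_SHIFT)
#define GICH_VMCR_CBPR_SHIFT	4
#define GICH_VMCR_CBPR_MASK	(1 << GICH_VMCR_CBPR_SHIFT)

int main(void)
{
	uint32_t vmcr = 0;

	vmcr |= 1u << GICH_VMCR_FIQ_EN_SHIFT;	/* set FIQEn */
	printf("FIQEn=%u CBPR=%u\n",
	       (vmcr & GICH_VMCR_FIQ_EN_MASK) >> GICH_VMCR_FIQ_EN_SHIFT,
	       (vmcr & GICH_VMCR_CBPR_MASK) >> GICH_VMCR_CBPR_SHIFT);
	return 0;
}
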
diff --git a/include/linux/key.h b/include/linux/key.h
index 0c9b93b0d1f7..78e25aabedaf 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -173,7 +173,6 @@ struct key {
173#ifdef KEY_DEBUGGING 173#ifdef KEY_DEBUGGING
174 unsigned magic; 174 unsigned magic;
175#define KEY_DEBUG_MAGIC 0x18273645u 175#define KEY_DEBUG_MAGIC 0x18273645u
176#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
177#endif 176#endif
178 177
179 unsigned long flags; /* status flags (change with bitops) */ 178 unsigned long flags; /* status flags (change with bitops) */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b4ee8f62ce8d..8e2828d48d7f 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
470 u16 rate_val; 470 u16 rate_val;
471}; 471};
472 472
473struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
473int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 474int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
474 enum mlx4_update_qp_attr attr, 475 enum mlx4_update_qp_attr attr,
475 struct mlx4_update_qp_params *params); 476 struct mlx4_update_qp_params *params);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 9c6f768b7d32..dda22f45fc1b 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -44,6 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); 44void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
45 45
46int dquot_initialize(struct inode *inode); 46int dquot_initialize(struct inode *inode);
47bool dquot_initialize_needed(struct inode *inode);
47void dquot_drop(struct inode *inode); 48void dquot_drop(struct inode *inode);
48struct dquot *dqget(struct super_block *sb, struct kqid qid); 49struct dquot *dqget(struct super_block *sb, struct kqid qid);
49static inline struct dquot *dqgrab(struct dquot *dquot) 50static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -207,6 +208,11 @@ static inline int dquot_initialize(struct inode *inode)
207 return 0; 208 return 0;
208} 209}
209 210
211static inline bool dquot_initialize_needed(struct inode *inode)
212{
213 return false;
214}
215
210static inline void dquot_drop(struct inode *inode) 216static inline void dquot_drop(struct inode *inode)
211{ 217{
212} 218}
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 167ad8831aaf..4c1d5f7e62c4 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -172,9 +172,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
172{ 172{
173 int retval; 173 int retval;
174 174
175 preempt_disable();
176 retval = __srcu_read_lock(sp); 175 retval = __srcu_read_lock(sp);
177 preempt_enable();
178 rcu_lock_acquire(&(sp)->dep_map); 176 rcu_lock_acquire(&(sp)->dep_map);
179 return retval; 177 return retval;
180} 178}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0b1cf32edfd7..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,8 +189,6 @@ struct platform_suspend_ops {
189struct platform_freeze_ops { 189struct platform_freeze_ops {
190 int (*begin)(void); 190 int (*begin)(void);
191 int (*prepare)(void); 191 int (*prepare)(void);
192 void (*wake)(void);
193 void (*sync)(void);
194 void (*restore)(void); 192 void (*restore)(void);
195 void (*end)(void); 193 void (*end)(void);
196}; 194};
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
430 428
431extern bool pm_wakeup_pending(void); 429extern bool pm_wakeup_pending(void);
432extern void pm_system_wakeup(void); 430extern void pm_system_wakeup(void);
433extern void pm_system_cancel_wakeup(void); 431extern void pm_wakeup_clear(void);
434extern void pm_wakeup_clear(bool reset);
435extern void pm_system_irq_wakeup(unsigned int irq_number); 432extern void pm_system_irq_wakeup(unsigned int irq_number);
436extern bool pm_get_wakeup_count(unsigned int *count, bool block); 433extern bool pm_get_wakeup_count(unsigned int *count, bool block);
437extern bool pm_save_wakeup_count(unsigned int count); 434extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
481 478
482static inline bool pm_wakeup_pending(void) { return false; } 479static inline bool pm_wakeup_pending(void) { return false; }
483static inline void pm_system_wakeup(void) {} 480static inline void pm_system_wakeup(void) {}
484static inline void pm_wakeup_clear(bool reset) {} 481static inline void pm_wakeup_clear(void) {}
485static inline void pm_system_irq_wakeup(unsigned int irq_number) {} 482static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
486 483
487static inline void lock_system_sleep(void) {} 484static inline void lock_system_sleep(void) {}
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index eb50ce54b759..413335c8cb52 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -29,7 +29,7 @@ struct edid;
29struct cec_adapter; 29struct cec_adapter;
30struct cec_notifier; 30struct cec_notifier;
31 31
32#ifdef CONFIG_MEDIA_CEC_NOTIFIER 32#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
33 33
34/** 34/**
35 * cec_notifier_get - find or create a new cec_notifier for the given device. 35 * cec_notifier_get - find or create a new cec_notifier for the given device.
diff --git a/include/media/cec.h b/include/media/cec.h
index b8eb895731d5..bfa88d4d67e1 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -173,7 +173,7 @@ struct cec_adapter {
173 bool passthrough; 173 bool passthrough;
174 struct cec_log_addrs log_addrs; 174 struct cec_log_addrs log_addrs;
175 175
176#ifdef CONFIG_MEDIA_CEC_NOTIFIER 176#ifdef CONFIG_CEC_NOTIFIER
177 struct cec_notifier *notifier; 177 struct cec_notifier *notifier;
178#endif 178#endif
179 179
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
300 */ 300 */
301int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); 301int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
302 302
303#ifdef CONFIG_MEDIA_CEC_NOTIFIER 303#ifdef CONFIG_CEC_NOTIFIER
304void cec_register_cec_notifier(struct cec_adapter *adap, 304void cec_register_cec_notifier(struct cec_adapter *adap,
305 struct cec_notifier *notifier); 305 struct cec_notifier *notifier);
306#endif 306#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index dbf0abba33b8..3e505bbff8ca 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
1007 */ 1007 */
1008extern const struct proto_ops inet6_stream_ops; 1008extern const struct proto_ops inet6_stream_ops;
1009extern const struct proto_ops inet6_dgram_ops; 1009extern const struct proto_ops inet6_dgram_ops;
1010extern const struct proto_ops inet6_sockraw_ops;
1010 1011
1011struct group_source_req; 1012struct group_source_req;
1012struct group_filter; 1013struct group_filter;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38a7427ae902..be6223c586fa 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
924 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); 924 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
925 /* call when ack arrives (optional) */ 925 /* call when ack arrives (optional) */
926 void (*in_ack_event)(struct sock *sk, u32 flags); 926 void (*in_ack_event)(struct sock *sk, u32 flags);
927 /* new value of cwnd after loss (optional) */ 927 /* new value of cwnd after loss (required) */
928 u32 (*undo_cwnd)(struct sock *sk); 928 u32 (*undo_cwnd)(struct sock *sk);
929 /* hook for packet ack accounting (optional) */ 929 /* hook for packet ack accounting (optional) */
930 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); 930 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 201c6644b237..ef16df06642a 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -70,8 +70,8 @@ struct keyctl_dh_params {
70}; 70};
71 71
72struct keyctl_kdf_params { 72struct keyctl_kdf_params {
73 char *hashname; 73 char __user *hashname;
74 char *otherinfo; 74 char __user *otherinfo;
75 __u32 otherinfolen; 75 __u32 otherinfolen;
76 __u32 __spare[8]; 76 __u32 __spare[8];
77}; 77};
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9ae6fbe5b5cf..cb5103413bd8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1658,13 +1658,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
1658 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; 1658 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1659 mutex_unlock(&cpuhp_state_mutex); 1659 mutex_unlock(&cpuhp_state_mutex);
1660 if (ret) 1660 if (ret)
1661 return ret; 1661 goto out;
1662 1662
1663 if (st->state < target) 1663 if (st->state < target)
1664 ret = do_cpu_up(dev->id, target); 1664 ret = do_cpu_up(dev->id, target);
1665 else 1665 else
1666 ret = do_cpu_down(dev->id, target); 1666 ret = do_cpu_down(dev->id, target);
1667 1667out:
1668 unlock_device_hotplug(); 1668 unlock_device_hotplug();
1669 return ret ? ret : count; 1669 return ret ? ret : count;
1670} 1670}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 78672d324a6e..c7209f060eeb 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -132,7 +132,7 @@ int freeze_processes(void)
132 if (!pm_freezing) 132 if (!pm_freezing)
133 atomic_inc(&system_freezing_cnt); 133 atomic_inc(&system_freezing_cnt);
134 134
135 pm_wakeup_clear(true); 135 pm_wakeup_clear();
136 pr_info("Freezing user space processes ... "); 136 pr_info("Freezing user space processes ... ");
137 pm_freezing = true; 137 pm_freezing = true;
138 error = try_to_freeze_tasks(true); 138 error = try_to_freeze_tasks(true);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c0248c74d6d4..15e6baef5c73 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -72,8 +72,6 @@ static void freeze_begin(void)
72 72
73static void freeze_enter(void) 73static void freeze_enter(void)
74{ 74{
75 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
76
77 spin_lock_irq(&suspend_freeze_lock); 75 spin_lock_irq(&suspend_freeze_lock);
78 if (pm_wakeup_pending()) 76 if (pm_wakeup_pending())
79 goto out; 77 goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
100 out: 98 out:
101 suspend_freeze_state = FREEZE_STATE_NONE; 99 suspend_freeze_state = FREEZE_STATE_NONE;
102 spin_unlock_irq(&suspend_freeze_lock); 100 spin_unlock_irq(&suspend_freeze_lock);
103
104 trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
105}
106
107static void s2idle_loop(void)
108{
109 do {
110 freeze_enter();
111
112 if (freeze_ops && freeze_ops->wake)
113 freeze_ops->wake();
114
115 dpm_resume_noirq(PMSG_RESUME);
116 if (freeze_ops && freeze_ops->sync)
117 freeze_ops->sync();
118
119 if (pm_wakeup_pending())
120 break;
121
122 pm_wakeup_clear(false);
123 } while (!dpm_suspend_noirq(PMSG_SUSPEND));
124} 101}
125 102
126void freeze_wake(void) 103void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
394 * all the devices are suspended. 371 * all the devices are suspended.
395 */ 372 */
396 if (state == PM_SUSPEND_FREEZE) { 373 if (state == PM_SUSPEND_FREEZE) {
397 s2idle_loop(); 374 trace_suspend_resume(TPS("machine_suspend"), state, true);
398 goto Platform_early_resume; 375 freeze_enter();
376 trace_suspend_resume(TPS("machine_suspend"), state, false);
377 goto Platform_wake;
399 } 378 }
400 379
401 error = disable_nonboot_cpus(); 380 error = disable_nonboot_cpus();
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a1aecf44ab07..a1db38abac5b 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
269#define MAX_CMDLINECONSOLES 8 269#define MAX_CMDLINECONSOLES 8
270 270
271static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; 271static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
272static int console_cmdline_cnt;
273 272
274static int preferred_console = -1; 273static int preferred_console = -1;
275int console_set_on_cmdline; 274int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
1906 * See if this tty is not yet registered, and 1905 * See if this tty is not yet registered, and
1907 * if we have a slot free. 1906 * if we have a slot free.
1908 */ 1907 */
1909 for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) { 1908 for (i = 0, c = console_cmdline;
1909 i < MAX_CMDLINECONSOLES && c->name[0];
1910 i++, c++) {
1910 if (strcmp(c->name, name) == 0 && c->index == idx) { 1911 if (strcmp(c->name, name) == 0 && c->index == idx) {
1911 if (brl_options) 1912 if (!brl_options)
1912 return 0; 1913 preferred_console = i;
1913
1914 /*
1915 * Maintain an invariant that will help to find if
1916 * the matching console is preferred, see
1917 * register_console():
1918 *
1919 * The last non-braille console is always
1920 * the preferred one.
1921 */
1922 if (i != console_cmdline_cnt - 1)
1923 swap(console_cmdline[i],
1924 console_cmdline[console_cmdline_cnt - 1]);
1925
1926 preferred_console = console_cmdline_cnt - 1;
1927
1928 return 0; 1914 return 0;
1929 } 1915 }
1930 } 1916 }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
1937 braille_set_options(c, brl_options); 1923 braille_set_options(c, brl_options);
1938 1924
1939 c->index = idx; 1925 c->index = idx;
1940 console_cmdline_cnt++;
1941 return 0; 1926 return 0;
1942} 1927}
1943/* 1928/*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
2477 } 2462 }
2478 2463
2479 /* 2464 /*
2480 * See if this console matches one we selected on the command line. 2465 * See if this console matches one we selected on
2481 * 2466 * the command line.
2482 * There may be several entries in the console_cmdline array matching
2483 * with the same console, one with newcon->match(), another by
2484 * name/index:
2485 *
2486 * pl011,mmio,0x87e024000000,115200 -- added from SPCR
2487 * ttyAMA0 -- added from command line
2488 *
2489 * Traverse the console_cmdline array in reverse order to be
2490 * sure that if this console is preferred then it will be the first
2491 * matching entry. We use the invariant that is maintained in
2492 * __add_preferred_console().
2493 */ 2467 */
2494 for (i = console_cmdline_cnt - 1; i >= 0; i--) { 2468 for (i = 0, c = console_cmdline;
2495 c = console_cmdline + i; 2469 i < MAX_CMDLINECONSOLES && c->name[0];
2496 2470 i++, c++) {
2497 if (!newcon->match || 2471 if (!newcon->match ||
2498 newcon->match(newcon, c->name, c->index, c->options) != 0) { 2472 newcon->match(newcon, c->name, c->index, c->options) != 0) {
2499 /* default matching */ 2473 /* default matching */
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index 584d8a983883..dea03614263f 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -263,7 +263,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
263 263
264/* 264/*
265 * Counts the new reader in the appropriate per-CPU element of the 265 * Counts the new reader in the appropriate per-CPU element of the
266 * srcu_struct. Must be called from process context. 266 * srcu_struct.
267 * Returns an index that must be passed to the matching srcu_read_unlock(). 267 * Returns an index that must be passed to the matching srcu_read_unlock().
268 */ 268 */
269int __srcu_read_lock(struct srcu_struct *sp) 269int __srcu_read_lock(struct srcu_struct *sp)
@@ -271,7 +271,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
271 int idx; 271 int idx;
272 272
273 idx = READ_ONCE(sp->completed) & 0x1; 273 idx = READ_ONCE(sp->completed) & 0x1;
274 __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); 274 this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
275 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 275 smp_mb(); /* B */ /* Avoid leaking the critical section. */
276 return idx; 276 return idx;
277} 277}
@@ -281,7 +281,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
281 * Removes the count for the old reader from the appropriate per-CPU 281 * Removes the count for the old reader from the appropriate per-CPU
282 * element of the srcu_struct. Note that this may well be a different 282 * element of the srcu_struct. Note that this may well be a different
283 * CPU than that which was incremented by the corresponding srcu_read_lock(). 283 * CPU than that which was incremented by the corresponding srcu_read_lock().
284 * Must be called from process context.
285 */ 284 */
286void __srcu_read_unlock(struct srcu_struct *sp, int idx) 285void __srcu_read_unlock(struct srcu_struct *sp, int idx)
287{ 286{
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 36e1f82faed1..32798eb14853 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -97,8 +97,9 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
97 97
98/* 98/*
99 * Counts the new reader in the appropriate per-CPU element of the 99 * Counts the new reader in the appropriate per-CPU element of the
100 * srcu_struct. Must be called from process context. 100 * srcu_struct. Can be invoked from irq/bh handlers, but the matching
101 * Returns an index that must be passed to the matching srcu_read_unlock(). 101 * __srcu_read_unlock() must be in the same handler instance. Returns an
102 * index that must be passed to the matching srcu_read_unlock().
102 */ 103 */
103int __srcu_read_lock(struct srcu_struct *sp) 104int __srcu_read_lock(struct srcu_struct *sp)
104{ 105{
@@ -112,7 +113,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
112 113
113/* 114/*
114 * Removes the count for the old reader from the appropriate element of 115 * Removes the count for the old reader from the appropriate element of
115 * the srcu_struct. Must be called from process context. 116 * the srcu_struct.
116 */ 117 */
117void __srcu_read_unlock(struct srcu_struct *sp, int idx) 118void __srcu_read_unlock(struct srcu_struct *sp, int idx)
118{ 119{
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 3ae8474557df..157654fa436a 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -357,7 +357,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
357 357
358/* 358/*
359 * Counts the new reader in the appropriate per-CPU element of the 359 * Counts the new reader in the appropriate per-CPU element of the
360 * srcu_struct. Must be called from process context. 360 * srcu_struct.
361 * Returns an index that must be passed to the matching srcu_read_unlock(). 361 * Returns an index that must be passed to the matching srcu_read_unlock().
362 */ 362 */
363int __srcu_read_lock(struct srcu_struct *sp) 363int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +365,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
365 int idx; 365 int idx;
366 366
367 idx = READ_ONCE(sp->srcu_idx) & 0x1; 367 idx = READ_ONCE(sp->srcu_idx) & 0x1;
368 __this_cpu_inc(sp->sda->srcu_lock_count[idx]); 368 this_cpu_inc(sp->sda->srcu_lock_count[idx]);
369 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 369 smp_mb(); /* B */ /* Avoid leaking the critical section. */
370 return idx; 370 return idx;
371} 371}
@@ -375,7 +375,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
375 * Removes the count for the old reader from the appropriate per-CPU 375 * Removes the count for the old reader from the appropriate per-CPU
376 * element of the srcu_struct. Note that this may well be a different 376 * element of the srcu_struct. Note that this may well be a different
377 * CPU than that which was incremented by the corresponding srcu_read_lock(). 377 * CPU than that which was incremented by the corresponding srcu_read_lock().
378 * Must be called from process context.
379 */ 378 */
380void __srcu_read_unlock(struct srcu_struct *sp, int idx) 379void __srcu_read_unlock(struct srcu_struct *sp, int idx)
381{ 380{
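All three SRCU flavours above switch the read-side counter from __this_cpu_inc() to this_cpu_inc() and drop the "must be called from process context" wording, so the read lock/unlock pair may now also run in irq/bh handlers (paired within the same handler instance, per the srcutiny comment). A minimal kernel-context fragment of the reader side, assuming an illustrative SRCU domain my_srcu set up with DEFINE_STATIC_SRCU():

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);	/* illustrative SRCU domain, not from the patch */

static void reader_path(void)
{
	int idx;

	/*
	 * With this_cpu_inc() on the lock counters, this pairing is
	 * also legal from hard/soft irq context, as long as the
	 * unlock happens in the same handler instance.
	 */
	idx = srcu_read_lock(&my_srcu);
	/* ... dereference SRCU-protected data here ... */
	srcu_read_unlock(&my_srcu, idx);
}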
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 574f78824d8a..32bd3ead9ba1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
595 err = 0; 595 err = 0;
596 switch (nla_type(attr)) { 596 switch (nla_type(attr)) {
597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO: 597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
598 if (!(p->flags & BR_VLAN_TUNNEL)) 598 if (!p || !(p->flags & BR_VLAN_TUNNEL))
599 return -EINVAL; 599 return -EINVAL;
600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); 600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
601 if (err) 601 if (err)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 0db8102995a5..6f12a5271219 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br)
179 br_debug(br, "using kernel STP\n"); 179 br_debug(br, "using kernel STP\n");
180 180
181 /* To start timers on any ports left in blocking */ 181 /* To start timers on any ports left in blocking */
182 mod_timer(&br->hello_timer, jiffies + br->hello_time); 182 if (br->dev->flags & IFF_UP)
183 mod_timer(&br->hello_timer, jiffies + br->hello_time);
183 br_port_state_selection(br); 184 br_port_state_selection(br);
184 } 185 }
185 186
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b0b87a292e7c..a0adfc31a3fe 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1680,8 +1680,10 @@ start_again:
1680 1680
1681 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 1681 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
1682 &devlink_nl_family, NLM_F_MULTI, cmd); 1682 &devlink_nl_family, NLM_F_MULTI, cmd);
1683 if (!hdr) 1683 if (!hdr) {
1684 nlmsg_free(skb);
1684 return -EMSGSIZE; 1685 return -EMSGSIZE;
1686 }
1685 1687
1686 if (devlink_nl_put_handle(skb, devlink)) 1688 if (devlink_nl_put_handle(skb, devlink))
1687 goto nla_put_failure; 1689 goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
2098 2100
2099 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 2101 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
2100 &devlink_nl_family, NLM_F_MULTI, cmd); 2102 &devlink_nl_family, NLM_F_MULTI, cmd);
2101 if (!hdr) 2103 if (!hdr) {
2104 nlmsg_free(skb);
2102 return -EMSGSIZE; 2105 return -EMSGSIZE;
2106 }
2103 2107
2104 if (devlink_nl_put_handle(skb, devlink)) 2108 if (devlink_nl_put_handle(skb, devlink))
2105 goto nla_put_failure; 2109 goto nla_put_failure;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e85dfbc..b1be7c01efe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3754 3754
3755 spin_lock_irqsave(&q->lock, flags); 3755 spin_lock_irqsave(&q->lock, flags);
3756 skb = __skb_dequeue(q); 3756 skb = __skb_dequeue(q);
3757 if (skb && (skb_next = skb_peek(q))) 3757 if (skb && (skb_next = skb_peek(q))) {
3758 icmp_next = is_icmp_err_skb(skb_next); 3758 icmp_next = is_icmp_err_skb(skb_next);
3759 if (icmp_next)
3760 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
3761 }
3759 spin_unlock_irqrestore(&q->lock, flags); 3762 spin_unlock_irqrestore(&q->lock, flags);
3760 3763
3761 if (is_icmp_err_skb(skb) && !icmp_next) 3764 if (is_icmp_err_skb(skb) && !icmp_next)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae438da..90038d45a547 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
223 return 0; 223 return 0;
224} 224}
225 225
226#ifdef CONFIG_PM_SLEEP
227int dsa_switch_suspend(struct dsa_switch *ds)
228{
229 int i, ret = 0;
230
231 /* Suspend slave network devices */
232 for (i = 0; i < ds->num_ports; i++) {
233 if (!dsa_is_port_initialized(ds, i))
234 continue;
235
236 ret = dsa_slave_suspend(ds->ports[i].netdev);
237 if (ret)
238 return ret;
239 }
240
241 if (ds->ops->suspend)
242 ret = ds->ops->suspend(ds);
243
244 return ret;
245}
246EXPORT_SYMBOL_GPL(dsa_switch_suspend);
247
248int dsa_switch_resume(struct dsa_switch *ds)
249{
250 int i, ret = 0;
251
252 if (ds->ops->resume)
253 ret = ds->ops->resume(ds);
254
255 if (ret)
256 return ret;
257
258 /* Resume slave network devices */
259 for (i = 0; i < ds->num_ports; i++) {
260 if (!dsa_is_port_initialized(ds, i))
261 continue;
262
263 ret = dsa_slave_resume(ds->ports[i].netdev);
264 if (ret)
265 return ret;
266 }
267
268 return 0;
269}
270EXPORT_SYMBOL_GPL(dsa_switch_resume);
271#endif
272
226static struct packet_type dsa_pack_type __read_mostly = { 273static struct packet_type dsa_pack_type __read_mostly = {
227 .type = cpu_to_be16(ETH_P_XDSA), 274 .type = cpu_to_be16(ETH_P_XDSA),
228 .func = dsa_switch_rcv, 275 .func = dsa_switch_rcv,
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bfb63dc..7796580e99ee 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
484 dsa_ds_unapply(dst, ds); 484 dsa_ds_unapply(dst, ds);
485 } 485 }
486 486
487 if (dst->cpu_switch) 487 if (dst->cpu_switch) {
488 dsa_cpu_port_ethtool_restore(dst->cpu_switch); 488 dsa_cpu_port_ethtool_restore(dst->cpu_switch);
489 dst->cpu_switch = NULL;
490 }
489 491
490 pr_info("DSA: tree %d unapplied\n", dst->tree); 492 pr_info("DSA: tree %d unapplied\n", dst->tree);
491 dst->applied = false; 493 dst->applied = false;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8b0b06..7281098df04e 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
289 dsa_switch_unregister_notifier(ds); 289 dsa_switch_unregister_notifier(ds);
290} 290}
291 291
292#ifdef CONFIG_PM_SLEEP
293int dsa_switch_suspend(struct dsa_switch *ds)
294{
295 int i, ret = 0;
296
297 /* Suspend slave network devices */
298 for (i = 0; i < ds->num_ports; i++) {
299 if (!dsa_is_port_initialized(ds, i))
300 continue;
301
302 ret = dsa_slave_suspend(ds->ports[i].netdev);
303 if (ret)
304 return ret;
305 }
306
307 if (ds->ops->suspend)
308 ret = ds->ops->suspend(ds);
309
310 return ret;
311}
312EXPORT_SYMBOL_GPL(dsa_switch_suspend);
313
314int dsa_switch_resume(struct dsa_switch *ds)
315{
316 int i, ret = 0;
317
318 if (ds->ops->resume)
319 ret = ds->ops->resume(ds);
320
321 if (ret)
322 return ret;
323
324 /* Resume slave network devices */
325 for (i = 0; i < ds->num_ports; i++) {
326 if (!dsa_is_port_initialized(ds, i))
327 continue;
328
329 ret = dsa_slave_resume(ds->ports[i].netdev);
330 if (ret)
331 return ret;
332 }
333
334 return 0;
335}
336EXPORT_SYMBOL_GPL(dsa_switch_resume);
337#endif
338
339/* platform driver init and cleanup *****************************************/ 292/* platform driver init and cleanup *****************************************/
340static int dev_is_class(struct device *dev, void *class) 293static int dev_is_class(struct device *dev, void *class)
341{ 294{
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f3dad1661343..58925b6597de 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
1043 .type = SOCK_DGRAM, 1043 .type = SOCK_DGRAM,
1044 .protocol = IPPROTO_ICMP, 1044 .protocol = IPPROTO_ICMP,
1045 .prot = &ping_prot, 1045 .prot = &ping_prot,
1046 .ops = &inet_dgram_ops, 1046 .ops = &inet_sockraw_ops,
1047 .flags = INET_PROTOSW_REUSE, 1047 .flags = INET_PROTOSW_REUSE,
1048 }, 1048 },
1049 1049
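The one-line af_inet change points ping sockets (SOCK_DGRAM + IPPROTO_ICMP) at inet_sockraw_ops. From userspace this is the unprivileged ICMP socket type; a rough sketch of using it (assumes the net.ipv4.ping_group_range sysctl covers the caller's group, and that the kernel fills in the ICMP identifier):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

	if (fd < 0) {
		perror("socket");	/* likely ping_group_range not set */
		return 1;
	}

	struct icmphdr icmp;
	memset(&icmp, 0, sizeof(icmp));
	icmp.type = ICMP_ECHO;
	icmp.un.echo.sequence = 1;	/* id is assigned by the kernel */

	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};

	if (sendto(fd, &icmp, sizeof(icmp), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}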
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 59792d283ff8..b5ea036ca781 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2381,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
2381 return 0; 2381 return 0;
2382} 2382}
2383 2383
2384static int tcp_repair_options_est(struct tcp_sock *tp, 2384static int tcp_repair_options_est(struct sock *sk,
2385 struct tcp_repair_opt __user *optbuf, unsigned int len) 2385 struct tcp_repair_opt __user *optbuf, unsigned int len)
2386{ 2386{
2387 struct tcp_sock *tp = tcp_sk(sk);
2387 struct tcp_repair_opt opt; 2388 struct tcp_repair_opt opt;
2388 2389
2389 while (len >= sizeof(opt)) { 2390 while (len >= sizeof(opt)) {
@@ -2396,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
2396 switch (opt.opt_code) { 2397 switch (opt.opt_code) {
2397 case TCPOPT_MSS: 2398 case TCPOPT_MSS:
2398 tp->rx_opt.mss_clamp = opt.opt_val; 2399 tp->rx_opt.mss_clamp = opt.opt_val;
2400 tcp_mtup_init(sk);
2399 break; 2401 break;
2400 case TCPOPT_WINDOW: 2402 case TCPOPT_WINDOW:
2401 { 2403 {
@@ -2555,7 +2557,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2555 if (!tp->repair) 2557 if (!tp->repair)
2556 err = -EINVAL; 2558 err = -EINVAL;
2557 else if (sk->sk_state == TCP_ESTABLISHED) 2559 else if (sk->sk_state == TCP_ESTABLISHED)
2558 err = tcp_repair_options_est(tp, 2560 err = tcp_repair_options_est(sk,
2559 (struct tcp_repair_opt __user *)optval, 2561 (struct tcp_repair_opt __user *)optval,
2560 optlen); 2562 optlen);
2561 else 2563 else
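tcp_repair_options_est() now takes the socket so it can call tcp_mtup_init() after restoring the MSS clamp. The path is reached from userspace through TCP_REPAIR / TCP_REPAIR_OPTIONS; a much-simplified sketch of that setsockopt sequence (needs CAP_NET_ADMIN and an established socket already being restored, as in a CRIU-style flow; constants are defined locally in case older headers lack them):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_REPAIR
#define TCP_REPAIR		19
#endif
#ifndef TCP_REPAIR_OPTIONS
#define TCP_REPAIR_OPTIONS	22
#endif
#ifndef TCPOPT_MAXSEG
#define TCPOPT_MAXSEG		2	/* same value as the kernel's TCPOPT_MSS */
#endif

struct tcp_repair_opt {
	unsigned int opt_code;
	unsigned int opt_val;
};

/*
 * Restore a negotiated MSS on a repaired socket.  With the fix above,
 * the kernel re-runs tcp_mtup_init() so path-MTU state matches the
 * restored clamp.  Error handling trimmed for brevity.
 */
static int restore_mss(int fd, unsigned int mss)
{
	int one = 1;
	struct tcp_repair_opt opt = {
		.opt_code = TCPOPT_MAXSEG,
		.opt_val  = mss,
	};

	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
			  &opt, sizeof(opt));
}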
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6e3c512054a6..324c9bcc5456 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
180{ 180{
181 const struct inet_connection_sock *icsk = inet_csk(sk); 181 const struct inet_connection_sock *icsk = inet_csk(sk);
182 182
183 tcp_sk(sk)->prior_ssthresh = 0;
183 if (icsk->icsk_ca_ops->init) 184 if (icsk->icsk_ca_ops->init)
184 icsk->icsk_ca_ops->init(sk); 185 icsk->icsk_ca_ops->init(sk);
185 if (tcp_ca_needs_ecn(sk)) 186 if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de713c6..8d772fea1dde 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1319 struct ipv6hdr *ip6_hdr; 1319 struct ipv6hdr *ip6_hdr;
1320 struct ipv6_opt_hdr *hop; 1320 struct ipv6_opt_hdr *hop;
1321 unsigned char buf[CALIPSO_MAX_BUFFER]; 1321 unsigned char buf[CALIPSO_MAX_BUFFER];
1322 int len_delta, new_end, pad; 1322 int len_delta, new_end, pad, payload;
1323 unsigned int start, end; 1323 unsigned int start, end;
1324 1324
1325 ip6_hdr = ipv6_hdr(skb); 1325 ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1346 if (ret_val < 0) 1346 if (ret_val < 0)
1347 return ret_val; 1347 return ret_val;
1348 1348
1349 ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
1350
1349 if (len_delta) { 1351 if (len_delta) {
1350 if (len_delta > 0) 1352 if (len_delta > 0)
1351 skb_push(skb, len_delta); 1353 skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1355 sizeof(*ip6_hdr) + start); 1357 sizeof(*ip6_hdr) + start);
1356 skb_reset_network_header(skb); 1358 skb_reset_network_header(skb);
1357 ip6_hdr = ipv6_hdr(skb); 1359 ip6_hdr = ipv6_hdr(skb);
1360 payload = ntohs(ip6_hdr->payload_len);
1361 ip6_hdr->payload_len = htons(payload + len_delta);
1358 } 1362 }
1359 1363
1360 hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); 1364 hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
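The calipso hunks re-derive ip6_hdr after skb_cow() (which may have copied the buffer) and bump payload_len by len_delta. A loose plain-C analogy of the same rule (struct packet and grow_header are illustrative, not kernel code): a pointer taken before a call that may reallocate the buffer must be refreshed, and the length field must track the size change.

#include <stdlib.h>
#include <string.h>

struct packet {
	unsigned char *data;
	size_t         len;	/* plays the role of payload_len */
};

static int grow_header(struct packet *p, size_t extra)
{
	unsigned char *n = realloc(p->data, p->len + extra);

	if (!n)
		return -1;

	p->data = n;			/* the old pointer may now dangle */
	memmove(p->data + extra, p->data, p->len);
	memset(p->data, 0, extra);	/* freshly opened header space */
	p->len += extra;		/* like payload_len += len_delta */
	return 0;
}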
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 280268f1dd7b..cdb3728faca7 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
116 116
117 if (udpfrag) { 117 if (udpfrag) {
118 int err = ip6_find_1stfragopt(skb, &prevhdr); 118 int err = ip6_find_1stfragopt(skb, &prevhdr);
119 if (err < 0) 119 if (err < 0) {
120 kfree_skb_list(segs);
120 return ERR_PTR(err); 121 return ERR_PTR(err);
122 }
121 fptr = (struct frag_hdr *)((u8 *)ipv6h + err); 123 fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
122 fptr->frag_off = htons(offset); 124 fptr->frag_off = htons(offset);
123 if (skb->next) 125 if (skb->next)
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 7ae6c503f1ca..9b37f9747fc6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1095 1095
1096 if (!dst) { 1096 if (!dst) {
1097route_lookup: 1097route_lookup:
1098 /* add dsfield to flowlabel for route lookup */
1099 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1100
1098 dst = ip6_route_output(net, NULL, fl6); 1101 dst = ip6_route_output(net, NULL, fl6);
1099 1102
1100 if (dst->error) 1103 if (dst->error)
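The ip6_tnl_xmit change folds the DSCP bits into the flow information before the route lookup via ip6_make_flowinfo(dsfield, flowlabel). The first 32-bit word of the IPv6 header is version(4) | traffic class(8) | flow label(20); a small host-order illustration of that packing (local defines for clarity, not the kernel's __be32 helpers):

#include <stdint.h>
#include <stdio.h>

#define IPV6_TCLASS_SHIFT	20
#define IPV6_FLOWLABEL_MASK	0x000FFFFFu

/* Fold the traffic class (dsfield) into the flowinfo word. */
static uint32_t make_flowinfo(uint8_t dsfield, uint32_t flowlabel)
{
	return ((uint32_t)dsfield << IPV6_TCLASS_SHIFT) |
	       (flowlabel & IPV6_FLOWLABEL_MASK);
}

int main(void)
{
	/* e.g. DSCP/ECN byte 0xa0 with flow label 0x12345 */
	printf("flowinfo = 0x%08x\n", make_flowinfo(0xa0, 0x12345));
	return 0;
}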
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 9b522fa90e6d..ac826dd338ff 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
192 .type = SOCK_DGRAM, 192 .type = SOCK_DGRAM,
193 .protocol = IPPROTO_ICMPV6, 193 .protocol = IPPROTO_ICMPV6,
194 .prot = &pingv6_prot, 194 .prot = &pingv6_prot,
195 .ops = &inet6_dgram_ops, 195 .ops = &inet6_sockraw_ops,
196 .flags = INET_PROTOSW_REUSE, 196 .flags = INET_PROTOSW_REUSE,
197}; 197};
198 198
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f992d9e261d..60be012fe708 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
1338#endif /* CONFIG_PROC_FS */ 1338#endif /* CONFIG_PROC_FS */
1339 1339
1340/* Same as inet6_dgram_ops, sans udp_poll. */ 1340/* Same as inet6_dgram_ops, sans udp_poll. */
1341static const struct proto_ops inet6_sockraw_ops = { 1341const struct proto_ops inet6_sockraw_ops = {
1342 .family = PF_INET6, 1342 .family = PF_INET6,
1343 .owner = THIS_MODULE, 1343 .owner = THIS_MODULE,
1344 .release = inet6_release, 1344 .release = inet6_release,
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e015906f9ca..07d36573f50b 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
47 iph = ipv6_hdr(skb); 47 iph = ipv6_hdr(skb);
48 48
49 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 49 hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
50 if (hdr_len < 0)
51 return hdr_len;
50 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 52 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
51 skb_set_network_header(skb, -x->props.header_len); 53 skb_set_network_header(skb, -x->props.header_len);
52 skb->transport_header = skb->network_header + hdr_len; 54 skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 7a92c0f31912..9ad07a91708e 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
30 skb_set_inner_transport_header(skb, skb_transport_offset(skb)); 30 skb_set_inner_transport_header(skb, skb_transport_offset(skb));
31 31
32 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 32 hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
33 if (hdr_len < 0)
34 return hdr_len;
33 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 35 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
34 skb_set_network_header(skb, -x->props.header_len); 36 skb_set_network_header(skb, -x->props.header_len);
35 skb->transport_header = skb->network_header + hdr_len; 37 skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 60e2a62f7bef..cf2392b2ac71 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,7 +7,7 @@
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright(c) 2015 Intel Deutschland GmbH 10 * Copyright(c) 2015-2017 Intel Deutschland GmbH
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
741 ieee80211_agg_start_txq(sta, tid, true); 741 ieee80211_agg_start_txq(sta, tid, true);
742} 742}
743 743
744void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) 744void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
745 struct tid_ampdu_tx *tid_tx)
745{ 746{
746 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 747 struct ieee80211_sub_if_data *sdata = sta->sdata;
747 struct ieee80211_local *local = sdata->local; 748 struct ieee80211_local *local = sdata->local;
748 struct sta_info *sta;
749 struct tid_ampdu_tx *tid_tx;
750 749
751 trace_api_start_tx_ba_cb(sdata, ra, tid); 750 if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
751 return;
752
753 if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
754 ieee80211_agg_tx_operational(local, sta, tid);
755}
756
757static struct tid_ampdu_tx *
758ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
759 const u8 *ra, u16 tid, struct sta_info **sta)
760{
761 struct tid_ampdu_tx *tid_tx;
752 762
753 if (tid >= IEEE80211_NUM_TIDS) { 763 if (tid >= IEEE80211_NUM_TIDS) {
754 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 764 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
755 tid, IEEE80211_NUM_TIDS); 765 tid, IEEE80211_NUM_TIDS);
756 return; 766 return NULL;
757 } 767 }
758 768
759 mutex_lock(&local->sta_mtx); 769 *sta = sta_info_get_bss(sdata, ra);
760 sta = sta_info_get_bss(sdata, ra); 770 if (!*sta) {
761 if (!sta) {
762 mutex_unlock(&local->sta_mtx);
763 ht_dbg(sdata, "Could not find station: %pM\n", ra); 771 ht_dbg(sdata, "Could not find station: %pM\n", ra);
764 return; 772 return NULL;
765 } 773 }
766 774
767 mutex_lock(&sta->ampdu_mlme.mtx); 775 tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
768 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
769 776
770 if (WARN_ON(!tid_tx)) { 777 if (WARN_ON(!tid_tx))
771 ht_dbg(sdata, "addBA was not requested!\n"); 778 ht_dbg(sdata, "addBA was not requested!\n");
772 goto unlock;
773 }
774 779
775 if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) 780 return tid_tx;
776 goto unlock;
777
778 if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
779 ieee80211_agg_tx_operational(local, sta, tid);
780
781 unlock:
782 mutex_unlock(&sta->ampdu_mlme.mtx);
783 mutex_unlock(&local->sta_mtx);
784} 781}
785 782
786void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 783void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
788{ 785{
789 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 786 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
790 struct ieee80211_local *local = sdata->local; 787 struct ieee80211_local *local = sdata->local;
791 struct ieee80211_ra_tid *ra_tid; 788 struct sta_info *sta;
792 struct sk_buff *skb = dev_alloc_skb(0); 789 struct tid_ampdu_tx *tid_tx;
793 790
794 if (unlikely(!skb)) 791 trace_api_start_tx_ba_cb(sdata, ra, tid);
795 return;
796 792
797 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 793 rcu_read_lock();
798 memcpy(&ra_tid->ra, ra, ETH_ALEN); 794 tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
799 ra_tid->tid = tid; 795 if (!tid_tx)
796 goto out;
800 797
801 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; 798 set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
802 skb_queue_tail(&sdata->skb_queue, skb); 799 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
803 ieee80211_queue_work(&local->hw, &sdata->work); 800 out:
801 rcu_read_unlock();
804} 802}
805EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); 803EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
806 804
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
860} 858}
861EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); 859EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
862 860
863void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) 861void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
862 struct tid_ampdu_tx *tid_tx)
864{ 863{
865 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 864 struct ieee80211_sub_if_data *sdata = sta->sdata;
866 struct ieee80211_local *local = sdata->local;
867 struct sta_info *sta;
868 struct tid_ampdu_tx *tid_tx;
869 bool send_delba = false; 865 bool send_delba = false;
870 866
871 trace_api_stop_tx_ba_cb(sdata, ra, tid); 867 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
872 868 sta->sta.addr, tid);
873 if (tid >= IEEE80211_NUM_TIDS) {
874 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
875 tid, IEEE80211_NUM_TIDS);
876 return;
877 }
878
879 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
880
881 mutex_lock(&local->sta_mtx);
882
883 sta = sta_info_get_bss(sdata, ra);
884 if (!sta) {
885 ht_dbg(sdata, "Could not find station: %pM\n", ra);
886 goto unlock;
887 }
888 869
889 mutex_lock(&sta->ampdu_mlme.mtx);
890 spin_lock_bh(&sta->lock); 870 spin_lock_bh(&sta->lock);
891 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
892 871
893 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 872 if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
894 ht_dbg(sdata, 873 ht_dbg(sdata,
895 "unexpected callback to A-MPDU stop for %pM tid %d\n", 874 "unexpected callback to A-MPDU stop for %pM tid %d\n",
896 sta->sta.addr, tid); 875 sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
906 spin_unlock_bh(&sta->lock); 885 spin_unlock_bh(&sta->lock);
907 886
908 if (send_delba) 887 if (send_delba)
909 ieee80211_send_delba(sdata, ra, tid, 888 ieee80211_send_delba(sdata, sta->sta.addr, tid,
910 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 889 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
911
912 mutex_unlock(&sta->ampdu_mlme.mtx);
913 unlock:
914 mutex_unlock(&local->sta_mtx);
915} 890}
916 891
917void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 892void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
919{ 894{
920 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 895 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
921 struct ieee80211_local *local = sdata->local; 896 struct ieee80211_local *local = sdata->local;
922 struct ieee80211_ra_tid *ra_tid; 897 struct sta_info *sta;
923 struct sk_buff *skb = dev_alloc_skb(0); 898 struct tid_ampdu_tx *tid_tx;
924 899
925 if (unlikely(!skb)) 900 trace_api_stop_tx_ba_cb(sdata, ra, tid);
926 return;
927 901
928 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 902 rcu_read_lock();
929 memcpy(&ra_tid->ra, ra, ETH_ALEN); 903 tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
930 ra_tid->tid = tid; 904 if (!tid_tx)
905 goto out;
931 906
932 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; 907 set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
933 skb_queue_tail(&sdata->skb_queue, skb); 908 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
934 ieee80211_queue_work(&local->hw, &sdata->work); 909 out:
910 rcu_read_unlock();
935} 911}
936EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); 912EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
937 913
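The agg-tx rework replaces the zero-size skb queued to the interface work with per-TID state bits (HT_AGG_STATE_START_CB / HT_AGG_STATE_STOP_CB) plus the station's ampdu_mlme work, which then runs the callbacks in process context under the proper locks. A generic kernel-style sketch of that defer-via-bit-and-work pattern, with illustrative names (my_session and friends are not the mac80211 definitions), assuming the work item was set up with INIT_WORK():

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

#define MY_STATE_START_CB	0
#define MY_STATE_STOP_CB	1

struct my_session {
	unsigned long	   state;
	struct work_struct work;	/* INIT_WORK(&work, my_session_work) */
};

/* Called from atomic/irq context: just record the event and defer. */
static void my_start_cb_irqsafe(struct my_session *s)
{
	set_bit(MY_STATE_START_CB, &s->state);
	schedule_work(&s->work);
}

/* Work item: runs in process context, may take mutexes. */
static void my_session_work(struct work_struct *work)
{
	struct my_session *s = container_of(work, struct my_session, work);

	if (test_and_clear_bit(MY_STATE_START_CB, &s->state))
		;	/* handle the start callback under the proper locks */
	if (test_and_clear_bit(MY_STATE_STOP_CB, &s->state))
		;	/* handle the stop callback */
}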
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f4a528773563..6ca5442b1e03 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -7,6 +7,7 @@
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright 2017 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
289{ 290{
290 int i; 291 int i;
291 292
292 cancel_work_sync(&sta->ampdu_mlme.work);
293
294 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 293 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
295 __ieee80211_stop_tx_ba_session(sta, i, reason); 294 __ieee80211_stop_tx_ba_session(sta, i, reason);
296 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 295 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
298 reason != AGG_STOP_DESTROY_STA && 297 reason != AGG_STOP_DESTROY_STA &&
299 reason != AGG_STOP_PEER_REQUEST); 298 reason != AGG_STOP_PEER_REQUEST);
300 } 299 }
300
301 /* stopping might queue the work again - so cancel only afterwards */
302 cancel_work_sync(&sta->ampdu_mlme.work);
301} 303}
302 304
303void ieee80211_ba_session_work(struct work_struct *work) 305void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
352 spin_unlock_bh(&sta->lock); 354 spin_unlock_bh(&sta->lock);
353 355
354 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 356 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
355 if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP, 357 if (!tid_tx)
356 &tid_tx->state)) 358 continue;
359
360 if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
361 ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
362 if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
357 ___ieee80211_stop_tx_ba_session(sta, tid, 363 ___ieee80211_stop_tx_ba_session(sta, tid,
358 AGG_STOP_LOCAL_REQUEST); 364 AGG_STOP_LOCAL_REQUEST);
365 if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
366 ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
359 } 367 }
360 mutex_unlock(&sta->ampdu_mlme.mtx); 368 mutex_unlock(&sta->ampdu_mlme.mtx);
361} 369}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f8f6c148f554..665501ac358f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
1036 1036
1037enum sdata_queue_type { 1037enum sdata_queue_type {
1038 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, 1038 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
1039 IEEE80211_SDATA_QUEUE_AGG_START = 1,
1040 IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
1041 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, 1039 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
1042 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, 1040 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
1043}; 1041};
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
1427 return local->hw.wiphy->bands[band]; 1425 return local->hw.wiphy->bands[band];
1428} 1426}
1429 1427
1430/* this struct represents 802.11n's RA/TID combination */
1431struct ieee80211_ra_tid {
1432 u8 ra[ETH_ALEN];
1433 u16 tid;
1434};
1435
1436/* this struct holds the value parsing from channel switch IE */ 1428/* this struct holds the value parsing from channel switch IE */
1437struct ieee80211_csa_ie { 1429struct ieee80211_csa_ie {
1438 struct cfg80211_chan_def chandef; 1430 struct cfg80211_chan_def chandef;
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1794 enum ieee80211_agg_stop_reason reason); 1786 enum ieee80211_agg_stop_reason reason);
1795int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 1787int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1796 enum ieee80211_agg_stop_reason reason); 1788 enum ieee80211_agg_stop_reason reason);
1797void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); 1789void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
1798void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); 1790 struct tid_ampdu_tx *tid_tx);
1791void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
1792 struct tid_ampdu_tx *tid_tx);
1799void ieee80211_ba_session_work(struct work_struct *work); 1793void ieee80211_ba_session_work(struct work_struct *work);
1800void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); 1794void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
1801void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); 1795void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bd5b81f5d81..8fae1a72e6a7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
1237 struct ieee80211_local *local = sdata->local; 1237 struct ieee80211_local *local = sdata->local;
1238 struct sk_buff *skb; 1238 struct sk_buff *skb;
1239 struct sta_info *sta; 1239 struct sta_info *sta;
1240 struct ieee80211_ra_tid *ra_tid;
1241 struct ieee80211_rx_agg *rx_agg; 1240 struct ieee80211_rx_agg *rx_agg;
1242 1241
1243 if (!ieee80211_sdata_running(sdata)) 1242 if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
1253 while ((skb = skb_dequeue(&sdata->skb_queue))) { 1252 while ((skb = skb_dequeue(&sdata->skb_queue))) {
1254 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1253 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1255 1254
1256 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { 1255 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1257 ra_tid = (void *)&skb->cb;
1258 ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
1259 ra_tid->tid);
1260 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
1261 ra_tid = (void *)&skb->cb;
1262 ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
1263 ra_tid->tid);
1264 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1265 rx_agg = (void *)&skb->cb; 1256 rx_agg = (void *)&skb->cb;
1266 mutex_lock(&local->sta_mtx); 1257 mutex_lock(&local->sta_mtx);
1267 sta = sta_info_get_bss(sdata, rx_agg->addr); 1258 sta = sta_info_get_bss(sdata, rx_agg->addr);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7cdf7a835bb0..403e3cc58b57 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
2155 struct ieee80211_sta_rx_stats *cpurxs; 2155 struct ieee80211_sta_rx_stats *cpurxs;
2156 2156
2157 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2157 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2158 sinfo->rx_packets += cpurxs->dropped; 2158 sinfo->rx_dropped_misc += cpurxs->dropped;
2159 } 2159 }
2160 } 2160 }
2161 2161
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5609cacb20d5..ea0747d6a6da 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
116#define HT_AGG_STATE_STOPPING 3 116#define HT_AGG_STATE_STOPPING 3
117#define HT_AGG_STATE_WANT_START 4 117#define HT_AGG_STATE_WANT_START 4
118#define HT_AGG_STATE_WANT_STOP 5 118#define HT_AGG_STATE_WANT_STOP 5
119#define HT_AGG_STATE_START_CB 6
120#define HT_AGG_STATE_STOP_CB 7
119 121
120enum ieee80211_agg_stop_reason { 122enum ieee80211_agg_stop_reason {
121 AGG_STOP_DECLINED, 123 AGG_STOP_DECLINED,
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 257ec66009da..7b05fd1497ce 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
1418 continue; 1418 continue;
1419 alive++; 1419 alive++;
1420 nh_flags &= ~flags; 1420 nh_flags &= ~flags;
1421 WRITE_ONCE(nh->nh_flags, flags); 1421 WRITE_ONCE(nh->nh_flags, nh_flags);
1422 } endfor_nexthops(rt); 1422 } endfor_nexthops(rt);
1423 1423
1424 WRITE_ONCE(rt->rt_nhn_alive, alive); 1424 WRITE_ONCE(rt->rt_nhn_alive, alive);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9799a50bc604..a8be9b72e6cd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -890,8 +890,13 @@ restart:
890 } 890 }
891out: 891out:
892 local_bh_enable(); 892 local_bh_enable();
893 if (last) 893 if (last) {
894 /* nf ct hash resize happened, now clear the leftover. */
895 if ((struct nf_conn *)cb->args[1] == last)
896 cb->args[1] = 0;
897
894 nf_ct_put(last); 898 nf_ct_put(last);
899 }
895 900
896 while (i) { 901 while (i) {
897 i--; 902 i--;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 13875d599a85..1c5b14a6cab3 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
512 u8 pf, unsigned int hooknum) 512 u8 pf, unsigned int hooknum)
513{ 513{
514 const struct sctphdr *sh; 514 const struct sctphdr *sh;
515 struct sctphdr _sctph;
516 const char *logmsg; 515 const char *logmsg;
517 516
518 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 517 if (skb->len < dataoff + sizeof(struct sctphdr)) {
519 if (!sh) {
520 logmsg = "nf_ct_sctp: short packet "; 518 logmsg = "nf_ct_sctp: short packet ";
521 goto out_invalid; 519 goto out_invalid;
522 } 520 }
523 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 521 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
524 skb->ip_summed == CHECKSUM_NONE) { 522 skb->ip_summed == CHECKSUM_NONE) {
523 if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
524 logmsg = "nf_ct_sctp: failed to read header ";
525 goto out_invalid;
526 }
527 sh = (const struct sctphdr *)(skb->data + dataoff);
525 if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { 528 if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
526 logmsg = "nf_ct_sctp: bad CRC "; 529 logmsg = "nf_ct_sctp: bad CRC ";
527 goto out_invalid; 530 goto out_invalid;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index ef0be325a0c6..6c72922d20ca 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -566,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
566 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() 566 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
567 * will delete entry from already-freed table. 567 * will delete entry from already-freed table.
568 */ 568 */
569 ct->status &= ~IPS_NAT_DONE_MASK; 569 clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
570 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, 570 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
571 nf_nat_bysource_params); 571 nf_nat_bysource_params);
572 572
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb53f0a..fbdbaa00dd5f 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
116 else if (d > 0) 116 else if (d > 0)
117 p = &parent->rb_right; 117 p = &parent->rb_right;
118 else { 118 else {
119 if (nft_set_elem_active(&rbe->ext, genmask)) { 119 if (nft_rbtree_interval_end(rbe) &&
120 if (nft_rbtree_interval_end(rbe) && 120 !nft_rbtree_interval_end(new)) {
121 !nft_rbtree_interval_end(new)) 121 p = &parent->rb_left;
122 p = &parent->rb_left; 122 } else if (!nft_rbtree_interval_end(rbe) &&
123 else if (!nft_rbtree_interval_end(rbe) && 123 nft_rbtree_interval_end(new)) {
124 nft_rbtree_interval_end(new)) 124 p = &parent->rb_right;
125 p = &parent->rb_right; 125 } else if (nft_set_elem_active(&rbe->ext, genmask)) {
126 else { 126 *ext = &rbe->ext;
127 *ext = &rbe->ext; 127 return -EEXIST;
128 return -EEXIST; 128 } else {
129 } 129 p = &parent->rb_left;
130 } 130 }
131 } 131 }
132 } 132 }
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ee841f00a6ec..7586d446d7dc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
62#include <asm/cacheflush.h> 62#include <asm/cacheflush.h>
63#include <linux/hash.h> 63#include <linux/hash.h>
64#include <linux/genetlink.h> 64#include <linux/genetlink.h>
65#include <linux/net_namespace.h>
65 66
66#include <net/net_namespace.h> 67#include <net/net_namespace.h>
67#include <net/sock.h> 68#include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
1415 goto out; 1416 goto out;
1416 } 1417 }
1417 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); 1418 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1418 NETLINK_CB(p->skb2).nsid_is_set = true; 1419 if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
1420 NETLINK_CB(p->skb2).nsid_is_set = true;
1419 val = netlink_broadcast_deliver(sk, p->skb2); 1421 val = netlink_broadcast_deliver(sk, p->skb2);
1420 if (val < 0) { 1422 if (val < 0) {
1421 netlink_overrun(sk); 1423 netlink_overrun(sk);
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 6fd95f76bfae..a7a23b5541f8 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -20,6 +20,10 @@ config KEYS
20 20
21 If you are unsure as to whether this is required, answer N. 21 If you are unsure as to whether this is required, answer N.
22 22
23config KEYS_COMPAT
24 def_bool y
25 depends on COMPAT && KEYS
26
23config PERSISTENT_KEYRINGS 27config PERSISTENT_KEYRINGS
24 bool "Enable register of persistent per-UID keyrings" 28 bool "Enable register of persistent per-UID keyrings"
25 depends on KEYS 29 depends on KEYS
@@ -89,9 +93,9 @@ config ENCRYPTED_KEYS
89config KEY_DH_OPERATIONS 93config KEY_DH_OPERATIONS
90 bool "Diffie-Hellman operations on retained keys" 94 bool "Diffie-Hellman operations on retained keys"
91 depends on KEYS 95 depends on KEYS
92 select MPILIB
93 select CRYPTO 96 select CRYPTO
94 select CRYPTO_HASH 97 select CRYPTO_HASH
98 select CRYPTO_DH
95 help 99 help
96 This option provides support for calculating Diffie-Hellman 100 This option provides support for calculating Diffie-Hellman
97 public keys and shared secrets using values stored as keys 101 public keys and shared secrets using values stored as keys
diff --git a/security/keys/dh.c b/security/keys/dh.c
index e603bd912e4c..4755d4b4f945 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -8,34 +8,17 @@
8 * 2 of the License, or (at your option) any later version. 8 * 2 of the License, or (at your option) any later version.
9 */ 9 */
10 10
11#include <linux/mpi.h>
12#include <linux/slab.h> 11#include <linux/slab.h>
13#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/scatterlist.h>
14#include <linux/crypto.h> 14#include <linux/crypto.h>
15#include <crypto/hash.h> 15#include <crypto/hash.h>
16#include <crypto/kpp.h>
17#include <crypto/dh.h>
16#include <keys/user-type.h> 18#include <keys/user-type.h>
17#include "internal.h" 19#include "internal.h"
18 20
19/* 21static ssize_t dh_data_from_key(key_serial_t keyid, void **data)
20 * Public key or shared secret generation function [RFC2631 sec 2.1.1]
21 *
22 * ya = g^xa mod p;
23 * or
24 * ZZ = yb^xa mod p;
25 *
26 * where xa is the local private key, ya is the local public key, g is
27 * the generator, p is the prime, yb is the remote public key, and ZZ
28 * is the shared secret.
29 *
30 * Both are the same calculation, so g or yb are the "base" and ya or
31 * ZZ are the "result".
32 */
33static int do_dh(MPI result, MPI base, MPI xa, MPI p)
34{
35 return mpi_powm(result, base, xa, p);
36}
37
38static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
39{ 22{
40 struct key *key; 23 struct key *key;
41 key_ref_t key_ref; 24 key_ref_t key_ref;
@@ -56,19 +39,17 @@ static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
56 status = key_validate(key); 39 status = key_validate(key);
57 if (status == 0) { 40 if (status == 0) {
58 const struct user_key_payload *payload; 41 const struct user_key_payload *payload;
42 uint8_t *duplicate;
59 43
60 payload = user_key_payload_locked(key); 44 payload = user_key_payload_locked(key);
61 45
62 if (maxlen == 0) { 46 duplicate = kmemdup(payload->data, payload->datalen,
63 *mpi = NULL; 47 GFP_KERNEL);
48 if (duplicate) {
49 *data = duplicate;
64 ret = payload->datalen; 50 ret = payload->datalen;
65 } else if (payload->datalen <= maxlen) {
66 *mpi = mpi_read_raw_data(payload->data,
67 payload->datalen);
68 if (*mpi)
69 ret = payload->datalen;
70 } else { 51 } else {
71 ret = -EINVAL; 52 ret = -ENOMEM;
72 } 53 }
73 } 54 }
74 up_read(&key->sem); 55 up_read(&key->sem);
@@ -79,6 +60,29 @@ error:
79 return ret; 60 return ret;
80} 61}
81 62
63static void dh_free_data(struct dh *dh)
64{
65 kzfree(dh->key);
66 kzfree(dh->p);
67 kzfree(dh->g);
68}
69
70struct dh_completion {
71 struct completion completion;
72 int err;
73};
74
75static void dh_crypto_done(struct crypto_async_request *req, int err)
76{
77 struct dh_completion *compl = req->data;
78
79 if (err == -EINPROGRESS)
80 return;
81
82 compl->err = err;
83 complete(&compl->completion);
84}
85
82struct kdf_sdesc { 86struct kdf_sdesc {
83 struct shash_desc shash; 87 struct shash_desc shash;
84 char ctx[]; 88 char ctx[];
@@ -89,6 +93,7 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
89 struct crypto_shash *tfm; 93 struct crypto_shash *tfm;
90 struct kdf_sdesc *sdesc; 94 struct kdf_sdesc *sdesc;
91 int size; 95 int size;
96 int err;
92 97
93 /* allocate synchronous hash */ 98 /* allocate synchronous hash */
94 tfm = crypto_alloc_shash(hashname, 0, 0); 99 tfm = crypto_alloc_shash(hashname, 0, 0);
@@ -97,16 +102,25 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
97 return PTR_ERR(tfm); 102 return PTR_ERR(tfm);
98 } 103 }
99 104
105 err = -EINVAL;
106 if (crypto_shash_digestsize(tfm) == 0)
107 goto out_free_tfm;
108
109 err = -ENOMEM;
100 size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm); 110 size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
101 sdesc = kmalloc(size, GFP_KERNEL); 111 sdesc = kmalloc(size, GFP_KERNEL);
102 if (!sdesc) 112 if (!sdesc)
103 return -ENOMEM; 113 goto out_free_tfm;
104 sdesc->shash.tfm = tfm; 114 sdesc->shash.tfm = tfm;
105 sdesc->shash.flags = 0x0; 115 sdesc->shash.flags = 0x0;
106 116
107 *sdesc_ret = sdesc; 117 *sdesc_ret = sdesc;
108 118
109 return 0; 119 return 0;
120
121out_free_tfm:
122 crypto_free_shash(tfm);
123 return err;
110} 124}
111 125
112static void kdf_dealloc(struct kdf_sdesc *sdesc) 126static void kdf_dealloc(struct kdf_sdesc *sdesc)
@@ -120,14 +134,6 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
120 kzfree(sdesc); 134 kzfree(sdesc);
121} 135}
122 136
123/* convert 32 bit integer into its string representation */
124static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
125{
126 __be32 *a = (__be32 *)buf;
127
128 *a = cpu_to_be32(val);
129}
130
131/* 137/*
132 * Implementation of the KDF in counter mode according to SP800-108 section 5.1 138 * Implementation of the KDF in counter mode according to SP800-108 section 5.1
133 * as well as SP800-56A section 5.8.1 (Single-step KDF). 139 * as well as SP800-56A section 5.8.1 (Single-step KDF).
@@ -138,25 +144,39 @@ static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
138 * 5.8.1.2). 144 * 5.8.1.2).
139 */ 145 */
140static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen, 146static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
141 u8 *dst, unsigned int dlen) 147 u8 *dst, unsigned int dlen, unsigned int zlen)
142{ 148{
143 struct shash_desc *desc = &sdesc->shash; 149 struct shash_desc *desc = &sdesc->shash;
144 unsigned int h = crypto_shash_digestsize(desc->tfm); 150 unsigned int h = crypto_shash_digestsize(desc->tfm);
145 int err = 0; 151 int err = 0;
146 u8 *dst_orig = dst; 152 u8 *dst_orig = dst;
147 u32 i = 1; 153 __be32 counter = cpu_to_be32(1);
148 u8 iteration[sizeof(u32)];
149 154
150 while (dlen) { 155 while (dlen) {
151 err = crypto_shash_init(desc); 156 err = crypto_shash_init(desc);
152 if (err) 157 if (err)
153 goto err; 158 goto err;
154 159
155 crypto_kw_cpu_to_be32(i, iteration); 160 err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
156 err = crypto_shash_update(desc, iteration, sizeof(u32));
157 if (err) 161 if (err)
158 goto err; 162 goto err;
159 163
164 if (zlen && h) {
165 u8 tmpbuffer[h];
166 size_t chunk = min_t(size_t, zlen, h);
167 memset(tmpbuffer, 0, chunk);
168
169 do {
170 err = crypto_shash_update(desc, tmpbuffer,
171 chunk);
172 if (err)
173 goto err;
174
175 zlen -= chunk;
176 chunk = min_t(size_t, zlen, h);
177 } while (zlen);
178 }
179
160 if (src && slen) { 180 if (src && slen) {
161 err = crypto_shash_update(desc, src, slen); 181 err = crypto_shash_update(desc, src, slen);
162 if (err) 182 if (err)
@@ -179,7 +199,7 @@ static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
179 199
180 dlen -= h; 200 dlen -= h;
181 dst += h; 201 dst += h;
182 i++; 202 counter = cpu_to_be32(be32_to_cpu(counter) + 1);
183 } 203 }
184 } 204 }
185 205
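kdf_ctr() keeps the SP800-108 section 5.1 shape: each output block is Hash(be32(counter) || input), with the counter starting at 1 and an optional run of zero bytes ("zlen") fed in per iteration. A compact userspace sketch of that loop, using OpenSSL's one-shot SHA256() purely for illustration and omitting the zero-padding detail (build with -lcrypto):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>		/* htonl() */
#include <openssl/sha.h>	/* SHA256(), stand-in hash for the sketch */

static void kdf_ctr_sketch(const uint8_t *in, size_t inlen,
			   uint8_t *dst, size_t dlen)
{
	uint8_t buf[4 + 512];
	uint8_t digest[SHA256_DIGEST_LENGTH];
	uint32_t counter = 1;

	if (inlen > sizeof(buf) - 4)
		return;			/* keep the sketch simple */

	while (dlen) {
		uint32_t be = htonl(counter);
		size_t n = dlen < sizeof(digest) ? dlen : sizeof(digest);

		/* Hash(be32(counter) || input), as in the loop above. */
		memcpy(buf, &be, sizeof(be));
		memcpy(buf + sizeof(be), in, inlen);
		SHA256(buf, sizeof(be) + inlen, digest);

		memcpy(dst, digest, n);
		dst += n;
		dlen -= n;
		counter++;
	}
}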
@@ -192,7 +212,7 @@ err:
192 212
193static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc, 213static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
194 char __user *buffer, size_t buflen, 214 char __user *buffer, size_t buflen,
195 uint8_t *kbuf, size_t kbuflen) 215 uint8_t *kbuf, size_t kbuflen, size_t lzero)
196{ 216{
197 uint8_t *outbuf = NULL; 217 uint8_t *outbuf = NULL;
198 int ret; 218 int ret;
@@ -203,7 +223,7 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
203 goto err; 223 goto err;
204 } 224 }
205 225
206 ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen); 226 ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen, lzero);
207 if (ret) 227 if (ret)
208 goto err; 228 goto err;
209 229
@@ -221,21 +241,26 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
221 struct keyctl_kdf_params *kdfcopy) 241 struct keyctl_kdf_params *kdfcopy)
222{ 242{
223 long ret; 243 long ret;
224 MPI base, private, prime, result; 244 ssize_t dlen;
225 unsigned nbytes; 245 int secretlen;
246 int outlen;
226 struct keyctl_dh_params pcopy; 247 struct keyctl_dh_params pcopy;
227 uint8_t *kbuf; 248 struct dh dh_inputs;
228 ssize_t keylen; 249 struct scatterlist outsg;
229 size_t resultlen; 250 struct dh_completion compl;
251 struct crypto_kpp *tfm;
252 struct kpp_request *req;
253 uint8_t *secret;
254 uint8_t *outbuf;
230 struct kdf_sdesc *sdesc = NULL; 255 struct kdf_sdesc *sdesc = NULL;
231 256
232 if (!params || (!buffer && buflen)) { 257 if (!params || (!buffer && buflen)) {
233 ret = -EINVAL; 258 ret = -EINVAL;
234 goto out; 259 goto out1;
235 } 260 }
236 if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) { 261 if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
237 ret = -EFAULT; 262 ret = -EFAULT;
238 goto out; 263 goto out1;
239 } 264 }
240 265
241 if (kdfcopy) { 266 if (kdfcopy) {
@@ -244,104 +269,147 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
244 if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN || 269 if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
245 kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) { 270 kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
246 ret = -EMSGSIZE; 271 ret = -EMSGSIZE;
247 goto out; 272 goto out1;
248 } 273 }
249 274
250 /* get KDF name string */ 275 /* get KDF name string */
251 hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME); 276 hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME);
252 if (IS_ERR(hashname)) { 277 if (IS_ERR(hashname)) {
253 ret = PTR_ERR(hashname); 278 ret = PTR_ERR(hashname);
254 goto out; 279 goto out1;
255 } 280 }
256 281
257 /* allocate KDF from the kernel crypto API */ 282 /* allocate KDF from the kernel crypto API */
258 ret = kdf_alloc(&sdesc, hashname); 283 ret = kdf_alloc(&sdesc, hashname);
259 kfree(hashname); 284 kfree(hashname);
260 if (ret) 285 if (ret)
261 goto out; 286 goto out1;
262 } 287 }
263 288
264 /* 289 memset(&dh_inputs, 0, sizeof(dh_inputs));
265 * If the caller requests postprocessing with a KDF, allow an 290
266 * arbitrary output buffer size since the KDF ensures proper truncation. 291 dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
267 */ 292 if (dlen < 0) {
268 keylen = mpi_from_key(pcopy.prime, kdfcopy ? SIZE_MAX : buflen, &prime); 293 ret = dlen;
269 if (keylen < 0 || !prime) { 294 goto out1;
270 /* buflen == 0 may be used to query the required buffer size, 295 }
271 * which is the prime key length. 296 dh_inputs.p_size = dlen;
272 */ 297
273 ret = keylen; 298 dlen = dh_data_from_key(pcopy.base, &dh_inputs.g);
274 goto out; 299 if (dlen < 0) {
300 ret = dlen;
301 goto out2;
275 } 302 }
303 dh_inputs.g_size = dlen;
276 304
277 /* The result is never longer than the prime */ 305 dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
278 resultlen = keylen; 306 if (dlen < 0) {
307 ret = dlen;
308 goto out2;
309 }
310 dh_inputs.key_size = dlen;
279 311
280 keylen = mpi_from_key(pcopy.base, SIZE_MAX, &base); 312 secretlen = crypto_dh_key_len(&dh_inputs);
281 if (keylen < 0 || !base) { 313 secret = kmalloc(secretlen, GFP_KERNEL);
282 ret = keylen; 314 if (!secret) {
283 goto error1; 315 ret = -ENOMEM;
316 goto out2;
284 } 317 }
318 ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs);
319 if (ret)
320 goto out3;
285 321
286 keylen = mpi_from_key(pcopy.private, SIZE_MAX, &private); 322 tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
287 if (keylen < 0 || !private) { 323 if (IS_ERR(tfm)) {
288 ret = keylen; 324 ret = PTR_ERR(tfm);
289 goto error2; 325 goto out3;
326 }
327
328 ret = crypto_kpp_set_secret(tfm, secret, secretlen);
329 if (ret)
330 goto out4;
331
332 outlen = crypto_kpp_maxsize(tfm);
333
334 if (!kdfcopy) {
335 /*
336 * When not using a KDF, buflen 0 is used to read the
337 * required buffer length
338 */
339 if (buflen == 0) {
340 ret = outlen;
341 goto out4;
342 } else if (outlen > buflen) {
343 ret = -EOVERFLOW;
344 goto out4;
345 }
290 } 346 }
291 347
292 result = mpi_alloc(0); 348 outbuf = kzalloc(kdfcopy ? (outlen + kdfcopy->otherinfolen) : outlen,
293 if (!result) { 349 GFP_KERNEL);
350 if (!outbuf) {
294 ret = -ENOMEM; 351 ret = -ENOMEM;
295 goto error3; 352 goto out4;
296 } 353 }
297 354
298 /* allocate space for DH shared secret and SP800-56A otherinfo */ 355 sg_init_one(&outsg, outbuf, outlen);
299 kbuf = kmalloc(kdfcopy ? (resultlen + kdfcopy->otherinfolen) : resultlen, 356
300 GFP_KERNEL); 357 req = kpp_request_alloc(tfm, GFP_KERNEL);
301 if (!kbuf) { 358 if (!req) {
302 ret = -ENOMEM; 359 ret = -ENOMEM;
303 goto error4; 360 goto out5;
304 } 361 }
305 362
363 kpp_request_set_input(req, NULL, 0);
364 kpp_request_set_output(req, &outsg, outlen);
365 init_completion(&compl.completion);
366 kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
367 CRYPTO_TFM_REQ_MAY_SLEEP,
368 dh_crypto_done, &compl);
369
306 /* 370 /*
307 * Concatenate SP800-56A otherinfo past DH shared secret -- the 371 * For DH, generate_public_key and generate_shared_secret are
308 * input to the KDF is (DH shared secret || otherinfo) 372 * the same calculation
309 */ 373 */
310 if (kdfcopy && kdfcopy->otherinfo && 374 ret = crypto_kpp_generate_public_key(req);
311 copy_from_user(kbuf + resultlen, kdfcopy->otherinfo, 375 if (ret == -EINPROGRESS) {
312 kdfcopy->otherinfolen) != 0) { 376 wait_for_completion(&compl.completion);
313 ret = -EFAULT; 377 ret = compl.err;
314 goto error5; 378 if (ret)
379 goto out6;
315 } 380 }
316 381
317 ret = do_dh(result, base, private, prime);
318 if (ret)
319 goto error5;
320
321 ret = mpi_read_buffer(result, kbuf, resultlen, &nbytes, NULL);
322 if (ret != 0)
323 goto error5;
324
325 if (kdfcopy) { 382 if (kdfcopy) {
326 ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, kbuf, 383 /*
327 resultlen + kdfcopy->otherinfolen); 384 * Concatenate SP800-56A otherinfo past DH shared secret -- the
328 } else { 385 * input to the KDF is (DH shared secret || otherinfo)
329 ret = nbytes; 386 */
330 if (copy_to_user(buffer, kbuf, nbytes) != 0) 387 if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
388 kdfcopy->otherinfolen) != 0) {
331 ret = -EFAULT; 389 ret = -EFAULT;
390 goto out6;
391 }
392
393 ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, outbuf,
394 req->dst_len + kdfcopy->otherinfolen,
395 outlen - req->dst_len);
396 } else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
397 ret = req->dst_len;
398 } else {
399 ret = -EFAULT;
332 } 400 }
333 401
334error5: 402out6:
335 kzfree(kbuf); 403 kpp_request_free(req);
336error4: 404out5:
337 mpi_free(result); 405 kzfree(outbuf);
338error3: 406out4:
339 mpi_free(private); 407 crypto_free_kpp(tfm);
340error2: 408out3:
341 mpi_free(base); 409 kzfree(secret);
342error1: 410out2:
343 mpi_free(prime); 411 dh_free_data(&dh_inputs);
344out: 412out1:
345 kdf_dealloc(sdesc); 413 kdf_dealloc(sdesc);
346 return ret; 414 return ret;
347} 415}
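For readers unfamiliar with the kernel KPP interface that the reworked keyctl_dh_compute() drives, the following stand-alone sketch (not part of the patch) walks the same allocate / set_secret / generate_public_key flow. It assumes a caller-populated struct dh and a synchronous "dh" implementation, so the -EINPROGRESS completion handling the real code needs is reduced to a comment; the helper name do_dh_kpp() is invented for illustration.

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <crypto/dh.h>
#include <crypto/kpp.h>

/*
 * Sketch only: derive the DH public value g^x mod p through the KPP API.
 * @params must already carry p, g and the private key; a synchronous
 * "dh" implementation is assumed, so -EINPROGRESS is not handled here.
 */
static int do_dh_kpp(const struct dh *params, u8 *out, unsigned int outlen)
{
        struct crypto_kpp *tfm;
        struct kpp_request *req;
        struct scatterlist dst;
        void *packed;
        unsigned int packed_len;
        int ret;

        tfm = crypto_alloc_kpp("dh", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Pack p, g and the key into the blob crypto_kpp_set_secret() expects. */
        packed_len = crypto_dh_key_len(params);
        packed = kmalloc(packed_len, GFP_KERNEL);
        if (!packed) {
                ret = -ENOMEM;
                goto free_tfm;
        }
        ret = crypto_dh_encode_key(packed, packed_len, params);
        if (ret)
                goto free_packed;

        ret = crypto_kpp_set_secret(tfm, packed, packed_len);
        if (ret)
                goto free_packed;

        if (outlen < crypto_kpp_maxsize(tfm)) {
                ret = -EOVERFLOW;
                goto free_packed;
        }

        req = kpp_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_packed;
        }

        /* No input scatterlist: generate_public_key() works from the secret. */
        sg_init_one(&dst, out, outlen);
        kpp_request_set_input(req, NULL, 0);
        kpp_request_set_output(req, &dst, outlen);

        ret = crypto_kpp_generate_public_key(req);

        kpp_request_free(req);
free_packed:
        kzfree(packed);
free_tfm:
        crypto_free_kpp(tfm);
        return ret;
}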
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 0010955d7876..bb6324d1ccec 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -30,6 +30,7 @@
30#include <linux/scatterlist.h> 30#include <linux/scatterlist.h>
31#include <linux/ctype.h> 31#include <linux/ctype.h>
32#include <crypto/aes.h> 32#include <crypto/aes.h>
33#include <crypto/algapi.h>
33#include <crypto/hash.h> 34#include <crypto/hash.h>
34#include <crypto/sha.h> 35#include <crypto/sha.h>
35#include <crypto/skcipher.h> 36#include <crypto/skcipher.h>
@@ -54,13 +55,7 @@ static int blksize;
54#define MAX_DATA_SIZE 4096 55#define MAX_DATA_SIZE 4096
55#define MIN_DATA_SIZE 20 56#define MIN_DATA_SIZE 20
56 57
57struct sdesc { 58static struct crypto_shash *hash_tfm;
58 struct shash_desc shash;
59 char ctx[];
60};
61
62static struct crypto_shash *hashalg;
63static struct crypto_shash *hmacalg;
64 59
65enum { 60enum {
66 Opt_err = -1, Opt_new, Opt_load, Opt_update 61 Opt_err = -1, Opt_new, Opt_load, Opt_update
@@ -141,23 +136,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc)
141 */ 136 */
142static int valid_master_desc(const char *new_desc, const char *orig_desc) 137static int valid_master_desc(const char *new_desc, const char *orig_desc)
143{ 138{
144 if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { 139 int prefix_len;
145 if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN) 140
146 goto out; 141 if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
147 if (orig_desc) 142 prefix_len = KEY_TRUSTED_PREFIX_LEN;
148 if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN)) 143 else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
149 goto out; 144 prefix_len = KEY_USER_PREFIX_LEN;
150 } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { 145 else
151 if (strlen(new_desc) == KEY_USER_PREFIX_LEN) 146 return -EINVAL;
152 goto out; 147
153 if (orig_desc) 148 if (!new_desc[prefix_len])
154 if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN)) 149 return -EINVAL;
155 goto out; 150
156 } else 151 if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
157 goto out; 152 return -EINVAL;
153
158 return 0; 154 return 0;
159out:
160 return -EINVAL;
161} 155}
162 156
163/* 157/*
@@ -321,53 +315,38 @@ error:
321 return ukey; 315 return ukey;
322} 316}
323 317
324static struct sdesc *alloc_sdesc(struct crypto_shash *alg) 318static int calc_hash(struct crypto_shash *tfm, u8 *digest,
325{
326 struct sdesc *sdesc;
327 int size;
328
329 size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
330 sdesc = kmalloc(size, GFP_KERNEL);
331 if (!sdesc)
332 return ERR_PTR(-ENOMEM);
333 sdesc->shash.tfm = alg;
334 sdesc->shash.flags = 0x0;
335 return sdesc;
336}
337
338static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
339 const u8 *buf, unsigned int buflen) 319 const u8 *buf, unsigned int buflen)
340{ 320{
341 struct sdesc *sdesc; 321 SHASH_DESC_ON_STACK(desc, tfm);
342 int ret; 322 int err;
343 323
344 sdesc = alloc_sdesc(hmacalg); 324 desc->tfm = tfm;
345 if (IS_ERR(sdesc)) { 325 desc->flags = 0;
346 pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
347 return PTR_ERR(sdesc);
348 }
349 326
350 ret = crypto_shash_setkey(hmacalg, key, keylen); 327 err = crypto_shash_digest(desc, buf, buflen, digest);
351 if (!ret) 328 shash_desc_zero(desc);
352 ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest); 329 return err;
353 kfree(sdesc);
354 return ret;
355} 330}
356 331
357static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen) 332static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
333 const u8 *buf, unsigned int buflen)
358{ 334{
359 struct sdesc *sdesc; 335 struct crypto_shash *tfm;
360 int ret; 336 int err;
361 337
362 sdesc = alloc_sdesc(hashalg); 338 tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
363 if (IS_ERR(sdesc)) { 339 if (IS_ERR(tfm)) {
364 pr_info("encrypted_key: can't alloc %s\n", hash_alg); 340 pr_err("encrypted_key: can't alloc %s transform: %ld\n",
365 return PTR_ERR(sdesc); 341 hmac_alg, PTR_ERR(tfm));
342 return PTR_ERR(tfm);
366 } 343 }
367 344
368 ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest); 345 err = crypto_shash_setkey(tfm, key, keylen);
369 kfree(sdesc); 346 if (!err)
370 return ret; 347 err = calc_hash(tfm, digest, buf, buflen);
348 crypto_free_shash(tfm);
349 return err;
371} 350}
372 351
373enum derived_key_type { ENC_KEY, AUTH_KEY }; 352enum derived_key_type { ENC_KEY, AUTH_KEY };
@@ -385,10 +364,9 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
385 derived_buf_len = HASH_SIZE; 364 derived_buf_len = HASH_SIZE;
386 365
387 derived_buf = kzalloc(derived_buf_len, GFP_KERNEL); 366 derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
388 if (!derived_buf) { 367 if (!derived_buf)
389 pr_err("encrypted_key: out of memory\n");
390 return -ENOMEM; 368 return -ENOMEM;
391 } 369
392 if (key_type) 370 if (key_type)
393 strcpy(derived_buf, "AUTH_KEY"); 371 strcpy(derived_buf, "AUTH_KEY");
394 else 372 else
@@ -396,8 +374,8 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
396 374
397 memcpy(derived_buf + strlen(derived_buf) + 1, master_key, 375 memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
398 master_keylen); 376 master_keylen);
399 ret = calc_hash(derived_key, derived_buf, derived_buf_len); 377 ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len);
400 kfree(derived_buf); 378 kzfree(derived_buf);
401 return ret; 379 return ret;
402} 380}
403 381
@@ -480,12 +458,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
480 struct skcipher_request *req; 458 struct skcipher_request *req;
481 unsigned int encrypted_datalen; 459 unsigned int encrypted_datalen;
482 u8 iv[AES_BLOCK_SIZE]; 460 u8 iv[AES_BLOCK_SIZE];
483 unsigned int padlen;
484 char pad[16];
485 int ret; 461 int ret;
486 462
487 encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); 463 encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
488 padlen = encrypted_datalen - epayload->decrypted_datalen;
489 464
490 req = init_skcipher_req(derived_key, derived_keylen); 465 req = init_skcipher_req(derived_key, derived_keylen);
491 ret = PTR_ERR(req); 466 ret = PTR_ERR(req);
@@ -493,11 +468,10 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
493 goto out; 468 goto out;
494 dump_decrypted_data(epayload); 469 dump_decrypted_data(epayload);
495 470
496 memset(pad, 0, sizeof pad);
497 sg_init_table(sg_in, 2); 471 sg_init_table(sg_in, 2);
498 sg_set_buf(&sg_in[0], epayload->decrypted_data, 472 sg_set_buf(&sg_in[0], epayload->decrypted_data,
499 epayload->decrypted_datalen); 473 epayload->decrypted_datalen);
500 sg_set_buf(&sg_in[1], pad, padlen); 474 sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
501 475
502 sg_init_table(sg_out, 1); 476 sg_init_table(sg_out, 1);
503 sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); 477 sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
@@ -533,6 +507,7 @@ static int datablob_hmac_append(struct encrypted_key_payload *epayload,
533 if (!ret) 507 if (!ret)
534 dump_hmac(NULL, digest, HASH_SIZE); 508 dump_hmac(NULL, digest, HASH_SIZE);
535out: 509out:
510 memzero_explicit(derived_key, sizeof(derived_key));
536 return ret; 511 return ret;
537} 512}
538 513
@@ -561,8 +536,8 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
561 ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len); 536 ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
562 if (ret < 0) 537 if (ret < 0)
563 goto out; 538 goto out;
564 ret = memcmp(digest, epayload->format + epayload->datablob_len, 539 ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
565 sizeof digest); 540 sizeof(digest));
566 if (ret) { 541 if (ret) {
567 ret = -EINVAL; 542 ret = -EINVAL;
568 dump_hmac("datablob", 543 dump_hmac("datablob",
@@ -571,6 +546,7 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
571 dump_hmac("calc", digest, HASH_SIZE); 546 dump_hmac("calc", digest, HASH_SIZE);
572 } 547 }
573out: 548out:
549 memzero_explicit(derived_key, sizeof(derived_key));
574 return ret; 550 return ret;
575} 551}
576 552
@@ -584,9 +560,14 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
584 struct skcipher_request *req; 560 struct skcipher_request *req;
585 unsigned int encrypted_datalen; 561 unsigned int encrypted_datalen;
586 u8 iv[AES_BLOCK_SIZE]; 562 u8 iv[AES_BLOCK_SIZE];
587 char pad[16]; 563 u8 *pad;
588 int ret; 564 int ret;
589 565
566 /* Throwaway buffer to hold the unused zero padding at the end */
567 pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
568 if (!pad)
569 return -ENOMEM;
570
590 encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); 571 encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
591 req = init_skcipher_req(derived_key, derived_keylen); 572 req = init_skcipher_req(derived_key, derived_keylen);
592 ret = PTR_ERR(req); 573 ret = PTR_ERR(req);
@@ -594,13 +575,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
594 goto out; 575 goto out;
595 dump_encrypted_data(epayload, encrypted_datalen); 576 dump_encrypted_data(epayload, encrypted_datalen);
596 577
597 memset(pad, 0, sizeof pad);
598 sg_init_table(sg_in, 1); 578 sg_init_table(sg_in, 1);
599 sg_init_table(sg_out, 2); 579 sg_init_table(sg_out, 2);
600 sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen); 580 sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
601 sg_set_buf(&sg_out[0], epayload->decrypted_data, 581 sg_set_buf(&sg_out[0], epayload->decrypted_data,
602 epayload->decrypted_datalen); 582 epayload->decrypted_datalen);
603 sg_set_buf(&sg_out[1], pad, sizeof pad); 583 sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
604 584
605 memcpy(iv, epayload->iv, sizeof(iv)); 585 memcpy(iv, epayload->iv, sizeof(iv));
606 skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); 586 skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
@@ -612,6 +592,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
612 goto out; 592 goto out;
613 dump_decrypted_data(epayload); 593 dump_decrypted_data(epayload);
614out: 594out:
595 kfree(pad);
615 return ret; 596 return ret;
616} 597}
617 598
@@ -722,6 +703,7 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
722out: 703out:
723 up_read(&mkey->sem); 704 up_read(&mkey->sem);
724 key_put(mkey); 705 key_put(mkey);
706 memzero_explicit(derived_key, sizeof(derived_key));
725 return ret; 707 return ret;
726} 708}
727 709
@@ -828,13 +810,13 @@ static int encrypted_instantiate(struct key *key,
828 ret = encrypted_init(epayload, key->description, format, master_desc, 810 ret = encrypted_init(epayload, key->description, format, master_desc,
829 decrypted_datalen, hex_encoded_iv); 811 decrypted_datalen, hex_encoded_iv);
830 if (ret < 0) { 812 if (ret < 0) {
831 kfree(epayload); 813 kzfree(epayload);
832 goto out; 814 goto out;
833 } 815 }
834 816
835 rcu_assign_keypointer(key, epayload); 817 rcu_assign_keypointer(key, epayload);
836out: 818out:
837 kfree(datablob); 819 kzfree(datablob);
838 return ret; 820 return ret;
839} 821}
840 822
@@ -843,8 +825,7 @@ static void encrypted_rcu_free(struct rcu_head *rcu)
843 struct encrypted_key_payload *epayload; 825 struct encrypted_key_payload *epayload;
844 826
845 epayload = container_of(rcu, struct encrypted_key_payload, rcu); 827 epayload = container_of(rcu, struct encrypted_key_payload, rcu);
846 memset(epayload->decrypted_data, 0, epayload->decrypted_datalen); 828 kzfree(epayload);
847 kfree(epayload);
848} 829}
849 830
850/* 831/*
@@ -902,7 +883,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
902 rcu_assign_keypointer(key, new_epayload); 883 rcu_assign_keypointer(key, new_epayload);
903 call_rcu(&epayload->rcu, encrypted_rcu_free); 884 call_rcu(&epayload->rcu, encrypted_rcu_free);
904out: 885out:
905 kfree(buf); 886 kzfree(buf);
906 return ret; 887 return ret;
907} 888}
908 889
@@ -960,33 +941,26 @@ static long encrypted_read(const struct key *key, char __user *buffer,
960 941
961 up_read(&mkey->sem); 942 up_read(&mkey->sem);
962 key_put(mkey); 943 key_put(mkey);
944 memzero_explicit(derived_key, sizeof(derived_key));
963 945
964 if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) 946 if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
965 ret = -EFAULT; 947 ret = -EFAULT;
966 kfree(ascii_buf); 948 kzfree(ascii_buf);
967 949
968 return asciiblob_len; 950 return asciiblob_len;
969out: 951out:
970 up_read(&mkey->sem); 952 up_read(&mkey->sem);
971 key_put(mkey); 953 key_put(mkey);
954 memzero_explicit(derived_key, sizeof(derived_key));
972 return ret; 955 return ret;
973} 956}
974 957
975/* 958/*
976 * encrypted_destroy - before freeing the key, clear the decrypted data 959 * encrypted_destroy - clear and free the key's payload
977 *
978 * Before freeing the key, clear the memory containing the decrypted
979 * key data.
980 */ 960 */
981static void encrypted_destroy(struct key *key) 961static void encrypted_destroy(struct key *key)
982{ 962{
983 struct encrypted_key_payload *epayload = key->payload.data[0]; 963 kzfree(key->payload.data[0]);
984
985 if (!epayload)
986 return;
987
988 memzero_explicit(epayload->decrypted_data, epayload->decrypted_datalen);
989 kfree(key->payload.data[0]);
990} 964}
991 965
992struct key_type key_type_encrypted = { 966struct key_type key_type_encrypted = {
@@ -999,47 +973,17 @@ struct key_type key_type_encrypted = {
999}; 973};
1000EXPORT_SYMBOL_GPL(key_type_encrypted); 974EXPORT_SYMBOL_GPL(key_type_encrypted);
1001 975
1002static void encrypted_shash_release(void) 976static int __init init_encrypted(void)
1003{
1004 if (hashalg)
1005 crypto_free_shash(hashalg);
1006 if (hmacalg)
1007 crypto_free_shash(hmacalg);
1008}
1009
1010static int __init encrypted_shash_alloc(void)
1011{ 977{
1012 int ret; 978 int ret;
1013 979
1014 hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); 980 hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
1015 if (IS_ERR(hmacalg)) { 981 if (IS_ERR(hash_tfm)) {
1016 pr_info("encrypted_key: could not allocate crypto %s\n", 982 pr_err("encrypted_key: can't allocate %s transform: %ld\n",
1017 hmac_alg); 983 hash_alg, PTR_ERR(hash_tfm));
1018 return PTR_ERR(hmacalg); 984 return PTR_ERR(hash_tfm);
1019 }
1020
1021 hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
1022 if (IS_ERR(hashalg)) {
1023 pr_info("encrypted_key: could not allocate crypto %s\n",
1024 hash_alg);
1025 ret = PTR_ERR(hashalg);
1026 goto hashalg_fail;
1027 } 985 }
1028 986
1029 return 0;
1030
1031hashalg_fail:
1032 crypto_free_shash(hmacalg);
1033 return ret;
1034}
1035
1036static int __init init_encrypted(void)
1037{
1038 int ret;
1039
1040 ret = encrypted_shash_alloc();
1041 if (ret < 0)
1042 return ret;
1043 ret = aes_get_sizes(); 987 ret = aes_get_sizes();
1044 if (ret < 0) 988 if (ret < 0)
1045 goto out; 989 goto out;
@@ -1048,14 +992,14 @@ static int __init init_encrypted(void)
1048 goto out; 992 goto out;
1049 return 0; 993 return 0;
1050out: 994out:
1051 encrypted_shash_release(); 995 crypto_free_shash(hash_tfm);
1052 return ret; 996 return ret;
1053 997
1054} 998}
1055 999
1056static void __exit cleanup_encrypted(void) 1000static void __exit cleanup_encrypted(void)
1057{ 1001{
1058 encrypted_shash_release(); 1002 crypto_free_shash(hash_tfm);
1059 unregister_key_type(&key_type_encrypted); 1003 unregister_key_type(&key_type_encrypted);
1060} 1004}
1061 1005
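The encrypted-keys rework above replaces the kmalloc'd sdesc wrapper with SHASH_DESC_ON_STACK() and a single long-lived hash_tfm, and zeroises the descriptor after use. Below is a minimal sketch of that one-shot digest idiom; it is illustrative only, the algorithm name "sha256" and the helper name sketch_digest() are assumptions, and a real user would allocate the tfm once at init time exactly as the patch does.

#include <linux/err.h>
#include <linux/types.h>
#include <crypto/hash.h>

/*
 * Sketch only: one-shot digest with a stack-allocated descriptor.
 */
static int sketch_digest(const u8 *buf, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;
                err = crypto_shash_digest(desc, buf, len, out);
                shash_desc_zero(desc);  /* wipe any state left on the stack */
        }

        crypto_free_shash(tfm);
        return err;
}

When the resulting digest is compared against attacker-influenced data, crypto_memneq() (as the datablob_hmac_verify() hunk above now does) avoids leaking the position of the first mismatch through timing, which an early-exit memcmp() can.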
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 595becc6d0d2..87cb260e4890 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -158,9 +158,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
158 158
159 kfree(key->description); 159 kfree(key->description);
160 160
161#ifdef KEY_DEBUGGING 161 memzero_explicit(key, sizeof(*key));
162 key->magic = KEY_DEBUG_MAGIC_X;
163#endif
164 kmem_cache_free(key_jar, key); 162 kmem_cache_free(key_jar, key);
165 } 163 }
166} 164}
diff --git a/security/keys/key.c b/security/keys/key.c
index 455c04d80bbb..83da68d98b40 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -660,14 +660,11 @@ not_found:
660 goto error; 660 goto error;
661 661
662found: 662found:
663 /* pretend it doesn't exist if it is awaiting deletion */ 663 /* A key is allowed to be looked up only if someone still owns a
664 if (refcount_read(&key->usage) == 0) 664 * reference to it - otherwise it's awaiting the gc.
665 goto not_found;
666
667 /* this races with key_put(), but that doesn't matter since key_put()
668 * doesn't actually change the key
669 */ 665 */
670 __key_get(key); 666 if (!refcount_inc_not_zero(&key->usage))
667 goto not_found;
671 668
672error: 669error:
673 spin_unlock(&key_serial_lock); 670 spin_unlock(&key_serial_lock);
@@ -966,12 +963,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
966 /* the key must be writable */ 963 /* the key must be writable */
967 ret = key_permission(key_ref, KEY_NEED_WRITE); 964 ret = key_permission(key_ref, KEY_NEED_WRITE);
968 if (ret < 0) 965 if (ret < 0)
969 goto error; 966 return ret;
970 967
971 /* attempt to update it if supported */ 968 /* attempt to update it if supported */
972 ret = -EOPNOTSUPP;
973 if (!key->type->update) 969 if (!key->type->update)
974 goto error; 970 return -EOPNOTSUPP;
975 971
976 memset(&prep, 0, sizeof(prep)); 972 memset(&prep, 0, sizeof(prep));
977 prep.data = payload; 973 prep.data = payload;
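The key.c hunk replaces the refcount_read() == 0 test plus __key_get() with refcount_inc_not_zero(), which performs the liveness check and the reference grab in one atomic step, so an object whose count already reached zero can never be resurrected. A generic sketch of that lookup idiom follows; the item structure, list and lock are invented for illustration and are not the key-serial tree the patch touches.

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct item {
        refcount_t usage;
        int id;
        struct list_head link;
};

static DEFINE_SPINLOCK(item_lock);
static LIST_HEAD(item_list);

/*
 * Sketch only: return the item with @id holding a reference, or NULL if it
 * does not exist or is already awaiting destruction (usage hit zero).
 */
static struct item *item_lookup(int id)
{
        struct item *it, *found = NULL;

        spin_lock(&item_lock);
        list_for_each_entry(it, &item_list, link) {
                if (it->id != id)
                        continue;
                /* Only hand the object out if someone still owns it. */
                if (refcount_inc_not_zero(&it->usage))
                        found = it;
                break;
        }
        spin_unlock(&item_lock);
        return found;
}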
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 447a7d5cee0f..ab0b337c84b4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
99 /* pull the payload in if one was supplied */ 99 /* pull the payload in if one was supplied */
100 payload = NULL; 100 payload = NULL;
101 101
102 if (_payload) { 102 if (plen) {
103 ret = -ENOMEM; 103 ret = -ENOMEM;
104 payload = kvmalloc(plen, GFP_KERNEL); 104 payload = kvmalloc(plen, GFP_KERNEL);
105 if (!payload) 105 if (!payload)
@@ -132,7 +132,10 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
132 132
133 key_ref_put(keyring_ref); 133 key_ref_put(keyring_ref);
134 error3: 134 error3:
135 kvfree(payload); 135 if (payload) {
136 memzero_explicit(payload, plen);
137 kvfree(payload);
138 }
136 error2: 139 error2:
137 kfree(description); 140 kfree(description);
138 error: 141 error:
@@ -324,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
324 327
325 /* pull the payload in if one was supplied */ 328 /* pull the payload in if one was supplied */
326 payload = NULL; 329 payload = NULL;
327 if (_payload) { 330 if (plen) {
328 ret = -ENOMEM; 331 ret = -ENOMEM;
329 payload = kmalloc(plen, GFP_KERNEL); 332 payload = kmalloc(plen, GFP_KERNEL);
330 if (!payload) 333 if (!payload)
@@ -347,7 +350,7 @@ long keyctl_update_key(key_serial_t id,
347 350
348 key_ref_put(key_ref); 351 key_ref_put(key_ref);
349error2: 352error2:
350 kfree(payload); 353 kzfree(payload);
351error: 354error:
352 return ret; 355 return ret;
353} 356}
@@ -1093,7 +1096,10 @@ long keyctl_instantiate_key_common(key_serial_t id,
1093 keyctl_change_reqkey_auth(NULL); 1096 keyctl_change_reqkey_auth(NULL);
1094 1097
1095error2: 1098error2:
1096 kvfree(payload); 1099 if (payload) {
1100 memzero_explicit(payload, plen);
1101 kvfree(payload);
1102 }
1097error: 1103error:
1098 return ret; 1104 return ret;
1099} 1105}
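The keyctl.c changes make sure user-supplied payloads are wiped before their buffers go back to the allocator, using memzero_explicit() (which the compiler cannot elide the way it may elide a memset() just before a free) or kzfree(). A small sketch of the copy-in / use / scrub pattern follows; the helper name and its error handling are illustrative, not taken from the patch.

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Sketch only: copy a user-supplied secret in, consume it, then clear the
 * buffer before releasing it.
 */
static long use_secret_from_user(const void __user *uptr, size_t plen)
{
        void *payload;
        long ret = 0;

        if (!plen)
                return -EINVAL;

        payload = kvmalloc(plen, GFP_KERNEL);
        if (!payload)
                return -ENOMEM;

        if (copy_from_user(payload, uptr, plen)) {
                ret = -EFAULT;
                goto out;
        }

        /* ...hand @payload to whatever consumes the secret... */

out:
        memzero_explicit(payload, plen);
        kvfree(payload);
        return ret;
}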
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4d1678e4586f..de81793f9920 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -706,7 +706,7 @@ descend_to_keyring:
706 * Non-keyrings avoid the leftmost branch of the root entirely (root 706 * Non-keyrings avoid the leftmost branch of the root entirely (root
707 * slots 1-15). 707 * slots 1-15).
708 */ 708 */
709 ptr = ACCESS_ONCE(keyring->keys.root); 709 ptr = READ_ONCE(keyring->keys.root);
710 if (!ptr) 710 if (!ptr)
711 goto not_this_keyring; 711 goto not_this_keyring;
712 712
@@ -720,7 +720,7 @@ descend_to_keyring:
720 if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) 720 if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
721 goto not_this_keyring; 721 goto not_this_keyring;
722 722
723 ptr = ACCESS_ONCE(shortcut->next_node); 723 ptr = READ_ONCE(shortcut->next_node);
724 node = assoc_array_ptr_to_node(ptr); 724 node = assoc_array_ptr_to_node(ptr);
725 goto begin_node; 725 goto begin_node;
726 } 726 }
@@ -740,7 +740,7 @@ descend_to_node:
740 if (assoc_array_ptr_is_shortcut(ptr)) { 740 if (assoc_array_ptr_is_shortcut(ptr)) {
741 shortcut = assoc_array_ptr_to_shortcut(ptr); 741 shortcut = assoc_array_ptr_to_shortcut(ptr);
742 smp_read_barrier_depends(); 742 smp_read_barrier_depends();
743 ptr = ACCESS_ONCE(shortcut->next_node); 743 ptr = READ_ONCE(shortcut->next_node);
744 BUG_ON(!assoc_array_ptr_is_node(ptr)); 744 BUG_ON(!assoc_array_ptr_is_node(ptr));
745 } 745 }
746 node = assoc_array_ptr_to_node(ptr); 746 node = assoc_array_ptr_to_node(ptr);
@@ -752,7 +752,7 @@ begin_node:
752ascend_to_node: 752ascend_to_node:
753 /* Go through the slots in a node */ 753 /* Go through the slots in a node */
754 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { 754 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
755 ptr = ACCESS_ONCE(node->slots[slot]); 755 ptr = READ_ONCE(node->slots[slot]);
756 756
757 if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) 757 if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
758 goto descend_to_node; 758 goto descend_to_node;
@@ -790,13 +790,13 @@ ascend_to_node:
790 /* We've dealt with all the slots in the current node, so now we need 790 /* We've dealt with all the slots in the current node, so now we need
791 * to ascend to the parent and continue processing there. 791 * to ascend to the parent and continue processing there.
792 */ 792 */
793 ptr = ACCESS_ONCE(node->back_pointer); 793 ptr = READ_ONCE(node->back_pointer);
794 slot = node->parent_slot; 794 slot = node->parent_slot;
795 795
796 if (ptr && assoc_array_ptr_is_shortcut(ptr)) { 796 if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
797 shortcut = assoc_array_ptr_to_shortcut(ptr); 797 shortcut = assoc_array_ptr_to_shortcut(ptr);
798 smp_read_barrier_depends(); 798 smp_read_barrier_depends();
799 ptr = ACCESS_ONCE(shortcut->back_pointer); 799 ptr = READ_ONCE(shortcut->back_pointer);
800 slot = shortcut->parent_slot; 800 slot = shortcut->parent_slot;
801 } 801 }
802 if (!ptr) 802 if (!ptr)
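The keyring.c hunks are a mechanical ACCESS_ONCE() to READ_ONCE() conversion; both force a single untorn load of a pointer that a concurrent updater may change, READ_ONCE() simply being the current idiom. The toy sketch below (hypothetical names, unrelated to the assoc_array code) shows why the annotation matters for a lockless reader.

#include <linux/compiler.h>

struct lnode {
        struct lnode *next;
        int value;
};

/* Updated by a writer, read locklessly below. */
static struct lnode *shared_head;

/*
 * Sketch only: load the shared pointer exactly once.  Without READ_ONCE()
 * the compiler may legally reload shared_head between the NULL test and
 * the dereference and observe a different (possibly NULL) pointer.
 */
static int first_value(int fallback)
{
        struct lnode *p = READ_ONCE(shared_head);

        return p ? p->value : fallback;
}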
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 2217dfec7996..86bced9fdbdf 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -809,15 +809,14 @@ long join_session_keyring(const char *name)
809 ret = PTR_ERR(keyring); 809 ret = PTR_ERR(keyring);
810 goto error2; 810 goto error2;
811 } else if (keyring == new->session_keyring) { 811 } else if (keyring == new->session_keyring) {
812 key_put(keyring);
813 ret = 0; 812 ret = 0;
814 goto error2; 813 goto error3;
815 } 814 }
816 815
817 /* we've got a keyring - now to install it */ 816 /* we've got a keyring - now to install it */
818 ret = install_session_keyring_to_cred(new, keyring); 817 ret = install_session_keyring_to_cred(new, keyring);
819 if (ret < 0) 818 if (ret < 0)
820 goto error2; 819 goto error3;
821 820
822 commit_creds(new); 821 commit_creds(new);
823 mutex_unlock(&key_session_mutex); 822 mutex_unlock(&key_session_mutex);
@@ -827,6 +826,8 @@ long join_session_keyring(const char *name)
827okay: 826okay:
828 return ret; 827 return ret;
829 828
829error3:
830 key_put(keyring);
830error2: 831error2:
831 mutex_unlock(&key_session_mutex); 832 mutex_unlock(&key_session_mutex);
832error: 833error:
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 2ae31c5a87de..435e86e13879 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -70,7 +70,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
70 } 70 }
71 71
72 ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); 72 ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
73 kfree(sdesc); 73 kzfree(sdesc);
74 return ret; 74 return ret;
75} 75}
76 76
@@ -114,7 +114,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
114 if (!ret) 114 if (!ret)
115 ret = crypto_shash_final(&sdesc->shash, digest); 115 ret = crypto_shash_final(&sdesc->shash, digest);
116out: 116out:
117 kfree(sdesc); 117 kzfree(sdesc);
118 return ret; 118 return ret;
119} 119}
120 120
@@ -165,7 +165,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
165 paramdigest, TPM_NONCE_SIZE, h1, 165 paramdigest, TPM_NONCE_SIZE, h1,
166 TPM_NONCE_SIZE, h2, 1, &c, 0, 0); 166 TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
167out: 167out:
168 kfree(sdesc); 168 kzfree(sdesc);
169 return ret; 169 return ret;
170} 170}
171 171
@@ -246,7 +246,7 @@ static int TSS_checkhmac1(unsigned char *buffer,
246 if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) 246 if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
247 ret = -EINVAL; 247 ret = -EINVAL;
248out: 248out:
249 kfree(sdesc); 249 kzfree(sdesc);
250 return ret; 250 return ret;
251} 251}
252 252
@@ -347,7 +347,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
347 if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) 347 if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
348 ret = -EINVAL; 348 ret = -EINVAL;
349out: 349out:
350 kfree(sdesc); 350 kzfree(sdesc);
351 return ret; 351 return ret;
352} 352}
353 353
@@ -564,7 +564,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
564 *bloblen = storedsize; 564 *bloblen = storedsize;
565 } 565 }
566out: 566out:
567 kfree(td); 567 kzfree(td);
568 return ret; 568 return ret;
569} 569}
570 570
@@ -678,7 +678,7 @@ static int key_seal(struct trusted_key_payload *p,
678 if (ret < 0) 678 if (ret < 0)
679 pr_info("trusted_key: srkseal failed (%d)\n", ret); 679 pr_info("trusted_key: srkseal failed (%d)\n", ret);
680 680
681 kfree(tb); 681 kzfree(tb);
682 return ret; 682 return ret;
683} 683}
684 684
@@ -703,7 +703,7 @@ static int key_unseal(struct trusted_key_payload *p,
703 /* pull migratable flag out of sealed key */ 703 /* pull migratable flag out of sealed key */
704 p->migratable = p->key[--p->key_len]; 704 p->migratable = p->key[--p->key_len];
705 705
706 kfree(tb); 706 kzfree(tb);
707 return ret; 707 return ret;
708} 708}
709 709
@@ -1037,12 +1037,12 @@ static int trusted_instantiate(struct key *key,
1037 if (!ret && options->pcrlock) 1037 if (!ret && options->pcrlock)
1038 ret = pcrlock(options->pcrlock); 1038 ret = pcrlock(options->pcrlock);
1039out: 1039out:
1040 kfree(datablob); 1040 kzfree(datablob);
1041 kfree(options); 1041 kzfree(options);
1042 if (!ret) 1042 if (!ret)
1043 rcu_assign_keypointer(key, payload); 1043 rcu_assign_keypointer(key, payload);
1044 else 1044 else
1045 kfree(payload); 1045 kzfree(payload);
1046 return ret; 1046 return ret;
1047} 1047}
1048 1048
@@ -1051,8 +1051,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
1051 struct trusted_key_payload *p; 1051 struct trusted_key_payload *p;
1052 1052
1053 p = container_of(rcu, struct trusted_key_payload, rcu); 1053 p = container_of(rcu, struct trusted_key_payload, rcu);
1054 memset(p->key, 0, p->key_len); 1054 kzfree(p);
1055 kfree(p);
1056} 1055}
1057 1056
1058/* 1057/*
@@ -1094,13 +1093,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
1094 ret = datablob_parse(datablob, new_p, new_o); 1093 ret = datablob_parse(datablob, new_p, new_o);
1095 if (ret != Opt_update) { 1094 if (ret != Opt_update) {
1096 ret = -EINVAL; 1095 ret = -EINVAL;
1097 kfree(new_p); 1096 kzfree(new_p);
1098 goto out; 1097 goto out;
1099 } 1098 }
1100 1099
1101 if (!new_o->keyhandle) { 1100 if (!new_o->keyhandle) {
1102 ret = -EINVAL; 1101 ret = -EINVAL;
1103 kfree(new_p); 1102 kzfree(new_p);
1104 goto out; 1103 goto out;
1105 } 1104 }
1106 1105
@@ -1114,22 +1113,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
1114 ret = key_seal(new_p, new_o); 1113 ret = key_seal(new_p, new_o);
1115 if (ret < 0) { 1114 if (ret < 0) {
1116 pr_info("trusted_key: key_seal failed (%d)\n", ret); 1115 pr_info("trusted_key: key_seal failed (%d)\n", ret);
1117 kfree(new_p); 1116 kzfree(new_p);
1118 goto out; 1117 goto out;
1119 } 1118 }
1120 if (new_o->pcrlock) { 1119 if (new_o->pcrlock) {
1121 ret = pcrlock(new_o->pcrlock); 1120 ret = pcrlock(new_o->pcrlock);
1122 if (ret < 0) { 1121 if (ret < 0) {
1123 pr_info("trusted_key: pcrlock failed (%d)\n", ret); 1122 pr_info("trusted_key: pcrlock failed (%d)\n", ret);
1124 kfree(new_p); 1123 kzfree(new_p);
1125 goto out; 1124 goto out;
1126 } 1125 }
1127 } 1126 }
1128 rcu_assign_keypointer(key, new_p); 1127 rcu_assign_keypointer(key, new_p);
1129 call_rcu(&p->rcu, trusted_rcu_free); 1128 call_rcu(&p->rcu, trusted_rcu_free);
1130out: 1129out:
1131 kfree(datablob); 1130 kzfree(datablob);
1132 kfree(new_o); 1131 kzfree(new_o);
1133 return ret; 1132 return ret;
1134} 1133}
1135 1134
@@ -1158,24 +1157,19 @@ static long trusted_read(const struct key *key, char __user *buffer,
1158 for (i = 0; i < p->blob_len; i++) 1157 for (i = 0; i < p->blob_len; i++)
1159 bufp = hex_byte_pack(bufp, p->blob[i]); 1158 bufp = hex_byte_pack(bufp, p->blob[i]);
1160 if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { 1159 if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
1161 kfree(ascii_buf); 1160 kzfree(ascii_buf);
1162 return -EFAULT; 1161 return -EFAULT;
1163 } 1162 }
1164 kfree(ascii_buf); 1163 kzfree(ascii_buf);
1165 return 2 * p->blob_len; 1164 return 2 * p->blob_len;
1166} 1165}
1167 1166
1168/* 1167/*
1169 * trusted_destroy - before freeing the key, clear the decrypted data 1168 * trusted_destroy - clear and free the key's payload
1170 */ 1169 */
1171static void trusted_destroy(struct key *key) 1170static void trusted_destroy(struct key *key)
1172{ 1171{
1173 struct trusted_key_payload *p = key->payload.data[0]; 1172 kzfree(key->payload.data[0]);
1174
1175 if (!p)
1176 return;
1177 memset(p->key, 0, p->key_len);
1178 kfree(key->payload.data[0]);
1179} 1173}
1180 1174
1181struct key_type key_type_trusted = { 1175struct key_type key_type_trusted = {
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 26605134f17a..3d8c68eba516 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -86,10 +86,18 @@ EXPORT_SYMBOL_GPL(user_preparse);
86 */ 86 */
87void user_free_preparse(struct key_preparsed_payload *prep) 87void user_free_preparse(struct key_preparsed_payload *prep)
88{ 88{
89 kfree(prep->payload.data[0]); 89 kzfree(prep->payload.data[0]);
90} 90}
91EXPORT_SYMBOL_GPL(user_free_preparse); 91EXPORT_SYMBOL_GPL(user_free_preparse);
92 92
93static void user_free_payload_rcu(struct rcu_head *head)
94{
95 struct user_key_payload *payload;
96
97 payload = container_of(head, struct user_key_payload, rcu);
98 kzfree(payload);
99}
100
93/* 101/*
94 * update a user defined key 102 * update a user defined key
95 * - the key's semaphore is write-locked 103 * - the key's semaphore is write-locked
@@ -112,7 +120,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
112 prep->payload.data[0] = NULL; 120 prep->payload.data[0] = NULL;
113 121
114 if (zap) 122 if (zap)
115 kfree_rcu(zap, rcu); 123 call_rcu(&zap->rcu, user_free_payload_rcu);
116 return ret; 124 return ret;
117} 125}
118EXPORT_SYMBOL_GPL(user_update); 126EXPORT_SYMBOL_GPL(user_update);
@@ -130,7 +138,7 @@ void user_revoke(struct key *key)
130 138
131 if (upayload) { 139 if (upayload) {
132 rcu_assign_keypointer(key, NULL); 140 rcu_assign_keypointer(key, NULL);
133 kfree_rcu(upayload, rcu); 141 call_rcu(&upayload->rcu, user_free_payload_rcu);
134 } 142 }
135} 143}
136 144
@@ -143,7 +151,7 @@ void user_destroy(struct key *key)
143{ 151{
144 struct user_key_payload *upayload = key->payload.data[0]; 152 struct user_key_payload *upayload = key->payload.data[0];
145 153
146 kfree(upayload); 154 kzfree(upayload);
147} 155}
148 156
149EXPORT_SYMBOL_GPL(user_destroy); 157EXPORT_SYMBOL_GPL(user_destroy);
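user_defined.c stops using kfree_rcu() because the payload must be scrubbed, not merely freed, once the RCU grace period ends, and kfree_rcu() can only end in kfree(). The sketch below shows the container_of()-based callback shape with an invented payload structure; only the call_rcu()/kzfree() combination mirrors the patch.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct secret_payload {
        struct rcu_head rcu;
        unsigned int len;
        u8 data[];
};

/* Runs once every pre-existing RCU reader has finished with the object. */
static void secret_payload_free_rcu(struct rcu_head *head)
{
        struct secret_payload *p;

        p = container_of(head, struct secret_payload, rcu);
        kzfree(p);              /* wipe the whole allocation, then free it */
}

/*
 * Sketch only: retire @old without waiting.  Readers that still hold it
 * stay safe, and the buffer is scrubbed in the callback rather than left
 * intact as kfree_rcu() would leave it.
 */
static void secret_payload_retire(struct secret_payload *old)
{
        if (old)
                call_rcu(&old->rcu, secret_payload_free_rcu);
}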
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 2f836ca09860..cd67d1c12cf1 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1618,6 +1618,7 @@ static int snd_timer_user_tselect(struct file *file,
1618 if (err < 0) 1618 if (err < 0)
1619 goto __err; 1619 goto __err;
1620 1620
1621 tu->qhead = tu->qtail = tu->qused = 0;
1621 kfree(tu->queue); 1622 kfree(tu->queue);
1622 tu->queue = NULL; 1623 tu->queue = NULL;
1623 kfree(tu->tqueue); 1624 kfree(tu->tqueue);
@@ -1959,6 +1960,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1959 1960
1960 tu = file->private_data; 1961 tu = file->private_data;
1961 unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); 1962 unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
1963 mutex_lock(&tu->ioctl_lock);
1962 spin_lock_irq(&tu->qlock); 1964 spin_lock_irq(&tu->qlock);
1963 while ((long)count - result >= unit) { 1965 while ((long)count - result >= unit) {
1964 while (!tu->qused) { 1966 while (!tu->qused) {
@@ -1974,7 +1976,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1974 add_wait_queue(&tu->qchange_sleep, &wait); 1976 add_wait_queue(&tu->qchange_sleep, &wait);
1975 1977
1976 spin_unlock_irq(&tu->qlock); 1978 spin_unlock_irq(&tu->qlock);
1979 mutex_unlock(&tu->ioctl_lock);
1977 schedule(); 1980 schedule();
1981 mutex_lock(&tu->ioctl_lock);
1978 spin_lock_irq(&tu->qlock); 1982 spin_lock_irq(&tu->qlock);
1979 1983
1980 remove_wait_queue(&tu->qchange_sleep, &wait); 1984 remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
1994 tu->qused--; 1998 tu->qused--;
1995 spin_unlock_irq(&tu->qlock); 1999 spin_unlock_irq(&tu->qlock);
1996 2000
1997 mutex_lock(&tu->ioctl_lock);
1998 if (tu->tread) { 2001 if (tu->tread) {
1999 if (copy_to_user(buffer, &tu->tqueue[qhead], 2002 if (copy_to_user(buffer, &tu->tqueue[qhead],
2000 sizeof(struct snd_timer_tread))) 2003 sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
2004 sizeof(struct snd_timer_read))) 2007 sizeof(struct snd_timer_read)))
2005 err = -EFAULT; 2008 err = -EFAULT;
2006 } 2009 }
2007 mutex_unlock(&tu->ioctl_lock);
2008 2010
2009 spin_lock_irq(&tu->qlock); 2011 spin_lock_irq(&tu->qlock);
2010 if (err < 0) 2012 if (err < 0)
@@ -2014,6 +2016,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
2014 } 2016 }
2015 _error: 2017 _error:
2016 spin_unlock_irq(&tu->qlock); 2018 spin_unlock_irq(&tu->qlock);
2019 mutex_unlock(&tu->ioctl_lock);
2017 return result > 0 ? result : err; 2020 return result > 0 ? result : err;
2018} 2021}
2019 2022
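The timer.c change takes tu->ioctl_lock for the whole read loop, so a concurrent ioctl cannot reallocate the queue while the reader copies from it, and drops both the spinlock and the mutex around schedule() so the ioctl path is not blocked while the reader sleeps. A rough sketch of that lock ordering follows; the structure and helper are invented, and the wait loop is simplified compared to the driver's.

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct qdev {
        struct mutex ioctl_lock;        /* serialises readers vs. queue resize */
        spinlock_t qlock;               /* protects qused */
        wait_queue_head_t qchange_sleep;
        unsigned int qused;
};

/*
 * Sketch only: sleep until data is queued while keeping resizers out.
 * Both locks are dropped before schedule() and retaken afterwards.
 */
static int wait_for_data(struct qdev *d)
{
        DEFINE_WAIT(wait);
        int err = 0;

        mutex_lock(&d->ioctl_lock);
        spin_lock_irq(&d->qlock);
        while (!d->qused) {
                prepare_to_wait(&d->qchange_sleep, &wait, TASK_INTERRUPTIBLE);

                spin_unlock_irq(&d->qlock);
                mutex_unlock(&d->ioctl_lock);

                schedule();

                mutex_lock(&d->ioctl_lock);
                spin_lock_irq(&d->qlock);
                finish_wait(&d->qchange_sleep, &wait);

                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        break;
                }
        }
        /* ...consume one queued element here, still under qlock... */
        spin_unlock_irq(&d->qlock);
        mutex_unlock(&d->ioctl_lock);
        return err;
}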
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a57988d617e9..cbeebc0a9711 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5854,7 +5854,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5854 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 5854 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5855 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5855 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5856 SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 5856 SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5857 SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5857 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5858 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5859 SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5860 SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5861 SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5858 SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), 5862 SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
5859 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), 5863 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
5860 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), 5864 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5862,13 +5866,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5862 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 5866 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
5863 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 5867 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
5864 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 5868 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
5869 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
5865 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 5870 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
5866 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5867 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5871 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5868 SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5872 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5869 SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
5870 SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5871 SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
5872 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 5873 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
5873 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), 5874 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
5874 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), 5875 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 7ae46c2647d4..b7ef8c59b49a 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -301,6 +301,14 @@ static int atmel_classd_codec_probe(struct snd_soc_codec *codec)
301 return 0; 301 return 0;
302} 302}
303 303
304static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
305{
306 struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
307 struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
308
309 return regcache_sync(dd->regmap);
310}
311
304static struct regmap *atmel_classd_codec_get_remap(struct device *dev) 312static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
305{ 313{
306 return dev_get_regmap(dev, NULL); 314 return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
308 316
309static struct snd_soc_codec_driver soc_codec_dev_classd = { 317static struct snd_soc_codec_driver soc_codec_dev_classd = {
310 .probe = atmel_classd_codec_probe, 318 .probe = atmel_classd_codec_probe,
319 .resume = atmel_classd_codec_resume,
311 .get_regmap = atmel_classd_codec_get_remap, 320 .get_regmap = atmel_classd_codec_get_remap,
312 .component_driver = { 321 .component_driver = {
313 .controls = atmel_classd_snd_controls, 322 .controls = atmel_classd_snd_controls,
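The atmel-classd addition restores the codec's register state after suspend by replaying the regmap cache. A minimal sketch of that idiom, assuming a driver that keeps its regmap pointer in plain device drvdata (the real driver goes through the ASoC card drvdata instead):

#include <linux/device.h>
#include <linux/regmap.h>

struct classd_like {
        struct regmap *regmap;
        /* ... */
};

/* Sketch only: replay every cached register into the powered-up device. */
static int classd_like_resume(struct device *dev)
{
        struct classd_like *dd = dev_get_drvdata(dev);

        return regcache_sync(dd->regmap);
}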
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 6dd7578f0bb8..024d83fa6a7f 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -772,7 +772,7 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
772 ++i; 772 ++i;
773 msleep(50); 773 msleep(50);
774 } 774 }
775 } while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock)); 775 } while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
776 776
777 if (!srm_lock) 777 if (!srm_lock)
778 dev_warn(codec->dev, "SRM failed to lock\n"); 778 dev_warn(codec->dev, "SRM failed to lock\n");
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 9c365a7f758d..7899a2cdeb42 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -1108,6 +1108,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
1108 DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform") 1108 DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
1109 } 1109 }
1110 }, 1110 },
1111 {
1112 .ident = "Thinkpad Helix 2nd",
1113 .matches = {
1114 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1115 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
1116 }
1117 },
1111 1118
1112 { } 1119 { }
1113}; 1120};
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2c9dedab5184..bc136d2bd7cd 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -202,7 +202,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
202 if (ret < 0) 202 if (ret < 0)
203 return ret; 203 return ret;
204 204
205 ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX); 205 ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
206 if (ret < 0) 206 if (ret < 0)
207 return ret; 207 return ret;
208 208
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 58c525096a7c..498b15345b1a 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -413,8 +413,11 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
413 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK; 413 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
414 u64 *ipc_header = (u64 *)(&header); 414 u64 *ipc_header = (u64 *)(&header);
415 struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc); 415 struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
416 unsigned long flags;
416 417
418 spin_lock_irqsave(&ipc->dsp->spinlock, flags);
417 msg = skl_ipc_reply_get_msg(ipc, *ipc_header); 419 msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
420 spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
418 if (msg == NULL) { 421 if (msg == NULL) {
419 dev_dbg(ipc->dev, "ipc: rx list is empty\n"); 422 dev_dbg(ipc->dev, "ipc: rx list is empty\n");
420 return; 423 return;
@@ -456,8 +459,10 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
456 } 459 }
457 } 460 }
458 461
462 spin_lock_irqsave(&ipc->dsp->spinlock, flags);
459 list_del(&msg->list); 463 list_del(&msg->list);
460 sst_ipc_tx_msg_reply_complete(ipc, msg); 464 sst_ipc_tx_msg_reply_complete(ipc, msg);
465 spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
461} 466}
462 467
463irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context) 468irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
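The skl-sst-ipc.c fix extends dsp->spinlock coverage to both the reply lookup and the list_del(), because the IRQ thread and the message submitter can touch the rx list concurrently. Below is a generic sketch of guarding such a list with spin_lock_irqsave(); the names are invented and do not come from the SKL driver.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct reply_msg {
        struct list_head list;
        u64 header;
};

static LIST_HEAD(rx_list);
static DEFINE_SPINLOCK(rx_lock);

/*
 * Sketch only: pop the first queued reply, or NULL.  Safe against an
 * interrupt handler that appends to rx_list, because the lock also
 * disables local interrupts while it is held.
 */
static struct reply_msg *rx_pop(void)
{
        struct reply_msg *msg = NULL;
        unsigned long flags;

        spin_lock_irqsave(&rx_lock, flags);
        if (!list_empty(&rx_list)) {
                msg = list_first_entry(&rx_list, struct reply_msg, list);
                list_del(&msg->list);
        }
        spin_unlock_irqrestore(&rx_lock, flags);

        return msg;
}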
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 3a99712e44a8..64a0f8ed33e1 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -2502,7 +2502,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
2502 2502
2503 if (ret < 0) 2503 if (ret < 0)
2504 return ret; 2504 return ret;
2505 tkn_count += ret; 2505 tkn_count = ret;
2506 2506
2507 tuple_size += tkn_count * 2507 tuple_size += tkn_count *
2508 sizeof(struct snd_soc_tplg_vendor_string_elem); 2508 sizeof(struct snd_soc_tplg_vendor_string_elem);
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 6df3b317a476..4c9b5781282b 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -410,7 +410,7 @@ static int skl_free(struct hdac_ext_bus *ebus)
410 struct skl *skl = ebus_to_skl(ebus); 410 struct skl *skl = ebus_to_skl(ebus);
411 struct hdac_bus *bus = ebus_to_hbus(ebus); 411 struct hdac_bus *bus = ebus_to_hbus(ebus);
412 412
413 skl->init_failed = 1; /* to be sure */ 413 skl->init_done = 0; /* to be sure */
414 414
415 snd_hdac_ext_stop_streams(ebus); 415 snd_hdac_ext_stop_streams(ebus);
416 416
@@ -428,8 +428,10 @@ static int skl_free(struct hdac_ext_bus *ebus)
428 428
429 snd_hdac_ext_bus_exit(ebus); 429 snd_hdac_ext_bus_exit(ebus);
430 430
431 cancel_work_sync(&skl->probe_work);
431 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 432 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
432 snd_hdac_i915_exit(&ebus->bus); 433 snd_hdac_i915_exit(&ebus->bus);
434
433 return 0; 435 return 0;
434} 436}
435 437
@@ -566,6 +568,84 @@ static const struct hdac_bus_ops bus_core_ops = {
566 .get_response = snd_hdac_bus_get_response, 568 .get_response = snd_hdac_bus_get_response,
567}; 569};
568 570
571static int skl_i915_init(struct hdac_bus *bus)
572{
573 int err;
574
575 /*
576 * The HDMI codec is in GPU so we need to ensure that it is powered
577 * up and ready for probe
578 */
579 err = snd_hdac_i915_init(bus);
580 if (err < 0)
581 return err;
582
583 err = snd_hdac_display_power(bus, true);
584 if (err < 0)
585 dev_err(bus->dev, "Cannot turn on display power on i915\n");
586
587 return err;
588}
589
590static void skl_probe_work(struct work_struct *work)
591{
592 struct skl *skl = container_of(work, struct skl, probe_work);
593 struct hdac_ext_bus *ebus = &skl->ebus;
594 struct hdac_bus *bus = ebus_to_hbus(ebus);
595 struct hdac_ext_link *hlink = NULL;
596 int err;
597
598 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
599 err = skl_i915_init(bus);
600 if (err < 0)
601 return;
602 }
603
604 err = skl_init_chip(bus, true);
605 if (err < 0) {
606 dev_err(bus->dev, "Init chip failed with err: %d\n", err);
607 goto out_err;
608 }
609
610 /* codec detection */
611 if (!bus->codec_mask)
612 dev_info(bus->dev, "no hda codecs found!\n");
613
614 /* create codec instances */
615 err = skl_codec_create(ebus);
616 if (err < 0)
617 goto out_err;
618
619 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
620 err = snd_hdac_display_power(bus, false);
621 if (err < 0) {
622 dev_err(bus->dev, "Cannot turn off display power on i915\n");
623 return;
624 }
625 }
626
627 /* register platform dai and controls */
628 err = skl_platform_register(bus->dev);
629 if (err < 0)
630 return;
631 /*
632 * we are done probing so decrement link counts
633 */
634 list_for_each_entry(hlink, &ebus->hlink_list, list)
635 snd_hdac_ext_bus_link_put(ebus, hlink);
636
637 /* configure PM */
638 pm_runtime_put_noidle(bus->dev);
639 pm_runtime_allow(bus->dev);
640 skl->init_done = 1;
641
642 return;
643
644out_err:
645 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
646 err = snd_hdac_display_power(bus, false);
647}
648
569/* 649/*
570 * constructor 650 * constructor
571 */ 651 */
@@ -593,6 +673,7 @@ static int skl_create(struct pci_dev *pci,
593 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops); 673 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
594 ebus->bus.use_posbuf = 1; 674 ebus->bus.use_posbuf = 1;
595 skl->pci = pci; 675 skl->pci = pci;
676 INIT_WORK(&skl->probe_work, skl_probe_work);
596 677
597 ebus->bus.bdl_pos_adj = 0; 678 ebus->bus.bdl_pos_adj = 0;
598 679
@@ -601,27 +682,6 @@ static int skl_create(struct pci_dev *pci,
601 return 0; 682 return 0;
602} 683}
603 684
604static int skl_i915_init(struct hdac_bus *bus)
605{
606 int err;
607
608 /*
609 * The HDMI codec is in GPU so we need to ensure that it is powered
610 * up and ready for probe
611 */
612 err = snd_hdac_i915_init(bus);
613 if (err < 0)
614 return err;
615
616 err = snd_hdac_display_power(bus, true);
617 if (err < 0) {
618 dev_err(bus->dev, "Cannot turn on display power on i915\n");
619 return err;
620 }
621
622 return err;
623}
624
625static int skl_first_init(struct hdac_ext_bus *ebus) 685static int skl_first_init(struct hdac_ext_bus *ebus)
626{ 686{
627 struct skl *skl = ebus_to_skl(ebus); 687 struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
684 /* initialize chip */ 744 /* initialize chip */
685 skl_init_pci(skl); 745 skl_init_pci(skl);
686 746
687 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 747 return skl_init_chip(bus, true);
688 err = skl_i915_init(bus);
689 if (err < 0)
690 return err;
691 }
692
693 skl_init_chip(bus, true);
694
695 /* codec detection */
696 if (!bus->codec_mask) {
697 dev_info(bus->dev, "no hda codecs found!\n");
698 }
699
700 return 0;
701} 748}
702 749
703static int skl_probe(struct pci_dev *pci, 750static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@ static int skl_probe(struct pci_dev *pci,
706 struct skl *skl; 753 struct skl *skl;
707 struct hdac_ext_bus *ebus = NULL; 754 struct hdac_ext_bus *ebus = NULL;
708 struct hdac_bus *bus = NULL; 755 struct hdac_bus *bus = NULL;
709 struct hdac_ext_link *hlink = NULL;
710 int err; 756 int err;
711 757
712 /* we use ext core ops, so provide NULL for ops here */ 758 /* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@ static int skl_probe(struct pci_dev *pci,
729 775
730 if (skl->nhlt == NULL) { 776 if (skl->nhlt == NULL) {
731 err = -ENODEV; 777 err = -ENODEV;
732 goto out_display_power_off; 778 goto out_free;
733 } 779 }
734 780
735 err = skl_nhlt_create_sysfs(skl); 781 err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@ static int skl_probe(struct pci_dev *pci,
760 if (bus->mlcap) 806 if (bus->mlcap)
761 snd_hdac_ext_bus_get_ml_capabilities(ebus); 807 snd_hdac_ext_bus_get_ml_capabilities(ebus);
762 808
809 snd_hdac_bus_stop_chip(bus);
810
763 /* create device for soc dmic */ 811 /* create device for soc dmic */
764 err = skl_dmic_device_register(skl); 812 err = skl_dmic_device_register(skl);
765 if (err < 0) 813 if (err < 0)
766 goto out_dsp_free; 814 goto out_dsp_free;
767 815
768 /* register platform dai and controls */ 816 schedule_work(&skl->probe_work);
769 err = skl_platform_register(bus->dev);
770 if (err < 0)
771 goto out_dmic_free;
772
773 /* create codec instances */
774 err = skl_codec_create(ebus);
775 if (err < 0)
776 goto out_unregister;
777
778 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
779 err = snd_hdac_display_power(bus, false);
780 if (err < 0) {
781 dev_err(bus->dev, "Cannot turn off display power on i915\n");
782 return err;
783 }
784 }
785
786 /*
787 * we are done probling so decrement link counts
788 */
789 list_for_each_entry(hlink, &ebus->hlink_list, list)
790 snd_hdac_ext_bus_link_put(ebus, hlink);
791
792 /* configure PM */
793 pm_runtime_put_noidle(bus->dev);
794 pm_runtime_allow(bus->dev);
795 817
796 return 0; 818 return 0;
797 819
798out_unregister:
799 skl_platform_unregister(bus->dev);
800out_dmic_free:
801 skl_dmic_device_unregister(skl);
802out_dsp_free: 820out_dsp_free:
803 skl_free_dsp(skl); 821 skl_free_dsp(skl);
804out_mach_free: 822out_mach_free:
805 skl_machine_device_unregister(skl); 823 skl_machine_device_unregister(skl);
806out_nhlt_free: 824out_nhlt_free:
807 skl_nhlt_free(skl->nhlt); 825 skl_nhlt_free(skl->nhlt);
808out_display_power_off:
809 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
810 snd_hdac_display_power(bus, false);
811out_free: 826out_free:
812 skl->init_failed = 1;
813 skl_free(ebus); 827 skl_free(ebus);
814 828
815 return err; 829 return err;
@@ -828,7 +842,7 @@ static void skl_shutdown(struct pci_dev *pci)
828 842
829 skl = ebus_to_skl(ebus); 843 skl = ebus_to_skl(ebus);
830 844
831 if (skl->init_failed) 845 if (!skl->init_done)
832 return; 846 return;
833 847
834 snd_hdac_ext_stop_streams(ebus); 848 snd_hdac_ext_stop_streams(ebus);
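The skl.c rework defers everything slow or sleep-prone in probe (i915 power-up, chip init, codec enumeration, platform registration) to a work item and cancels it on teardown. A stripped-down sketch of that INIT_WORK()/schedule_work()/cancel_work_sync() lifecycle follows, with a hypothetical driver structure standing in for struct skl.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct mydev {
        struct work_struct probe_work;
        bool init_done;
        /* ...bus and DSP handles... */
};

static void mydev_probe_work(struct work_struct *work)
{
        struct mydev *md = container_of(work, struct mydev, probe_work);

        /* Slow, sleepable bring-up goes here: power, firmware, child devices. */

        md->init_done = true;
}

static int mydev_probe(struct mydev *md)
{
        INIT_WORK(&md->probe_work, mydev_probe_work);

        /* ...fast resource setup only... */

        schedule_work(&md->probe_work);         /* finish asynchronously */
        return 0;
}

static void mydev_remove(struct mydev *md)
{
        /* Never tear down while the deferred init may still be running. */
        cancel_work_sync(&md->probe_work);

        if (md->init_done) {
                /* ...undo whatever the work item set up... */
        }
}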
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index a454f6035f3e..2a630fcb7f08 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -46,7 +46,7 @@ struct skl {
46 struct hdac_ext_bus ebus; 46 struct hdac_ext_bus ebus;
47 struct pci_dev *pci; 47 struct pci_dev *pci;
48 48
49 unsigned int init_failed:1; /* delayed init failed */ 49 unsigned int init_done:1; /* delayed init status */
50 struct platform_device *dmic_dev; 50 struct platform_device *dmic_dev;
51 struct platform_device *i2s_dev; 51 struct platform_device *i2s_dev;
52 struct snd_soc_platform *platform; 52 struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@ struct skl {
64 const struct firmware *tplg; 64 const struct firmware *tplg;
65 65
66 int supend_active; 66 int supend_active;
67
68 struct work_struct probe_work;
67}; 69};
68 70
69#define skl_to_ebus(s) (&(s)->ebus) 71#define skl_to_ebus(s) (&(s)->ebus)
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 66203d107a11..d3b0dc145a56 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -507,7 +507,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
507 rbga = rbgx; 507 rbga = rbgx;
508 adg->rbga_rate_for_441khz = rate / div; 508 adg->rbga_rate_for_441khz = rate / div;
509 ckr |= brg_table[i] << 20; 509 ckr |= brg_table[i] << 20;
510 if (req_441kHz_rate) 510 if (req_441kHz_rate &&
511 !(adg_mode_flags(adg) & AUDIO_OUT_48))
511 parent_clk_name = __clk_get_name(clk); 512 parent_clk_name = __clk_get_name(clk);
512 } 513 }
513 } 514 }
@@ -522,7 +523,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
522 rbgb = rbgx; 523 rbgb = rbgx;
523 adg->rbgb_rate_for_48khz = rate / div; 524 adg->rbgb_rate_for_48khz = rate / div;
524 ckr |= brg_table[i] << 16; 525 ckr |= brg_table[i] << 16;
525 if (req_48kHz_rate) 526 if (req_48kHz_rate &&
527 (adg_mode_flags(adg) & AUDIO_OUT_48))
526 parent_clk_name = __clk_get_name(clk); 528 parent_clk_name = __clk_get_name(clk);
527 } 529 }
528 } 530 }
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index 7d92a24b7cfa..d879c010cf03 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -89,6 +89,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
89 dev_dbg(dev, "ctu/mix path = 0x%08x", data); 89 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
90 90
91 rsnd_mod_write(mod, CMD_ROUTE_SLCT, data); 91 rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
92 rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
92 rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 93 rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
93 94
94 rsnd_adg_set_cmd_timsel_gen2(mod, io); 95 rsnd_adg_set_cmd_timsel_gen2(mod, io);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1744015408c3..8c1f4e2e0c4f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -343,6 +343,57 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
343 return 0x76543210; 343 return 0x76543210;
344} 344}
345 345
346u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
347{
348 enum rsnd_mod_type playback_mods[] = {
349 RSND_MOD_SRC,
350 RSND_MOD_CMD,
351 RSND_MOD_SSIU,
352 };
353 enum rsnd_mod_type capture_mods[] = {
354 RSND_MOD_CMD,
355 RSND_MOD_SRC,
356 RSND_MOD_SSIU,
357 };
358 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
359 struct rsnd_mod *tmod = NULL;
360 enum rsnd_mod_type *mods =
361 rsnd_io_is_play(io) ?
362 playback_mods : capture_mods;
363 int i;
364
365 /*
366 * This is needed for 24bit data
367 * We need to shift 8bit
368 *
369 * Linux 24bit data is located as 0x00******
370 * HW 24bit data is located as 0x******00
371 *
372 */
373 switch (runtime->sample_bits) {
374 case 16:
375 return 0;
376 case 32:
377 break;
378 }
379
380 for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
381 tmod = rsnd_io_to_mod(io, mods[i]);
382 if (tmod)
383 break;
384 }
385
386 if (tmod != mod)
387 return 0;
388
389 if (rsnd_io_is_play(io))
390 return (0 << 20) | /* shift to Left */
391 (8 << 16); /* 8bit */
392 else
393 return (1 << 20) | /* shift to Right */
394 (8 << 16); /* 8bit */
395}
396
346/* 397/*
347 * rsnd_dai functions 398 * rsnd_dai functions
348 */ 399 */
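The new rsnd_get_busif_shift() above encodes an 8-bit data shift for 24-bit samples: Linux keeps them LSB-justified (0x00XXXXXX) while the hardware wants them MSB-justified (0xXXXXXX00), and the direction depends on playback vs capture. A small standalone sketch of the returned value and of the data movement it asks for, assuming only the bit layout visible in the hunk (direction in bit 20, amount in bits 19..16):

#include <stdint.h>
#include <stdio.h>

/* layout taken from the hunk above: bit 20 = direction, bits 19..16 = amount */
static uint32_t busif_mode(int shift_right, unsigned int bits)
{
        return ((uint32_t)(shift_right ? 1 : 0) << 20) | (bits << 16);
}

int main(void)
{
        uint32_t sample  = 0x00123456;        /* Linux 24-bit-in-32 sample    */
        uint32_t to_hw   = sample << 8;       /* what "shift left by 8" does  */
        uint32_t from_hw = 0x12345600 >> 8;   /* capture path shifts right    */

        printf("playback BUSIF_MODE = %#x\n", busif_mode(0, 8)); /* 0x80000  */
        printf("capture  BUSIF_MODE = %#x\n", busif_mode(1, 8)); /* 0x180000 */
        printf("0x%08x -> 0x%08x -> 0x%08x\n", sample, to_hw, from_hw);
        return 0;
}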
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 63b6d3c28021..4b0980728e13 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -236,6 +236,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
236 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20), 236 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20),
237 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20), 237 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20),
238 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20), 238 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20),
239 RSND_GEN_M_REG(CMD_BUSIF_MODE, 0x184, 0x20),
239 RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20), 240 RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20),
240 RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20), 241 RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20),
241 RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20), 242 RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index dbf4163427e8..323af41ecfcb 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -73,6 +73,7 @@ enum rsnd_reg {
73 RSND_REG_SCU_SYS_INT_EN0, 73 RSND_REG_SCU_SYS_INT_EN0,
74 RSND_REG_SCU_SYS_INT_EN1, 74 RSND_REG_SCU_SYS_INT_EN1,
75 RSND_REG_CMD_CTRL, 75 RSND_REG_CMD_CTRL,
76 RSND_REG_CMD_BUSIF_MODE,
76 RSND_REG_CMD_BUSIF_DALIGN, 77 RSND_REG_CMD_BUSIF_DALIGN,
77 RSND_REG_CMD_ROUTE_SLCT, 78 RSND_REG_CMD_ROUTE_SLCT,
78 RSND_REG_CMDOUT_TIMSEL, 79 RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
204 u32 mask, u32 data); 205 u32 mask, u32 data);
205u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 206u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
206u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 207u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
208u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
207 209
208/* 210/*
209 * R-Car DMA 211 * R-Car DMA
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 20b5b2ec625e..76a477a3ccb5 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -190,11 +190,13 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
190 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 190 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
191 struct device *dev = rsnd_priv_to_dev(priv); 191 struct device *dev = rsnd_priv_to_dev(priv);
192 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 192 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
193 int is_play = rsnd_io_is_play(io);
193 int use_src = 0; 194 int use_src = 0;
194 u32 fin, fout; 195 u32 fin, fout;
195 u32 ifscr, fsrate, adinr; 196 u32 ifscr, fsrate, adinr;
196 u32 cr, route; 197 u32 cr, route;
197 u32 bsdsr, bsisr; 198 u32 bsdsr, bsisr;
199 u32 i_busif, o_busif, tmp;
198 uint ratio; 200 uint ratio;
199 201
200 if (!runtime) 202 if (!runtime)
@@ -270,6 +272,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
270 break; 272 break;
271 } 273 }
272 274
275 /* BUSIF_MODE */
276 tmp = rsnd_get_busif_shift(io, mod);
277 i_busif = ( is_play ? tmp : 0) | 1;
278 o_busif = (!is_play ? tmp : 0) | 1;
279
273 rsnd_mod_write(mod, SRC_ROUTE_MODE0, route); 280 rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
274 281
275 rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */ 282 rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */
@@ -281,8 +288,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
281 rsnd_mod_write(mod, SRC_BSISR, bsisr); 288 rsnd_mod_write(mod, SRC_BSISR, bsisr);
282 rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */ 289 rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */
283 290
284 rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1); 291 rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
285 rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1); 292 rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
293
286 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 294 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
287 295
288 rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout); 296 rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
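In the SRC path only the side facing memory gets that shift: the input BUSIF for playback, the output BUSIF for capture; the other side keeps plain mode 1. A minimal sketch of that selection, with shift standing in for a value returned by rsnd_get_busif_shift():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t shift = 8u << 16;      /* example value from rsnd_get_busif_shift() */
        int is_play = 1;                /* 1 = playback stream, 0 = capture stream   */

        uint32_t i_busif = ( is_play ? shift : 0) | 1;   /* memory -> SRC side */
        uint32_t o_busif = (!is_play ? shift : 0) | 1;   /* SRC -> SSI side    */

        printf("SRC_I_BUSIF_MODE = %#x\n", i_busif);     /* 0x80001 for playback */
        printf("SRC_O_BUSIF_MODE = %#x\n", o_busif);     /* 0x1 for playback     */
        return 0;
}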
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 135c5669f796..91e5c07911b4 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -302,7 +302,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
302 * always use 32bit system word. 302 * always use 32bit system word.
303 * see also rsnd_ssi_master_clk_enable() 303 * see also rsnd_ssi_master_clk_enable()
304 */ 304 */
305 cr_own = FORCE | SWL_32 | PDTA; 305 cr_own = FORCE | SWL_32;
306 306
307 if (rdai->bit_clk_inv) 307 if (rdai->bit_clk_inv)
308 cr_own |= SCKP; 308 cr_own |= SCKP;
@@ -550,6 +550,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
550 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 550 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
551 u32 *buf = (u32 *)(runtime->dma_area + 551 u32 *buf = (u32 *)(runtime->dma_area +
552 rsnd_dai_pointer_offset(io, 0)); 552 rsnd_dai_pointer_offset(io, 0));
553 int shift = 0;
554
555 switch (runtime->sample_bits) {
556 case 32:
557 shift = 8;
558 break;
559 }
553 560
554 /* 561 /*
555 * 8/16/32 data can be assesse to TDR/RDR register 562 * 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
557 * see rsnd_ssi_init() 564 * see rsnd_ssi_init()
558 */ 565 */
559 if (rsnd_io_is_play(io)) 566 if (rsnd_io_is_play(io))
560 rsnd_mod_write(mod, SSITDR, *buf); 567 rsnd_mod_write(mod, SSITDR, (*buf) << shift);
561 else 568 else
562 *buf = rsnd_mod_read(mod, SSIRDR); 569 *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
563 570
564 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf)); 571 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
565 } 572 }
@@ -709,6 +716,11 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
709 struct rsnd_priv *priv) 716 struct rsnd_priv *priv)
710{ 717{
711 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 718 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
719 struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
720
721 /* Do nothing for SSI parent mod */
722 if (ssi_parent_mod == mod)
723 return 0;
712 724
713 /* PIO will request IRQ again */ 725 /* PIO will request IRQ again */
714 free_irq(ssi->irq, mod); 726 free_irq(ssi->irq, mod);
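For PIO transfers the driver applies the same 8-bit adjustment by hand: samples in 32-bit containers are shifted left before the SSITDR write and right after the SSIRDR read, so a 24-bit payload survives the round trip. A quick standalone check of that round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int sample_bits = 32;               /* the PCM format uses a 32-bit container */
        int shift = (sample_bits == 32) ? 8 : 0;

        uint32_t buf = 0x00abcdef;          /* 24-bit payload, LSB justified */
        uint32_t tdr = buf << shift;        /* value written to SSITDR       */
        uint32_t rdr = tdr;                 /* pretend the FIFO echoes it    */
        uint32_t readback = rdr >> shift;   /* value stored back in the ring */

        assert(readback == buf);
        printf("wrote %#010x, read back %#010x\n", tdr, readback);
        return 0;
}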
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 14fafdaf1395..512d238b79e2 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -144,7 +144,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
144 (rsnd_io_is_play(io) ? 144 (rsnd_io_is_play(io) ?
145 rsnd_runtime_channel_after_ctu(io) : 145 rsnd_runtime_channel_after_ctu(io) :
146 rsnd_runtime_channel_original(io))); 146 rsnd_runtime_channel_original(io)));
147 rsnd_mod_write(mod, SSI_BUSIF_MODE, 1); 147 rsnd_mod_write(mod, SSI_BUSIF_MODE,
148 rsnd_get_busif_shift(io, mod) | 1);
148 rsnd_mod_write(mod, SSI_BUSIF_DALIGN, 149 rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
149 rsnd_get_dalign(mod, io)); 150 rsnd_get_dalign(mod, io));
150 } 151 }
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index aae099c0e502..754e3ef8d7ae 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
2286 list_for_each_entry(rtd, &card->rtd_list, list) 2286 list_for_each_entry(rtd, &card->rtd_list, list)
2287 flush_delayed_work(&rtd->delayed_work); 2287 flush_delayed_work(&rtd->delayed_work);
2288 2288
2289 /* free the ALSA card at first; this syncs with pending operations */
2290 snd_card_free(card->snd_card);
2291
2289 /* remove and free each DAI */ 2292 /* remove and free each DAI */
2290 soc_remove_dai_links(card); 2293 soc_remove_dai_links(card);
2291 soc_remove_pcm_runtimes(card); 2294 soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
2300 if (card->remove) 2303 if (card->remove)
2301 card->remove(card); 2304 card->remove(card);
2302 2305
2303 snd_card_free(card->snd_card);
2304 return 0; 2306 return 0;
2305
2306} 2307}
2307 2308
2308/* removes a socdev */ 2309/* removes a socdev */
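Moving snd_card_free() ahead of the DAI/PCM teardown means everything still operating on the card is synced before the objects it references are removed. The same ordering rule, shown outside ASoC with a plain worker thread (sync with pending work first, then free what it used):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *worker(void *arg)
{
        int *data = arg;
        *data += 1;                /* pending work against the shared resource */
        return NULL;
}

int main(void)
{
        int *data = malloc(sizeof(*data));
        pthread_t tid;

        *data = 41;
        pthread_create(&tid, NULL, worker, data);

        pthread_join(tid, NULL);   /* sync with pending work first...           */
        printf("%d\n", *data);
        free(data);                /* ...then release what the work referenced  */
        return 0;
}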
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index e6c9902c6d82..165c2b1d4317 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -240,9 +240,13 @@ Add a probe on schedule() function 12th line with recording cpu local variable:
240 or 240 or
241 ./perf probe --add='schedule:12 cpu' 241 ./perf probe --add='schedule:12 cpu'
242 242
243 this will add one or more probes which has the name start with "schedule". 243Add one or more probes which has the name start with "schedule".
244 244
245 Add probes on lines in schedule() function which calls update_rq_clock(). 245 ./perf probe schedule*
246 or
247 ./perf probe --add='schedule*'
248
249Add probes on lines in schedule() function which calls update_rq_clock().
246 250
247 ./perf probe 'schedule;update_rq_clock*' 251 ./perf probe 'schedule;update_rq_clock*'
248 or 252 or
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index dfbb506d2c34..142606c0ec9c 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -39,7 +39,7 @@ EVENT HANDLERS
39When perf script is invoked using a trace script, a user-defined 39When perf script is invoked using a trace script, a user-defined
40'handler function' is called for each event in the trace. If there's 40'handler function' is called for each event in the trace. If there's
41no handler function defined for a given event type, the event is 41no handler function defined for a given event type, the event is
42ignored (or passed to a 'trace_handled' function, see below) and the 42ignored (or passed to a 'trace_unhandled' function, see below) and the
43next event is processed. 43next event is processed.
44 44
45Most of the event's field values are passed as arguments to the 45Most of the event's field values are passed as arguments to the
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 54acba221558..51ec2d20068a 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -149,10 +149,8 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
149 print "id=%d, args=%s\n" % \ 149 print "id=%d, args=%s\n" % \
150 (id, args), 150 (id, args),
151 151
152def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, 152def trace_unhandled(event_name, context, event_fields_dict):
153 common_pid, common_comm): 153 print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
154 print_header(event_name, common_cpu, common_secs, common_nsecs,
155 common_pid, common_comm)
156 154
157def print_header(event_name, cpu, secs, nsecs, pid, comm): 155def print_header(event_name, cpu, secs, nsecs, pid, comm):
158 print "%-20s %5u %05u.%09u %8u %-20s " % \ 156 print "%-20s %5u %05u.%09u %8u %-20s " % \
@@ -321,7 +319,7 @@ So those are the essential steps in writing and running a script. The
321process can be generalized to any tracepoint or set of tracepoints 319process can be generalized to any tracepoint or set of tracepoints
322you're interested in - basically find the tracepoint(s) you're 320you're interested in - basically find the tracepoint(s) you're
323interested in by looking at the list of available events shown by 321interested in by looking at the list of available events shown by
324'perf list' and/or look in /sys/kernel/debug/tracing events for 322'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
325detailed event and field info, record the corresponding trace data 323detailed event and field info, record the corresponding trace data
326using 'perf record', passing it the list of interesting events, 324using 'perf record', passing it the list of interesting events,
327generate a skeleton script using 'perf script -g python' and modify the 325generate a skeleton script using 'perf script -g python' and modify the
@@ -334,7 +332,7 @@ right place, you can have your script listed alongside the other
334scripts listed by the 'perf script -l' command e.g.: 332scripts listed by the 'perf script -l' command e.g.:
335 333
336---- 334----
337root@tropicana:~# perf script -l 335# perf script -l
338List of available trace scripts: 336List of available trace scripts:
339 wakeup-latency system-wide min/max/avg wakeup latency 337 wakeup-latency system-wide min/max/avg wakeup latency
340 rw-by-file <comm> r/w activity for a program, by file 338 rw-by-file <comm> r/w activity for a program, by file
@@ -383,8 +381,6 @@ source tree:
383 381
384---- 382----
385# ls -al kernel-source/tools/perf/scripts/python 383# ls -al kernel-source/tools/perf/scripts/python
386
387root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
388total 32 384total 32
389drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 . 385drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
390drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. 386drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
@@ -399,7 +395,7 @@ otherwise your script won't show up at run-time), 'perf script -l'
399should show a new entry for your script: 395should show a new entry for your script:
400 396
401---- 397----
402root@tropicana:~# perf script -l 398# perf script -l
403List of available trace scripts: 399List of available trace scripts:
404 wakeup-latency system-wide min/max/avg wakeup latency 400 wakeup-latency system-wide min/max/avg wakeup latency
405 rw-by-file <comm> r/w activity for a program, by file 401 rw-by-file <comm> r/w activity for a program, by file
@@ -437,7 +433,7 @@ EVENT HANDLERS
437When perf script is invoked using a trace script, a user-defined 433When perf script is invoked using a trace script, a user-defined
438'handler function' is called for each event in the trace. If there's 434'handler function' is called for each event in the trace. If there's
439no handler function defined for a given event type, the event is 435no handler function defined for a given event type, the event is
440ignored (or passed to a 'trace_handled' function, see below) and the 436ignored (or passed to a 'trace_unhandled' function, see below) and the
441next event is processed. 437next event is processed.
442 438
443Most of the event's field values are passed as arguments to the 439Most of the event's field values are passed as arguments to the
@@ -532,7 +528,7 @@ can implement a set of optional functions:
532gives scripts a chance to do setup tasks: 528gives scripts a chance to do setup tasks:
533 529
534---- 530----
535def trace_begin: 531def trace_begin():
536 pass 532 pass
537---- 533----
538 534
@@ -541,7 +537,7 @@ def trace_begin:
541 as display results: 537 as display results:
542 538
543---- 539----
544def trace_end: 540def trace_end():
545 pass 541 pass
546---- 542----
547 543
@@ -550,8 +546,7 @@ def trace_end:
550 of common arguments are passed into it: 546 of common arguments are passed into it:
551 547
552---- 548----
553def trace_unhandled(event_name, context, common_cpu, common_secs, 549def trace_unhandled(event_name, context, event_fields_dict):
554 common_nsecs, common_pid, common_comm):
555 pass 550 pass
556---- 551----
557 552
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 8354d04b392f..1f4fbc9a3292 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -19,18 +19,18 @@ CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
19 19
20include $(srctree)/tools/scripts/Makefile.arch 20include $(srctree)/tools/scripts/Makefile.arch
21 21
22$(call detected_var,ARCH) 22$(call detected_var,SRCARCH)
23 23
24NO_PERF_REGS := 1 24NO_PERF_REGS := 1
25 25
26# Additional ARCH settings for ppc 26# Additional ARCH settings for ppc
27ifeq ($(ARCH),powerpc) 27ifeq ($(SRCARCH),powerpc)
28 NO_PERF_REGS := 0 28 NO_PERF_REGS := 0
29 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 29 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
30endif 30endif
31 31
32# Additional ARCH settings for x86 32# Additional ARCH settings for x86
33ifeq ($(ARCH),x86) 33ifeq ($(SRCARCH),x86)
34 $(call detected,CONFIG_X86) 34 $(call detected,CONFIG_X86)
35 ifeq (${IS_64_BIT}, 1) 35 ifeq (${IS_64_BIT}, 1)
36 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated 36 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
@@ -43,12 +43,12 @@ ifeq ($(ARCH),x86)
43 NO_PERF_REGS := 0 43 NO_PERF_REGS := 0
44endif 44endif
45 45
46ifeq ($(ARCH),arm) 46ifeq ($(SRCARCH),arm)
47 NO_PERF_REGS := 0 47 NO_PERF_REGS := 0
48 LIBUNWIND_LIBS = -lunwind -lunwind-arm 48 LIBUNWIND_LIBS = -lunwind -lunwind-arm
49endif 49endif
50 50
51ifeq ($(ARCH),arm64) 51ifeq ($(SRCARCH),arm64)
52 NO_PERF_REGS := 0 52 NO_PERF_REGS := 0
53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
54endif 54endif
@@ -61,7 +61,7 @@ endif
61# Disable it on all other architectures in case libdw unwind 61# Disable it on all other architectures in case libdw unwind
62# support is detected in system. Add supported architectures 62# support is detected in system. Add supported architectures
63# to the check. 63# to the check.
64ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) 64ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
65 NO_LIBDW_DWARF_UNWIND := 1 65 NO_LIBDW_DWARF_UNWIND := 1
66endif 66endif
67 67
@@ -115,9 +115,9 @@ endif
115FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) 115FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
116FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf 116FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
117 117
118FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi 118FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi
119# include ARCH specific config 119# include ARCH specific config
120-include $(src-perf)/arch/$(ARCH)/Makefile 120-include $(src-perf)/arch/$(SRCARCH)/Makefile
121 121
122ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET 122ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
123 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET 123 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET
@@ -228,12 +228,12 @@ ifeq ($(DEBUG),0)
228endif 228endif
229 229
230INC_FLAGS += -I$(src-perf)/util/include 230INC_FLAGS += -I$(src-perf)/util/include
231INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include 231INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
232INC_FLAGS += -I$(srctree)/tools/include/uapi 232INC_FLAGS += -I$(srctree)/tools/include/uapi
233INC_FLAGS += -I$(srctree)/tools/include/ 233INC_FLAGS += -I$(srctree)/tools/include/
234INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi 234INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi
235INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/ 235INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/
236INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/ 236INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/
237 237
238# $(obj-perf) for generated common-cmds.h 238# $(obj-perf) for generated common-cmds.h
239# $(obj-perf)/util for generated bison/flex headers 239# $(obj-perf)/util for generated bison/flex headers
@@ -355,7 +355,7 @@ ifndef NO_LIBELF
355 355
356 ifndef NO_DWARF 356 ifndef NO_DWARF
357 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) 357 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
358 msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); 358 msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
359 NO_DWARF := 1 359 NO_DWARF := 1
360 else 360 else
361 CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) 361 CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
@@ -380,7 +380,7 @@ ifndef NO_LIBELF
380 CFLAGS += -DHAVE_BPF_PROLOGUE 380 CFLAGS += -DHAVE_BPF_PROLOGUE
381 $(call detected,CONFIG_BPF_PROLOGUE) 381 $(call detected,CONFIG_BPF_PROLOGUE)
382 else 382 else
383 msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset()); 383 msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset());
384 endif 384 endif
385 else 385 else
386 msg := $(warning DWARF support is off, BPF prologue is disabled); 386 msg := $(warning DWARF support is off, BPF prologue is disabled);
@@ -406,7 +406,7 @@ ifdef PERF_HAVE_JITDUMP
406 endif 406 endif
407endif 407endif
408 408
409ifeq ($(ARCH),powerpc) 409ifeq ($(SRCARCH),powerpc)
410 ifndef NO_DWARF 410 ifndef NO_DWARF
411 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX 411 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
412 endif 412 endif
@@ -487,7 +487,7 @@ else
487endif 487endif
488 488
489ifndef NO_LOCAL_LIBUNWIND 489ifndef NO_LOCAL_LIBUNWIND
490 ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) 490 ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64))
491 $(call feature_check,libunwind-debug-frame) 491 $(call feature_check,libunwind-debug-frame)
492 ifneq ($(feature-libunwind-debug-frame), 1) 492 ifneq ($(feature-libunwind-debug-frame), 1)
493 msg := $(warning No debug_frame support found in libunwind); 493 msg := $(warning No debug_frame support found in libunwind);
@@ -740,7 +740,7 @@ ifeq (${IS_64_BIT}, 1)
740 NO_PERF_READ_VDSO32 := 1 740 NO_PERF_READ_VDSO32 := 1
741 endif 741 endif
742 endif 742 endif
743 ifneq ($(ARCH), x86) 743 ifneq ($(SRCARCH), x86)
744 NO_PERF_READ_VDSOX32 := 1 744 NO_PERF_READ_VDSOX32 := 1
745 endif 745 endif
746 ifndef NO_PERF_READ_VDSOX32 746 ifndef NO_PERF_READ_VDSOX32
@@ -769,7 +769,7 @@ ifdef LIBBABELTRACE
769endif 769endif
770 770
771ifndef NO_AUXTRACE 771ifndef NO_AUXTRACE
772 ifeq ($(ARCH),x86) 772 ifeq ($(SRCARCH),x86)
773 ifeq ($(feature-get_cpuid), 0) 773 ifeq ($(feature-get_cpuid), 0)
774 msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); 774 msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
775 NO_AUXTRACE := 1 775 NO_AUXTRACE := 1
@@ -872,7 +872,7 @@ sysconfdir = $(prefix)/etc
872ETC_PERFCONFIG = etc/perfconfig 872ETC_PERFCONFIG = etc/perfconfig
873endif 873endif
874ifndef lib 874ifndef lib
875ifeq ($(ARCH)$(IS_64_BIT), x861) 875ifeq ($(SRCARCH)$(IS_64_BIT), x861)
876lib = lib64 876lib = lib64
877else 877else
878lib = lib 878lib = lib
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 79fe31f20a17..5008f51a08a2 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -226,7 +226,7 @@ endif
226 226
227ifeq ($(config),0) 227ifeq ($(config),0)
228include $(srctree)/tools/scripts/Makefile.arch 228include $(srctree)/tools/scripts/Makefile.arch
229-include arch/$(ARCH)/Makefile 229-include arch/$(SRCARCH)/Makefile
230endif 230endif
231 231
232# The FEATURE_DUMP_EXPORT holds location of the actual 232# The FEATURE_DUMP_EXPORT holds location of the actual
diff --git a/tools/perf/arch/Build b/tools/perf/arch/Build
index 109eb75cf7de..d9b6af837c7d 100644
--- a/tools/perf/arch/Build
+++ b/tools/perf/arch/Build
@@ -1,2 +1,2 @@
1libperf-y += common.o 1libperf-y += common.o
2libperf-y += $(ARCH)/ 2libperf-y += $(SRCARCH)/
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 9213a1273697..999a4e878162 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -2,7 +2,7 @@ hostprogs := jevents
2 2
3jevents-y += json.o jsmn.o jevents.o 3jevents-y += json.o jsmn.o jevents.o
4pmu-events-y += pmu-events.o 4pmu-events-y += pmu-events.o
5JDIR = pmu-events/arch/$(ARCH) 5JDIR = pmu-events/arch/$(SRCARCH)
6JSON = $(shell [ -d $(JDIR) ] && \ 6JSON = $(shell [ -d $(JDIR) ] && \
7 find $(JDIR) -name '*.json' -o -name 'mapfile.csv') 7 find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
8# 8#
@@ -10,4 +10,4 @@ JSON = $(shell [ -d $(JDIR) ] && \
10# directory and create tables in pmu-events.c. 10# directory and create tables in pmu-events.c.
11# 11#
12$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) 12$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
13 $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) 13 $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index af58ebc243ef..84222bdb8689 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -75,7 +75,7 @@ $(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/B
75 $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ 75 $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
76 $(Q)echo ';' >> $@ 76 $(Q)echo ';' >> $@
77 77
78ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc)) 78ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
79perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o 79perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
80endif 80endif
81 81
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 1f14e7612cbb..94b7c7b02bde 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -229,6 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
229 unsigned char buf2[BUFSZ]; 229 unsigned char buf2[BUFSZ];
230 size_t ret_len; 230 size_t ret_len;
231 u64 objdump_addr; 231 u64 objdump_addr;
232 const char *objdump_name;
233 char decomp_name[KMOD_DECOMP_LEN];
232 int ret; 234 int ret;
233 235
234 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); 236 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -289,9 +291,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
289 state->done[state->done_cnt++] = al.map->start; 291 state->done[state->done_cnt++] = al.map->start;
290 } 292 }
291 293
294 objdump_name = al.map->dso->long_name;
295 if (dso__needs_decompress(al.map->dso)) {
296 if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
297 decomp_name,
298 sizeof(decomp_name)) < 0) {
299 pr_debug("decompression failed\n");
300 return -1;
301 }
302
303 objdump_name = decomp_name;
304 }
305
292 /* Read the object code using objdump */ 306 /* Read the object code using objdump */
293 objdump_addr = map__rip_2objdump(al.map, al.addr); 307 objdump_addr = map__rip_2objdump(al.map, al.addr);
294 ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); 308 ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
309
310 if (dso__needs_decompress(al.map->dso))
311 unlink(objdump_name);
312
295 if (ret > 0) { 313 if (ret > 0) {
296 /* 314 /*
297 * The kernel maps are inaccurate - assume objdump is right in 315 * The kernel maps are inaccurate - assume objdump is right in
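The test now decompresses a compressed module to a temporary file, hands that path to objdump, and unlinks it afterwards. A generic, self-contained sketch of the same path-based pattern, with a plain copy standing in for the real decompression and wc standing in for objdump:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char tmp[] = "/tmp/demo-kmod-XXXXXX";  /* same template idea as KMOD_DECOMP_NAME */
        const char payload[] = "pretend this is the decompressed module\n";
        char cmd[sizeof(tmp) + 16];
        int fd = mkstemp(tmp);

        if (fd < 0)
                return 1;

        /* stand-in for decompress_to_file(): write the uncompressed payload */
        if (write(fd, payload, strlen(payload)) != (ssize_t)strlen(payload)) {
                close(fd);
                unlink(tmp);
                return 1;
        }
        close(fd);

        /* hand the named file to an external consumer (objdump in the real test) */
        snprintf(cmd, sizeof(cmd), "wc -c %s", tmp);
        if (system(cmd) != 0)
                fprintf(stderr, "consumer failed\n");

        unlink(tmp);                            /* clean up once the consumer is done */
        return 0;
}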
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 32873ec91a4e..cf00ebad2ef5 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -83,7 +83,7 @@ int test__task_exit(int subtest __maybe_unused)
83 83
84 evsel = perf_evlist__first(evlist); 84 evsel = perf_evlist__first(evlist);
85 evsel->attr.task = 1; 85 evsel->attr.task = 1;
86 evsel->attr.sample_freq = 0; 86 evsel->attr.sample_freq = 1;
87 evsel->attr.inherit = 0; 87 evsel->attr.inherit = 0;
88 evsel->attr.watermark = 0; 88 evsel->attr.watermark = 0;
89 evsel->attr.wakeup_events = 1; 89 evsel->attr.wakeup_events = 1;
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 1367d7e35242..ddbd56df9187 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1321,6 +1321,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
1321 char linkname[PATH_MAX]; 1321 char linkname[PATH_MAX];
1322 char *build_id_filename; 1322 char *build_id_filename;
1323 char *build_id_path = NULL; 1323 char *build_id_path = NULL;
1324 char *pos;
1324 1325
1325 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && 1326 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
1326 !dso__is_kcore(dso)) 1327 !dso__is_kcore(dso))
@@ -1340,7 +1341,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
1340 if (!build_id_path) 1341 if (!build_id_path)
1341 return -1; 1342 return -1;
1342 1343
1343 dirname(build_id_path); 1344 /*
1345 * old style build-id cache has name of XX/XXXXXXX.. while
1346 * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
1347 * extract the build-id part of dirname in the new style only.
1348 */
1349 pos = strrchr(build_id_path, '/');
1350 if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
1351 dirname(build_id_path);
1344 1352
1345 if (dso__is_kcore(dso) || 1353 if (dso__is_kcore(dso) ||
1346 readlink(build_id_path, linkname, sizeof(linkname)) < 0 || 1354 readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
@@ -1423,31 +1431,10 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
1423 sizeof(symfs_filename)); 1431 sizeof(symfs_filename));
1424 } 1432 }
1425 } else if (dso__needs_decompress(dso)) { 1433 } else if (dso__needs_decompress(dso)) {
1426 char tmp[PATH_MAX]; 1434 char tmp[KMOD_DECOMP_LEN];
1427 struct kmod_path m;
1428 int fd;
1429 bool ret;
1430
1431 if (kmod_path__parse_ext(&m, symfs_filename))
1432 goto out;
1433
1434 snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
1435
1436 fd = mkstemp(tmp);
1437 if (fd < 0) {
1438 free(m.ext);
1439 goto out;
1440 }
1441
1442 ret = decompress_to_file(m.ext, symfs_filename, fd);
1443
1444 if (ret)
1445 pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
1446
1447 free(m.ext);
1448 close(fd);
1449 1435
1450 if (!ret) 1436 if (dso__decompress_kmodule_path(dso, symfs_filename,
1437 tmp, sizeof(tmp)) < 0)
1451 goto out; 1438 goto out;
1452 1439
1453 strcpy(symfs_filename, tmp); 1440 strcpy(symfs_filename, tmp);
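The fix distinguishes the old build-id cache layout (last path component is the remainder of the 40-hex-digit build-id) from the new one (last component is elf/kallsyms/vdso) and strips the trailing component only in the new case. A standalone sketch of that length test, assuming SBUILD_ID_SIZE mirrors perf's 40-hex-chars-plus-NUL constant:

#include <libgen.h>
#include <stdio.h>
#include <string.h>

#define SBUILD_ID_SIZE 41   /* assumption: 40 hex chars + NUL, as in perf */

static void resolve(char *path)
{
        char *pos = strrchr(path, '/');

        /* a short last component ("elf", "kallsyms", "vdso") marks the new layout */
        if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
                path = dirname(path);           /* strip it, as annotate.c now does */

        printf("%s\n", path);
}

int main(void)
{
        char oldstyle[] = "/root/.debug/.build-id/ab/cdef0123456789abcdef0123456789abcdef01";
        char newstyle[] = "/root/.debug/.build-id/ab/cdef0123456789abcdef0123456789abcdef01/elf";

        resolve(oldstyle);   /* left untouched              */
        resolve(newstyle);   /* trailing "/elf" is stripped */
        return 0;
}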
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 168cc49654e7..e0148b081bdf 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -278,51 +278,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
278 return bf; 278 return bf;
279} 279}
280 280
281bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
282{
283 char *id_name = NULL, *ch;
284 struct stat sb;
285 char sbuild_id[SBUILD_ID_SIZE];
286
287 if (!dso->has_build_id)
288 goto err;
289
290 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
291 id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
292 if (!id_name)
293 goto err;
294 if (access(id_name, F_OK))
295 goto err;
296 if (lstat(id_name, &sb) == -1)
297 goto err;
298 if ((size_t)sb.st_size > size - 1)
299 goto err;
300 if (readlink(id_name, bf, size - 1) < 0)
301 goto err;
302
303 bf[sb.st_size] = '\0';
304
305 /*
306 * link should be:
307 * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
308 */
309 ch = strrchr(bf, '/');
310 if (!ch)
311 goto err;
312 if (ch - 3 < bf)
313 goto err;
314
315 free(id_name);
316 return strncmp(".ko", ch - 3, 3) == 0;
317err:
318 pr_err("Invalid build id: %s\n", id_name ? :
319 dso->long_name ? :
320 dso->short_name ? :
321 "[unknown]");
322 free(id_name);
323 return false;
324}
325
326#define dsos__for_each_with_build_id(pos, head) \ 281#define dsos__for_each_with_build_id(pos, head) \
327 list_for_each_entry(pos, head, node) \ 282 list_for_each_entry(pos, head, node) \
328 if (!pos->has_build_id) \ 283 if (!pos->has_build_id) \
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 8a89b195c1fc..96690a55c62c 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -17,7 +17,6 @@ char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
17 size_t size); 17 size_t size);
18 18
19char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); 19char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
20bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
21 20
22int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, 21int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
23 struct perf_sample *sample, struct perf_evsel *evsel, 22 struct perf_sample *sample, struct perf_evsel *evsel,
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index b27d127cdf68..4e7ab611377a 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -248,6 +248,64 @@ bool dso__needs_decompress(struct dso *dso)
248 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 248 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
249} 249}
250 250
251static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
252{
253 int fd = -1;
254 struct kmod_path m;
255
256 if (!dso__needs_decompress(dso))
257 return -1;
258
259 if (kmod_path__parse_ext(&m, dso->long_name))
260 return -1;
261
262 if (!m.comp)
263 goto out;
264
265 fd = mkstemp(tmpbuf);
266 if (fd < 0) {
267 dso->load_errno = errno;
268 goto out;
269 }
270
271 if (!decompress_to_file(m.ext, name, fd)) {
272 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
273 close(fd);
274 fd = -1;
275 }
276
277out:
278 free(m.ext);
279 return fd;
280}
281
282int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
283{
284 char tmpbuf[] = KMOD_DECOMP_NAME;
285 int fd;
286
287 fd = decompress_kmodule(dso, name, tmpbuf);
288 unlink(tmpbuf);
289 return fd;
290}
291
292int dso__decompress_kmodule_path(struct dso *dso, const char *name,
293 char *pathname, size_t len)
294{
295 char tmpbuf[] = KMOD_DECOMP_NAME;
296 int fd;
297
298 fd = decompress_kmodule(dso, name, tmpbuf);
299 if (fd < 0) {
300 unlink(tmpbuf);
301 return -1;
302 }
303
304 strncpy(pathname, tmpbuf, len);
305 close(fd);
306 return 0;
307}
308
251/* 309/*
252 * Parses kernel module specified in @path and updates 310 * Parses kernel module specified in @path and updates
253 * @m argument like: 311 * @m argument like:
@@ -396,7 +454,7 @@ static int do_open(char *name)
396 454
397static int __open_dso(struct dso *dso, struct machine *machine) 455static int __open_dso(struct dso *dso, struct machine *machine)
398{ 456{
399 int fd; 457 int fd = -EINVAL;
400 char *root_dir = (char *)""; 458 char *root_dir = (char *)"";
401 char *name = malloc(PATH_MAX); 459 char *name = malloc(PATH_MAX);
402 460
@@ -407,15 +465,30 @@ static int __open_dso(struct dso *dso, struct machine *machine)
407 root_dir = machine->root_dir; 465 root_dir = machine->root_dir;
408 466
409 if (dso__read_binary_type_filename(dso, dso->binary_type, 467 if (dso__read_binary_type_filename(dso, dso->binary_type,
410 root_dir, name, PATH_MAX)) { 468 root_dir, name, PATH_MAX))
411 free(name); 469 goto out;
412 return -EINVAL;
413 }
414 470
415 if (!is_regular_file(name)) 471 if (!is_regular_file(name))
416 return -EINVAL; 472 goto out;
473
474 if (dso__needs_decompress(dso)) {
475 char newpath[KMOD_DECOMP_LEN];
476 size_t len = sizeof(newpath);
477
478 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
479 fd = -dso->load_errno;
480 goto out;
481 }
482
483 strcpy(name, newpath);
484 }
417 485
418 fd = do_open(name); 486 fd = do_open(name);
487
488 if (dso__needs_decompress(dso))
489 unlink(name);
490
491out:
419 free(name); 492 free(name);
420 return fd; 493 return fd;
421} 494}
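dso.c now exposes two flavours of the same helper: dso__decompress_kmodule_path() keeps the temporary file for the caller to unlink later, while dso__decompress_kmodule_fd() unlinks the name immediately and returns only the descriptor, relying on the POSIX rule that an open file stays readable after unlink(). A small demonstration of that rule, independent of perf:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char tmpl[] = "/tmp/demo-XXXXXX";
        char buf[32] = { 0 };
        int fd = mkstemp(tmpl);

        if (fd < 0)
                return 1;

        unlink(tmpl);                        /* name is gone, the inode survives */

        if (write(fd, "still readable", 14) != 14)
                return 1;
        lseek(fd, 0, SEEK_SET);
        if (read(fd, buf, sizeof(buf) - 1) < 0)
                return 1;

        printf("%s\n", buf);                 /* prints: still readable           */
        close(fd);                           /* now the data is really released  */
        return 0;
}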
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 5fe2ab5877bd..bd061ba7b47c 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -244,6 +244,12 @@ bool is_supported_compression(const char *ext);
244bool is_kernel_module(const char *pathname, int cpumode); 244bool is_kernel_module(const char *pathname, int cpumode);
245bool decompress_to_file(const char *ext, const char *filename, int output_fd); 245bool decompress_to_file(const char *ext, const char *filename, int output_fd);
246bool dso__needs_decompress(struct dso *dso); 246bool dso__needs_decompress(struct dso *dso);
247int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
248int dso__decompress_kmodule_path(struct dso *dso, const char *name,
249 char *pathname, size_t len);
250
251#define KMOD_DECOMP_NAME "/tmp/perf-kmod-XXXXXX"
252#define KMOD_DECOMP_LEN sizeof(KMOD_DECOMP_NAME)
247 253
248struct kmod_path { 254struct kmod_path {
249 char *name; 255 char *name;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e4f7902d5afa..cda44b0e821c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -273,8 +273,20 @@ struct perf_evsel *perf_evsel__new_cycles(void)
273 struct perf_evsel *evsel; 273 struct perf_evsel *evsel;
274 274
275 event_attr_init(&attr); 275 event_attr_init(&attr);
276 /*
277 * Unnamed union member, not supported as struct member named
278 * initializer in older compilers such as gcc 4.4.7
279 *
280 * Just for probing the precise_ip:
281 */
282 attr.sample_period = 1;
276 283
277 perf_event_attr__set_max_precise_ip(&attr); 284 perf_event_attr__set_max_precise_ip(&attr);
285 /*
286 * Now let the usual logic to set up the perf_event_attr defaults
287 * to kick in when we return and before perf_evsel__open() is called.
288 */
289 attr.sample_period = 0;
278 290
279 evsel = perf_evsel__new(&attr); 291 evsel = perf_evsel__new(&attr);
280 if (evsel == NULL) 292 if (evsel == NULL)
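The evsel change assigns attr.sample_period after the initializer because sample_period sits in an unnamed union inside struct perf_event_attr, and naming an unnamed-union member in a designated initializer breaks old compilers such as gcc 4.4.7. A sketch of the same situation with a local stand-in struct (not the real perf_event_attr):

#include <stdio.h>

struct attr_like {
        unsigned int type;
        union {                    /* unnamed union, like sample_period/sample_freq */
                unsigned long long period;
                unsigned long long freq;
        };
};

int main(void)
{
        /*
         * struct attr_like a = { .type = 0, .period = 1 };
         * ...names an unnamed-union member in the initializer; very old gcc
         * rejects this, so initialize first and assign the member afterwards.
         */
        struct attr_like a = { .type = 0 };

        a.period = 1;              /* probe with a non-zero period ...               */
        /* ... run the capability probe here ... */
        a.period = 0;              /* ... then restore the default before real setup */

        printf("%llu\n", a.period);
        return 0;
}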
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 5cac8d5e009a..b5baff3007bb 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -841,7 +841,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
841 841
842/* 842/*
843 * default get_cpuid(): nothing gets recorded 843 * default get_cpuid(): nothing gets recorded
844 * actual implementation must be in arch/$(ARCH)/util/header.c 844 * actual implementation must be in arch/$(SRCARCH)/util/header.c
845 */ 845 */
846int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 846int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
847{ 847{
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 9d92af7d0718..40de3cb40d21 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1219,7 +1219,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
1219 fprintf(ofp, "# be retrieved using Python functions of the form " 1219 fprintf(ofp, "# be retrieved using Python functions of the form "
1220 "common_*(context).\n"); 1220 "common_*(context).\n");
1221 1221
1222 fprintf(ofp, "# See the perf-trace-python Documentation for the list " 1222 fprintf(ofp, "# See the perf-script-python Documentation for the list "
1223 "of available functions.\n\n"); 1223 "of available functions.\n\n");
1224 1224
1225 fprintf(ofp, "import os\n"); 1225 fprintf(ofp, "import os\n");
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 1fb2efae4f02..502505cf236a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -637,40 +637,6 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
637 return 0; 637 return 0;
638} 638}
639 639
640static int decompress_kmodule(struct dso *dso, const char *name,
641 enum dso_binary_type type)
642{
643 int fd = -1;
644 char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
645 struct kmod_path m;
646
647 if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
648 type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
649 type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
650 return -1;
651
652 if (kmod_path__parse_ext(&m, dso->long_name) || !m.comp)
653 return -1;
654
655 fd = mkstemp(tmpbuf);
656 if (fd < 0) {
657 dso->load_errno = errno;
658 goto out;
659 }
660
661 if (!decompress_to_file(m.ext, name, fd)) {
662 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
663 close(fd);
664 fd = -1;
665 }
666
667 unlink(tmpbuf);
668
669out:
670 free(m.ext);
671 return fd;
672}
673
674bool symsrc__possibly_runtime(struct symsrc *ss) 640bool symsrc__possibly_runtime(struct symsrc *ss)
675{ 641{
676 return ss->dynsym || ss->opdsec; 642 return ss->dynsym || ss->opdsec;
@@ -702,9 +668,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
702 int fd; 668 int fd;
703 669
704 if (dso__needs_decompress(dso)) { 670 if (dso__needs_decompress(dso)) {
705 fd = decompress_kmodule(dso, name, type); 671 fd = dso__decompress_kmodule_fd(dso, name);
706 if (fd < 0) 672 if (fd < 0)
707 return -1; 673 return -1;
674
675 type = dso->symtab_type;
708 } else { 676 } else {
709 fd = open(name, O_RDONLY); 677 fd = open(name, O_RDONLY);
710 if (fd < 0) { 678 if (fd < 0) {
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 8f2b068ff756..e7a98dbd2aed 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1562,10 +1562,6 @@ int dso__load(struct dso *dso, struct map *map)
1562 if (!runtime_ss && syms_ss) 1562 if (!runtime_ss && syms_ss)
1563 runtime_ss = syms_ss; 1563 runtime_ss = syms_ss;
1564 1564
1565 if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
1566 if (dso__build_id_is_kmod(dso, name, PATH_MAX))
1567 kmod = true;
1568
1569 if (syms_ss) 1565 if (syms_ss)
1570 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); 1566 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1571 else 1567 else
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index da45c4be5fb3..7755a5e0fe5e 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -178,6 +178,14 @@ frame_callback(Dwfl_Frame *state, void *arg)
178 Dwarf_Addr pc; 178 Dwarf_Addr pc;
179 bool isactivation; 179 bool isactivation;
180 180
181 if (!dwfl_frame_pc(state, &pc, NULL)) {
182 pr_err("%s", dwfl_errmsg(-1));
183 return DWARF_CB_ABORT;
184 }
185
186 // report the module before we query for isactivation
187 report_module(pc, ui);
188
181 if (!dwfl_frame_pc(state, &pc, &isactivation)) { 189 if (!dwfl_frame_pc(state, &pc, &isactivation)) {
182 pr_err("%s", dwfl_errmsg(-1)); 190 pr_err("%s", dwfl_errmsg(-1));
183 return DWARF_CB_ABORT; 191 return DWARF_CB_ABORT;
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 32c3295929b0..87940364570b 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -22,7 +22,7 @@
22#include <asm/kvm_hyp.h> 22#include <asm/kvm_hyp.h>
23 23
24#define vtr_to_max_lr_idx(v) ((v) & 0xf) 24#define vtr_to_max_lr_idx(v) ((v) & 0xf)
25#define vtr_to_nr_pre_bits(v) (((u32)(v) >> 26) + 1) 25#define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1)
26 26
27static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) 27static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
28{ 28{
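vtr_to_nr_pre_bits() reads a 3-bit field at bits 28:26, so the shift must be followed by "& 7"; without the mask, any bits set above the field leak into the count. A quick standalone check with a made-up register value:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* sample VTR: the 3-bit field at bits 28:26 is 5, plus some higher bits set */
        uint32_t vtr = (5u << 26) | (0x3u << 29);

        uint32_t unmasked = ((uint32_t)vtr >> 26) + 1;        /* old, buggy macro */
        uint32_t masked   = (((uint32_t)vtr >> 26) & 7) + 1;  /* fixed macro      */

        printf("unmasked = %u, masked = %u\n", unmasked, masked);  /* 30 vs 6 */
        assert(masked == 6);
        return 0;
}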
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index a2d63247d1bb..e2e5effba2a9 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -879,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
879 pmd_t *pmd; 879 pmd_t *pmd;
880 880
881 pud = stage2_get_pud(kvm, cache, addr); 881 pud = stage2_get_pud(kvm, cache, addr);
882 if (!pud)
883 return NULL;
884
882 if (stage2_pud_none(*pud)) { 885 if (stage2_pud_none(*pud)) {
883 if (!cache) 886 if (!cache)
884 return NULL; 887 return NULL;
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 0a4283ed9aa7..63e0bbdcddcc 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
226 226
227 switch (addr & 0xff) { 227 switch (addr & 0xff) {
228 case GIC_CPU_CTRL: 228 case GIC_CPU_CTRL:
229 val = vmcr.ctlr; 229 val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
230 val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
231 val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
232 val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
233 val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
234 val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
235
230 break; 236 break;
231 case GIC_CPU_PRIMASK: 237 case GIC_CPU_PRIMASK:
232 /* 238 /*
@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
267 273
268 switch (addr & 0xff) { 274 switch (addr & 0xff) {
269 case GIC_CPU_CTRL: 275 case GIC_CPU_CTRL:
270 vmcr.ctlr = val; 276 vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
277 vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
278 vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
279 vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
280 vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
281 vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
282
271 break; 283 break;
272 case GIC_CPU_PRIMASK: 284 case GIC_CPU_PRIMASK:
273 /* 285 /*
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 504b4bd0d651..e4187e52bb26 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
177 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; 177 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
178 u32 vmcr; 178 u32 vmcr;
179 179
180 vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; 180 vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
181 GICH_VMCR_ENABLE_GRP0_MASK;
182 vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
183 GICH_VMCR_ENABLE_GRP1_MASK;
184 vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
185 GICH_VMCR_ACK_CTL_MASK;
186 vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
187 GICH_VMCR_FIQ_EN_MASK;
188 vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
189 GICH_VMCR_CBPR_MASK;
190 vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
191 GICH_VMCR_EOI_MODE_MASK;
181 vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & 192 vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
182 GICH_VMCR_ALIAS_BINPOINT_MASK; 193 GICH_VMCR_ALIAS_BINPOINT_MASK;
183 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & 194 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
195 206
196 vmcr = cpu_if->vgic_vmcr; 207 vmcr = cpu_if->vgic_vmcr;
197 208
198 vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> 209 vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
199 GICH_VMCR_CTRL_SHIFT; 210 GICH_VMCR_ENABLE_GRP0_SHIFT;
211 vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
212 GICH_VMCR_ENABLE_GRP1_SHIFT;
213 vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
214 GICH_VMCR_ACK_CTL_SHIFT;
215 vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
216 GICH_VMCR_FIQ_EN_SHIFT;
217 vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
218 GICH_VMCR_CBPR_SHIFT;
219 vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
220 GICH_VMCR_EOI_MODE_SHIFT;
221
200 vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> 222 vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
201 GICH_VMCR_ALIAS_BINPOINT_SHIFT; 223 GICH_VMCR_ALIAS_BINPOINT_SHIFT;
202 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> 224 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 6fe3f003636a..030248e669f6 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -159,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
159void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 159void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
160{ 160{
161 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 161 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
162 u32 model = vcpu->kvm->arch.vgic.vgic_model;
162 u32 vmcr; 163 u32 vmcr;
163 164
164 /* 165 if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
165 * Ignore the FIQen bit, because GIC emulation always implies 166 vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
166 * SRE=1 which means the vFIQEn bit is also RES1. 167 ICH_VMCR_ACK_CTL_MASK;
167 */ 168 vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
168 vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) << 169 ICH_VMCR_FIQ_EN_MASK;
169 ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK; 170 } else {
170 vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK; 171 /*
172 * When emulating GICv3 on GICv3 with SRE=1 on the
173 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
174 */
175 vmcr = ICH_VMCR_FIQ_EN_MASK;
176 }
177
178 vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
179 vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
171 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; 180 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
172 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; 181 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
173 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; 182 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -180,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
180void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 189void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
181{ 190{
182 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 191 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
192 u32 model = vcpu->kvm->arch.vgic.vgic_model;
183 u32 vmcr; 193 u32 vmcr;
184 194
185 vmcr = cpu_if->vgic_vmcr; 195 vmcr = cpu_if->vgic_vmcr;
186 196
187 /* 197 if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
188 * Ignore the FIQen bit, because GIC emulation always implies 198 vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
189 * SRE=1 which means the vFIQEn bit is also RES1. 199 ICH_VMCR_ACK_CTL_SHIFT;
190 */ 200 vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
191 vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) << 201 ICH_VMCR_FIQ_EN_SHIFT;
192 ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK; 202 } else {
193 vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; 203 /*
204 * When emulating GICv3 on GICv3 with SRE=1 on the
205 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
206 */
207 vmcrp->fiqen = 1;
208 vmcrp->ackctl = 0;
209 }
210
211 vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
212 vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
194 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; 213 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
195 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; 214 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
196 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; 215 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
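The VMCR accessors now pack and unpack every field (grpen0/1, ackctl, fiqen, cbpr, eoim, ...) individually with a shift and a mask instead of copying one opaque ctlr word. A generic sketch of that pack/unpack round trip, using made-up shift and mask values rather than the real GICH_VMCR_* constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical field layout, NOT the real GICH_VMCR bit positions */
#define F_EN0_SHIFT   0
#define F_EN0_MASK   (1u << F_EN0_SHIFT)
#define F_CBPR_SHIFT  4
#define F_CBPR_MASK  (1u << F_CBPR_SHIFT)
#define F_BPR_SHIFT   8
#define F_BPR_MASK   (7u << F_BPR_SHIFT)

struct fields { uint32_t en0, cbpr, bpr; };

static uint32_t pack(const struct fields *f)
{
        uint32_t v;

        v  = (f->en0  << F_EN0_SHIFT)  & F_EN0_MASK;
        v |= (f->cbpr << F_CBPR_SHIFT) & F_CBPR_MASK;
        v |= (f->bpr  << F_BPR_SHIFT)  & F_BPR_MASK;
        return v;
}

static void unpack(uint32_t v, struct fields *f)
{
        f->en0  = (v & F_EN0_MASK)  >> F_EN0_SHIFT;
        f->cbpr = (v & F_CBPR_MASK) >> F_CBPR_SHIFT;
        f->bpr  = (v & F_BPR_MASK)  >> F_BPR_SHIFT;
}

int main(void)
{
        struct fields in = { .en0 = 1, .cbpr = 1, .bpr = 5 }, out;

        unpack(pack(&in), &out);
        assert(out.en0 == in.en0 && out.cbpr == in.cbpr && out.bpr == in.bpr);
        printf("packed = %#x\n", pack(&in));   /* prints 0x511 with this layout */
        return 0;
}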
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index da83e4caa272..bba7fa22a7f7 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
111 * registers regardless of the hardware backed GIC used. 111 * registers regardless of the hardware backed GIC used.
112 */ 112 */
113struct vgic_vmcr { 113struct vgic_vmcr {
114 u32 ctlr; 114 u32 grpen0;
115 u32 grpen1;
116
117 u32 ackctl;
118 u32 fiqen;
119 u32 cbpr;
120 u32 eoim;
121
115 u32 abpr; 122 u32 abpr;
116 u32 bpr; 123 u32 bpr;
117 u32 pmr; /* Priority mask field in the GICC_PMR and 124 u32 pmr; /* Priority mask field in the GICC_PMR and
118 * ICC_PMR_EL1 priority field format */ 125 * ICC_PMR_EL1 priority field format */
119 /* Below member variable are valid only for GICv3 */
120 u32 grpen0;
121 u32 grpen1;
122}; 126};
123 127
124struct vgic_reg_attr { 128struct vgic_reg_attr {