aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2016-05-05 02:35:00 -0400
committerIngo Molnar <mingo@kernel.org>2016-05-05 02:35:00 -0400
commit1fb48f8e54e5ed4d3d8599ba7e83f1f60530c81c (patch)
tree6b8c1ccdd461e211f72c674d183f5129f5fe4a5b
parent778843f934e362ed4ed734520f60a44a78a074b4 (diff)
parent04974df8049fc4240d22759a91e035082ccd18b4 (diff)
Merge tag 'v4.6-rc6' into x86/asm, to refresh the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--.mailmap4
-rw-r--r--Documentation/devicetree/bindings/arc/archs-pct.txt2
-rw-r--r--Documentation/devicetree/bindings/arc/pct.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-rk3x.txt4
-rw-r--r--Documentation/devicetree/bindings/net/mediatek-net.txt7
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt18
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt22
-rw-r--r--Documentation/devicetree/bindings/rtc/s3c-rtc.txt7
-rw-r--r--Documentation/input/event-codes.txt4
-rw-r--r--Documentation/sysctl/vm.txt19
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile7
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h36
-rw-r--r--arch/arc/kernel/entry-arcv2.S10
-rw-r--r--arch/arc/kernel/entry-compact.S3
-rw-r--r--arch/arc/mm/init.c4
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/boot/dts/am4372.dtsi2
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts17
-rw-r--r--arch/arm/boot/dts/dm814x-clocks.dtsi243
-rw-r--r--arch/arm/boot/dts/dra62x-clocks.dtsi26
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi18
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi14
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts1
-rw-r--r--arch/arm/boot/dts/r8a7791-porter.dts14
-rw-r--r--arch/arm/boot/dts/r8a7791.dtsi5
-rw-r--r--arch/arm/include/asm/cputype.h2
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c5
-rw-r--r--arch/arm/mach-omap2/clockdomains7xx_data.c2
-rw-r--r--arch/arm/mach-omap2/io.c3
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c7
-rw-r--r--arch/arm/mach-omap2/pm34xx.c23
-rw-r--r--arch/arm/mach-shmobile/timer.c28
-rw-r--r--arch/arm/mm/dma-mapping.c3
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts1
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi20
-rw-r--r--arch/arm64/kernel/head.S13
-rw-r--r--arch/arm64/kernel/smp_spin_table.c11
-rw-r--r--arch/nios2/lib/memset.c2
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h1
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h2
-rw-r--r--arch/powerpc/kernel/prom.c26
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h28
-rw-r--r--arch/s390/include/asm/pci.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h4
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/seccomp.h2
-rw-r--r--arch/s390/include/asm/tlbflush.h9
-rw-r--r--arch/s390/lib/spinlock.c1
-rw-r--r--arch/s390/mm/init.c3
-rw-r--r--arch/s390/mm/mmap.c6
-rw-r--r--arch/s390/mm/pgalloc.c85
-rw-r--r--arch/s390/pci/pci_dma.c16
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c4
-rw-r--r--arch/x86/events/amd/core.c2
-rw-r--r--arch/x86/events/intel/core.c1
-rw-r--r--arch/x86/events/intel/lbr.c6
-rw-r--r--arch/x86/events/intel/pt.c75
-rw-r--r--arch/x86/events/intel/pt.h3
-rw-r--r--arch/x86/events/intel/rapl.c1
-rw-r--r--arch/x86/include/asm/perf_event.h4
-rw-r--r--arch/x86/kvm/vmx.c4
-rw-r--r--crypto/rsa-pkcs1pad.c12
-rw-r--r--drivers/bcma/main.c17
-rw-r--r--drivers/block/rbd.c52
-rw-r--r--drivers/clocksource/tango_xtal.c2
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq_governor.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c9
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c3
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c3
-rw-r--r--drivers/crypto/talitos.c87
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/firmware/efi/vars.c37
-rw-r--r--drivers/firmware/psci.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c5
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c29
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c31
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c29
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c16
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c42
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c154
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h46
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c17
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c6
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c24
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c1
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c3
-rw-r--r--drivers/infiniband/core/uverbs_main.c5
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c24
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c4
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/misc/arizona-haptics.c1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c7
-rw-r--r--drivers/input/misc/twl4030-vibra.c1
-rw-r--r--drivers/input/misc/twl6040-vibra.c8
-rw-r--r--drivers/input/tablet/gtco.c10
-rw-r--r--drivers/iommu/amd_iommu.c87
-rw-r--r--drivers/iommu/arm-smmu.c22
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/isdn/mISDN/socket.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c20
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c20
-rw-r--r--drivers/misc/cxl/context.c7
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/irq.c1
-rw-r--r--drivers/misc/cxl/native.c31
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/sdhci-acpi.c81
-rw-r--r--drivers/mmc/host/sunxi-mmc.c5
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/dsa/mv88e6xxx.c34
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c5
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h6
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c43
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c49
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c40
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c157
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c66
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c3
-rw-r--r--drivers/net/macsec.c65
-rw-r--r--drivers/net/phy/spi_ks8995.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c177
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/pci/access.c42
-rw-r--r--drivers/pci/host/pci-imx6.c20
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/perf/arm_pmu.c15
-rw-r--r--drivers/phy/phy-rockchip-dp.c7
-rw-r--r--drivers/phy/phy-rockchip-emmc.c5
-rw-r--r--drivers/pinctrl/freescale/Kconfig1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c9
-rw-r--r--drivers/pinctrl/pinctrl-single.c6
-rw-r--r--drivers/platform/x86/hp_accel.c6
-rw-r--r--drivers/platform/x86/intel-hid.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c48
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c48
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/s390/block/dcssblk.c5
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c1
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c11
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c54
-rw-r--r--drivers/staging/rdma/hfi1/TODO2
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c91
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.c40
-rw-r--r--drivers/staging/rdma/hfi1/mmu_rb.h3
-rw-r--r--drivers/staging/rdma/hfi1/qp.c2
-rw-r--r--drivers/staging/rdma/hfi1/user_exp_rcv.c11
-rw-r--r--drivers/staging/rdma/hfi1/user_sdma.c33
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/mtk_thermal.c3
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/thermal/power_allocator.c2
-rw-r--r--drivers/thermal/thermal_core.c10
-rw-r--r--drivers/tty/pty.c79
-rw-r--r--drivers/tty/serial/8250/8250_port.c11
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/uartlite.c8
-rw-r--r--drivers/tty/tty_io.c6
-rw-r--r--drivers/usb/dwc3/core.c23
-rw-r--r--drivers/usb/dwc3/debugfs.c13
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c12
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/composite.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c5
-rw-r--r--drivers/video/fbdev/amba-clcd.c15
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c12
-rw-r--r--fs/ceph/mds_client.c6
-rw-r--r--fs/devpts/inode.c100
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c2
-rw-r--r--fs/proc/task_mmu.c33
-rw-r--r--include/asm-generic/futex.h8
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/linux/ceph/auth.h10
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/cgroup-defs.h1
-rw-r--r--include/linux/cpuset.h6
-rw-r--r--include/linux/devpts_fs.h38
-rw-r--r--include/linux/huge_mm.h5
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/mlx4/device.h7
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/driver.h7
-rw-r--r--include/linux/mlx5/port.h6
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/rculist_nulls.h39
-rw-r--r--include/linux/thermal.h4
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/media/videobuf2-core.h8
-rw-r--r--include/net/cls_cgroup.h7
-rw-r--r--include/net/ip6_route.h3
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/route.h3
-rw-r--r--include/net/sctp/structs.h8
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/switchdev.h4
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/rdma/ib.h16
-rw-r--r--include/sound/hda_i915.h5
-rw-r--r--include/sound/hda_regmap.h2
-rw-r--r--include/uapi/asm-generic/unistd.h6
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/if_macsec.h4
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h30
-rw-r--r--kernel/bpf/verifier.c2
-rw-r--r--kernel/cgroup.c7
-rw-r--r--kernel/cpu.c33
-rw-r--r--kernel/cpuset.c4
-rw-r--r--kernel/events/core.c55
-rw-r--r--kernel/futex.c27
-rw-r--r--kernel/irq/ipi.c1
-rw-r--r--kernel/kcov.c3
-rw-r--r--kernel/kexec_core.c7
-rw-r--r--kernel/locking/lockdep.c37
-rw-r--r--kernel/locking/lockdep_proc.c2
-rw-r--r--kernel/locking/qspinlock_stat.h8
-rw-r--r--kernel/workqueue.c29
-rw-r--r--lib/stackdepot.c4
-rw-r--r--mm/huge_memory.c12
-rw-r--r--mm/memcontrol.c37
-rw-r--r--mm/memory-failure.c10
-rw-r--r--mm/memory.c40
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page_io.c6
-rw-r--r--mm/swap.c5
-rw-r--r--mm/vmscan.c30
-rw-r--r--net/bridge/br_mdb.c124
-rw-r--r--net/bridge/br_multicast.c8
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/ceph/auth.c8
-rw-r--r--net/ceph/auth_none.c71
-rw-r--r--net/ceph/auth_none.h3
-rw-r--r--net/ceph/auth_x.c21
-rw-r--r--net/ceph/auth_x.h1
-rw-r--r--net/ceph/osd_client.c6
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/decnet/dn_route.c9
-rw-r--r--net/ipv4/fib_frontend.c6
-rw-r--r--net/ipv4/netfilter/arptable_filter.c6
-rw-r--r--net/ipv4/route.c19
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_output.c16
-rw-r--r--net/ipv4/udp.c9
-rw-r--r--net/ipv6/addrconf.c70
-rw-r--r--net/ipv6/datagram.c169
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/ipv6/udp.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/openvswitch/actions.c4
-rw-r--r--net/openvswitch/conntrack.c1
-rw-r--r--net/packet/af_packet.c1
-rw-r--r--net/rds/cong.c4
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/sched/sch_generic.c5
-rw-r--r--net/sctp/outqueue.c15
-rw-r--r--net/sctp/sm_make_chunk.c3
-rw-r--r--net/sctp/sm_sideeffect.c36
-rw-r--r--net/sctp/transport.c19
-rw-r--r--net/switchdev/switchdev.c6
-rw-r--r--net/tipc/core.c1
-rw-r--r--net/tipc/core.h3
-rw-r--r--net/tipc/name_distr.c35
-rw-r--r--net/vmw_vsock/vmci_transport.c7
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--sound/hda/ext/hdac_ext_stream.c5
-rw-r--r--sound/hda/hdac_device.c10
-rw-r--r--sound/hda/hdac_i915.c62
-rw-r--r--sound/hda/hdac_regmap.c40
-rw-r--r--sound/pci/hda/hda_generic.c6
-rw-r--r--sound/pci/hda/hda_intel.c59
-rw-r--r--sound/pci/hda/patch_cirrus.c14
-rw-r--r--sound/pci/hda/patch_hdmi.c3
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c1
-rw-r--r--sound/soc/codecs/Kconfig1
-rw-r--r--sound/soc/codecs/arizona.c12
-rw-r--r--sound/soc/codecs/arizona.h2
-rw-r--r--sound/soc/codecs/cs35l32.c17
-rw-r--r--sound/soc/codecs/cs47l24.c3
-rw-r--r--sound/soc/codecs/hdac_hdmi.c94
-rw-r--r--sound/soc/codecs/nau8825.c126
-rw-r--r--sound/soc/codecs/rt5640.c2
-rw-r--r--sound/soc/codecs/rt5640.h36
-rw-r--r--sound/soc/codecs/wm5102.c5
-rw-r--r--sound/soc/codecs/wm5110.c2
-rw-r--r--sound/soc/codecs/wm8962.c2
-rw-r--r--sound/soc/codecs/wm8997.c2
-rw-r--r--sound/soc/codecs/wm8998.c2
-rw-r--r--sound/soc/intel/Kconfig1
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c2
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c42
-rw-r--r--sound/soc/intel/skylake/skl-topology.h8
-rw-r--r--sound/soc/intel/skylake/skl.c32
-rw-r--r--sound/soc/soc-dapm.c7
-rw-r--r--tools/objtool/Documentation/stack-validation.txt38
-rw-r--r--tools/objtool/builtin-check.c97
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile2
-rw-r--r--tools/testing/selftests/net/reuseport_dualstack.c208
390 files changed, 4269 insertions, 2150 deletions
diff --git a/.mailmap b/.mailmap
index 90c0aefc276d..c156a8b4d845 100644
--- a/.mailmap
+++ b/.mailmap
@@ -48,6 +48,9 @@ Felix Kuhling <fxkuehl@gmx.de>
48Felix Moeller <felix@derklecks.de> 48Felix Moeller <felix@derklecks.de>
49Filipe Lautert <filipe@icewall.org> 49Filipe Lautert <filipe@icewall.org>
50Franck Bui-Huu <vagabon.xyz@gmail.com> 50Franck Bui-Huu <vagabon.xyz@gmail.com>
51Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
52Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
53Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
51Frank Zago <fzago@systemfabricworks.com> 54Frank Zago <fzago@systemfabricworks.com>
52Greg Kroah-Hartman <greg@echidna.(none)> 55Greg Kroah-Hartman <greg@echidna.(none)>
53Greg Kroah-Hartman <gregkh@suse.de> 56Greg Kroah-Hartman <gregkh@suse.de>
@@ -79,6 +82,7 @@ Kay Sievers <kay.sievers@vrfy.org>
79Kenneth W Chen <kenneth.w.chen@intel.com> 82Kenneth W Chen <kenneth.w.chen@intel.com>
80Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> 83Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
81Koushik <raghavendra.koushik@neterion.com> 84Koushik <raghavendra.koushik@neterion.com>
85Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
82Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> 86Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
83Leonid I Ananiev <leonid.i.ananiev@intel.com> 87Leonid I Ananiev <leonid.i.ananiev@intel.com>
84Linas Vepstas <linas@austin.ibm.com> 88Linas Vepstas <linas@austin.ibm.com>
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
index 1ae98b87c640..e4b9dcee6d41 100644
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -2,7 +2,7 @@
2 2
3The ARC HS can be configured with a pipeline performance monitor for counting 3The ARC HS can be configured with a pipeline performance monitor for counting
4CPU and cache events like cache misses and hits. Like conventional PCT there 4CPU and cache events like cache misses and hits. Like conventional PCT there
5are 100+ hardware conditions dynamically mapped to upto 32 counters. 5are 100+ hardware conditions dynamically mapped to up to 32 counters.
6It also supports overflow interrupts. 6It also supports overflow interrupts.
7 7
8Required properties: 8Required properties:
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
index 7b9588444f20..4e874d9a38a6 100644
--- a/Documentation/devicetree/bindings/arc/pct.txt
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -2,7 +2,7 @@
2 2
3The ARC700 can be configured with a pipeline performance monitor for counting 3The ARC700 can be configured with a pipeline performance monitor for counting
4CPU and cache events like cache misses and hits. Like conventional PCT there 4CPU and cache events like cache misses and hits. Like conventional PCT there
5are 100+ hardware conditions dynamically mapped to upto 32 counters 5are 100+ hardware conditions dynamically mapped to up to 32 counters
6 6
7Note that: 7Note that:
8 * The ARC 700 PCT does not support interrupts; although HW events may be 8 * The ARC 700 PCT does not support interrupts; although HW events may be
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index ccc62f145306..3f0cbbb8395f 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -192,7 +192,6 @@ nodes to be present and contain the properties described below.
192 can be one of: 192 can be one of:
193 "allwinner,sun6i-a31" 193 "allwinner,sun6i-a31"
194 "allwinner,sun8i-a23" 194 "allwinner,sun8i-a23"
195 "arm,psci"
196 "arm,realview-smp" 195 "arm,realview-smp"
197 "brcm,bcm-nsp-smp" 196 "brcm,bcm-nsp-smp"
198 "brcm,brahma-b15" 197 "brcm,brahma-b15"
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index f0d71bc52e64..0b4a85fe2d86 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -6,8 +6,8 @@ RK3xxx SoCs.
6Required properties : 6Required properties :
7 7
8 - reg : Offset and length of the register set for the device 8 - reg : Offset and length of the register set for the device
9 - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or 9 - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
10 "rockchip,rk3288-i2c". 10 "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
11 - interrupts : interrupt number 11 - interrupts : interrupt number
12 - clocks : parent clock 12 - clocks : parent clock
13 13
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 5ca79290eabf..32eaaca04d9b 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
9Required properties: 9Required properties:
10- compatible: Should be "mediatek,mt7623-eth" 10- compatible: Should be "mediatek,mt7623-eth"
11- reg: Address and length of the register set for the device 11- reg: Address and length of the register set for the device
12- interrupts: Should contain the frame engines interrupt 12- interrupts: Should contain the three frame engines interrupts in numeric
13 order. These are fe_int0, fe_int1 and fe_int2.
13- clocks: the clock used by the core 14- clocks: the clock used by the core
14- clock-names: the names of the clock listed in the clocks property. These are 15- clock-names: the names of the clock listed in the clocks property. These are
15 "ethif", "esw", "gp2", "gp1" 16 "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
42 <&ethsys CLK_ETHSYS_GP2>, 43 <&ethsys CLK_ETHSYS_GP2>,
43 <&ethsys CLK_ETHSYS_GP1>; 44 <&ethsys CLK_ETHSYS_GP1>;
44 clock-names = "ethif", "esw", "gp2", "gp1"; 45 clock-names = "ethif", "esw", "gp2", "gp1";
45 interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>; 46 interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
47 GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
48 GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
46 power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; 49 power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
47 resets = <&ethsys MT2701_ETHSYS_ETH_RST>; 50 resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
48 reset-names = "eth"; 51 reset-names = "eth";
diff --git a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
index 50c4f9b00adf..e3b4809fbe82 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
@@ -8,15 +8,19 @@ Required properties:
8 of memory mapped region. 8 of memory mapped region.
9- clock-names: from common clock binding: 9- clock-names: from common clock binding:
10 Required elements: "24m" 10 Required elements: "24m"
11- rockchip,grf: phandle to the syscon managing the "general register files"
12- #phy-cells : from the generic PHY bindings, must be 0; 11- #phy-cells : from the generic PHY bindings, must be 0;
13 12
14Example: 13Example:
15 14
16edp_phy: edp-phy { 15grf: syscon@ff770000 {
17 compatible = "rockchip,rk3288-dp-phy"; 16 compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
18 rockchip,grf = <&grf>; 17
19 clocks = <&cru SCLK_EDP_24M>; 18...
20 clock-names = "24m"; 19
21 #phy-cells = <0>; 20 edp_phy: edp-phy {
21 compatible = "rockchip,rk3288-dp-phy";
22 clocks = <&cru SCLK_EDP_24M>;
23 clock-names = "24m";
24 #phy-cells = <0>;
25 };
22}; 26};
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index 61916f15a949..555cb0f40690 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -3,17 +3,23 @@ Rockchip EMMC PHY
3 3
4Required properties: 4Required properties:
5 - compatible: rockchip,rk3399-emmc-phy 5 - compatible: rockchip,rk3399-emmc-phy
6 - rockchip,grf : phandle to the syscon managing the "general
7 register files"
8 - #phy-cells: must be 0 6 - #phy-cells: must be 0
9 - reg: PHY configure reg address offset in "general 7 - reg: PHY register address offset and length in "general
10 register files" 8 register files"
11 9
12Example: 10Example:
13 11
14emmcphy: phy { 12
15 compatible = "rockchip,rk3399-emmc-phy"; 13grf: syscon@ff770000 {
16 rockchip,grf = <&grf>; 14 compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
17 reg = <0xf780>; 15 #address-cells = <1>;
18 #phy-cells = <0>; 16 #size-cells = <1>;
17
18...
19
20 emmcphy: phy@f780 {
21 compatible = "rockchip,rk3399-emmc-phy";
22 reg = <0xf780 0x20>;
23 #phy-cells = <0>;
24 };
19}; 25};
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index 1068ffce9f91..fdde63a5419c 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -15,9 +15,10 @@ Required properties:
15 is the rtc tick interrupt. The number of cells representing a interrupt 15 is the rtc tick interrupt. The number of cells representing a interrupt
16 depends on the parent interrupt controller. 16 depends on the parent interrupt controller.
17- clocks: Must contain a list of phandle and clock specifier for the rtc 17- clocks: Must contain a list of phandle and clock specifier for the rtc
18 and source clocks. 18 clock and in the case of a s3c6410 compatible controller, also
19- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the 19 a source clock.
20 same order as the clocks property. 20- clock-names: Must contain "rtc" and for a s3c6410 compatible controller,
21 a "rtc_src" sorted in the same order as the clocks property.
21 22
22Example: 23Example:
23 24
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 3f0f5ce3338b..36ea940e5bb9 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -173,6 +173,10 @@ A few EV_ABS codes have special meanings:
173 proximity of the device and while the value of the BTN_TOUCH code is 0. If 173 proximity of the device and while the value of the BTN_TOUCH code is 0. If
174 the input device may be used freely in three dimensions, consider ABS_Z 174 the input device may be used freely in three dimensions, consider ABS_Z
175 instead. 175 instead.
176 - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
177 proximity and set to 0 when the tool leaves detectable proximity.
178 BTN_TOOL_<name> signals the type of tool that is currently detected by the
179 hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.
176 180
177* ABS_MT_<name>: 181* ABS_MT_<name>:
178 - Used to describe multitouch input events. Please see 182 - Used to describe multitouch input events. Please see
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cb0368459da3..34a5fece3121 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -581,15 +581,16 @@ Specify "[Nn]ode" for node order
581"Zone Order" orders the zonelists by zone type, then by node within each 581"Zone Order" orders the zonelists by zone type, then by node within each
582zone. Specify "[Zz]one" for zone order. 582zone. Specify "[Zz]one" for zone order.
583 583
584Specify "[Dd]efault" to request automatic configuration. Autoconfiguration 584Specify "[Dd]efault" to request automatic configuration.
585will select "node" order in following case. 585
586(1) if the DMA zone does not exist or 586On 32-bit, the Normal zone needs to be preserved for allocations accessible
587(2) if the DMA zone comprises greater than 50% of the available memory or 587by the kernel, so "zone" order will be selected.
588(3) if any node's DMA zone comprises greater than 70% of its local memory and 588
589 the amount of local memory is big enough. 589On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
590 590order will be selected.
591Otherwise, "zone" order will be selected. Default order is recommended unless 591
592this is causing problems for your system/application. 592Default order is recommended unless this is causing problems for your
593system/application.
593 594
594============================================================== 595==============================================================
595 596
diff --git a/MAINTAINERS b/MAINTAINERS
index 1d5b4becab6f..42e65d128d01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6027,7 +6027,7 @@ F: include/scsi/*iscsi*
6027 6027
6028ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR 6028ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
6029M: Or Gerlitz <ogerlitz@mellanox.com> 6029M: Or Gerlitz <ogerlitz@mellanox.com>
6030M: Sagi Grimberg <sagig@mellanox.com> 6030M: Sagi Grimberg <sagi@grimberg.me>
6031M: Roi Dayan <roid@mellanox.com> 6031M: Roi Dayan <roid@mellanox.com>
6032L: linux-rdma@vger.kernel.org 6032L: linux-rdma@vger.kernel.org
6033S: Supported 6033S: Supported
@@ -6037,7 +6037,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
6037F: drivers/infiniband/ulp/iser/ 6037F: drivers/infiniband/ulp/iser/
6038 6038
6039ISCSI EXTENSIONS FOR RDMA (ISER) TARGET 6039ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
6040M: Sagi Grimberg <sagig@mellanox.com> 6040M: Sagi Grimberg <sagi@grimberg.me>
6041T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master 6041T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
6042L: linux-rdma@vger.kernel.org 6042L: linux-rdma@vger.kernel.org
6043L: target-devel@vger.kernel.org 6043L: target-devel@vger.kernel.org
@@ -6400,7 +6400,7 @@ F: mm/kmemleak.c
6400F: mm/kmemleak-test.c 6400F: mm/kmemleak-test.c
6401 6401
6402KPROBES 6402KPROBES
6403M: Ananth N Mavinakayanahalli <ananth@in.ibm.com> 6403M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
6404M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6404M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
6405M: "David S. Miller" <davem@davemloft.net> 6405M: "David S. Miller" <davem@davemloft.net>
6406M: Masami Hiramatsu <mhiramat@kernel.org> 6406M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -11071,6 +11071,15 @@ S: Maintained
11071F: drivers/clk/ti/ 11071F: drivers/clk/ti/
11072F: include/linux/clk/ti.h 11072F: include/linux/clk/ti.h
11073 11073
11074TI ETHERNET SWITCH DRIVER (CPSW)
11075M: Mugunthan V N <mugunthanvnm@ti.com>
11076R: Grygorii Strashko <grygorii.strashko@ti.com>
11077L: linux-omap@vger.kernel.org
11078L: netdev@vger.kernel.org
11079S: Maintained
11080F: drivers/net/ethernet/ti/cpsw*
11081F: drivers/net/ethernet/ti/davinci*
11082
11074TI FLASH MEDIA INTERFACE DRIVER 11083TI FLASH MEDIA INTERFACE DRIVER
11075M: Alex Dubov <oakad@yahoo.com> 11084M: Alex Dubov <oakad@yahoo.com>
11076S: Maintained 11085S: Maintained
diff --git a/Makefile b/Makefile
index 873411873c03..7466de60ddc7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc4 4EXTRAVERSION = -rc6
5NAME = Blurry Fish Butt 5NAME = Charred Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
@@ -1008,7 +1008,8 @@ prepare0: archprepare FORCE
1008prepare: prepare0 prepare-objtool 1008prepare: prepare0 prepare-objtool
1009 1009
1010ifdef CONFIG_STACK_VALIDATION 1010ifdef CONFIG_STACK_VALIDATION
1011 has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0) 1011 has_libelf := $(call try-run,\
1012 echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
1012 ifeq ($(has_libelf),1) 1013 ifeq ($(has_libelf),1)
1013 objtool_target := tools/objtool FORCE 1014 objtool_target := tools/objtool FORCE
1014 else 1015 else
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 12d0284a46e5..ec4791ea6911 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -35,8 +35,10 @@ config ARC
35 select NO_BOOTMEM 35 select NO_BOOTMEM
36 select OF 36 select OF
37 select OF_EARLY_FLATTREE 37 select OF_EARLY_FLATTREE
38 select OF_RESERVED_MEM
38 select PERF_USE_VMALLOC 39 select PERF_USE_VMALLOC
39 select HAVE_DEBUG_STACKOVERFLOW 40 select HAVE_DEBUG_STACKOVERFLOW
41 select HAVE_GENERIC_DMA_COHERENT
40 42
41config MIGHT_HAVE_PCI 43config MIGHT_HAVE_PCI
42 bool 44 bool
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 37c2f751eebf..d1ec7f6b31e0 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -18,6 +18,12 @@
18#define STATUS_AD_MASK (1<<STATUS_AD_BIT) 18#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
19#define STATUS_IE_MASK (1<<STATUS_IE_BIT) 19#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
20 20
21/* status32 Bits as encoded/expected by CLRI/SETI */
22#define CLRI_STATUS_IE_BIT 4
23
24#define CLRI_STATUS_E_MASK 0xF
25#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
26
21#define AUX_USER_SP 0x00D 27#define AUX_USER_SP 0x00D
22#define AUX_IRQ_CTRL 0x00E 28#define AUX_IRQ_CTRL 0x00E
23#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */ 29#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
@@ -100,6 +106,13 @@ static inline long arch_local_save_flags(void)
100 : 106 :
101 : "memory"); 107 : "memory");
102 108
109 /* To be compatible with irq_save()/irq_restore()
110 * encode the irq bits as expected by CLRI/SETI
111 * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
112 */
113 temp = (1 << 5) |
114 ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
115 (temp & CLRI_STATUS_E_MASK);
103 return temp; 116 return temp;
104} 117}
105 118
@@ -108,7 +121,7 @@ static inline long arch_local_save_flags(void)
108 */ 121 */
109static inline int arch_irqs_disabled_flags(unsigned long flags) 122static inline int arch_irqs_disabled_flags(unsigned long flags)
110{ 123{
111 return !(flags & (STATUS_IE_MASK)); 124 return !(flags & CLRI_STATUS_IE_MASK);
112} 125}
113 126
114static inline int arch_irqs_disabled(void) 127static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@ static inline void arc_softirq_clear(int irq)
128 141
129#else 142#else
130 143
144#ifdef CONFIG_TRACE_IRQFLAGS
145
146.macro TRACE_ASM_IRQ_DISABLE
147 bl trace_hardirqs_off
148.endm
149
150.macro TRACE_ASM_IRQ_ENABLE
151 bl trace_hardirqs_on
152.endm
153
154#else
155
156.macro TRACE_ASM_IRQ_DISABLE
157.endm
158
159.macro TRACE_ASM_IRQ_ENABLE
160.endm
161
162#endif
131.macro IRQ_DISABLE scratch 163.macro IRQ_DISABLE scratch
132 clri 164 clri
165 TRACE_ASM_IRQ_DISABLE
133.endm 166.endm
134 167
135.macro IRQ_ENABLE scratch 168.macro IRQ_ENABLE scratch
169 TRACE_ASM_IRQ_ENABLE
136 seti 170 seti
137.endm 171.endm
138 172
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index c1264607bbff..7a1c124ff021 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -69,8 +69,11 @@ ENTRY(handle_interrupt)
69 69
70 clri ; To make status32.IE agree with CPU internal state 70 clri ; To make status32.IE agree with CPU internal state
71 71
72 lr r0, [ICAUSE] 72#ifdef CONFIG_TRACE_IRQFLAGS
73 TRACE_ASM_IRQ_DISABLE
74#endif
73 75
76 lr r0, [ICAUSE]
74 mov blink, ret_from_exception 77 mov blink, ret_from_exception
75 78
76 b.d arch_do_IRQ 79 b.d arch_do_IRQ
@@ -169,6 +172,11 @@ END(EV_TLBProtV)
169 172
170.Lrestore_regs: 173.Lrestore_regs:
171 174
175 # Interrpts are actually disabled from this point on, but will get
176 # reenabled after we return from interrupt/exception.
177 # But irq tracer needs to be told now...
178 TRACE_ASM_IRQ_ENABLE
179
172 ld r0, [sp, PT_status32] ; U/K mode at time of entry 180 ld r0, [sp, PT_status32] ; U/K mode at time of entry
173 lr r10, [AUX_IRQ_ACT] 181 lr r10, [AUX_IRQ_ACT]
174 182
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 431433929189..0cb0abaa0479 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -341,6 +341,9 @@ END(call_do_page_fault)
341 341
342.Lrestore_regs: 342.Lrestore_regs:
343 343
344 # Interrpts are actually disabled from this point on, but will get
345 # reenabled after we return from interrupt/exception.
346 # But irq tracer needs to be told now...
344 TRACE_ASM_IRQ_ENABLE 347 TRACE_ASM_IRQ_ENABLE
345 348
346 lr r10, [status32] 349 lr r10, [status32]
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 7d2c4fbf4f22..5487d0b97400 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -13,6 +13,7 @@
13#ifdef CONFIG_BLK_DEV_INITRD 13#ifdef CONFIG_BLK_DEV_INITRD
14#include <linux/initrd.h> 14#include <linux/initrd.h>
15#endif 15#endif
16#include <linux/of_fdt.h>
16#include <linux/swap.h> 17#include <linux/swap.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/highmem.h> 19#include <linux/highmem.h>
@@ -136,6 +137,9 @@ void __init setup_arch_memory(void)
136 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); 137 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
137#endif 138#endif
138 139
140 early_init_fdt_reserve_self();
141 early_init_fdt_scan_reserved_mem();
142
139 memblock_dump_all(); 143 memblock_dump_all();
140 144
141 /*----------------- node/zones setup --------------------------*/ 145 /*----------------- node/zones setup --------------------------*/
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 55ca9c7dcf6a..0467846b4cc3 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -860,7 +860,7 @@
860 ti,no-idle-on-init; 860 ti,no-idle-on-init;
861 reg = <0x50000000 0x2000>; 861 reg = <0x50000000 0x2000>;
862 interrupts = <100>; 862 interrupts = <100>;
863 dmas = <&edma 52>; 863 dmas = <&edma 52 0>;
864 dma-names = "rxtx"; 864 dma-names = "rxtx";
865 gpmc,num-cs = <7>; 865 gpmc,num-cs = <7>;
866 gpmc,num-waitpins = <2>; 866 gpmc,num-waitpins = <2>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 344b861a55a5..ba580a9da390 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -884,7 +884,7 @@
884 gpmc: gpmc@50000000 { 884 gpmc: gpmc@50000000 {
885 compatible = "ti,am3352-gpmc"; 885 compatible = "ti,am3352-gpmc";
886 ti,hwmods = "gpmc"; 886 ti,hwmods = "gpmc";
887 dmas = <&edma 52>; 887 dmas = <&edma 52 0>;
888 dma-names = "rxtx"; 888 dma-names = "rxtx";
889 clocks = <&l3s_gclk>; 889 clocks = <&l3s_gclk>;
890 clock-names = "fck"; 890 clock-names = "fck";
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 0a5fc5d02ce2..4168eb9dd369 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -99,13 +99,6 @@
99 #cooling-cells = <2>; 99 #cooling-cells = <2>;
100 }; 100 };
101 101
102 extcon_usb1: extcon_usb1 {
103 compatible = "linux,extcon-usb-gpio";
104 id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
105 pinctrl-names = "default";
106 pinctrl-0 = <&extcon_usb1_pins>;
107 };
108
109 hdmi0: connector { 102 hdmi0: connector {
110 compatible = "hdmi-connector"; 103 compatible = "hdmi-connector";
111 label = "hdmi"; 104 label = "hdmi";
@@ -349,12 +342,6 @@
349 >; 342 >;
350 }; 343 };
351 344
352 extcon_usb1_pins: extcon_usb1_pins {
353 pinctrl-single,pins = <
354 DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */
355 >;
356 };
357
358 tpd12s015_pins: pinmux_tpd12s015_pins { 345 tpd12s015_pins: pinmux_tpd12s015_pins {
359 pinctrl-single,pins = < 346 pinctrl-single,pins = <
360 DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */ 347 DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */
@@ -706,10 +693,6 @@
706 pinctrl-0 = <&usb1_pins>; 693 pinctrl-0 = <&usb1_pins>;
707}; 694};
708 695
709&omap_dwc3_1 {
710 extcon = <&extcon_usb1>;
711};
712
713&omap_dwc3_2 { 696&omap_dwc3_2 {
714 extcon = <&extcon_usb2>; 697 extcon = <&extcon_usb2>;
715}; 698};
diff --git a/arch/arm/boot/dts/dm814x-clocks.dtsi b/arch/arm/boot/dts/dm814x-clocks.dtsi
index e0ea6a93a22e..792a64ee0df7 100644
--- a/arch/arm/boot/dts/dm814x-clocks.dtsi
+++ b/arch/arm/boot/dts/dm814x-clocks.dtsi
@@ -4,6 +4,157 @@
4 * published by the Free Software Foundation. 4 * published by the Free Software Foundation.
5 */ 5 */
6 6
7&pllss {
8 /*
9 * See TRM "2.6.10 Connected outputso DPLLS" and
10 * "2.6.11 Connected Outputs of DPLLJ". Only clkout is
11 * connected except for hdmi and usb.
12 */
13 adpll_mpu_ck: adpll@40 {
14 #clock-cells = <1>;
15 compatible = "ti,dm814-adpll-s-clock";
16 reg = <0x40 0x40>;
17 clocks = <&devosc_ck &devosc_ck &devosc_ck>;
18 clock-names = "clkinp", "clkinpulow", "clkinphif";
19 clock-output-names = "481c5040.adpll.dcoclkldo",
20 "481c5040.adpll.clkout",
21 "481c5040.adpll.clkoutx2",
22 "481c5040.adpll.clkouthif";
23 };
24
25 adpll_dsp_ck: adpll@80 {
26 #clock-cells = <1>;
27 compatible = "ti,dm814-adpll-lj-clock";
28 reg = <0x80 0x30>;
29 clocks = <&devosc_ck &devosc_ck>;
30 clock-names = "clkinp", "clkinpulow";
31 clock-output-names = "481c5080.adpll.dcoclkldo",
32 "481c5080.adpll.clkout",
33 "481c5080.adpll.clkoutldo";
34 };
35
36 adpll_sgx_ck: adpll@b0 {
37 #clock-cells = <1>;
38 compatible = "ti,dm814-adpll-lj-clock";
39 reg = <0xb0 0x30>;
40 clocks = <&devosc_ck &devosc_ck>;
41 clock-names = "clkinp", "clkinpulow";
42 clock-output-names = "481c50b0.adpll.dcoclkldo",
43 "481c50b0.adpll.clkout",
44 "481c50b0.adpll.clkoutldo";
45 };
46
47 adpll_hdvic_ck: adpll@e0 {
48 #clock-cells = <1>;
49 compatible = "ti,dm814-adpll-lj-clock";
50 reg = <0xe0 0x30>;
51 clocks = <&devosc_ck &devosc_ck>;
52 clock-names = "clkinp", "clkinpulow";
53 clock-output-names = "481c50e0.adpll.dcoclkldo",
54 "481c50e0.adpll.clkout",
55 "481c50e0.adpll.clkoutldo";
56 };
57
58 adpll_l3_ck: adpll@110 {
59 #clock-cells = <1>;
60 compatible = "ti,dm814-adpll-lj-clock";
61 reg = <0x110 0x30>;
62 clocks = <&devosc_ck &devosc_ck>;
63 clock-names = "clkinp", "clkinpulow";
64 clock-output-names = "481c5110.adpll.dcoclkldo",
65 "481c5110.adpll.clkout",
66 "481c5110.adpll.clkoutldo";
67 };
68
69 adpll_isp_ck: adpll@140 {
70 #clock-cells = <1>;
71 compatible = "ti,dm814-adpll-lj-clock";
72 reg = <0x140 0x30>;
73 clocks = <&devosc_ck &devosc_ck>;
74 clock-names = "clkinp", "clkinpulow";
75 clock-output-names = "481c5140.adpll.dcoclkldo",
76 "481c5140.adpll.clkout",
77 "481c5140.adpll.clkoutldo";
78 };
79
80 adpll_dss_ck: adpll@170 {
81 #clock-cells = <1>;
82 compatible = "ti,dm814-adpll-lj-clock";
83 reg = <0x170 0x30>;
84 clocks = <&devosc_ck &devosc_ck>;
85 clock-names = "clkinp", "clkinpulow";
86 clock-output-names = "481c5170.adpll.dcoclkldo",
87 "481c5170.adpll.clkout",
88 "481c5170.adpll.clkoutldo";
89 };
90
91 adpll_video0_ck: adpll@1a0 {
92 #clock-cells = <1>;
93 compatible = "ti,dm814-adpll-lj-clock";
94 reg = <0x1a0 0x30>;
95 clocks = <&devosc_ck &devosc_ck>;
96 clock-names = "clkinp", "clkinpulow";
97 clock-output-names = "481c51a0.adpll.dcoclkldo",
98 "481c51a0.adpll.clkout",
99 "481c51a0.adpll.clkoutldo";
100 };
101
102 adpll_video1_ck: adpll@1d0 {
103 #clock-cells = <1>;
104 compatible = "ti,dm814-adpll-lj-clock";
105 reg = <0x1d0 0x30>;
106 clocks = <&devosc_ck &devosc_ck>;
107 clock-names = "clkinp", "clkinpulow";
108 clock-output-names = "481c51d0.adpll.dcoclkldo",
109 "481c51d0.adpll.clkout",
110 "481c51d0.adpll.clkoutldo";
111 };
112
113 adpll_hdmi_ck: adpll@200 {
114 #clock-cells = <1>;
115 compatible = "ti,dm814-adpll-lj-clock";
116 reg = <0x200 0x30>;
117 clocks = <&devosc_ck &devosc_ck>;
118 clock-names = "clkinp", "clkinpulow";
119 clock-output-names = "481c5200.adpll.dcoclkldo",
120 "481c5200.adpll.clkout",
121 "481c5200.adpll.clkoutldo";
122 };
123
124 adpll_audio_ck: adpll@230 {
125 #clock-cells = <1>;
126 compatible = "ti,dm814-adpll-lj-clock";
127 reg = <0x230 0x30>;
128 clocks = <&devosc_ck &devosc_ck>;
129 clock-names = "clkinp", "clkinpulow";
130 clock-output-names = "481c5230.adpll.dcoclkldo",
131 "481c5230.adpll.clkout",
132 "481c5230.adpll.clkoutldo";
133 };
134
135 adpll_usb_ck: adpll@260 {
136 #clock-cells = <1>;
137 compatible = "ti,dm814-adpll-lj-clock";
138 reg = <0x260 0x30>;
139 clocks = <&devosc_ck &devosc_ck>;
140 clock-names = "clkinp", "clkinpulow";
141 clock-output-names = "481c5260.adpll.dcoclkldo",
142 "481c5260.adpll.clkout",
143 "481c5260.adpll.clkoutldo";
144 };
145
146 adpll_ddr_ck: adpll@290 {
147 #clock-cells = <1>;
148 compatible = "ti,dm814-adpll-lj-clock";
149 reg = <0x290 0x30>;
150 clocks = <&devosc_ck &devosc_ck>;
151 clock-names = "clkinp", "clkinpulow";
152 clock-output-names = "481c5290.adpll.dcoclkldo",
153 "481c5290.adpll.clkout",
154 "481c5290.adpll.clkoutldo";
155 };
156};
157
7&pllss_clocks { 158&pllss_clocks {
8 timer1_fck: timer1_fck { 159 timer1_fck: timer1_fck {
9 #clock-cells = <0>; 160 #clock-cells = <0>;
@@ -23,6 +174,24 @@
23 reg = <0x2e0>; 174 reg = <0x2e0>;
24 }; 175 };
25 176
177 /* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from auiod */
178 cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
179 #clock-cells = <0>;
180 compatible = "ti,mux-clock";
181 clocks = <&adpll_video0_ck 1
182 &adpll_video1_ck 1
183 &adpll_audio_ck 1>;
184 ti,bit-shift = <1>;
185 reg = <0x2e8>;
186 };
187
188 /* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */
189 cpsw_125mhz_gclk: cpsw_125mhz_gclk {
190 #clock-cells = <0>;
191 compatible = "fixed-clock";
192 clock-frequency = <125000000>;
193 };
194
26 sysclk18_ck: sysclk18_ck { 195 sysclk18_ck: sysclk18_ck {
27 #clock-cells = <0>; 196 #clock-cells = <0>;
28 compatible = "ti,mux-clock"; 197 compatible = "ti,mux-clock";
@@ -79,37 +248,6 @@
79 compatible = "fixed-clock"; 248 compatible = "fixed-clock";
80 clock-frequency = <1000000000>; 249 clock-frequency = <1000000000>;
81 }; 250 };
82
83 sysclk4_ck: sysclk4_ck {
84 #clock-cells = <0>;
85 compatible = "fixed-clock";
86 clock-frequency = <222000000>;
87 };
88
89 sysclk6_ck: sysclk6_ck {
90 #clock-cells = <0>;
91 compatible = "fixed-clock";
92 clock-frequency = <100000000>;
93 };
94
95 sysclk10_ck: sysclk10_ck {
96 #clock-cells = <0>;
97 compatible = "fixed-clock";
98 clock-frequency = <48000000>;
99 };
100
101 cpsw_125mhz_gclk: cpsw_125mhz_gclk {
102 #clock-cells = <0>;
103 compatible = "fixed-clock";
104 clock-frequency = <125000000>;
105 };
106
107 cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
108 #clock-cells = <0>;
109 compatible = "fixed-clock";
110 clock-frequency = <250000000>;
111 };
112
113}; 251};
114 252
115&prcm_clocks { 253&prcm_clocks {
@@ -138,6 +276,49 @@
138 clock-div = <78125>; 276 clock-div = <78125>;
139 }; 277 };
140 278
279 /* L4_HS 220 MHz*/
280 sysclk4_ck: sysclk4_ck {
281 #clock-cells = <0>;
282 compatible = "ti,fixed-factor-clock";
283 clocks = <&adpll_l3_ck 1>;
284 ti,clock-mult = <1>;
285 ti,clock-div = <1>;
286 };
287
288 /* L4_FWCFG */
289 sysclk5_ck: sysclk5_ck {
290 #clock-cells = <0>;
291 compatible = "ti,fixed-factor-clock";
292 clocks = <&adpll_l3_ck 1>;
293 ti,clock-mult = <1>;
294 ti,clock-div = <2>;
295 };
296
297 /* L4_LS 110 MHz */
298 sysclk6_ck: sysclk6_ck {
299 #clock-cells = <0>;
300 compatible = "ti,fixed-factor-clock";
301 clocks = <&adpll_l3_ck 1>;
302 ti,clock-mult = <1>;
303 ti,clock-div = <2>;
304 };
305
306 sysclk8_ck: sysclk8_ck {
307 #clock-cells = <0>;
308 compatible = "ti,fixed-factor-clock";
309 clocks = <&adpll_usb_ck 1>;
310 ti,clock-mult = <1>;
311 ti,clock-div = <1>;
312 };
313
314 sysclk10_ck: sysclk10_ck {
315 compatible = "ti,divider-clock";
316 reg = <0x324>;
317 ti,max-div = <7>;
318 #clock-cells = <0>;
319 clocks = <&adpll_usb_ck 1>;
320 };
321
141 aud_clkin0_ck: aud_clkin0_ck { 322 aud_clkin0_ck: aud_clkin0_ck {
142 #clock-cells = <0>; 323 #clock-cells = <0>;
143 compatible = "fixed-clock"; 324 compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/dra62x-clocks.dtsi b/arch/arm/boot/dts/dra62x-clocks.dtsi
index 6f98dc8df9dd..0e49741747ef 100644
--- a/arch/arm/boot/dts/dra62x-clocks.dtsi
+++ b/arch/arm/boot/dts/dra62x-clocks.dtsi
@@ -6,6 +6,32 @@
6 6
7#include "dm814x-clocks.dtsi" 7#include "dm814x-clocks.dtsi"
8 8
9/* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */
10&adpll_hdvic_ck {
11 status = "disabled";
12};
13
14&adpll_l3_ck {
15 status = "disabled";
16};
17
18&adpll_dss_ck {
19 status = "disabled";
20};
21
22/* Compared to dm814x, dra62x has interconnect clocks on isp PLL */
23&sysclk4_ck {
24 clocks = <&adpll_isp_ck 1>;
25};
26
27&sysclk5_ck {
28 clocks = <&adpll_isp_ck 1>;
29};
30
31&sysclk6_ck {
32 clocks = <&adpll_isp_ck 1>;
33};
34
9/* 35/*
10 * Compared to dm814x, dra62x has different shifts and more mux options. 36 * Compared to dm814x, dra62x has different shifts and more mux options.
11 * Please add the extra options for ysclk_14 and 16 if really needed. 37 * Please add the extra options for ysclk_14 and 16 if really needed.
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index d0bae06b7eb7..ef2164a99d0f 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -98,12 +98,20 @@
98 clock-frequency = <32768>; 98 clock-frequency = <32768>;
99 }; 99 };
100 100
101 sys_32k_ck: sys_32k_ck { 101 sys_clk32_crystal_ck: sys_clk32_crystal_ck {
102 #clock-cells = <0>; 102 #clock-cells = <0>;
103 compatible = "fixed-clock"; 103 compatible = "fixed-clock";
104 clock-frequency = <32768>; 104 clock-frequency = <32768>;
105 }; 105 };
106 106
107 sys_clk32_pseudo_ck: sys_clk32_pseudo_ck {
108 #clock-cells = <0>;
109 compatible = "fixed-factor-clock";
110 clocks = <&sys_clkin1>;
111 clock-mult = <1>;
112 clock-div = <610>;
113 };
114
107 virt_12000000_ck: virt_12000000_ck { 115 virt_12000000_ck: virt_12000000_ck {
108 #clock-cells = <0>; 116 #clock-cells = <0>;
109 compatible = "fixed-clock"; 117 compatible = "fixed-clock";
@@ -2170,4 +2178,12 @@
2170 ti,bit-shift = <22>; 2178 ti,bit-shift = <22>;
2171 reg = <0x0558>; 2179 reg = <0x0558>;
2172 }; 2180 };
2181
2182 sys_32k_ck: sys_32k_ck {
2183 #clock-cells = <0>;
2184 compatible = "ti,mux-clock";
2185 clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>;
2186 ti,bit-shift = <8>;
2187 reg = <0x6c4>;
2188 };
2173}; 2189};
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index ef5330578431..8193139d0d87 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1,6 +1,6 @@
1/dts-v1/; 1/dts-v1/;
2 2
3#include <dt-bindings/interrupt-controller/arm-gic.h> 3#include <dt-bindings/interrupt-controller/irq.h>
4#include <dt-bindings/clock/qcom,gcc-msm8974.h> 4#include <dt-bindings/clock/qcom,gcc-msm8974.h>
5#include "skeleton.dtsi" 5#include "skeleton.dtsi"
6 6
@@ -460,8 +460,6 @@
460 clock-names = "core", "iface"; 460 clock-names = "core", "iface";
461 #address-cells = <1>; 461 #address-cells = <1>;
462 #size-cells = <0>; 462 #size-cells = <0>;
463 dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
464 dma-names = "tx", "rx";
465 }; 463 };
466 464
467 spmi_bus: spmi@fc4cf000 { 465 spmi_bus: spmi@fc4cf000 {
@@ -479,16 +477,6 @@
479 interrupt-controller; 477 interrupt-controller;
480 #interrupt-cells = <4>; 478 #interrupt-cells = <4>;
481 }; 479 };
482
483 blsp2_dma: dma-controller@f9944000 {
484 compatible = "qcom,bam-v1.4.0";
485 reg = <0xf9944000 0x19000>;
486 interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
487 clocks = <&gcc GCC_BLSP2_AHB_CLK>;
488 clock-names = "bam_clk";
489 #dma-cells = <1>;
490 qcom,ee = <0>;
491 };
492 }; 480 };
493 481
494 smd { 482 smd {
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 0ad71b81d3a2..cc6e28f81fe4 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -661,6 +661,7 @@
661}; 661};
662 662
663&pcie_bus_clk { 663&pcie_bus_clk {
664 clock-frequency = <100000000>;
664 status = "okay"; 665 status = "okay";
665}; 666};
666 667
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6c08314427d6..a9285d9a57cd 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -143,19 +143,11 @@
143}; 143};
144 144
145&pfc { 145&pfc {
146 pinctrl-0 = <&scif_clk_pins>;
147 pinctrl-names = "default";
148
149 scif0_pins: serial0 { 146 scif0_pins: serial0 {
150 renesas,groups = "scif0_data_d"; 147 renesas,groups = "scif0_data_d";
151 renesas,function = "scif0"; 148 renesas,function = "scif0";
152 }; 149 };
153 150
154 scif_clk_pins: scif_clk {
155 renesas,groups = "scif_clk";
156 renesas,function = "scif_clk";
157 };
158
159 ether_pins: ether { 151 ether_pins: ether {
160 renesas,groups = "eth_link", "eth_mdio", "eth_rmii"; 152 renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
161 renesas,function = "eth"; 153 renesas,function = "eth";
@@ -229,11 +221,6 @@
229 status = "okay"; 221 status = "okay";
230}; 222};
231 223
232&scif_clk {
233 clock-frequency = <14745600>;
234 status = "okay";
235};
236
237&ether { 224&ether {
238 pinctrl-0 = <&ether_pins &phy1_pins>; 225 pinctrl-0 = <&ether_pins &phy1_pins>;
239 pinctrl-names = "default"; 226 pinctrl-names = "default";
@@ -414,6 +401,7 @@
414}; 401};
415 402
416&pcie_bus_clk { 403&pcie_bus_clk {
404 clock-frequency = <100000000>;
417 status = "okay"; 405 status = "okay";
418}; 406};
419 407
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 6439f0569fe2..1cd1b6a3a72a 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1083,9 +1083,8 @@
1083 pcie_bus_clk: pcie_bus_clk { 1083 pcie_bus_clk: pcie_bus_clk {
1084 compatible = "fixed-clock"; 1084 compatible = "fixed-clock";
1085 #clock-cells = <0>; 1085 #clock-cells = <0>;
1086 clock-frequency = <100000000>; 1086 clock-frequency = <0>;
1087 clock-output-names = "pcie_bus"; 1087 clock-output-names = "pcie_bus";
1088 status = "disabled";
1089 }; 1088 };
1090 1089
1091 /* External SCIF clock */ 1090 /* External SCIF clock */
@@ -1094,7 +1093,6 @@
1094 #clock-cells = <0>; 1093 #clock-cells = <0>;
1095 /* This value must be overridden by the board. */ 1094 /* This value must be overridden by the board. */
1096 clock-frequency = <0>; 1095 clock-frequency = <0>;
1097 status = "disabled";
1098 }; 1096 };
1099 1097
1100 /* External USB clock - can be overridden by the board */ 1098 /* External USB clock - can be overridden by the board */
@@ -1112,7 +1110,6 @@
1112 /* This value must be overridden by the board. */ 1110 /* This value must be overridden by the board. */
1113 clock-frequency = <0>; 1111 clock-frequency = <0>;
1114 clock-output-names = "can_clk"; 1112 clock-output-names = "can_clk";
1115 status = "disabled";
1116 }; 1113 };
1117 1114
1118 /* Special CPG clocks */ 1115 /* Special CPG clocks */
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b23c6c81c9ad..1ee94c716a7f 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -276,7 +276,7 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
276 int feature = (features >> field) & 15; 276 int feature = (features >> field) & 15;
277 277
278 /* feature registers are signed values */ 278 /* feature registers are signed values */
279 if (feature > 8) 279 if (feature > 7)
280 feature -= 16; 280 feature -= 16;
281 281
282 return feature; 282 return feature;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index a28fce0bdbbe..2c4bea39cf22 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -512,7 +512,7 @@ static void __init elf_hwcap_fixup(void)
512 */ 512 */
513 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 || 513 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
514 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 && 514 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
515 cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3)) 515 cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
516 elf_hwcap &= ~HWCAP_SWP; 516 elf_hwcap &= ~HWCAP_SWP;
517} 517}
518 518
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index a5edd7d60266..3d039ef021e0 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -71,6 +71,7 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx(
71 if (!pdata) 71 if (!pdata)
72 pdata = &default_esdhc_pdata; 72 pdata = &default_esdhc_pdata;
73 73
74 return imx_add_platform_device(data->devid, data->id, res, 74 return imx_add_platform_device_dmamask(data->devid, data->id, res,
75 ARRAY_SIZE(res), pdata, sizeof(*pdata)); 75 ARRAY_SIZE(res), pdata, sizeof(*pdata),
76 DMA_BIT_MASK(32));
76} 77}
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 7581e036bda6..ef9ed36e8a61 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -461,7 +461,7 @@ static struct clockdomain ipu_7xx_clkdm = {
461 .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, 461 .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST,
462 .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS, 462 .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
463 .dep_bit = DRA7XX_IPU_STATDEP_SHIFT, 463 .dep_bit = DRA7XX_IPU_STATDEP_SHIFT,
464 .flags = CLKDM_CAN_HWSUP_SWSUP, 464 .flags = CLKDM_CAN_SWSUP,
465}; 465};
466 466
467static struct clockdomain mpu1_7xx_clkdm = { 467static struct clockdomain mpu1_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 9821be6dfd5e..49de4dd227be 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -737,7 +737,8 @@ void __init omap5_init_late(void)
737#ifdef CONFIG_SOC_DRA7XX 737#ifdef CONFIG_SOC_DRA7XX
738void __init dra7xx_init_early(void) 738void __init dra7xx_init_early(void)
739{ 739{
740 omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE)); 740 omap2_set_globals_tap(DRA7XX_CLASS,
741 OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
741 omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE)); 742 omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
742 omap2_control_base_init(); 743 omap2_control_base_init();
743 omap4_pm_init_early(); 744 omap4_pm_init_early();
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f397bd6bd6e3..2c04f2741476 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -274,6 +274,10 @@ static inline void omap5_irq_save_context(void)
274 */ 274 */
275static void irq_save_context(void) 275static void irq_save_context(void)
276{ 276{
277 /* DRA7 has no SAR to save */
278 if (soc_is_dra7xx())
279 return;
280
277 if (!sar_base) 281 if (!sar_base)
278 sar_base = omap4_get_sar_ram_base(); 282 sar_base = omap4_get_sar_ram_base();
279 283
@@ -290,6 +294,9 @@ static void irq_sar_clear(void)
290{ 294{
291 u32 val; 295 u32 val;
292 u32 offset = SAR_BACKUP_STATUS_OFFSET; 296 u32 offset = SAR_BACKUP_STATUS_OFFSET;
297 /* DRA7 has no SAR to save */
298 if (soc_is_dra7xx())
299 return;
293 300
294 if (soc_is_omap54xx()) 301 if (soc_is_omap54xx())
295 offset = OMAP5_SAR_BACKUP_STATUS_OFFSET; 302 offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 2dbd3785ee6f..d44e0e2f1106 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -198,7 +198,6 @@ void omap_sram_idle(void)
198 int per_next_state = PWRDM_POWER_ON; 198 int per_next_state = PWRDM_POWER_ON;
199 int core_next_state = PWRDM_POWER_ON; 199 int core_next_state = PWRDM_POWER_ON;
200 int per_going_off; 200 int per_going_off;
201 int core_prev_state;
202 u32 sdrc_pwr = 0; 201 u32 sdrc_pwr = 0;
203 202
204 mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); 203 mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
@@ -278,16 +277,20 @@ void omap_sram_idle(void)
278 sdrc_write_reg(sdrc_pwr, SDRC_POWER); 277 sdrc_write_reg(sdrc_pwr, SDRC_POWER);
279 278
280 /* CORE */ 279 /* CORE */
281 if (core_next_state < PWRDM_POWER_ON) { 280 if (core_next_state < PWRDM_POWER_ON &&
282 core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); 281 pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
283 if (core_prev_state == PWRDM_POWER_OFF) { 282 omap3_core_restore_context();
284 omap3_core_restore_context(); 283 omap3_cm_restore_context();
285 omap3_cm_restore_context(); 284 omap3_sram_restore_context();
286 omap3_sram_restore_context(); 285 omap2_sms_restore_context();
287 omap2_sms_restore_context(); 286 } else {
288 } 287 /*
288 * In off-mode resume path above, omap3_core_restore_context
289 * also handles the INTC autoidle restore done here so limit
290 * this to non-off mode resume paths so we don't do it twice.
291 */
292 omap3_intc_resume_idle();
289 } 293 }
290 omap3_intc_resume_idle();
291 294
292 pwrdm_post_transition(NULL); 295 pwrdm_post_transition(NULL);
293 296
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index ad008e4b0c49..67d79f9c6bad 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -40,8 +40,7 @@ static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
40void __init shmobile_init_delay(void) 40void __init shmobile_init_delay(void)
41{ 41{
42 struct device_node *np, *cpus; 42 struct device_node *np, *cpus;
43 bool is_a7_a8_a9 = false; 43 unsigned int div = 0;
44 bool is_a15 = false;
45 bool has_arch_timer = false; 44 bool has_arch_timer = false;
46 u32 max_freq = 0; 45 u32 max_freq = 0;
47 46
@@ -55,27 +54,22 @@ void __init shmobile_init_delay(void)
55 if (!of_property_read_u32(np, "clock-frequency", &freq)) 54 if (!of_property_read_u32(np, "clock-frequency", &freq))
56 max_freq = max(max_freq, freq); 55 max_freq = max(max_freq, freq);
57 56
58 if (of_device_is_compatible(np, "arm,cortex-a8") || 57 if (of_device_is_compatible(np, "arm,cortex-a8")) {
59 of_device_is_compatible(np, "arm,cortex-a9")) { 58 div = 2;
60 is_a7_a8_a9 = true; 59 } else if (of_device_is_compatible(np, "arm,cortex-a9")) {
61 } else if (of_device_is_compatible(np, "arm,cortex-a7")) { 60 div = 1;
62 is_a7_a8_a9 = true; 61 } else if (of_device_is_compatible(np, "arm,cortex-a7") ||
63 has_arch_timer = true; 62 of_device_is_compatible(np, "arm,cortex-a15")) {
64 } else if (of_device_is_compatible(np, "arm,cortex-a15")) { 63 div = 1;
65 is_a15 = true;
66 has_arch_timer = true; 64 has_arch_timer = true;
67 } 65 }
68 } 66 }
69 67
70 of_node_put(cpus); 68 of_node_put(cpus);
71 69
72 if (!max_freq) 70 if (!max_freq || !div)
73 return; 71 return;
74 72
75 if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { 73 if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
76 if (is_a7_a8_a9) 74 shmobile_setup_delay_hz(max_freq, 1, div);
77 shmobile_setup_delay_hz(max_freq, 1, 3);
78 else if (is_a15)
79 shmobile_setup_delay_hz(max_freq, 2, 4);
80 }
81} 75}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index deac58d5f1f7..c941e93048ad 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -762,7 +762,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
762 if (!mask) 762 if (!mask)
763 return NULL; 763 return NULL;
764 764
765 buf = kzalloc(sizeof(*buf), gfp); 765 buf = kzalloc(sizeof(*buf),
766 gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
766 if (!buf) 767 if (!buf)
767 return NULL; 768 return NULL;
768 769
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
index 727ae5f8c4e7..b0ed44313a5b 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
@@ -70,7 +70,6 @@
70 i2c3 = &i2c3; 70 i2c3 = &i2c3;
71 i2c4 = &i2c4; 71 i2c4 = &i2c4;
72 i2c5 = &i2c5; 72 i2c5 = &i2c5;
73 i2c6 = &i2c6;
74 }; 73 };
75}; 74};
76 75
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
index e682a3f52791..651c9d9d2d54 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
@@ -201,15 +201,12 @@
201 201
202 i2c2: i2c@58782000 { 202 i2c2: i2c@58782000 {
203 compatible = "socionext,uniphier-fi2c"; 203 compatible = "socionext,uniphier-fi2c";
204 status = "disabled";
205 reg = <0x58782000 0x80>; 204 reg = <0x58782000 0x80>;
206 #address-cells = <1>; 205 #address-cells = <1>;
207 #size-cells = <0>; 206 #size-cells = <0>;
208 interrupts = <0 43 4>; 207 interrupts = <0 43 4>;
209 pinctrl-names = "default";
210 pinctrl-0 = <&pinctrl_i2c2>;
211 clocks = <&i2c_clk>; 208 clocks = <&i2c_clk>;
212 clock-frequency = <100000>; 209 clock-frequency = <400000>;
213 }; 210 };
214 211
215 i2c3: i2c@58783000 { 212 i2c3: i2c@58783000 {
@@ -227,12 +224,15 @@
227 224
228 i2c4: i2c@58784000 { 225 i2c4: i2c@58784000 {
229 compatible = "socionext,uniphier-fi2c"; 226 compatible = "socionext,uniphier-fi2c";
227 status = "disabled";
230 reg = <0x58784000 0x80>; 228 reg = <0x58784000 0x80>;
231 #address-cells = <1>; 229 #address-cells = <1>;
232 #size-cells = <0>; 230 #size-cells = <0>;
233 interrupts = <0 45 4>; 231 interrupts = <0 45 4>;
232 pinctrl-names = "default";
233 pinctrl-0 = <&pinctrl_i2c4>;
234 clocks = <&i2c_clk>; 234 clocks = <&i2c_clk>;
235 clock-frequency = <400000>; 235 clock-frequency = <100000>;
236 }; 236 };
237 237
238 i2c5: i2c@58785000 { 238 i2c5: i2c@58785000 {
@@ -245,16 +245,6 @@
245 clock-frequency = <400000>; 245 clock-frequency = <400000>;
246 }; 246 };
247 247
248 i2c6: i2c@58786000 {
249 compatible = "socionext,uniphier-fi2c";
250 reg = <0x58786000 0x80>;
251 #address-cells = <1>;
252 #size-cells = <0>;
253 interrupts = <0 26 4>;
254 clocks = <&i2c_clk>;
255 clock-frequency = <400000>;
256 };
257
258 system_bus: system-bus@58c00000 { 248 system_bus: system-bus@58c00000 {
259 compatible = "socionext,uniphier-system-bus"; 249 compatible = "socionext,uniphier-system-bus";
260 status = "disabled"; 250 status = "disabled";
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4203d5f257bc..85da0f599cd6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -588,6 +588,15 @@ set_hcr:
588 msr vpidr_el2, x0 588 msr vpidr_el2, x0
589 msr vmpidr_el2, x1 589 msr vmpidr_el2, x1
590 590
591 /*
592 * When VHE is not in use, early init of EL2 and EL1 needs to be
593 * done here.
594 * When VHE _is_ in use, EL1 will not be used in the host and
595 * requires no configuration, and all non-hyp-specific EL2 setup
596 * will be done via the _EL1 system register aliases in __cpu_setup.
597 */
598 cbnz x2, 1f
599
591 /* sctlr_el1 */ 600 /* sctlr_el1 */
592 mov x0, #0x0800 // Set/clear RES{1,0} bits 601 mov x0, #0x0800 // Set/clear RES{1,0} bits
593CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems 602CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
597 /* Coprocessor traps. */ 606 /* Coprocessor traps. */
598 mov x0, #0x33ff 607 mov x0, #0x33ff
599 msr cptr_el2, x0 // Disable copro. traps to EL2 608 msr cptr_el2, x0 // Disable copro. traps to EL2
6091:
600 610
601#ifdef CONFIG_COMPAT 611#ifdef CONFIG_COMPAT
602 msr hstr_el2, xzr // Disable CP15 traps to EL2 612 msr hstr_el2, xzr // Disable CP15 traps to EL2
@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
734 744
735 .macro update_early_cpu_boot_status status, tmp1, tmp2 745 .macro update_early_cpu_boot_status status, tmp1, tmp2
736 mov \tmp2, #\status 746 mov \tmp2, #\status
737 str_l \tmp2, __early_cpu_boot_status, \tmp1 747 adr_l \tmp1, __early_cpu_boot_status
748 str \tmp2, [\tmp1]
738 dmb sy 749 dmb sy
739 dc ivac, \tmp1 // Invalidate potentially stale cache line 750 dc ivac, \tmp1 // Invalidate potentially stale cache line
740 .endm 751 .endm
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index aef3605a8c47..18a71bcd26ee 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
52static int smp_spin_table_cpu_init(unsigned int cpu) 52static int smp_spin_table_cpu_init(unsigned int cpu)
53{ 53{
54 struct device_node *dn; 54 struct device_node *dn;
55 int ret;
55 56
56 dn = of_get_cpu_node(cpu, NULL); 57 dn = of_get_cpu_node(cpu, NULL);
57 if (!dn) 58 if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
60 /* 61 /*
61 * Determine the address from which the CPU is polling. 62 * Determine the address from which the CPU is polling.
62 */ 63 */
63 if (of_property_read_u64(dn, "cpu-release-addr", 64 ret = of_property_read_u64(dn, "cpu-release-addr",
64 &cpu_release_addr[cpu])) { 65 &cpu_release_addr[cpu]);
66 if (ret)
65 pr_err("CPU %d: missing or invalid cpu-release-addr property\n", 67 pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
66 cpu); 68 cpu);
67 69
68 return -1; 70 of_node_put(dn);
69 }
70 71
71 return 0; 72 return ret;
72} 73}
73 74
74static int smp_spin_table_cpu_prepare(unsigned int cpu) 75static int smp_spin_table_cpu_prepare(unsigned int cpu)
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c
index c2cfcb121e34..2fcefe720283 100644
--- a/arch/nios2/lib/memset.c
+++ b/arch/nios2/lib/memset.c
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count)
68 "=r" (charcnt), /* %1 Output */ 68 "=r" (charcnt), /* %1 Output */
69 "=r" (dwordcnt), /* %2 Output */ 69 "=r" (dwordcnt), /* %2 Output */
70 "=r" (fill8reg), /* %3 Output */ 70 "=r" (fill8reg), /* %3 Output */
71 "=r" (wrkrega) /* %4 Output */ 71 "=&r" (wrkrega) /* %4 Output only */
72 : "r" (c), /* %5 Input */ 72 : "r" (c), /* %5 Input */
73 "0" (s), /* %0 Input/Output */ 73 "0" (s), /* %0 Input/Output */
74 "1" (count) /* %1 Input/Output */ 74 "1" (count) /* %1 Input/Output */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3fa9df70aa20..2fc5d4db503c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall)
384SYSCALL(ni_syscall) 384SYSCALL(ni_syscall)
385SYSCALL(mlock2) 385SYSCALL(mlock2)
386SYSCALL(copy_file_range) 386SYSCALL(copy_file_range)
387COMPAT_SYS_SPU(preadv2)
388COMPAT_SYS_SPU(pwritev2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1f2594d45605..cf12c580f6b2 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define NR_syscalls 380 15#define NR_syscalls 382
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18 18
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 8dde19962a5b..f63c96cd3608 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
31#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \ 31#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
32 0x00000040 32 0x00000040
33 33
34/* Reserved - do not use 0x00000004 */
34#define PPC_FEATURE_TRUE_LE 0x00000002 35#define PPC_FEATURE_TRUE_LE 0x00000002
35#define PPC_FEATURE_PPC_LE 0x00000001 36#define PPC_FEATURE_PPC_LE 0x00000001
36 37
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 940290d45b08..e9f5f41aa55a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -390,5 +390,7 @@
390#define __NR_membarrier 365 390#define __NR_membarrier 365
391#define __NR_mlock2 378 391#define __NR_mlock2 378
392#define __NR_copy_file_range 379 392#define __NR_copy_file_range 379
393#define __NR_preadv2 380
394#define __NR_pwritev2 381
393 395
394#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 396#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b035905d..a15fe1d4e84a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
148 unsigned long cpu_features; /* CPU_FTR_xxx bit */ 148 unsigned long cpu_features; /* CPU_FTR_xxx bit */
149 unsigned long mmu_features; /* MMU_FTR_xxx bit */ 149 unsigned long mmu_features; /* MMU_FTR_xxx bit */
150 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ 150 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
151 unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
151 unsigned char pabyte; /* byte number in ibm,pa-features */ 152 unsigned char pabyte; /* byte number in ibm,pa-features */
152 unsigned char pabit; /* bit number (big-endian) */ 153 unsigned char pabit; /* bit number (big-endian) */
153 unsigned char invert; /* if 1, pa bit set => clear feature */ 154 unsigned char invert; /* if 1, pa bit set => clear feature */
154} ibm_pa_features[] __initdata = { 155} ibm_pa_features[] __initdata = {
155 {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, 156 {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
156 {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, 157 {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
157 {CPU_FTR_CTRL, 0, 0, 0, 3, 0}, 158 {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
158 {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0}, 159 {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
159 {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, 160 {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
160 {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 161 {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
161 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 162 {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
162 /* 163 /*
163 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n), 164 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
164 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP 165 * we don't want to turn on TM here, so we use the *_COMP versions
165 * which is 0 if the kernel doesn't support TM. 166 * which are 0 if the kernel doesn't support TM.
166 */ 167 */
167 {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0}, 168 {CPU_FTR_TM_COMP, 0, 0,
169 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
168}; 170};
169 171
170static void __init scan_features(unsigned long node, const unsigned char *ftrs, 172static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
195 if (bit ^ fp->invert) { 197 if (bit ^ fp->invert) {
196 cur_cpu_spec->cpu_features |= fp->cpu_features; 198 cur_cpu_spec->cpu_features |= fp->cpu_features;
197 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; 199 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
200 cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
198 cur_cpu_spec->mmu_features |= fp->mmu_features; 201 cur_cpu_spec->mmu_features |= fp->mmu_features;
199 } else { 202 } else {
200 cur_cpu_spec->cpu_features &= ~fp->cpu_features; 203 cur_cpu_spec->cpu_features &= ~fp->cpu_features;
201 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; 204 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
205 cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
202 cur_cpu_spec->mmu_features &= ~fp->mmu_features; 206 cur_cpu_spec->mmu_features &= ~fp->mmu_features;
203 } 207 }
204 } 208 }
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index aad23e3dff2c..bf24ab188921 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -4,6 +4,9 @@ config MMU
4config ZONE_DMA 4config ZONE_DMA
5 def_bool y 5 def_bool y
6 6
7config CPU_BIG_ENDIAN
8 def_bool y
9
7config LOCKDEP_SUPPORT 10config LOCKDEP_SUPPORT
8 def_bool y 11 def_bool y
9 12
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d29ad9545b41..081b2ad99d73 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
11 spinlock_t list_lock; 11 spinlock_t list_lock;
12 struct list_head pgtable_list; 12 struct list_head pgtable_list;
13 struct list_head gmap_list; 13 struct list_head gmap_list;
14 unsigned long asce_bits; 14 unsigned long asce;
15 unsigned long asce_limit; 15 unsigned long asce_limit;
16 unsigned long vdso_base; 16 unsigned long vdso_base;
17 /* The mmu context allocates 4K page tables. */ 17 /* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d321469eeda7..c837b79b455d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
26 mm->context.has_pgste = 0; 26 mm->context.has_pgste = 0;
27 mm->context.use_skey = 0; 27 mm->context.use_skey = 0;
28#endif 28#endif
29 if (mm->context.asce_limit == 0) { 29 switch (mm->context.asce_limit) {
30 case 1UL << 42:
31 /*
32 * forked 3-level task, fall through to set new asce with new
33 * mm->pgd
34 */
35 case 0:
30 /* context created by exec, set asce limit to 4TB */ 36 /* context created by exec, set asce limit to 4TB */
31 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
32 _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
33 mm->context.asce_limit = STACK_TOP_MAX; 37 mm->context.asce_limit = STACK_TOP_MAX;
34 } else if (mm->context.asce_limit == (1UL << 31)) { 38 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
39 _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
40 break;
41 case 1UL << 53:
42 /* forked 4-level task, set new asce with new mm->pgd */
43 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
44 _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
45 break;
46 case 1UL << 31:
47 /* forked 2-level compat task, set new asce with new mm->pgd */
48 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
49 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
50 /* pgd_alloc() did not increase mm->nr_pmds */
35 mm_inc_nr_pmds(mm); 51 mm_inc_nr_pmds(mm);
36 } 52 }
37 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 53 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
42 58
43static inline void set_user_asce(struct mm_struct *mm) 59static inline void set_user_asce(struct mm_struct *mm)
44{ 60{
45 S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd); 61 S390_lowcore.user_asce = mm->context.asce;
46 if (current->thread.mm_segment.ar4) 62 if (current->thread.mm_segment.ar4)
47 __ctl_load(S390_lowcore.user_asce, 7, 7); 63 __ctl_load(S390_lowcore.user_asce, 7, 7);
48 set_cpu_flag(CIF_ASCE); 64 set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
71{ 87{
72 int cpu = smp_processor_id(); 88 int cpu = smp_processor_id();
73 89
74 S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); 90 S390_lowcore.user_asce = next->context.asce;
75 if (prev == next) 91 if (prev == next)
76 return; 92 return;
77 if (MACHINE_HAS_TLB_LC) 93 if (MACHINE_HAS_TLB_LC)
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index b6bfa169a002..535a46d46d28 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -44,7 +44,8 @@ struct zpci_fmb {
44 u64 rpcit_ops; 44 u64 rpcit_ops;
45 u64 dma_rbytes; 45 u64 dma_rbytes;
46 u64 dma_wbytes; 46 u64 dma_wbytes;
47} __packed __aligned(64); 47 u64 pad[2];
48} __packed __aligned(128);
48 49
49enum zpci_state { 50enum zpci_state {
50 ZPCI_FN_STATE_RESERVED, 51 ZPCI_FN_STATE_RESERVED,
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9b3d9b6099f2..da34cb6b1f3b 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
52 return _REGION2_ENTRY_EMPTY; 52 return _REGION2_ENTRY_EMPTY;
53} 53}
54 54
55int crst_table_upgrade(struct mm_struct *, unsigned long limit); 55int crst_table_upgrade(struct mm_struct *);
56void crst_table_downgrade(struct mm_struct *, unsigned long limit); 56void crst_table_downgrade(struct mm_struct *);
57 57
58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) 58static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
59{ 59{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d6fd22ea270d..18cdede1aeda 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
175 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ 175 regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
176 regs->psw.addr = new_psw; \ 176 regs->psw.addr = new_psw; \
177 regs->gprs[15] = new_stackp; \ 177 regs->gprs[15] = new_stackp; \
178 crst_table_downgrade(current->mm, 1UL << 31); \ 178 crst_table_downgrade(current->mm); \
179 execve_tail(); \ 179 execve_tail(); \
180} while (0) 180} while (0)
181 181
diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h
index 781a9cf9b002..e10f8337367b 100644
--- a/arch/s390/include/asm/seccomp.h
+++ b/arch/s390/include/asm/seccomp.h
@@ -13,4 +13,6 @@
13#define __NR_seccomp_exit_32 __NR_exit 13#define __NR_seccomp_exit_32 __NR_exit
14#define __NR_seccomp_sigreturn_32 __NR_sigreturn 14#define __NR_seccomp_sigreturn_32 __NR_sigreturn
15 15
16#include <asm-generic/seccomp.h>
17
16#endif /* _ASM_S390_SECCOMP_H */ 18#endif /* _ASM_S390_SECCOMP_H */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index ca148f7c3eaa..a2e6ef32e054 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
110static inline void __tlb_flush_kernel(void) 110static inline void __tlb_flush_kernel(void)
111{ 111{
112 if (MACHINE_HAS_IDTE) 112 if (MACHINE_HAS_IDTE)
113 __tlb_flush_idte((unsigned long) init_mm.pgd | 113 __tlb_flush_idte(init_mm.context.asce);
114 init_mm.context.asce_bits);
115 else 114 else
116 __tlb_flush_global(); 115 __tlb_flush_global();
117} 116}
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
133static inline void __tlb_flush_kernel(void) 132static inline void __tlb_flush_kernel(void)
134{ 133{
135 if (MACHINE_HAS_TLB_LC) 134 if (MACHINE_HAS_TLB_LC)
136 __tlb_flush_idte_local((unsigned long) init_mm.pgd | 135 __tlb_flush_idte_local(init_mm.context.asce);
137 init_mm.context.asce_bits);
138 else 136 else
139 __tlb_flush_local(); 137 __tlb_flush_local();
140} 138}
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
148 * only ran on the local cpu. 146 * only ran on the local cpu.
149 */ 147 */
150 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) 148 if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
151 __tlb_flush_asce(mm, (unsigned long) mm->pgd | 149 __tlb_flush_asce(mm, mm->context.asce);
152 mm->context.asce_bits);
153 else 150 else
154 __tlb_flush_full(mm); 151 __tlb_flush_full(mm);
155} 152}
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4549c964589..e5f50a7d2f4e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -105,6 +105,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
105 if (_raw_compare_and_swap(&lp->lock, 0, cpu)) 105 if (_raw_compare_and_swap(&lp->lock, 0, cpu))
106 return; 106 return;
107 local_irq_restore(flags); 107 local_irq_restore(flags);
108 continue;
108 } 109 }
109 /* Check if the lock owner is running. */ 110 /* Check if the lock owner is running. */
110 if (first_diag && cpu_is_preempted(~owner)) { 111 if (first_diag && cpu_is_preempted(~owner)) {
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7b0451397d6..2489b2e917c8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
89 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; 89 asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
90 pgd_type = _REGION3_ENTRY_EMPTY; 90 pgd_type = _REGION3_ENTRY_EMPTY;
91 } 91 }
92 S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; 92 init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
93 S390_lowcore.kernel_asce = init_mm.context.asce;
93 clear_table((unsigned long *) init_mm.pgd, pgd_type, 94 clear_table((unsigned long *) init_mm.pgd, pgd_type,
94 sizeof(unsigned long)*2048); 95 sizeof(unsigned long)*2048);
95 vmem_map_init(); 96 vmem_map_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 45c4daa49930..89cf09e5f168 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
174 if (!(flags & MAP_FIXED)) 174 if (!(flags & MAP_FIXED))
175 addr = 0; 175 addr = 0;
176 if ((addr + len) >= TASK_SIZE) 176 if ((addr + len) >= TASK_SIZE)
177 return crst_table_upgrade(current->mm, TASK_MAX_SIZE); 177 return crst_table_upgrade(current->mm);
178 return 0; 178 return 0;
179} 179}
180 180
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
191 return area; 191 return area;
192 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { 192 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
193 /* Upgrade the page table to 4 levels and retry. */ 193 /* Upgrade the page table to 4 levels and retry. */
194 rc = crst_table_upgrade(mm, TASK_MAX_SIZE); 194 rc = crst_table_upgrade(mm);
195 if (rc) 195 if (rc)
196 return (unsigned long) rc; 196 return (unsigned long) rc;
197 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); 197 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
213 return area; 213 return area;
214 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { 214 if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
215 /* Upgrade the page table to 4 levels and retry. */ 215 /* Upgrade the page table to 4 levels and retry. */
216 rc = crst_table_upgrade(mm, TASK_MAX_SIZE); 216 rc = crst_table_upgrade(mm);
217 if (rc) 217 if (rc)
218 return (unsigned long) rc; 218 return (unsigned long) rc;
219 area = arch_get_unmapped_area_topdown(filp, addr, len, 219 area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f6c3de26cda8..e8b5962ac12a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg)
76 __tlb_flush_local(); 76 __tlb_flush_local();
77} 77}
78 78
79int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) 79int crst_table_upgrade(struct mm_struct *mm)
80{ 80{
81 unsigned long *table, *pgd; 81 unsigned long *table, *pgd;
82 unsigned long entry;
83 int flush;
84 82
85 BUG_ON(limit > TASK_MAX_SIZE); 83 /* upgrade should only happen from 3 to 4 levels */
86 flush = 0; 84 BUG_ON(mm->context.asce_limit != (1UL << 42));
87repeat: 85
88 table = crst_table_alloc(mm); 86 table = crst_table_alloc(mm);
89 if (!table) 87 if (!table)
90 return -ENOMEM; 88 return -ENOMEM;
89
91 spin_lock_bh(&mm->page_table_lock); 90 spin_lock_bh(&mm->page_table_lock);
92 if (mm->context.asce_limit < limit) { 91 pgd = (unsigned long *) mm->pgd;
93 pgd = (unsigned long *) mm->pgd; 92 crst_table_init(table, _REGION2_ENTRY_EMPTY);
94 if (mm->context.asce_limit <= (1UL << 31)) { 93 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
95 entry = _REGION3_ENTRY_EMPTY; 94 mm->pgd = (pgd_t *) table;
96 mm->context.asce_limit = 1UL << 42; 95 mm->context.asce_limit = 1UL << 53;
97 mm->context.asce_bits = _ASCE_TABLE_LENGTH | 96 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
98 _ASCE_USER_BITS | 97 _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
99 _ASCE_TYPE_REGION3; 98 mm->task_size = mm->context.asce_limit;
100 } else {
101 entry = _REGION2_ENTRY_EMPTY;
102 mm->context.asce_limit = 1UL << 53;
103 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
104 _ASCE_USER_BITS |
105 _ASCE_TYPE_REGION2;
106 }
107 crst_table_init(table, entry);
108 pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
109 mm->pgd = (pgd_t *) table;
110 mm->task_size = mm->context.asce_limit;
111 table = NULL;
112 flush = 1;
113 }
114 spin_unlock_bh(&mm->page_table_lock); 99 spin_unlock_bh(&mm->page_table_lock);
115 if (table) 100
116 crst_table_free(mm, table); 101 on_each_cpu(__crst_table_upgrade, mm, 0);
117 if (mm->context.asce_limit < limit)
118 goto repeat;
119 if (flush)
120 on_each_cpu(__crst_table_upgrade, mm, 0);
121 return 0; 102 return 0;
122} 103}
123 104
124void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) 105void crst_table_downgrade(struct mm_struct *mm)
125{ 106{
126 pgd_t *pgd; 107 pgd_t *pgd;
127 108
109 /* downgrade should only happen from 3 to 2 levels (compat only) */
110 BUG_ON(mm->context.asce_limit != (1UL << 42));
111
128 if (current->active_mm == mm) { 112 if (current->active_mm == mm) {
129 clear_user_asce(); 113 clear_user_asce();
130 __tlb_flush_mm(mm); 114 __tlb_flush_mm(mm);
131 } 115 }
132 while (mm->context.asce_limit > limit) { 116
133 pgd = mm->pgd; 117 pgd = mm->pgd;
134 switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { 118 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
135 case _REGION_ENTRY_TYPE_R2: 119 mm->context.asce_limit = 1UL << 31;
136 mm->context.asce_limit = 1UL << 42; 120 mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
137 mm->context.asce_bits = _ASCE_TABLE_LENGTH | 121 _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
138 _ASCE_USER_BITS | 122 mm->task_size = mm->context.asce_limit;
139 _ASCE_TYPE_REGION3; 123 crst_table_free(mm, (unsigned long *) pgd);
140 break; 124
141 case _REGION_ENTRY_TYPE_R3:
142 mm->context.asce_limit = 1UL << 31;
143 mm->context.asce_bits = _ASCE_TABLE_LENGTH |
144 _ASCE_USER_BITS |
145 _ASCE_TYPE_SEGMENT;
146 break;
147 default:
148 BUG();
149 }
150 mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
151 mm->task_size = mm->context.asce_limit;
152 crst_table_free(mm, (unsigned long *) pgd);
153 }
154 if (current->active_mm == mm) 125 if (current->active_mm == mm)
155 set_user_asce(mm); 126 set_user_asce(mm);
156} 127}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index e595e89eac65..1ea8c07eab84 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
457 zdev->dma_table = dma_alloc_cpu_table(); 457 zdev->dma_table = dma_alloc_cpu_table();
458 if (!zdev->dma_table) { 458 if (!zdev->dma_table) {
459 rc = -ENOMEM; 459 rc = -ENOMEM;
460 goto out_clean; 460 goto out;
461 } 461 }
462 462
463 /* 463 /*
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
477 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); 477 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
478 if (!zdev->iommu_bitmap) { 478 if (!zdev->iommu_bitmap) {
479 rc = -ENOMEM; 479 rc = -ENOMEM;
480 goto out_reg; 480 goto free_dma_table;
481 } 481 }
482 482
483 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, 483 rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
484 (u64) zdev->dma_table); 484 (u64) zdev->dma_table);
485 if (rc) 485 if (rc)
486 goto out_reg; 486 goto free_bitmap;
487 return 0;
488 487
489out_reg: 488 return 0;
489free_bitmap:
490 vfree(zdev->iommu_bitmap);
491 zdev->iommu_bitmap = NULL;
492free_dma_table:
490 dma_free_cpu_table(zdev->dma_table); 493 dma_free_cpu_table(zdev->dma_table);
491out_clean: 494 zdev->dma_table = NULL;
495out:
492 return rc; 496 return rc;
493} 497}
494 498
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a8a0224fa0f8..081255cea1ee 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
453 453
454 req = cast_mcryptd_ctx_to_req(req_ctx); 454 req = cast_mcryptd_ctx_to_req(req_ctx);
455 if (irqs_disabled()) 455 if (irqs_disabled())
456 rctx->complete(&req->base, ret); 456 req_ctx->complete(&req->base, ret);
457 else { 457 else {
458 local_bh_disable(); 458 local_bh_disable();
459 rctx->complete(&req->base, ret); 459 req_ctx->complete(&req->base, ret);
460 local_bh_enable(); 460 local_bh_enable();
461 } 461 }
462 } 462 }
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 86a9bec18dab..bd3e8421b57c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -115,7 +115,7 @@ static __initconst const u64 amd_hw_cache_event_ids
115/* 115/*
116 * AMD Performance Monitor K7 and later. 116 * AMD Performance Monitor K7 and later.
117 */ 117 */
118static const u64 amd_perfmon_event_map[] = 118static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
119{ 119{
120 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, 120 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
121 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, 121 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 68fa55b4d42e..aff79884e17d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3639,6 +3639,7 @@ __init int intel_pmu_init(void)
3639 3639
3640 case 78: /* 14nm Skylake Mobile */ 3640 case 78: /* 14nm Skylake Mobile */
3641 case 94: /* 14nm Skylake Desktop */ 3641 case 94: /* 14nm Skylake Desktop */
3642 case 85: /* 14nm Skylake Server */
3642 x86_pmu.late_ack = true; 3643 x86_pmu.late_ack = true;
3643 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 3644 memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
3644 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 3645 memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6c3b7c1780c9..1ca5d1e7d4f2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -63,7 +63,7 @@ static enum {
63 63
64#define LBR_PLM (LBR_KERNEL | LBR_USER) 64#define LBR_PLM (LBR_KERNEL | LBR_USER)
65 65
66#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */ 66#define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
67#define LBR_NOT_SUPP -1 /* LBR filter not supported */ 67#define LBR_NOT_SUPP -1 /* LBR filter not supported */
68#define LBR_IGN 0 /* ignored */ 68#define LBR_IGN 0 /* ignored */
69 69
@@ -610,8 +610,10 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
610 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate 610 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
611 * in suppress mode. So LBR_SELECT should be set to 611 * in suppress mode. So LBR_SELECT should be set to
612 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK) 612 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
613 * But the 10th bit LBR_CALL_STACK does not operate
614 * in suppress mode.
613 */ 615 */
614 reg->config = mask ^ x86_pmu.lbr_sel_mask; 616 reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
615 617
616 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) && 618 if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
617 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) && 619 (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 6af7cf71d6b2..09a77dbc73c9 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -136,9 +136,21 @@ static int __init pt_pmu_hw_init(void)
136 struct dev_ext_attribute *de_attrs; 136 struct dev_ext_attribute *de_attrs;
137 struct attribute **attrs; 137 struct attribute **attrs;
138 size_t size; 138 size_t size;
139 u64 reg;
139 int ret; 140 int ret;
140 long i; 141 long i;
141 142
143 if (boot_cpu_has(X86_FEATURE_VMX)) {
144 /*
145 * Intel SDM, 36.5 "Tracing post-VMXON" says that
146 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
147 * post-VMXON.
148 */
149 rdmsrl(MSR_IA32_VMX_MISC, reg);
150 if (reg & BIT(14))
151 pt_pmu.vmx = true;
152 }
153
142 attrs = NULL; 154 attrs = NULL;
143 155
144 for (i = 0; i < PT_CPUID_LEAVES; i++) { 156 for (i = 0; i < PT_CPUID_LEAVES; i++) {
@@ -269,20 +281,23 @@ static void pt_config(struct perf_event *event)
269 281
270 reg |= (event->attr.config & PT_CONFIG_MASK); 282 reg |= (event->attr.config & PT_CONFIG_MASK);
271 283
284 event->hw.config = reg;
272 wrmsrl(MSR_IA32_RTIT_CTL, reg); 285 wrmsrl(MSR_IA32_RTIT_CTL, reg);
273} 286}
274 287
275static void pt_config_start(bool start) 288static void pt_config_stop(struct perf_event *event)
276{ 289{
277 u64 ctl; 290 u64 ctl = READ_ONCE(event->hw.config);
291
292 /* may be already stopped by a PMI */
293 if (!(ctl & RTIT_CTL_TRACEEN))
294 return;
278 295
279 rdmsrl(MSR_IA32_RTIT_CTL, ctl); 296 ctl &= ~RTIT_CTL_TRACEEN;
280 if (start)
281 ctl |= RTIT_CTL_TRACEEN;
282 else
283 ctl &= ~RTIT_CTL_TRACEEN;
284 wrmsrl(MSR_IA32_RTIT_CTL, ctl); 297 wrmsrl(MSR_IA32_RTIT_CTL, ctl);
285 298
299 WRITE_ONCE(event->hw.config, ctl);
300
286 /* 301 /*
287 * A wrmsr that disables trace generation serializes other PT 302 * A wrmsr that disables trace generation serializes other PT
288 * registers and causes all data packets to be written to memory, 303 * registers and causes all data packets to be written to memory,
@@ -291,8 +306,7 @@ static void pt_config_start(bool start)
291 * The below WMB, separating data store and aux_head store matches 306 * The below WMB, separating data store and aux_head store matches
292 * the consumer's RMB that separates aux_head load and data load. 307 * the consumer's RMB that separates aux_head load and data load.
293 */ 308 */
294 if (!start) 309 wmb();
295 wmb();
296} 310}
297 311
298static void pt_config_buffer(void *buf, unsigned int topa_idx, 312static void pt_config_buffer(void *buf, unsigned int topa_idx,
@@ -942,11 +956,17 @@ void intel_pt_interrupt(void)
942 if (!ACCESS_ONCE(pt->handle_nmi)) 956 if (!ACCESS_ONCE(pt->handle_nmi))
943 return; 957 return;
944 958
945 pt_config_start(false); 959 /*
960 * If VMX is on and PT does not support it, don't touch anything.
961 */
962 if (READ_ONCE(pt->vmx_on))
963 return;
946 964
947 if (!event) 965 if (!event)
948 return; 966 return;
949 967
968 pt_config_stop(event);
969
950 buf = perf_get_aux(&pt->handle); 970 buf = perf_get_aux(&pt->handle);
951 if (!buf) 971 if (!buf)
952 return; 972 return;
@@ -983,6 +1003,35 @@ void intel_pt_interrupt(void)
983 } 1003 }
984} 1004}
985 1005
1006void intel_pt_handle_vmx(int on)
1007{
1008 struct pt *pt = this_cpu_ptr(&pt_ctx);
1009 struct perf_event *event;
1010 unsigned long flags;
1011
1012 /* PT plays nice with VMX, do nothing */
1013 if (pt_pmu.vmx)
1014 return;
1015
1016 /*
1017 * VMXON will clear RTIT_CTL.TraceEn; we need to make
1018 * sure to not try to set it while VMX is on. Disable
1019 * interrupts to avoid racing with pmu callbacks;
1020 * concurrent PMI should be handled fine.
1021 */
1022 local_irq_save(flags);
1023 WRITE_ONCE(pt->vmx_on, on);
1024
1025 if (on) {
1026 /* prevent pt_config_stop() from writing RTIT_CTL */
1027 event = pt->handle.event;
1028 if (event)
1029 event->hw.config = 0;
1030 }
1031 local_irq_restore(flags);
1032}
1033EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
1034
986/* 1035/*
987 * PMU callbacks 1036 * PMU callbacks
988 */ 1037 */
@@ -992,6 +1041,9 @@ static void pt_event_start(struct perf_event *event, int mode)
992 struct pt *pt = this_cpu_ptr(&pt_ctx); 1041 struct pt *pt = this_cpu_ptr(&pt_ctx);
993 struct pt_buffer *buf = perf_get_aux(&pt->handle); 1042 struct pt_buffer *buf = perf_get_aux(&pt->handle);
994 1043
1044 if (READ_ONCE(pt->vmx_on))
1045 return;
1046
995 if (!buf || pt_buffer_is_full(buf, pt)) { 1047 if (!buf || pt_buffer_is_full(buf, pt)) {
996 event->hw.state = PERF_HES_STOPPED; 1048 event->hw.state = PERF_HES_STOPPED;
997 return; 1049 return;
@@ -1014,7 +1066,8 @@ static void pt_event_stop(struct perf_event *event, int mode)
1014 * see comment in intel_pt_interrupt(). 1066 * see comment in intel_pt_interrupt().
1015 */ 1067 */
1016 ACCESS_ONCE(pt->handle_nmi) = 0; 1068 ACCESS_ONCE(pt->handle_nmi) = 0;
1017 pt_config_start(false); 1069
1070 pt_config_stop(event);
1018 1071
1019 if (event->hw.state == PERF_HES_STOPPED) 1072 if (event->hw.state == PERF_HES_STOPPED)
1020 return; 1073 return;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 336878a5d205..3abb5f5cccc8 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -65,6 +65,7 @@ enum pt_capabilities {
65struct pt_pmu { 65struct pt_pmu {
66 struct pmu pmu; 66 struct pmu pmu;
67 u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; 67 u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
68 bool vmx;
68}; 69};
69 70
70/** 71/**
@@ -107,10 +108,12 @@ struct pt_buffer {
107 * struct pt - per-cpu pt context 108 * struct pt - per-cpu pt context
108 * @handle: perf output handle 109 * @handle: perf output handle
109 * @handle_nmi: do handle PT PMI on this cpu, there's an active event 110 * @handle_nmi: do handle PT PMI on this cpu, there's an active event
111 * @vmx_on: 1 if VMX is ON on this cpu
110 */ 112 */
111struct pt { 113struct pt {
112 struct perf_output_handle handle; 114 struct perf_output_handle handle;
113 int handle_nmi; 115 int handle_nmi;
116 int vmx_on;
114}; 117};
115 118
116#endif /* __INTEL_PT_H__ */ 119#endif /* __INTEL_PT_H__ */
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 70c93f9b03ac..1705c9d75e44 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -718,6 +718,7 @@ static int __init rapl_pmu_init(void)
718 break; 718 break;
719 case 60: /* Haswell */ 719 case 60: /* Haswell */
720 case 69: /* Haswell-Celeron */ 720 case 69: /* Haswell-Celeron */
721 case 70: /* Haswell GT3e */
721 case 61: /* Broadwell */ 722 case 61: /* Broadwell */
722 case 71: /* Broadwell-H */ 723 case 71: /* Broadwell-H */
723 rapl_cntr_mask = RAPL_IDX_HSW; 724 rapl_cntr_mask = RAPL_IDX_HSW;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 5a2ed3ed2f26..f353061bba1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -285,6 +285,10 @@ static inline void perf_events_lapic_init(void) { }
285static inline void perf_check_microcode(void) { } 285static inline void perf_check_microcode(void) { }
286#endif 286#endif
287 287
288#ifdef CONFIG_CPU_SUP_INTEL
289 extern void intel_pt_handle_vmx(int on);
290#endif
291
288#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) 292#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
289 extern void amd_pmu_enable_virt(void); 293 extern void amd_pmu_enable_virt(void);
290 extern void amd_pmu_disable_virt(void); 294 extern void amd_pmu_disable_virt(void);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d5908bde9342..cb47fe3da292 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3103,6 +3103,8 @@ static __init int vmx_disabled_by_bios(void)
3103 3103
3104static void kvm_cpu_vmxon(u64 addr) 3104static void kvm_cpu_vmxon(u64 addr)
3105{ 3105{
3106 intel_pt_handle_vmx(1);
3107
3106 asm volatile (ASM_VMX_VMXON_RAX 3108 asm volatile (ASM_VMX_VMXON_RAX
3107 : : "a"(&addr), "m"(addr) 3109 : : "a"(&addr), "m"(addr)
3108 : "memory", "cc"); 3110 : "memory", "cc");
@@ -3172,6 +3174,8 @@ static void vmclear_local_loaded_vmcss(void)
3172static void kvm_cpu_vmxoff(void) 3174static void kvm_cpu_vmxoff(void)
3173{ 3175{
3174 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); 3176 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
3177
3178 intel_pt_handle_vmx(0);
3175} 3179}
3176 3180
3177static void hardware_disable(void) 3181static void hardware_disable(void)
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 1cea67d43e1d..ead8dc0d084e 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
387 req_ctx->child_req.src = req->src; 387 req_ctx->child_req.src = req->src;
388 req_ctx->child_req.src_len = req->src_len; 388 req_ctx->child_req.src_len = req->src_len;
389 req_ctx->child_req.dst = req_ctx->out_sg; 389 req_ctx->child_req.dst = req_ctx->out_sg;
390 req_ctx->child_req.dst_len = ctx->key_size - 1; 390 req_ctx->child_req.dst_len = ctx->key_size ;
391 391
392 req_ctx->out_buf = kmalloc(ctx->key_size - 1, 392 req_ctx->out_buf = kmalloc(ctx->key_size,
393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 393 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
394 GFP_KERNEL : GFP_ATOMIC); 394 GFP_KERNEL : GFP_ATOMIC);
395 if (!req_ctx->out_buf) 395 if (!req_ctx->out_buf)
396 return -ENOMEM; 396 return -ENOMEM;
397 397
398 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 398 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
399 ctx->key_size - 1, NULL); 399 ctx->key_size, NULL);
400 400
401 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 401 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 402 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
595 req_ctx->child_req.src = req->src; 595 req_ctx->child_req.src = req->src;
596 req_ctx->child_req.src_len = req->src_len; 596 req_ctx->child_req.src_len = req->src_len;
597 req_ctx->child_req.dst = req_ctx->out_sg; 597 req_ctx->child_req.dst = req_ctx->out_sg;
598 req_ctx->child_req.dst_len = ctx->key_size - 1; 598 req_ctx->child_req.dst_len = ctx->key_size;
599 599
600 req_ctx->out_buf = kmalloc(ctx->key_size - 1, 600 req_ctx->out_buf = kmalloc(ctx->key_size,
601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 601 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
602 GFP_KERNEL : GFP_ATOMIC); 602 GFP_KERNEL : GFP_ATOMIC);
603 if (!req_ctx->out_buf) 603 if (!req_ctx->out_buf)
604 return -ENOMEM; 604 return -ENOMEM;
605 605
606 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf, 606 pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
607 ctx->key_size - 1, NULL); 607 ctx->key_size, NULL);
608 608
609 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child); 609 akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags, 610 akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 786be8fed39e..1f635471f318 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
136 return false; 136 return false;
137} 137}
138 138
139#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
140static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 139static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
141 struct bcma_device *core) 140 struct bcma_device *core)
142{ 141{
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
184 struct of_phandle_args out_irq; 183 struct of_phandle_args out_irq;
185 int ret; 184 int ret;
186 185
187 if (!parent || !parent->dev.of_node) 186 if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
188 return 0; 187 return 0;
189 188
190 ret = bcma_of_irq_parse(parent, core, &out_irq, num); 189 ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
202{ 201{
203 struct device_node *node; 202 struct device_node *node;
204 203
204 if (!IS_ENABLED(CONFIG_OF_IRQ))
205 return;
206
205 node = bcma_of_find_child_device(parent, core); 207 node = bcma_of_find_child_device(parent, core);
206 if (node) 208 if (node)
207 core->dev.of_node = node; 209 core->dev.of_node = node;
208 210
209 core->irq = bcma_of_get_irq(parent, core, 0); 211 core->irq = bcma_of_get_irq(parent, core, 0);
210} 212}
211#else
212static void bcma_of_fill_device(struct platform_device *parent,
213 struct bcma_device *core)
214{
215}
216static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
217 struct bcma_device *core, int num)
218{
219 return 0;
220}
221#endif /* CONFIG_OF */
222 213
223unsigned int bcma_core_irq(struct bcma_device *core, int num) 214unsigned int bcma_core_irq(struct bcma_device *core, int num)
224{ 215{
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 94a1843b0426..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
538 u8 *order, u64 *snap_size); 538 u8 *order, u64 *snap_size);
539static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, 539static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
540 u64 *snap_features); 540 u64 *snap_features);
541static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
542 541
543static int rbd_open(struct block_device *bdev, fmode_t mode) 542static int rbd_open(struct block_device *bdev, fmode_t mode)
544{ 543{
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
3127 struct rbd_device *rbd_dev = (struct rbd_device *)data; 3126 struct rbd_device *rbd_dev = (struct rbd_device *)data;
3128 int ret; 3127 int ret;
3129 3128
3130 if (!rbd_dev)
3131 return;
3132
3133 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 3129 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
3134 rbd_dev->header_name, (unsigned long long)notify_id, 3130 rbd_dev->header_name, (unsigned long long)notify_id,
3135 (unsigned int)opcode); 3131 (unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
3263 3259
3264 ceph_osdc_cancel_event(rbd_dev->watch_event); 3260 ceph_osdc_cancel_event(rbd_dev->watch_event);
3265 rbd_dev->watch_event = NULL; 3261 rbd_dev->watch_event = NULL;
3262
3263 dout("%s flushing notifies\n", __func__);
3264 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3266} 3265}
3267 3266
3268/* 3267/*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
3642static void rbd_dev_update_size(struct rbd_device *rbd_dev) 3641static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3643{ 3642{
3644 sector_t size; 3643 sector_t size;
3645 bool removing;
3646 3644
3647 /* 3645 /*
3648 * Don't hold the lock while doing disk operations, 3646 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3649 * or lock ordering will conflict with the bdev mutex via: 3647 * try to update its size. If REMOVING is set, updating size
3650 * rbd_add() -> blkdev_get() -> rbd_open() 3648 * is just useless work since the device can't be opened.
3651 */ 3649 */
3652 spin_lock_irq(&rbd_dev->lock); 3650 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3653 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); 3651 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3654 spin_unlock_irq(&rbd_dev->lock);
3655 /*
3656 * If the device is being removed, rbd_dev->disk has
3657 * been destroyed, so don't try to update its size
3658 */
3659 if (!removing) {
3660 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; 3652 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3661 dout("setting size to %llu sectors", (unsigned long long)size); 3653 dout("setting size to %llu sectors", (unsigned long long)size);
3662 set_capacity(rbd_dev->disk, size); 3654 set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4191 __le64 features; 4183 __le64 features;
4192 __le64 incompat; 4184 __le64 incompat;
4193 } __attribute__ ((packed)) features_buf = { 0 }; 4185 } __attribute__ ((packed)) features_buf = { 0 };
4194 u64 incompat; 4186 u64 unsup;
4195 int ret; 4187 int ret;
4196 4188
4197 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, 4189 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4204 if (ret < sizeof (features_buf)) 4196 if (ret < sizeof (features_buf))
4205 return -ERANGE; 4197 return -ERANGE;
4206 4198
4207 incompat = le64_to_cpu(features_buf.incompat); 4199 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4208 if (incompat & ~RBD_FEATURES_SUPPORTED) 4200 if (unsup) {
4201 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4202 unsup);
4209 return -ENXIO; 4203 return -ENXIO;
4204 }
4210 4205
4211 *snap_features = le64_to_cpu(features_buf.features); 4206 *snap_features = le64_to_cpu(features_buf.features);
4212 4207
@@ -5187,6 +5182,10 @@ out_err:
5187 return ret; 5182 return ret;
5188} 5183}
5189 5184
5185/*
5186 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5187 * upon return.
5188 */
5190static int rbd_dev_device_setup(struct rbd_device *rbd_dev) 5189static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5191{ 5190{
5192 int ret; 5191 int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5195 5194
5196 ret = rbd_dev_id_get(rbd_dev); 5195 ret = rbd_dev_id_get(rbd_dev);
5197 if (ret) 5196 if (ret)
5198 return ret; 5197 goto err_out_unlock;
5199 5198
5200 BUILD_BUG_ON(DEV_NAME_LEN 5199 BUILD_BUG_ON(DEV_NAME_LEN
5201 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); 5200 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5236 /* Everything's ready. Announce the disk to the world. */ 5235 /* Everything's ready. Announce the disk to the world. */
5237 5236
5238 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5237 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5239 add_disk(rbd_dev->disk); 5238 up_write(&rbd_dev->header_rwsem);
5240 5239
5240 add_disk(rbd_dev->disk);
5241 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, 5241 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
5242 (unsigned long long) rbd_dev->mapping.size); 5242 (unsigned long long) rbd_dev->mapping.size);
5243 5243
@@ -5252,6 +5252,8 @@ err_out_blkdev:
5252 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5252 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5253err_out_id: 5253err_out_id:
5254 rbd_dev_id_put(rbd_dev); 5254 rbd_dev_id_put(rbd_dev);
5255err_out_unlock:
5256 up_write(&rbd_dev->header_rwsem);
5255 return ret; 5257 return ret;
5256} 5258}
5257 5259
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
5442 spec = NULL; /* rbd_dev now owns this */ 5444 spec = NULL; /* rbd_dev now owns this */
5443 rbd_opts = NULL; /* rbd_dev now owns this */ 5445 rbd_opts = NULL; /* rbd_dev now owns this */
5444 5446
5447 down_write(&rbd_dev->header_rwsem);
5445 rc = rbd_dev_image_probe(rbd_dev, 0); 5448 rc = rbd_dev_image_probe(rbd_dev, 0);
5446 if (rc < 0) 5449 if (rc < 0)
5447 goto err_out_rbd_dev; 5450 goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
5471 return rc; 5474 return rc;
5472 5475
5473err_out_rbd_dev: 5476err_out_rbd_dev:
5477 up_write(&rbd_dev->header_rwsem);
5474 rbd_dev_destroy(rbd_dev); 5478 rbd_dev_destroy(rbd_dev);
5475err_out_client: 5479err_out_client:
5476 rbd_put_client(rbdc); 5480 rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
5577 return ret; 5581 return ret;
5578 5582
5579 rbd_dev_header_unwatch_sync(rbd_dev); 5583 rbd_dev_header_unwatch_sync(rbd_dev);
5580 /*
5581 * flush remaining watch callbacks - these must be complete
5582 * before the osd_client is shutdown
5583 */
5584 dout("%s: flushing notifies", __func__);
5585 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
5586 5584
5587 /* 5585 /*
5588 * Don't free anything from rbd_dev->disk until after all 5586 * Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
index 2bcecafdeaea..c407c47a3232 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/tango_xtal.c
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
42 42
43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350, 43 ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
44 32, clocksource_mmio_readl_up); 44 32, clocksource_mmio_readl_up);
45 if (!ret) { 45 if (ret) {
46 pr_err("%s: registration failed\n", np->full_name); 46 pr_err("%s: registration failed\n", np->full_name);
47 return; 47 return;
48 } 48 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b87596b591b3..e93405f0eac4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
1491{ 1491{
1492 unsigned int new_freq; 1492 unsigned int new_freq;
1493 1493
1494 if (cpufreq_suspended)
1495 return 0;
1496
1494 new_freq = cpufreq_driver->get(policy->cpu); 1497 new_freq = cpufreq_driver->get(policy->cpu);
1495 if (!new_freq) 1498 if (!new_freq)
1496 return 0; 1499 return 0;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfeae8c5..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; 193 wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
194 j_cdbs->prev_cpu_wall = cur_wall_time; 194 j_cdbs->prev_cpu_wall = cur_wall_time;
195 195
196 if (cur_idle_time <= j_cdbs->prev_cpu_idle) { 196 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
197 idle_time = 0; 197 j_cdbs->prev_cpu_idle = cur_idle_time;
198 } else {
199 idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
200 j_cdbs->prev_cpu_idle = cur_idle_time;
201 }
202 198
203 if (ignore_nice) { 199 if (ignore_nice) {
204 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; 200 u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8b5a415ee14a..f502d5b90c25 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -813,6 +813,11 @@ static int core_get_max_pstate(void)
813 if (err) 813 if (err)
814 goto skip_tar; 814 goto skip_tar;
815 815
816 /* For level 1 and 2, bits[23:16] contain the ratio */
817 if (tdp_ctrl)
818 tdp_ratio >>= 16;
819
820 tdp_ratio &= 0xff; /* ratios are only 8 bits long */
816 if (tdp_ratio - 1 == tar) { 821 if (tdp_ratio - 1 == tar) {
817 max_pstate = tar; 822 max_pstate = tar;
818 pr_debug("max_pstate=TAC %x\n", max_pstate); 823 pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -1130,6 +1135,10 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
1130 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), 1135 sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
1131 int_tofp(duration_ns)); 1136 int_tofp(duration_ns));
1132 core_busy = mul_fp(core_busy, sample_ratio); 1137 core_busy = mul_fp(core_busy, sample_ratio);
1138 } else {
1139 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
1140 if (sample_ratio < int_tofp(1))
1141 core_busy = 0;
1133 } 1142 }
1134 1143
1135 cpu->sample.busy_scaled = core_busy; 1144 cpu->sample.busy_scaled = core_busy;
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); 225 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
226 struct ccp_aes_cmac_exp_ctx state; 226 struct ccp_aes_cmac_exp_ctx state;
227 227
228 /* Don't let anything leak to 'out' */
229 memset(&state, 0, sizeof(state));
230
228 state.null_msg = rctx->null_msg; 231 state.null_msg = rctx->null_msg;
229 memcpy(state.iv, rctx->iv, sizeof(state.iv)); 232 memcpy(state.iv, rctx->iv, sizeof(state.iv));
230 state.buf_count = rctx->buf_count; 233 state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index b5ad72897dc2..8f36af62fe95 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); 212 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
213 struct ccp_sha_exp_ctx state; 213 struct ccp_sha_exp_ctx state;
214 214
215 /* Don't let anything leak to 'out' */
216 memset(&state, 0, sizeof(state));
217
215 state.type = rctx->type; 218 state.type = rctx->type;
216 state.msg_bits = rctx->msg_bits; 219 state.msg_bits = rctx->msg_bits;
217 state.first = rctx->first; 220 state.first = rctx->first;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
63 ptr->eptr = upper_32_bits(dma_addr); 63 ptr->eptr = upper_32_bits(dma_addr);
64} 64}
65 65
66static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68{
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72}
73
66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, 74static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
67 bool is_sec1) 75 bool is_sec1)
68{ 76{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1083 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, 1091 sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
1084 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL 1092 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1085 : DMA_TO_DEVICE); 1093 : DMA_TO_DEVICE);
1086
1087 /* hmac data */ 1094 /* hmac data */
1088 desc->ptr[1].len = cpu_to_be16(areq->assoclen); 1095 desc->ptr[1].len = cpu_to_be16(areq->assoclen);
1089 if (sg_count > 1 && 1096 if (sg_count > 1 &&
1090 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, 1097 (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
1091 areq->assoclen, 1098 areq->assoclen,
1092 &edesc->link_tbl[tbl_off])) > 1) { 1099 &edesc->link_tbl[tbl_off])) > 1) {
1093 tbl_off += ret;
1094
1095 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * 1100 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
1096 sizeof(struct talitos_ptr), 0); 1101 sizeof(struct talitos_ptr), 0);
1097 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; 1102 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
1098 1103
1099 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1104 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1100 edesc->dma_len, DMA_BIDIRECTIONAL); 1105 edesc->dma_len, DMA_BIDIRECTIONAL);
1106
1107 tbl_off += ret;
1101 } else { 1108 } else {
1102 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); 1109 to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
1103 desc->ptr[1].j_extent = 0; 1110 desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1126 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) 1133 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1127 sg_link_tbl_len += authsize; 1134 sg_link_tbl_len += authsize;
1128 1135
1129 if (sg_count > 1 && 1136 if (sg_count == 1) {
1130 (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen, 1137 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
1131 sg_link_tbl_len, 1138 areq->assoclen, 0);
1132 &edesc->link_tbl[tbl_off])) > 1) { 1139 } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
1133 tbl_off += ret; 1140 areq->assoclen, sg_link_tbl_len,
1141 &edesc->link_tbl[tbl_off])) >
1142 1) {
1134 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1143 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1135 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + 1144 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1136 tbl_off * 1145 tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1138 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1147 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1139 edesc->dma_len, 1148 edesc->dma_len,
1140 DMA_BIDIRECTIONAL); 1149 DMA_BIDIRECTIONAL);
1141 } else 1150 tbl_off += ret;
1142 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); 1151 } else {
1152 copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
1153 }
1143 1154
1144 /* cipher out */ 1155 /* cipher out */
1145 desc->ptr[5].len = cpu_to_be16(cryptlen); 1156 desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1151 1162
1152 edesc->icv_ool = false; 1163 edesc->icv_ool = false;
1153 1164
1154 if (sg_count > 1 && 1165 if (sg_count == 1) {
1155 (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count, 1166 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
1167 areq->assoclen, 0);
1168 } else if ((sg_count =
1169 sg_to_link_tbl_offset(areq->dst, sg_count,
1156 areq->assoclen, cryptlen, 1170 areq->assoclen, cryptlen,
1157 &edesc->link_tbl[tbl_off])) > 1171 &edesc->link_tbl[tbl_off])) > 1) {
1158 1) {
1159 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1172 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1160 1173
1161 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + 1174 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1178 edesc->dma_len, DMA_BIDIRECTIONAL); 1191 edesc->dma_len, DMA_BIDIRECTIONAL);
1179 1192
1180 edesc->icv_ool = true; 1193 edesc->icv_ool = true;
1181 } else 1194 } else {
1182 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); 1195 copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
1196 }
1183 1197
1184 /* iv out */ 1198 /* iv out */
1185 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 1199 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
2629 struct talitos_alg_template algt; 2643 struct talitos_alg_template algt;
2630}; 2644};
2631 2645
2632static int talitos_cra_init(struct crypto_tfm *tfm) 2646static int talitos_init_common(struct talitos_ctx *ctx,
2647 struct talitos_crypto_alg *talitos_alg)
2633{ 2648{
2634 struct crypto_alg *alg = tfm->__crt_alg;
2635 struct talitos_crypto_alg *talitos_alg;
2636 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2637 struct talitos_private *priv; 2649 struct talitos_private *priv;
2638 2650
2639 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2640 talitos_alg = container_of(__crypto_ahash_alg(alg),
2641 struct talitos_crypto_alg,
2642 algt.alg.hash);
2643 else
2644 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2645 algt.alg.crypto);
2646
2647 /* update context with ptr to dev */ 2651 /* update context with ptr to dev */
2648 ctx->dev = talitos_alg->dev; 2652 ctx->dev = talitos_alg->dev;
2649 2653
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
2661 return 0; 2665 return 0;
2662} 2666}
2663 2667
2668static int talitos_cra_init(struct crypto_tfm *tfm)
2669{
2670 struct crypto_alg *alg = tfm->__crt_alg;
2671 struct talitos_crypto_alg *talitos_alg;
2672 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2673
2674 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2675 talitos_alg = container_of(__crypto_ahash_alg(alg),
2676 struct talitos_crypto_alg,
2677 algt.alg.hash);
2678 else
2679 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2680 algt.alg.crypto);
2681
2682 return talitos_init_common(ctx, talitos_alg);
2683}
2684
2664static int talitos_cra_init_aead(struct crypto_aead *tfm) 2685static int talitos_cra_init_aead(struct crypto_aead *tfm)
2665{ 2686{
2666 talitos_cra_init(crypto_aead_tfm(tfm)); 2687 struct aead_alg *alg = crypto_aead_alg(tfm);
2667 return 0; 2688 struct talitos_crypto_alg *talitos_alg;
2689 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2690
2691 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2692 algt.alg.aead);
2693
2694 return talitos_init_common(ctx, talitos_alg);
2668} 2695}
2669 2696
2670static int talitos_cra_init_ahash(struct crypto_tfm *tfm) 2697static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1866 1866
1867 i7_dev = get_i7core_dev(mce->socketid); 1867 i7_dev = get_i7core_dev(mce->socketid);
1868 if (!i7_dev) 1868 if (!i7_dev)
1869 return NOTIFY_BAD; 1869 return NOTIFY_DONE;
1870 1870
1871 mci = i7_dev->mci; 1871 mci = i7_dev->mci;
1872 pvt = mci->pvt_info; 1872 pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 468447aff8eb..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3168,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3168 3168
3169 mci = get_mci_for_node_id(mce->socketid); 3169 mci = get_mci_for_node_id(mce->socketid);
3170 if (!mci) 3170 if (!mci)
3171 return NOTIFY_BAD; 3171 return NOTIFY_DONE;
3172 pvt = mci->pvt_info; 3172 pvt = mci->pvt_info;
3173 3173
3174 /* 3174 /*
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c0a234..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
202 { NULL_GUID, "", NULL }, 202 { NULL_GUID, "", NULL },
203}; 203};
204 204
205/*
206 * Check if @var_name matches the pattern given in @match_name.
207 *
208 * @var_name: an array of @len non-NUL characters.
209 * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
210 * final "*" character matches any trailing characters @var_name,
211 * including the case when there are none left in @var_name.
212 * @match: on output, the number of non-wildcard characters in @match_name
213 * that @var_name matches, regardless of the return value.
214 * @return: whether @var_name fully matches @match_name.
215 */
205static bool 216static bool
206variable_matches(const char *var_name, size_t len, const char *match_name, 217variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match) 218 int *match)
208{ 219{
209 for (*match = 0; ; (*match)++) { 220 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match]; 221 char c = match_name[*match];
211 char u = var_name[*match];
212 222
213 /* Wildcard in the matching name means we've matched */ 223 switch (c) {
214 if (c == '*') 224 case '*':
225 /* Wildcard in @match_name means we've matched. */
215 return true; 226 return true;
216 227
217 /* Case sensitive match */ 228 case '\0':
218 if (!c && *match == len) 229 /* @match_name has ended. Has @var_name too? */
219 return true; 230 return (*match == len);
220 231
221 if (c != u) 232 default:
233 /*
234 * We've reached a non-wildcard char in @match_name.
235 * Continue only if there's an identical character in
236 * @var_name.
237 */
238 if (*match < len && c == var_name[*match])
239 continue;
222 return false; 240 return false;
223 241 }
224 if (!c)
225 return true;
226 } 242 }
227 return true;
228} 243}
229 244
230bool 245bool
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 11bfee8b79a9..b5d05807e6ec 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
360 .init = psci_dt_cpu_init_idle, 360 .init = psci_dt_cpu_init_idle,
361}; 361};
362 362
363CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); 363CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
364#endif 364#endif
365#endif 365#endif
366 366
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b77489dec6e8..1bcbade479dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
1591 struct amdgpu_bo *vcpu_bo; 1591 struct amdgpu_bo *vcpu_bo;
1592 void *cpu_addr; 1592 void *cpu_addr;
1593 uint64_t gpu_addr; 1593 uint64_t gpu_addr;
1594 unsigned fw_version;
1594 void *saved_bo; 1595 void *saved_bo;
1595 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 1596 atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1596 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 1597 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index d6b0bff510aa..b7b583c42ea8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
425 struct acp_pm_domain *apd; 425 struct acp_pm_domain *apd;
426 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 426 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
427 427
428 /* return early if no ACP */
429 if (!adev->acp.acp_genpd)
430 return 0;
431
428 /* SMU block will power on ACP irrespective of ACP runtime status. 432 /* SMU block will power on ACP irrespective of ACP runtime status.
429 * Power off explicitly based on genpd ACP runtime status so that ACP 433 * Power off explicitly based on genpd ACP runtime status so that ACP
430 * hw and ACP-genpd status are in sync. 434 * hw and ACP-genpd status are in sync.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0ea43ff..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
63 return amdgpu_atpx_priv.atpx_detected; 63 return amdgpu_atpx_priv.atpx_detected;
64} 64}
65 65
66bool amdgpu_has_atpx_dgpu_power_cntl(void) {
67 return amdgpu_atpx_priv.atpx.functions.power_cntl;
68}
69
70/** 66/**
71 * amdgpu_atpx_call - call an ATPX method 67 * amdgpu_atpx_call - call an ATPX method
72 * 68 *
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
146 */ 142 */
147static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) 143static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
148{ 144{
145 /* make sure required functions are enabled */
146 /* dGPU power control is required */
147 if (atpx->functions.power_cntl == false) {
148 printk("ATPX dGPU power cntl not present, forcing\n");
149 atpx->functions.power_cntl = true;
150 }
151
149 if (atpx->functions.px_params) { 152 if (atpx->functions.px_params) {
150 union acpi_object *info; 153 union acpi_object *info;
151 struct atpx_px_params output; 154 struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 612117478b57..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
62 "LAST", 62 "LAST",
63}; 63};
64 64
65#if defined(CONFIG_VGA_SWITCHEROO)
66bool amdgpu_has_atpx_dgpu_power_cntl(void);
67#else
68static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
69#endif
70
71bool amdgpu_device_is_px(struct drm_device *dev) 65bool amdgpu_device_is_px(struct drm_device *dev)
72{ 66{
73 struct amdgpu_device *adev = dev->dev_private; 67 struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1485 1479
1486 if (amdgpu_runtime_pm == 1) 1480 if (amdgpu_runtime_pm == 1)
1487 runtime = true; 1481 runtime = true;
1488 if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl()) 1482 if (amdgpu_device_is_px(ddev))
1489 runtime = true; 1483 runtime = true;
1490 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); 1484 vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1491 if (runtime) 1485 if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index aef70db16832..b04337de65d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
303 fw_info.feature = adev->vce.fb_version; 303 fw_info.feature = adev->vce.fb_version;
304 break; 304 break;
305 case AMDGPU_INFO_FW_UVD: 305 case AMDGPU_INFO_FW_UVD:
306 fw_info.ver = 0; 306 fw_info.ver = adev->uvd.fw_version;
307 fw_info.feature = 0; 307 fw_info.feature = 0;
308 break; 308 break;
309 case AMDGPU_INFO_FW_GMC: 309 case AMDGPU_INFO_FW_GMC:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8d432e6901af..81bd964d3dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
53 53
54#define AMDGPU_MAX_HPD_PINS 6 54#define AMDGPU_MAX_HPD_PINS 6
55#define AMDGPU_MAX_CRTCS 6 55#define AMDGPU_MAX_CRTCS 6
56#define AMDGPU_MAX_AFMT_BLOCKS 7 56#define AMDGPU_MAX_AFMT_BLOCKS 9
57 57
58enum amdgpu_rmx_type { 58enum amdgpu_rmx_type {
59 RMX_OFF, 59 RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
309 struct atom_context *atom_context; 309 struct atom_context *atom_context;
310 struct card_info *atom_card_info; 310 struct card_info *atom_card_info;
311 bool mode_config_initialized; 311 bool mode_config_initialized;
312 struct amdgpu_crtc *crtcs[6]; 312 struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
313 struct amdgpu_afmt *afmt[7]; 313 struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
314 /* DVI-I properties */ 314 /* DVI-I properties */
315 struct drm_property *coherent_mode_property; 315 struct drm_property *coherent_mode_property;
316 /* DAC enable load detect */ 316 /* DAC enable load detect */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6f3369de232f..11af4492b4be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
223{ 223{
224 struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); 224 struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
225 225
226 if (amdgpu_ttm_tt_get_usermm(bo->ttm))
227 return -EPERM;
226 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 228 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
227} 229}
228 230
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 338da80006b6..871018c634e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
158 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", 158 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
159 version_major, version_minor, family_id); 159 version_major, version_minor, family_id);
160 160
161 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
162 (family_id << 8));
163
161 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) 164 bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
162 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE; 165 + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
163 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, 166 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
255 if (i == AMDGPU_MAX_UVD_HANDLES) 258 if (i == AMDGPU_MAX_UVD_HANDLES)
256 return 0; 259 return 0;
257 260
261 cancel_delayed_work_sync(&adev->uvd.idle_work);
262
258 size = amdgpu_bo_size(adev->uvd.vcpu_bo); 263 size = amdgpu_bo_size(adev->uvd.vcpu_bo);
259 ptr = adev->uvd.cpu_addr; 264 ptr = adev->uvd.cpu_addr;
260 265
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 4bec0c108cea..481a64fa9b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
234 if (i == AMDGPU_MAX_VCE_HANDLES) 234 if (i == AMDGPU_MAX_VCE_HANDLES)
235 return 0; 235 return 0;
236 236
237 cancel_delayed_work_sync(&adev->vce.idle_work);
237 /* TODO: suspending running encoding sessions isn't supported */ 238 /* TODO: suspending running encoding sessions isn't supported */
238 return -EINVAL; 239 return -EINVAL;
239} 240}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 05b0353d3880..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -910,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
910{ 910{
911 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 911 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
912 912
913 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 913 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
914 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
915 else
916 return 0;
914} 917}
915 918
916static int gmc_v7_0_sw_init(void *handle) 919static int gmc_v7_0_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 02deb3229405..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -870,7 +870,10 @@ static int gmc_v8_0_late_init(void *handle)
870{ 870{
871 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 871 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
872 872
873 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); 873 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
874 return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
875 else
876 return 0;
874} 877}
875 878
876#define mmMC_SEQ_MISC0_FIJI 0xA71 879#define mmMC_SEQ_MISC0_FIJI 0xA71
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 27fbd79d0daf..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
1673 int i; 1673 int i;
1674 1674
1675 port = drm_dp_get_validated_port_ref(mgr, port);
1676 if (!port)
1677 return -EINVAL;
1678
1675 port_num = port->port_num; 1679 port_num = port->port_num;
1676 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1680 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1677 if (!mstb) { 1681 if (!mstb) {
1678 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); 1682 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1679 1683
1680 if (!mstb) 1684 if (!mstb) {
1685 drm_dp_put_port(port);
1681 return -EINVAL; 1686 return -EINVAL;
1687 }
1682 } 1688 }
1683 1689
1684 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1690 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1707 kfree(txmsg); 1713 kfree(txmsg);
1708fail_put: 1714fail_put:
1709 drm_dp_put_mst_branch_device(mstb); 1715 drm_dp_put_mst_branch_device(mstb);
1716 drm_dp_put_port(port);
1710 return ret; 1717 return ret;
1711} 1718}
1712 1719
@@ -1789,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1789 req_payload.start_slot = cur_slots; 1796 req_payload.start_slot = cur_slots;
1790 if (mgr->proposed_vcpis[i]) { 1797 if (mgr->proposed_vcpis[i]) {
1791 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1798 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1799 port = drm_dp_get_validated_port_ref(mgr, port);
1800 if (!port) {
1801 mutex_unlock(&mgr->payload_lock);
1802 return -EINVAL;
1803 }
1792 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1804 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1793 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; 1805 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
1794 } else { 1806 } else {
@@ -1816,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1816 mgr->payloads[i].payload_state = req_payload.payload_state; 1828 mgr->payloads[i].payload_state = req_payload.payload_state;
1817 } 1829 }
1818 cur_slots += req_payload.num_slots; 1830 cur_slots += req_payload.num_slots;
1831
1832 if (port)
1833 drm_dp_put_port(port);
1819 } 1834 }
1820 1835
1821 for (i = 0; i < mgr->max_payloads; i++) { 1836 for (i = 0; i < mgr->max_payloads; i++) {
@@ -2121,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2121 2136
2122 if (mgr->mst_primary) { 2137 if (mgr->mst_primary) {
2123 int sret; 2138 int sret;
2139 u8 guid[16];
2140
2124 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 2141 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2125 if (sret != DP_RECEIVER_CAP_SIZE) { 2142 if (sret != DP_RECEIVER_CAP_SIZE) {
2126 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 2143 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2135,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2135 ret = -1; 2152 ret = -1;
2136 goto out_unlock; 2153 goto out_unlock;
2137 } 2154 }
2155
2156 /* Some hubs forget their guids after they resume */
2157 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2158 if (sret != 16) {
2159 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2160 ret = -1;
2161 goto out_unlock;
2162 }
2163 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2164
2138 ret = 0; 2165 ret = 0;
2139 } else 2166 } else
2140 ret = -1; 2167 ret = -1;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0b5814..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
572 goto fail; 572 goto fail;
573 } 573 }
574 574
575 /*
576 * Set the GPU linear window to be at the end of the DMA window, where
577 * the CMA area is likely to reside. This ensures that we are able to
578 * map the command buffers while having the linear window overlap as
579 * much RAM as possible, so we can optimize mappings for other buffers.
580 *
581 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
582 * to different views of the memory on the individual engines.
583 */
584 if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
585 (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
586 u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
587 if (dma_mask < PHYS_OFFSET + SZ_2G)
588 gpu->memory_base = PHYS_OFFSET;
589 else
590 gpu->memory_base = dma_mask - SZ_2G + 1;
591 }
592
575 ret = etnaviv_hw_reset(gpu); 593 ret = etnaviv_hw_reset(gpu);
576 if (ret) 594 if (ret)
577 goto fail; 595 goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1566{ 1584{
1567 struct device *dev = &pdev->dev; 1585 struct device *dev = &pdev->dev;
1568 struct etnaviv_gpu *gpu; 1586 struct etnaviv_gpu *gpu;
1569 u32 dma_mask;
1570 int err = 0; 1587 int err = 0;
1571 1588
1572 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); 1589 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
1576 gpu->dev = &pdev->dev; 1593 gpu->dev = &pdev->dev;
1577 mutex_init(&gpu->lock); 1594 mutex_init(&gpu->lock);
1578 1595
1579 /*
1580 * Set the GPU linear window to be at the end of the DMA window, where
1581 * the CMA area is likely to reside. This ensures that we are able to
1582 * map the command buffers while having the linear window overlap as
1583 * much RAM as possible, so we can optimize mappings for other buffers.
1584 */
1585 dma_mask = (u32)dma_get_required_mask(dev);
1586 if (dma_mask < PHYS_OFFSET + SZ_2G)
1587 gpu->memory_base = PHYS_OFFSET;
1588 else
1589 gpu->memory_base = dma_mask - SZ_2G + 1;
1590
1591 /* Map registers: */ 1596 /* Map registers: */
1592 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); 1597 gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
1593 if (IS_ERR(gpu->mmio)) 1598 if (IS_ERR(gpu->mmio))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 10480939159c..daba7ebb9699 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
2634 2634
2635/* WaRsDisableCoarsePowerGating:skl,bxt */ 2635/* WaRsDisableCoarsePowerGating:skl,bxt */
2636#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2636#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
2637 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ 2637 IS_SKL_GT3(dev) || \
2638 IS_SKL_REVID(dev, 0, SKL_REVID_F0))) 2638 IS_SKL_GT4(dev))
2639
2639/* 2640/*
2640 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2641 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
2641 * even when in MSI mode. This results in spurious interrupt warnings if the 2642 * even when in MSI mode. This results in spurious interrupt warnings if the
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 18ba8139e922..4d30b60defda 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
501 if (pvec != NULL) { 501 if (pvec != NULL) {
502 struct mm_struct *mm = obj->userptr.mm->mm; 502 struct mm_struct *mm = obj->userptr.mm->mm;
503 503
504 down_read(&mm->mmap_sem); 504 ret = -EFAULT;
505 while (pinned < npages) { 505 if (atomic_inc_not_zero(&mm->mm_users)) {
506 ret = get_user_pages_remote(work->task, mm, 506 down_read(&mm->mmap_sem);
507 obj->userptr.ptr + pinned * PAGE_SIZE, 507 while (pinned < npages) {
508 npages - pinned, 508 ret = get_user_pages_remote
509 !obj->userptr.read_only, 0, 509 (work->task, mm,
510 pvec + pinned, NULL); 510 obj->userptr.ptr + pinned * PAGE_SIZE,
511 if (ret < 0) 511 npages - pinned,
512 break; 512 !obj->userptr.read_only, 0,
513 513 pvec + pinned, NULL);
514 pinned += ret; 514 if (ret < 0)
515 break;
516
517 pinned += ret;
518 }
519 up_read(&mm->mmap_sem);
520 mmput(mm);
515 } 521 }
516 up_read(&mm->mmap_sem);
517 } 522 }
518 523
519 mutex_lock(&dev->struct_mutex); 524 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6a978ce80244..5c6080fd0968 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
841 if (unlikely(total_bytes > remain_usable)) { 841 if (unlikely(total_bytes > remain_usable)) {
842 /* 842 /*
843 * The base request will fit but the reserved space 843 * The base request will fit but the reserved space
844 * falls off the end. So only need to to wait for the 844 * falls off the end. So don't need an immediate wrap
845 * reserved size after flushing out the remainder. 845 * and only need to effectively wait for the reserved
846 * size space from the start of ringbuffer.
846 */ 847 */
847 wait_bytes = remain_actual + ringbuf->reserved_size; 848 wait_bytes = remain_actual + ringbuf->reserved_size;
848 need_wrap = true;
849 } else if (total_bytes > ringbuf->space) { 849 } else if (total_bytes > ringbuf->space) {
850 /* No wrapping required, just waiting. */ 850 /* No wrapping required, just waiting. */
851 wait_bytes = total_bytes; 851 wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1913 struct intel_ringbuffer *ringbuf = request->ringbuf; 1913 struct intel_ringbuffer *ringbuf = request->ringbuf;
1914 int ret; 1914 int ret;
1915 1915
1916 ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); 1916 ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
1917 if (ret) 1917 if (ret)
1918 return ret; 1918 return ret;
1919 1919
1920 /* We're using qword write, seqno should be aligned to 8 bytes. */
1921 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1922
1920 /* w/a for post sync ops following a GPGPU operation we 1923 /* w/a for post sync ops following a GPGPU operation we
1921 * need a prior CS_STALL, which is emitted by the flush 1924 * need a prior CS_STALL, which is emitted by the flush
1922 * following the batch. 1925 * following the batch.
1923 */ 1926 */
1924 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); 1927 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1925 intel_logical_ring_emit(ringbuf, 1928 intel_logical_ring_emit(ringbuf,
1926 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1929 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1927 PIPE_CONTROL_CS_STALL | 1930 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1929 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); 1932 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
1930 intel_logical_ring_emit(ringbuf, 0); 1933 intel_logical_ring_emit(ringbuf, 0);
1931 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1934 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1935 /* We're thrashing one dword of HWS. */
1936 intel_logical_ring_emit(ringbuf, 0);
1932 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1937 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1938 intel_logical_ring_emit(ringbuf, MI_NOOP);
1933 return intel_logical_ring_advance_and_submit(request); 1939 return intel_logical_ring_advance_and_submit(request);
1934} 1940}
1935 1941
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 347d4df49a9b..8ed3cf34f82d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2876 const struct drm_plane_state *pstate, 2876 const struct drm_plane_state *pstate,
2877 int y) 2877 int y)
2878{ 2878{
2879 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2879 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2880 struct drm_framebuffer *fb = pstate->fb; 2880 struct drm_framebuffer *fb = pstate->fb;
2881 uint32_t width = 0, height = 0;
2882
2883 width = drm_rect_width(&intel_pstate->src) >> 16;
2884 height = drm_rect_height(&intel_pstate->src) >> 16;
2885
2886 if (intel_rotation_90_or_270(pstate->rotation))
2887 swap(width, height);
2881 2888
2882 /* for planar format */ 2889 /* for planar format */
2883 if (fb->pixel_format == DRM_FORMAT_NV12) { 2890 if (fb->pixel_format == DRM_FORMAT_NV12) {
2884 if (y) /* y-plane data rate */ 2891 if (y) /* y-plane data rate */
2885 return intel_crtc->config->pipe_src_w * 2892 return width * height *
2886 intel_crtc->config->pipe_src_h *
2887 drm_format_plane_cpp(fb->pixel_format, 0); 2893 drm_format_plane_cpp(fb->pixel_format, 0);
2888 else /* uv-plane data rate */ 2894 else /* uv-plane data rate */
2889 return (intel_crtc->config->pipe_src_w/2) * 2895 return (width / 2) * (height / 2) *
2890 (intel_crtc->config->pipe_src_h/2) *
2891 drm_format_plane_cpp(fb->pixel_format, 1); 2896 drm_format_plane_cpp(fb->pixel_format, 1);
2892 } 2897 }
2893 2898
2894 /* for packed formats */ 2899 /* for packed formats */
2895 return intel_crtc->config->pipe_src_w * 2900 return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
2896 intel_crtc->config->pipe_src_h *
2897 drm_format_plane_cpp(fb->pixel_format, 0);
2898} 2901}
2899 2902
2900/* 2903/*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
2973 struct drm_framebuffer *fb = plane->state->fb; 2976 struct drm_framebuffer *fb = plane->state->fb;
2974 int id = skl_wm_plane_id(intel_plane); 2977 int id = skl_wm_plane_id(intel_plane);
2975 2978
2976 if (fb == NULL) 2979 if (!to_intel_plane_state(plane->state)->visible)
2977 continue; 2980 continue;
2981
2978 if (plane->type == DRM_PLANE_TYPE_CURSOR) 2982 if (plane->type == DRM_PLANE_TYPE_CURSOR)
2979 continue; 2983 continue;
2980 2984
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3000 uint16_t plane_blocks, y_plane_blocks = 0; 3004 uint16_t plane_blocks, y_plane_blocks = 0;
3001 int id = skl_wm_plane_id(intel_plane); 3005 int id = skl_wm_plane_id(intel_plane);
3002 3006
3003 if (pstate->fb == NULL) 3007 if (!to_intel_plane_state(pstate)->visible)
3004 continue; 3008 continue;
3005 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3009 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3006 continue; 3010 continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3123{ 3127{
3124 struct drm_plane *plane = &intel_plane->base; 3128 struct drm_plane *plane = &intel_plane->base;
3125 struct drm_framebuffer *fb = plane->state->fb; 3129 struct drm_framebuffer *fb = plane->state->fb;
3130 struct intel_plane_state *intel_pstate =
3131 to_intel_plane_state(plane->state);
3126 uint32_t latency = dev_priv->wm.skl_latency[level]; 3132 uint32_t latency = dev_priv->wm.skl_latency[level];
3127 uint32_t method1, method2; 3133 uint32_t method1, method2;
3128 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3134 uint32_t plane_bytes_per_line, plane_blocks_per_line;
3129 uint32_t res_blocks, res_lines; 3135 uint32_t res_blocks, res_lines;
3130 uint32_t selected_result; 3136 uint32_t selected_result;
3131 uint8_t cpp; 3137 uint8_t cpp;
3138 uint32_t width = 0, height = 0;
3132 3139
3133 if (latency == 0 || !cstate->base.active || !fb) 3140 if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
3134 return false; 3141 return false;
3135 3142
3143 width = drm_rect_width(&intel_pstate->src) >> 16;
3144 height = drm_rect_height(&intel_pstate->src) >> 16;
3145
3146 if (intel_rotation_90_or_270(plane->state->rotation))
3147 swap(width, height);
3148
3136 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3149 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3137 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3150 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
3138 cpp, latency); 3151 cpp, latency);
3139 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3152 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
3140 cstate->base.adjusted_mode.crtc_htotal, 3153 cstate->base.adjusted_mode.crtc_htotal,
3141 cstate->pipe_src_w, 3154 width,
3142 cpp, fb->modifier[0], 3155 cpp,
3156 fb->modifier[0],
3143 latency); 3157 latency);
3144 3158
3145 plane_bytes_per_line = cstate->pipe_src_w * cpp; 3159 plane_bytes_per_line = width * cpp;
3146 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3160 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
3147 3161
3148 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3162 if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a5e122..9121646d7c4d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
968 968
969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ 969 /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; 970 tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
971 if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) || 971 if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
972 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) 972 IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; 973 tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); 974 WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
1085 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1085 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1086 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1087 1087
1088 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { 1088 /* This is tied to WaForceContextSaveRestoreNonCoherent */
1089 if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
1089 /* 1090 /*
1090 *Use Force Non-Coherent whenever executing a 3D context. This 1091 *Use Force Non-Coherent whenever executing a 3D context. This
1091 * is a workaround for a possible hang in the unlikely event 1092 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2090{ 2091{
2091 struct drm_i915_private *dev_priv = to_i915(dev); 2092 struct drm_i915_private *dev_priv = to_i915(dev);
2092 struct drm_i915_gem_object *obj = ringbuf->obj; 2093 struct drm_i915_gem_object *obj = ringbuf->obj;
2094 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2095 unsigned flags = PIN_OFFSET_BIAS | 4096;
2093 int ret; 2096 int ret;
2094 2097
2095 if (HAS_LLC(dev_priv) && !obj->stolen) { 2098 if (HAS_LLC(dev_priv) && !obj->stolen) {
2096 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0); 2099 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
2097 if (ret) 2100 if (ret)
2098 return ret; 2101 return ret;
2099 2102
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2109 return -ENOMEM; 2112 return -ENOMEM;
2110 } 2113 }
2111 } else { 2114 } else {
2112 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 2115 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
2116 flags | PIN_MAPPABLE);
2113 if (ret) 2117 if (ret)
2114 return ret; 2118 return ret;
2115 2119
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
2454 if (unlikely(total_bytes > remain_usable)) { 2458 if (unlikely(total_bytes > remain_usable)) {
2455 /* 2459 /*
2456 * The base request will fit but the reserved space 2460 * The base request will fit but the reserved space
2457 * falls off the end. So only need to to wait for the 2461 * falls off the end. So don't need an immediate wrap
2458 * reserved size after flushing out the remainder. 2462 * and only need to effectively wait for the reserved
2463 * size space from the start of ringbuffer.
2459 */ 2464 */
2460 wait_bytes = remain_actual + ringbuf->reserved_size; 2465 wait_bytes = remain_actual + ringbuf->reserved_size;
2461 need_wrap = true;
2462 } else if (total_bytes > ringbuf->space) { 2466 } else if (total_bytes > ringbuf->space) {
2463 /* No wrapping required, just waiting. */ 2467 /* No wrapping required, just waiting. */
2464 wait_bytes = total_bytes; 2468 wait_bytes = total_bytes;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 436d8f2b8682..68b6f69aa682 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1189 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1189 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1190 dev_priv->uncore.funcs.force_wake_get = 1190 dev_priv->uncore.funcs.force_wake_get =
1191 fw_domains_get_with_thread_status; 1191 fw_domains_get_with_thread_status;
1192 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1192 if (IS_HASWELL(dev))
1193 dev_priv->uncore.funcs.force_wake_put =
1194 fw_domains_put_with_fifo;
1195 else
1196 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1193 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1197 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1194 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1198 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1195 } else if (IS_IVYBRIDGE(dev)) { 1199 } else if (IS_IVYBRIDGE(dev)) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index ae96ebc490fb..e81aefe5ffa7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
1276 break; 1276 break;
1277 default: 1277 default:
1278 if (disp->dithering_mode) { 1278 if (disp->dithering_mode) {
1279 nv_connector->dithering_mode = DITHERING_MODE_AUTO;
1279 drm_object_attach_property(&connector->base, 1280 drm_object_attach_property(&connector->base,
1280 disp->dithering_mode, 1281 disp->dithering_mode,
1281 nv_connector-> 1282 nv_connector->
1282 dithering_mode); 1283 dithering_mode);
1283 nv_connector->dithering_mode = DITHERING_MODE_AUTO;
1284 } 1284 }
1285 if (disp->dithering_depth) { 1285 if (disp->dithering_depth) {
1286 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
1286 drm_object_attach_property(&connector->base, 1287 drm_object_attach_property(&connector->base,
1287 disp->dithering_depth, 1288 disp->dithering_depth,
1288 nv_connector-> 1289 nv_connector->
1289 dithering_depth); 1290 dithering_depth);
1290 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
1291 } 1291 }
1292 break; 1292 break;
1293 } 1293 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index c56a886229f1..b2de290da16f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
1832 1832
1833 gf100_gr_mmio(gr, gr->func->mmio); 1833 gf100_gr_mmio(gr, gr->func->mmio);
1834 1834
1835 nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
1836
1835 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); 1837 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1836 for (i = 0, gpc = -1; i < gr->tpc_total; i++) { 1838 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
1837 do { 1839 do {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf21b20..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
2608 WREG32(VM_CONTEXT1_CNTL, 0); 2608 WREG32(VM_CONTEXT1_CNTL, 0);
2609} 2609}
2610 2610
2611static const unsigned ni_dig_offsets[] =
2612{
2613 NI_DIG0_REGISTER_OFFSET,
2614 NI_DIG1_REGISTER_OFFSET,
2615 NI_DIG2_REGISTER_OFFSET,
2616 NI_DIG3_REGISTER_OFFSET,
2617 NI_DIG4_REGISTER_OFFSET,
2618 NI_DIG5_REGISTER_OFFSET
2619};
2620
2621static const unsigned ni_tx_offsets[] =
2622{
2623 NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
2624 NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
2625 NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
2626 NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
2627 NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
2628 NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
2629};
2630
2631static const unsigned evergreen_dp_offsets[] =
2632{
2633 EVERGREEN_DP0_REGISTER_OFFSET,
2634 EVERGREEN_DP1_REGISTER_OFFSET,
2635 EVERGREEN_DP2_REGISTER_OFFSET,
2636 EVERGREEN_DP3_REGISTER_OFFSET,
2637 EVERGREEN_DP4_REGISTER_OFFSET,
2638 EVERGREEN_DP5_REGISTER_OFFSET
2639};
2640
2641
2642/*
2643 * Assumption is that EVERGREEN_CRTC_MASTER_EN enable for requested crtc
2644 * We go from crtc to connector and it is not relible since it
2645 * should be an opposite direction .If crtc is enable then
2646 * find the dig_fe which selects this crtc and insure that it enable.
2647 * if such dig_fe is found then find dig_be which selects found dig_be and
2648 * insure that it enable and in DP_SST mode.
2649 * if UNIPHY_PLL_CONTROL1.enable then we should disconnect timing
2650 * from dp symbols clocks .
2651 */
2652static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
2653 unsigned crtc_id, unsigned *ret_dig_fe)
2654{
2655 unsigned i;
2656 unsigned dig_fe;
2657 unsigned dig_be;
2658 unsigned dig_en_be;
2659 unsigned uniphy_pll;
2660 unsigned digs_fe_selected;
2661 unsigned dig_be_mode;
2662 unsigned dig_fe_mask;
2663 bool is_enabled = false;
2664 bool found_crtc = false;
2665
2666 /* loop through all running dig_fe to find selected crtc */
2667 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2668 dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
2669 if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2670 crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2671 /* found running pipe */
2672 found_crtc = true;
2673 dig_fe_mask = 1 << i;
2674 dig_fe = i;
2675 break;
2676 }
2677 }
2678
2679 if (found_crtc) {
2680 /* loop through all running dig_be to find selected dig_fe */
2681 for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2682 dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2683 /* if dig_fe_selected by dig_be? */
2684 digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2685 dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2686 if (dig_fe_mask & digs_fe_selected &&
2687 /* if dig_be in sst mode? */
2688 dig_be_mode == NI_DIG_BE_DPSST) {
2689 dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2690 ni_dig_offsets[i]);
2691 uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2692 ni_tx_offsets[i]);
2693 /* dig_be enable and tx is running */
2694 if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2695 dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2696 uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2697 is_enabled = true;
2698 *ret_dig_fe = dig_fe;
2699 break;
2700 }
2701 }
2702 }
2703 }
2704
2705 return is_enabled;
2706}
2707
2708/*
2709 * Blank dig when in dp sst mode
2710 * Dig ignores crtc timing
2711 */
2712static void evergreen_blank_dp_output(struct radeon_device *rdev,
2713 unsigned dig_fe)
2714{
2715 unsigned stream_ctrl;
2716 unsigned fifo_ctrl;
2717 unsigned counter = 0;
2718
2719 if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2720 DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2721 return;
2722 }
2723
2724 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2725 evergreen_dp_offsets[dig_fe]);
2726 if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2727 DRM_ERROR("dig %d , should be enable\n", dig_fe);
2728 return;
2729 }
2730
2731 stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2732 WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2733 evergreen_dp_offsets[dig_fe], stream_ctrl);
2734
2735 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2736 evergreen_dp_offsets[dig_fe]);
2737 while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2738 msleep(1);
2739 counter++;
2740 stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2741 evergreen_dp_offsets[dig_fe]);
2742 }
2743 if (counter >= 32 )
2744 DRM_ERROR("counter exceeds %d\n", counter);
2745
2746 fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2747 fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2748 WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2749
2750}
2751
2611void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 2752void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2612{ 2753{
2613 u32 crtc_enabled, tmp, frame_count, blackout; 2754 u32 crtc_enabled, tmp, frame_count, blackout;
2614 int i, j; 2755 int i, j;
2756 unsigned dig_fe;
2615 2757
2616 if (!ASIC_IS_NODCE(rdev)) { 2758 if (!ASIC_IS_NODCE(rdev)) {
2617 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 2759 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2651 break; 2793 break;
2652 udelay(1); 2794 udelay(1);
2653 } 2795 }
2654 2796 /*we should disable dig if it drives dp sst*/
2797 /*but we are in radeon_device_init and the topology is unknown*/
2798 /*and it is available after radeon_modeset_init*/
2799 /*the following method radeon_atom_encoder_dpms_dig*/
2800 /*does the job if we initialize it properly*/
2801 /*for now we do it this manually*/
2802 /**/
2803 if (ASIC_IS_DCE5(rdev) &&
2804 evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
2805 evergreen_blank_dp_output(rdev, dig_fe);
2806 /*we could remove 6 lines below*/
2655 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 2807 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2656 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2808 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2657 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 2809 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
250 250
251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ 251/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
252#define EVERGREEN_HDMI_BASE 0x7030 252#define EVERGREEN_HDMI_BASE 0x7030
253/*DIG block*/
254#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
255#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
256#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
257#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
258#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
259#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
260
261
262#define NI_DIG_FE_CNTL 0x7000
263# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
264# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
265
266
267#define NI_DIG_BE_CNTL 0x7140
268# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F)
269# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 )
270
271#define NI_DIG_BE_EN_CNTL 0x7144
272# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
273# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
274# define NI_DIG_BE_DPSST 0
253 275
254/* Display Port block */ 276/* Display Port block */
277#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
278#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
279#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
280#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
281#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
282#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
283
284
285#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
286# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
287# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16)
288#define EVERGREEN_DP_STEER_FIFO 0x7310
289# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
255#define EVERGREEN_DP_SEC_CNTL 0x7280 290#define EVERGREEN_DP_SEC_CNTL 0x7280
256# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) 291# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
257# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) 292# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
266# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) 301# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
267# define EVERGREEN_DP_SEC_SS_EN (1 << 28) 302# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
268 303
304/*DCIO_UNIPHY block*/
305#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600)
306#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600)
307#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
308#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
309#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
310#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
311
312#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
313# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
314
269#endif 315#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index fd8c4d317e60..95f4fea89302 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
62 return radeon_atpx_priv.atpx_detected; 62 return radeon_atpx_priv.atpx_detected;
63} 63}
64 64
65bool radeon_has_atpx_dgpu_power_cntl(void) {
66 return radeon_atpx_priv.atpx.functions.power_cntl;
67}
68
69/** 65/**
70 * radeon_atpx_call - call an ATPX method 66 * radeon_atpx_call - call an ATPX method
71 * 67 *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
145 */ 141 */
146static int radeon_atpx_validate(struct radeon_atpx *atpx) 142static int radeon_atpx_validate(struct radeon_atpx *atpx)
147{ 143{
144 /* make sure required functions are enabled */
145 /* dGPU power control is required */
146 if (atpx->functions.power_cntl == false) {
147 printk("ATPX dGPU power cntl not present, forcing\n");
148 atpx->functions.power_cntl = true;
149 }
150
148 if (atpx->functions.px_params) { 151 if (atpx->functions.px_params) {
149 union acpi_object *info; 152 union acpi_object *info;
150 struct atpx_px_params output; 153 struct atpx_px_params output;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cfcc099c537d..81a63d7f5cd9 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
2002 rdev->mode_info.dither_property, 2002 rdev->mode_info.dither_property,
2003 RADEON_FMT_DITHER_DISABLE); 2003 RADEON_FMT_DITHER_DISABLE);
2004 2004
2005 if (radeon_audio != 0) 2005 if (radeon_audio != 0) {
2006 drm_object_attach_property(&radeon_connector->base.base, 2006 drm_object_attach_property(&radeon_connector->base.base,
2007 rdev->mode_info.audio_property, 2007 rdev->mode_info.audio_property,
2008 RADEON_AUDIO_AUTO); 2008 RADEON_AUDIO_AUTO);
2009 radeon_connector->audio = RADEON_AUDIO_AUTO;
2010 }
2009 if (ASIC_IS_DCE5(rdev)) 2011 if (ASIC_IS_DCE5(rdev))
2010 drm_object_attach_property(&radeon_connector->base.base, 2012 drm_object_attach_property(&radeon_connector->base.base,
2011 rdev->mode_info.output_csc_property, 2013 rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2130 drm_object_attach_property(&radeon_connector->base.base, 2132 drm_object_attach_property(&radeon_connector->base.base,
2131 rdev->mode_info.audio_property, 2133 rdev->mode_info.audio_property,
2132 RADEON_AUDIO_AUTO); 2134 RADEON_AUDIO_AUTO);
2135 radeon_connector->audio = RADEON_AUDIO_AUTO;
2133 } 2136 }
2134 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 2137 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
2135 radeon_connector->dac_load_detect = true; 2138 radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2185 drm_object_attach_property(&radeon_connector->base.base, 2188 drm_object_attach_property(&radeon_connector->base.base,
2186 rdev->mode_info.audio_property, 2189 rdev->mode_info.audio_property,
2187 RADEON_AUDIO_AUTO); 2190 RADEON_AUDIO_AUTO);
2191 radeon_connector->audio = RADEON_AUDIO_AUTO;
2188 } 2192 }
2189 if (ASIC_IS_DCE5(rdev)) 2193 if (ASIC_IS_DCE5(rdev))
2190 drm_object_attach_property(&radeon_connector->base.base, 2194 drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2237 drm_object_attach_property(&radeon_connector->base.base, 2241 drm_object_attach_property(&radeon_connector->base.base,
2238 rdev->mode_info.audio_property, 2242 rdev->mode_info.audio_property,
2239 RADEON_AUDIO_AUTO); 2243 RADEON_AUDIO_AUTO);
2244 radeon_connector->audio = RADEON_AUDIO_AUTO;
2240 } 2245 }
2241 if (ASIC_IS_DCE5(rdev)) 2246 if (ASIC_IS_DCE5(rdev))
2242 drm_object_attach_property(&radeon_connector->base.base, 2247 drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4fd1a961012d..d0826fb0434c 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
103 "LAST", 103 "LAST",
104}; 104};
105 105
106#if defined(CONFIG_VGA_SWITCHEROO)
107bool radeon_has_atpx_dgpu_power_cntl(void);
108#else
109static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
110#endif
111
112#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) 106#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
113#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) 107#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
114 108
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
1305 } 1299 }
1306 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); 1300 rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
1307 1301
1308 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", 1302 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1309 radeon_family_name[rdev->family], pdev->vendor, pdev->device, 1303 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1310 pdev->subsystem_vendor, pdev->subsystem_device); 1304 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1311 1305
1312 /* mutex initialization are all done here so we 1306 /* mutex initialization are all done here so we
1313 * can recall function without having locking issues */ 1307 * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
1439 * ignore it */ 1433 * ignore it */
1440 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 1434 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
1441 1435
1442 if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl()) 1436 if (rdev->flags & RADEON_IS_PX)
1443 runtime = true; 1437 runtime = true;
1444 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime); 1438 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
1445 if (runtime) 1439 if (runtime)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 7dddfdce85e6..90f739478a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
235{ 235{
236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
237 237
238 if (radeon_ttm_tt_has_userptr(bo->ttm))
239 return -EPERM;
238 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp); 240 return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
239} 241}
240 242
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index af4df81c4e0c..e6abc09b67e3 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, 2931 { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, 2932 { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 }, 2933 { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2934 { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
2934 { 0, 0, 0, 0 }, 2935 { 0, 0, 0, 0 },
2935}; 2936};
2936 2937
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
230 230
231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) 231void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
232{ 232{
233 struct ttm_bo_device *bdev = bo->bdev; 233 int put_count = 0;
234 struct ttm_mem_type_manager *man;
235 234
236 lockdep_assert_held(&bo->resv->lock.base); 235 lockdep_assert_held(&bo->resv->lock.base);
237 236
238 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { 237 put_count = ttm_bo_del_from_lru(bo);
239 list_del_init(&bo->swap); 238 ttm_bo_list_ref_sub(bo, put_count, true);
240 list_del_init(&bo->lru); 239 ttm_bo_add_to_lru(bo);
241
242 } else {
243 if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
244 list_move_tail(&bo->swap, &bo->glob->swap_lru);
245
246 man = &bdev->man[bo->mem.mem_type];
247 list_move_tail(&bo->lru, &man->lru);
248 }
249} 240}
250EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); 241EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
251 242
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac87e24..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
267 return 0; 267 return 0;
268} 268}
269 269
270static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
271 struct drm_crtc_state *old_state)
272{
273 unsigned long flags;
274
275 spin_lock_irqsave(&crtc->dev->event_lock, flags);
276 if (crtc->state->event)
277 drm_crtc_send_vblank_event(crtc, crtc->state->event);
278 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
279}
280
270static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { 281static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
271 .enable = virtio_gpu_crtc_enable, 282 .enable = virtio_gpu_crtc_enable,
272 .disable = virtio_gpu_crtc_disable, 283 .disable = virtio_gpu_crtc_disable,
273 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, 284 .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
274 .atomic_check = virtio_gpu_crtc_atomic_check, 285 .atomic_check = virtio_gpu_crtc_atomic_check,
286 .atomic_flush = virtio_gpu_crtc_atomic_flush,
275}; 287};
276 288
277static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, 289static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16c6084..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3293 &vmw_cmd_dx_cid_check, true, false, true), 3293 &vmw_cmd_dx_cid_check, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, 3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3295 true, false, true), 3295 true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, 3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3297 true, false, true), 3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, 3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3299 true, false, true), 3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, 3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3301 &vmw_cmd_ok, true, false, true), 3301 &vmw_cmd_dx_cid_check, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, 3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3303 true, false, true), 3303 true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, 3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3305 true, false, true), 3305 true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3307 true, false, true), 3307 true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, 3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3309 true, false, true), 3309 true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3311 true, false, true), 3311 true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
573 mode = old_mode; 573 mode = old_mode;
574 old_mode = NULL; 574 old_mode = NULL;
575 } else if (!vmw_kms_validate_mode_vram(vmw_priv, 575 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
576 mode->hdisplay * 576 mode->hdisplay *
577 (var->bits_per_pixel + 7) / 8, 577 DIV_ROUND_UP(var->bits_per_pixel, 8),
578 mode->vdisplay)) { 578 mode->vdisplay)) {
579 drm_mode_destroy(vmw_priv->dev, mode); 579 drm_mode_destroy(vmw_priv->dev, mode);
580 return -EINVAL; 580 return -EINVAL;
581 } 581 }
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e6821fea..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -975,10 +975,10 @@ config I2C_XLR
975 975
976config I2C_XLP9XX 976config I2C_XLP9XX
977 tristate "XLP9XX I2C support" 977 tristate "XLP9XX I2C support"
978 depends on CPU_XLP || COMPILE_TEST 978 depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
979 help 979 help
980 This driver enables support for the on-chip I2C interface of 980 This driver enables support for the on-chip I2C interface of
981 the Broadcom XLP9xx/XLP5xx MIPS processors. 981 the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
982 982
983 This driver can also be built as a module. If so, the module will 983 This driver can also be built as a module. If so, the module will
984 be called i2c-xlp9xx. 984 be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
116 cbd_t __iomem *rbase; 116 cbd_t __iomem *rbase;
117 u_char *txbuf[CPM_MAXBD]; 117 u_char *txbuf[CPM_MAXBD];
118 u_char *rxbuf[CPM_MAXBD]; 118 u_char *rxbuf[CPM_MAXBD];
119 u32 txdma[CPM_MAXBD]; 119 dma_addr_t txdma[CPM_MAXBD];
120 u32 rxdma[CPM_MAXBD]; 120 dma_addr_t rxdma[CPM_MAXBD];
121}; 121};
122 122
123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id) 123static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
671 return -EIO; 671 return -EIO;
672 } 672 }
673 673
674 clk_prepare_enable(i2c->clk); 674 ret = clk_enable(i2c->clk);
675 if (ret)
676 return ret;
675 677
676 for (i = 0; i < num; i++, msgs++) { 678 for (i = 0; i < num; i++, msgs++) {
677 stop = (i == num - 1); 679 stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
695 } 697 }
696 698
697 out: 699 out:
698 clk_disable_unprepare(i2c->clk); 700 clk_disable(i2c->clk);
699 return ret; 701 return ret;
700} 702}
701 703
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
747 return -ENOENT; 749 return -ENOENT;
748 } 750 }
749 751
750 clk_prepare_enable(i2c->clk); 752 ret = clk_prepare_enable(i2c->clk);
753 if (ret)
754 return ret;
751 755
752 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 756 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
753 i2c->regs = devm_ioremap_resource(&pdev->dev, mem); 757 i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
799 803
800 platform_set_drvdata(pdev, i2c); 804 platform_set_drvdata(pdev, i2c);
801 805
806 clk_disable(i2c->clk);
807
808 return 0;
809
802 err_clk: 810 err_clk:
803 clk_disable_unprepare(i2c->clk); 811 clk_disable_unprepare(i2c->clk);
804 return ret; 812 return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
810 818
811 i2c_del_adapter(&i2c->adap); 819 i2c_del_adapter(&i2c->adap);
812 820
821 clk_unprepare(i2c->clk);
822
813 return 0; 823 return 0;
814} 824}
815 825
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
821 831
822 i2c->suspended = 1; 832 i2c->suspended = 1;
823 833
834 clk_unprepare(i2c->clk);
835
824 return 0; 836 return 0;
825} 837}
826 838
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
830 struct exynos5_i2c *i2c = platform_get_drvdata(pdev); 842 struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
831 int ret = 0; 843 int ret = 0;
832 844
833 clk_prepare_enable(i2c->clk); 845 ret = clk_prepare_enable(i2c->clk);
846 if (ret)
847 return ret;
834 848
835 ret = exynos5_hsi2c_clock_setup(i2c); 849 ret = exynos5_hsi2c_clock_setup(i2c);
836 if (ret) { 850 if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
839 } 853 }
840 854
841 exynos5_i2c_init(i2c); 855 exynos5_i2c_init(i2c);
842 clk_disable_unprepare(i2c->clk); 856 clk_disable(i2c->clk);
843 i2c->suspended = 0; 857 i2c->suspended = 0;
844 858
845 return 0; 859 return 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ 75/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 76#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a 77#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
78#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
78#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 79#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
79 80
80#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ 81#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
180static const struct pci_device_id ismt_ids[] = { 181static const struct pci_device_id ismt_ids[] = {
181 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, 182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
182 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, 183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
184 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
183 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, 185 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
184 { 0, } 186 { 0, }
185}; 187};
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
855static const struct of_device_id rk3x_i2c_match[] = { 855static const struct of_device_id rk3x_i2c_match[] = {
856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, 856 { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, 857 { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
858 { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
858 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, 859 { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
859 {}, 860 {},
860}; 861};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59da456..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
691 NULL); 691 NULL);
692 692
693 /* Coudn't find default GID location */ 693 /* Coudn't find default GID location */
694 WARN_ON(ix < 0); 694 if (WARN_ON(ix < 0))
695 goto release;
695 696
696 zattr_type.gid_type = gid_type; 697 zattr_type.gid_type = gid_type;
697 698
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa0433b07..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
51#include <rdma/ib_cm.h> 52#include <rdma/ib_cm.h>
52#include <rdma/ib_user_cm.h> 53#include <rdma/ib_user_cm.h>
53#include <rdma/ib_marshall.h> 54#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
1103 struct ib_ucm_cmd_hdr hdr; 1104 struct ib_ucm_cmd_hdr hdr;
1104 ssize_t result; 1105 ssize_t result;
1105 1106
1107 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1108 return -EACCES;
1109
1106 if (len < sizeof(hdr)) 1110 if (len < sizeof(hdr))
1107 return -EINVAL; 1111 return -EINVAL;
1108 1112
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcceadfde..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
1574 struct rdma_ucm_cmd_hdr hdr; 1574 struct rdma_ucm_cmd_hdr hdr;
1575 ssize_t ret; 1575 ssize_t ret;
1576 1576
1577 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
1578 return -EACCES;
1579
1577 if (len < sizeof(hdr)) 1580 if (len < sizeof(hdr))
1578 return -EINVAL; 1581 return -EINVAL;
1579 1582
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc81535..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
48 48
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#include <rdma/ib.h>
52
51#include "uverbs.h" 53#include "uverbs.h"
52 54
53MODULE_AUTHOR("Roland Dreier"); 55MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
709 int srcu_key; 711 int srcu_key;
710 ssize_t ret; 712 ssize_t ret;
711 713
714 if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
715 return -EACCES;
716
712 if (count < sizeof hdr) 717 if (count < sizeof hdr)
713 return -EINVAL; 718 return -EINVAL;
714 719
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adbf39c0..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
1860void ib_drain_qp(struct ib_qp *qp) 1860void ib_drain_qp(struct ib_qp *qp)
1861{ 1861{
1862 ib_drain_sq(qp); 1862 ib_drain_sq(qp);
1863 ib_drain_rq(qp); 1863 if (!qp->srq)
1864 ib_drain_rq(qp);
1864} 1865}
1865EXPORT_SYMBOL(ib_drain_qp); 1866EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b8952d13..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; 1390 dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; 1391 dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
1392 dev->ibdev.iwcm->get_qp = iwch_get_qp; 1392 dev->ibdev.iwcm->get_qp = iwch_get_qp;
1393 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
1394 sizeof(dev->ibdev.iwcm->ifname));
1393 1395
1394 ret = ib_register_device(&dev->ibdev, NULL); 1396 ret = ib_register_device(&dev->ibdev, NULL);
1395 if (ret) 1397 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb783573c..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, 162 cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
163 &cq->bar2_qid, 163 &cq->bar2_qid,
164 user ? &cq->bar2_pa : NULL); 164 user ? &cq->bar2_pa : NULL);
165 if (user && !cq->bar2_va) { 165 if (user && !cq->bar2_pa) {
166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", 166 pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
167 pci_name(rdev->lldi.pdev), cq->cqid); 167 pci_name(rdev->lldi.pdev), cq->cqid);
168 ret = -EINVAL; 168 ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682dc5709..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; 580 dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; 581 dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
582 dev->ibdev.iwcm->get_qp = c4iw_get_qp; 582 dev->ibdev.iwcm->get_qp = c4iw_get_qp;
583 memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
584 sizeof(dev->ibdev.iwcm->ifname));
583 585
584 ret = ib_register_device(&dev->ibdev, NULL); 586 ret = ib_register_device(&dev->ibdev, NULL);
585 if (ret) 587 if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5e033..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
185 185
186 if (pbar2_pa) 186 if (pbar2_pa)
187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; 187 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
188
189 if (is_t4(rdev->lldi.adapter_type))
190 return NULL;
191
188 return rdev->bar2_kva + bar2_qoffset; 192 return rdev->bar2_kva + bar2_qoffset;
189} 193}
190 194
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
270 /* 274 /*
271 * User mode must have bar2 access. 275 * User mode must have bar2 access.
272 */ 276 */
273 if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { 277 if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
274 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", 278 pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
275 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); 279 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
276 goto free_dma; 280 goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1895void c4iw_drain_sq(struct ib_qp *ibqp) 1899void c4iw_drain_sq(struct ib_qp *ibqp)
1896{ 1900{
1897 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1901 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1902 unsigned long flag;
1903 bool need_to_wait;
1898 1904
1899 wait_for_completion(&qp->sq_drained); 1905 spin_lock_irqsave(&qp->lock, flag);
1906 need_to_wait = !t4_sq_empty(&qp->wq);
1907 spin_unlock_irqrestore(&qp->lock, flag);
1908
1909 if (need_to_wait)
1910 wait_for_completion(&qp->sq_drained);
1900} 1911}
1901 1912
1902void c4iw_drain_rq(struct ib_qp *ibqp) 1913void c4iw_drain_rq(struct ib_qp *ibqp)
1903{ 1914{
1904 struct c4iw_qp *qp = to_c4iw_qp(ibqp); 1915 struct c4iw_qp *qp = to_c4iw_qp(ibqp);
1916 unsigned long flag;
1917 bool need_to_wait;
1918
1919 spin_lock_irqsave(&qp->lock, flag);
1920 need_to_wait = !t4_rq_empty(&qp->wq);
1921 spin_unlock_irqrestore(&qp->lock, flag);
1905 1922
1906 wait_for_completion(&qp->rq_drained); 1923 if (need_to_wait)
1924 wait_for_completion(&qp->rq_drained);
1907} 1925}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5acf346e048e..6ad0489cb3c5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
530 sizeof(struct mlx5_wqe_ctrl_seg)) / 530 sizeof(struct mlx5_wqe_ctrl_seg)) /
531 sizeof(struct mlx5_wqe_data_seg); 531 sizeof(struct mlx5_wqe_data_seg);
532 props->max_sge = min(max_rq_sg, max_sq_sg); 532 props->max_sge = min(max_rq_sg, max_sq_sg);
533 props->max_sge_rd = props->max_sge; 533 props->max_sge_rd = MLX5_MAX_SGE_RD;
534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 534 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 535 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 536 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
671 struct mlx5_ib_dev *dev = to_mdev(ibdev); 671 struct mlx5_ib_dev *dev = to_mdev(ibdev);
672 struct mlx5_core_dev *mdev = dev->mdev; 672 struct mlx5_core_dev *mdev = dev->mdev;
673 struct mlx5_hca_vport_context *rep; 673 struct mlx5_hca_vport_context *rep;
674 int max_mtu; 674 u16 max_mtu;
675 int oper_mtu; 675 u16 oper_mtu;
676 int err; 676 int err;
677 u8 ib_link_width_oper; 677 u8 ib_link_width_oper;
678 u8 vl_hw_cap; 678 u8 vl_hw_cap;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3ea9e055fdd3..92914539edc7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
500 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); 500 * skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
501 */ 501 */
502 502
503 if (!netif_carrier_ok(netdev))
504 return NETDEV_TX_OK;
505
506 if (netif_queue_stopped(netdev)) 503 if (netif_queue_stopped(netdev))
507 return NETDEV_TX_BUSY; 504 return NETDEV_TX_BUSY;
508 505
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
45#include <linux/export.h> 45#include <linux/export.h>
46#include <linux/uio.h> 46#include <linux/uio.h>
47 47
48#include <rdma/ib.h>
49
48#include "qib.h" 50#include "qib.h"
49#include "qib_common.h" 51#include "qib_common.h"
50#include "qib_user_sdma.h" 52#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
2067 ssize_t ret = 0; 2069 ssize_t ret = 0;
2068 void *dest; 2070 void *dest;
2069 2071
2072 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
2073 return -EACCES;
2074
2070 if (count < sizeof(cmd.type)) { 2075 if (count < sizeof(cmd.type)) {
2071 ret = -EINVAL; 2076 ret = -EINVAL;
2072 goto bail; 2077 goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a6948dc8..a9e3bcc522c4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@ bail:
1637 spin_unlock_irqrestore(&qp->s_hlock, flags); 1637 spin_unlock_irqrestore(&qp->s_hlock, flags);
1638 if (nreq) { 1638 if (nreq) {
1639 if (call_send) 1639 if (call_send)
1640 rdi->driver_f.schedule_send_no_lock(qp);
1641 else
1642 rdi->driver_f.do_send(qp); 1640 rdi->driver_f.do_send(qp);
1641 else
1642 rdi->driver_f.schedule_send_no_lock(qp);
1643 } 1643 }
1644 return err; 1644 return err;
1645} 1645}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index e8a84d12b7ff..1142a93dd90b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -153,6 +153,7 @@ static const struct xpad_device {
153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 153 { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, 154 { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, 155 { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
156 { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
156 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 157 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
157 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, 158 { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
158 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, 159 { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
304 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ 305 XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
305 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ 306 XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
306 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ 307 { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
308 XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
307 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ 309 XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
308 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ 310 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
309 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ 311 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index d5994a745ffa..982936334537 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
178 input_set_drvdata(haptics->input_dev, haptics); 178 input_set_drvdata(haptics->input_dev, haptics);
179 179
180 haptics->input_dev->name = "arizona:haptics"; 180 haptics->input_dev->name = "arizona:haptics";
181 haptics->input_dev->dev.parent = pdev->dev.parent;
182 haptics->input_dev->close = arizona_haptics_close; 181 haptics->input_dev->close = arizona_haptics_close;
183 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit); 182 __set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
184 183
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 3f02e0e03d12..67aab86048ad 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay)) 353 if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
354 kpd_delay = 15625; 354 kpd_delay = 15625;
355 355
356 if (kpd_delay > 62500 || kpd_delay == 0) { 356 /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
357 if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
357 dev_err(&pdev->dev, "invalid power key trigger delay\n"); 358 dev_err(&pdev->dev, "invalid power key trigger delay\n");
358 return -EINVAL; 359 return -EINVAL;
359 } 360 }
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
385 pwr->name = "pmic8xxx_pwrkey"; 386 pwr->name = "pmic8xxx_pwrkey";
386 pwr->phys = "pmic8xxx_pwrkey/input0"; 387 pwr->phys = "pmic8xxx_pwrkey/input0";
387 388
388 delay = (kpd_delay << 10) / USEC_PER_SEC; 389 delay = (kpd_delay << 6) / USEC_PER_SEC;
389 delay = 1 + ilog2(delay); 390 delay = ilog2(delay);
390 391
391 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl); 392 err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
392 if (err < 0) { 393 if (err < 0) {
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 10c4e3d462f1..caa5a62c42fb 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
222 222
223 info->input_dev->name = "twl4030:vibrator"; 223 info->input_dev->name = "twl4030:vibrator";
224 info->input_dev->id.version = 1; 224 info->input_dev->id.version = 1;
225 info->input_dev->dev.parent = pdev->dev.parent;
226 info->input_dev->close = twl4030_vibra_close; 225 info->input_dev->close = twl4030_vibra_close;
227 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 226 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
228 227
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ea63fad48de6..53e33fab3f7a 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -45,7 +45,6 @@
45struct vibra_info { 45struct vibra_info {
46 struct device *dev; 46 struct device *dev;
47 struct input_dev *input_dev; 47 struct input_dev *input_dev;
48 struct workqueue_struct *workqueue;
49 struct work_struct play_work; 48 struct work_struct play_work;
50 struct mutex mutex; 49 struct mutex mutex;
51 int irq; 50 int irq;
@@ -213,11 +212,7 @@ static int vibra_play(struct input_dev *input, void *data,
213 info->strong_speed = effect->u.rumble.strong_magnitude; 212 info->strong_speed = effect->u.rumble.strong_magnitude;
214 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1; 213 info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
215 214
216 ret = queue_work(info->workqueue, &info->play_work); 215 schedule_work(&info->play_work);
217 if (!ret) {
218 dev_info(&input->dev, "work is already on queue\n");
219 return ret;
220 }
221 216
222 return 0; 217 return 0;
223} 218}
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
362 357
363 info->input_dev->name = "twl6040:vibrator"; 358 info->input_dev->name = "twl6040:vibrator";
364 info->input_dev->id.version = 1; 359 info->input_dev->id.version = 1;
365 info->input_dev->dev.parent = pdev->dev.parent;
366 info->input_dev->close = twl6040_vibra_close; 360 info->input_dev->close = twl6040_vibra_close;
367 __set_bit(FF_RUMBLE, info->input_dev->ffbit); 361 __set_bit(FF_RUMBLE, info->input_dev->ffbit);
368 362
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3a7f3a4a4396..7c18249d6c8e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
858 goto err_free_buf; 858 goto err_free_buf;
859 } 859 }
860 860
861 /* Sanity check that a device has an endpoint */
862 if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
863 dev_err(&usbinterface->dev,
864 "Invalid number of endpoints\n");
865 error = -EINVAL;
866 goto err_free_urb;
867 }
868
861 /* 869 /*
862 * The endpoint is always altsetting 0, we know this since we know 870 * The endpoint is always altsetting 0, we know this since we know
863 * this device only has one interrupt endpoint 871 * this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
879 * HID report descriptor 887 * HID report descriptor
880 */ 888 */
881 if (usb_get_extra_descriptor(usbinterface->cur_altsetting, 889 if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
882 HID_DEVICE_TYPE, &hid_desc) != 0){ 890 HID_DEVICE_TYPE, &hid_desc) != 0) {
883 dev_err(&usbinterface->dev, 891 dev_err(&usbinterface->dev,
884 "Can't retrieve exta USB descriptor to get hid report descriptor length\n"); 892 "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
885 error = -EIO; 893 error = -EIO;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 374c129219ef..5efadad4615b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -92,6 +92,7 @@ struct iommu_dev_data {
92 struct list_head dev_data_list; /* For global dev_data_list */ 92 struct list_head dev_data_list; /* For global dev_data_list */
93 struct protection_domain *domain; /* Domain the device is bound to */ 93 struct protection_domain *domain; /* Domain the device is bound to */
94 u16 devid; /* PCI Device ID */ 94 u16 devid; /* PCI Device ID */
95 u16 alias; /* Alias Device ID */
95 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 bool iommu_v2; /* Device can make use of IOMMUv2 */
96 bool passthrough; /* Device is identity mapped */ 97 bool passthrough; /* Device is identity mapped */
97 struct { 98 struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
166 return container_of(dom, struct protection_domain, domain); 167 return container_of(dom, struct protection_domain, domain);
167} 168}
168 169
170static inline u16 get_device_id(struct device *dev)
171{
172 struct pci_dev *pdev = to_pci_dev(dev);
173
174 return PCI_DEVID(pdev->bus->number, pdev->devfn);
175}
176
169static struct iommu_dev_data *alloc_dev_data(u16 devid) 177static struct iommu_dev_data *alloc_dev_data(u16 devid)
170{ 178{
171 struct iommu_dev_data *dev_data; 179 struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
203 return dev_data; 211 return dev_data;
204} 212}
205 213
214static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
215{
216 *(u16 *)data = alias;
217 return 0;
218}
219
220static u16 get_alias(struct device *dev)
221{
222 struct pci_dev *pdev = to_pci_dev(dev);
223 u16 devid, ivrs_alias, pci_alias;
224
225 devid = get_device_id(dev);
226 ivrs_alias = amd_iommu_alias_table[devid];
227 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
228
229 if (ivrs_alias == pci_alias)
230 return ivrs_alias;
231
232 /*
233 * DMA alias showdown
234 *
235 * The IVRS is fairly reliable in telling us about aliases, but it
236 * can't know about every screwy device. If we don't have an IVRS
237 * reported alias, use the PCI reported alias. In that case we may
238 * still need to initialize the rlookup and dev_table entries if the
239 * alias is to a non-existent device.
240 */
241 if (ivrs_alias == devid) {
242 if (!amd_iommu_rlookup_table[pci_alias]) {
243 amd_iommu_rlookup_table[pci_alias] =
244 amd_iommu_rlookup_table[devid];
245 memcpy(amd_iommu_dev_table[pci_alias].data,
246 amd_iommu_dev_table[devid].data,
247 sizeof(amd_iommu_dev_table[pci_alias].data));
248 }
249
250 return pci_alias;
251 }
252
253 pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
254 "for device %s[%04x:%04x], kernel reported alias "
255 "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
256 PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
257 PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
258 PCI_FUNC(pci_alias));
259
260 /*
261 * If we don't have a PCI DMA alias and the IVRS alias is on the same
262 * bus, then the IVRS table may know about a quirk that we don't.
263 */
264 if (pci_alias == devid &&
265 PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
266 pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
267 pdev->dma_alias_devfn = ivrs_alias & 0xff;
268 pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
269 PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
270 dev_name(dev));
271 }
272
273 return ivrs_alias;
274}
275
206static struct iommu_dev_data *find_dev_data(u16 devid) 276static struct iommu_dev_data *find_dev_data(u16 devid)
207{ 277{
208 struct iommu_dev_data *dev_data; 278 struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
215 return dev_data; 285 return dev_data;
216} 286}
217 287
218static inline u16 get_device_id(struct device *dev)
219{
220 struct pci_dev *pdev = to_pci_dev(dev);
221
222 return PCI_DEVID(pdev->bus->number, pdev->devfn);
223}
224
225static struct iommu_dev_data *get_dev_data(struct device *dev) 288static struct iommu_dev_data *get_dev_data(struct device *dev)
226{ 289{
227 return dev->archdata.iommu; 290 return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
349 if (!dev_data) 412 if (!dev_data)
350 return -ENOMEM; 413 return -ENOMEM;
351 414
415 dev_data->alias = get_alias(dev);
416
352 if (pci_iommuv2_capable(pdev)) { 417 if (pci_iommuv2_capable(pdev)) {
353 struct amd_iommu *iommu; 418 struct amd_iommu *iommu;
354 419
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
369 u16 devid, alias; 434 u16 devid, alias;
370 435
371 devid = get_device_id(dev); 436 devid = get_device_id(dev);
372 alias = amd_iommu_alias_table[devid]; 437 alias = get_alias(dev);
373 438
374 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry)); 439 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
375 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry)); 440 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
1061 int ret; 1126 int ret;
1062 1127
1063 iommu = amd_iommu_rlookup_table[dev_data->devid]; 1128 iommu = amd_iommu_rlookup_table[dev_data->devid];
1064 alias = amd_iommu_alias_table[dev_data->devid]; 1129 alias = dev_data->alias;
1065 1130
1066 ret = iommu_flush_dte(iommu, dev_data->devid); 1131 ret = iommu_flush_dte(iommu, dev_data->devid);
1067 if (!ret && alias != dev_data->devid) 1132 if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
2039 bool ats; 2104 bool ats;
2040 2105
2041 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2106 iommu = amd_iommu_rlookup_table[dev_data->devid];
2042 alias = amd_iommu_alias_table[dev_data->devid]; 2107 alias = dev_data->alias;
2043 ats = dev_data->ats.enabled; 2108 ats = dev_data->ats.enabled;
2044 2109
2045 /* Update data structures */ 2110 /* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
2073 return; 2138 return;
2074 2139
2075 iommu = amd_iommu_rlookup_table[dev_data->devid]; 2140 iommu = amd_iommu_rlookup_table[dev_data->devid];
2076 alias = amd_iommu_alias_table[dev_data->devid]; 2141 alias = dev_data->alias;
2077 2142
2078 /* decrease reference counters */ 2143 /* decrease reference counters */
2079 dev_data->domain->dev_iommu[iommu->index] -= 1; 2144 dev_data->domain->dev_iommu[iommu->index] -= 1;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 2409e3bd3df2..7c39ac4b9c53 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
826 if (smmu_domain->smmu) 826 if (smmu_domain->smmu)
827 goto out_unlock; 827 goto out_unlock;
828 828
829 /* We're bypassing these SIDs, so don't allocate an actual context */
830 if (domain->type == IOMMU_DOMAIN_DMA) {
831 smmu_domain->smmu = smmu;
832 goto out_unlock;
833 }
834
829 /* 835 /*
830 * Mapping the requested stage onto what we support is surprisingly 836 * Mapping the requested stage onto what we support is surprisingly
831 * complicated, mainly because the spec allows S1+S2 SMMUs without 837 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
948 void __iomem *cb_base; 954 void __iomem *cb_base;
949 int irq; 955 int irq;
950 956
951 if (!smmu) 957 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
952 return; 958 return;
953 959
954 /* 960 /*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1089 struct arm_smmu_device *smmu = smmu_domain->smmu; 1095 struct arm_smmu_device *smmu = smmu_domain->smmu;
1090 void __iomem *gr0_base = ARM_SMMU_GR0(smmu); 1096 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1091 1097
1092 /* Devices in an IOMMU group may already be configured */
1093 ret = arm_smmu_master_configure_smrs(smmu, cfg);
1094 if (ret)
1095 return ret == -EEXIST ? 0 : ret;
1096
1097 /* 1098 /*
1098 * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1099 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1099 * for all devices behind the SMMU. 1100 * for all devices behind the SMMU. Note that we need to take
1101 * care configuring SMRs for devices both a platform_device and
1102 * and a PCI device (i.e. a PCI host controller)
1100 */ 1103 */
1101 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1104 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1102 return 0; 1105 return 0;
1103 1106
1107 /* Devices in an IOMMU group may already be configured */
1108 ret = arm_smmu_master_configure_smrs(smmu, cfg);
1109 if (ret)
1110 return ret == -EEXIST ? 0 : ret;
1111
1104 for (i = 0; i < cfg->num_streamids; ++i) { 1112 for (i = 0; i < cfg->num_streamids; ++i) {
1105 u32 idx, s2cr; 1113 u32 idx, s2cr;
1106 1114
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 94a30da0cfac..4dffccf532a2 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 467 gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
468 468
469 /* Update the pcpu_masks */ 469 /* Update the pcpu_masks */
470 for (i = 0; i < gic_vpes; i++) 470 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
471 clear_bit(irq, pcpu_masks[i].pcpu_mask); 471 clear_bit(irq, pcpu_masks[i].pcpu_mask);
472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 472 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
473 473
@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
707 spin_lock_irqsave(&gic_lock, flags); 707 spin_lock_irqsave(&gic_lock, flags);
708 gic_map_to_pin(intr, gic_cpu_pin); 708 gic_map_to_pin(intr, gic_cpu_pin);
709 gic_map_to_vpe(intr, vpe); 709 gic_map_to_vpe(intr, vpe);
710 for (i = 0; i < gic_vpes; i++) 710 for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
711 clear_bit(intr, pcpu_masks[i].pcpu_mask); 711 clear_bit(intr, pcpu_masks[i].pcpu_mask);
712 set_bit(intr, pcpu_masks[vpe].pcpu_mask); 712 set_bit(intr, pcpu_masks[vpe].pcpu_mask);
713 spin_unlock_irqrestore(&gic_lock, flags); 713 spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 0d29b5a6356d..99e5f9751e8b 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
715 if (!maddr || maddr->family != AF_ISDN) 715 if (!maddr || maddr->family != AF_ISDN)
716 return -EINVAL; 716 return -EINVAL;
717 717
718 if (addr_len < sizeof(struct sockaddr_mISDN))
719 return -EINVAL;
720
718 lock_sock(sk); 721 lock_sock(sk);
719 722
720 if (_pms(sk)->dev) { 723 if (_pms(sk)->dev) {
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebbd0436..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
1452 printk(KERN_INFO "%s: %s found\n", __func__, 1452 printk(KERN_INFO "%s: %s found\n", __func__,
1453 usbvision_device_data[model].model_string); 1453 usbvision_device_data[model].model_string);
1454 1454
1455 /*
1456 * this is a security check.
1457 * an exploit using an incorrect bInterfaceNumber is known
1458 */
1459 if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
1460 return -ENODEV;
1461
1462 if (usbvision_device_data[model].interface >= 0) 1455 if (usbvision_device_data[model].interface >= 0)
1463 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; 1456 interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
1464 else if (ifnum < dev->actconfig->desc.bNumInterfaces) 1457 else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f496e0e..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1645 * Will sleep if required for nonblocking == false. 1645 * Will sleep if required for nonblocking == false.
1646 */ 1646 */
1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 1647static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1648 int nonblocking) 1648 void *pb, int nonblocking)
1649{ 1649{
1650 unsigned long flags; 1650 unsigned long flags;
1651 int ret; 1651 int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1666 /* 1666 /*
1667 * Only remove the buffer from done_list if v4l2_buffer can handle all 1667 * Only remove the buffer from done_list if v4l2_buffer can handle all
1668 * the planes. 1668 * the planes.
1669 * Verifying planes is NOT necessary since it already has been checked
1670 * before the buffer is queued/prepared. So it can never fail.
1671 */ 1669 */
1672 list_del(&(*vb)->done_entry); 1670 ret = call_bufop(q, verify_planes_array, *vb, pb);
1671 if (!ret)
1672 list_del(&(*vb)->done_entry);
1673 spin_unlock_irqrestore(&q->done_lock, flags); 1673 spin_unlock_irqrestore(&q->done_lock, flags);
1674 1674
1675 return ret; 1675 return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
1748 struct vb2_buffer *vb = NULL; 1748 struct vb2_buffer *vb = NULL;
1749 int ret; 1749 int ret;
1750 1750
1751 ret = __vb2_get_done_vb(q, &vb, nonblocking); 1751 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
1752 if (ret < 0) 1752 if (ret < 0)
1753 return ret; 1753 return ret;
1754 1754
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
2298 return POLLERR; 2298 return POLLERR;
2299 2299
2300 /* 2300 /*
2301 * If this quirk is set and QBUF hasn't been called yet then
2302 * return POLLERR as well. This only affects capture queues, output
2303 * queues will always initialize waiting_for_buffers to false.
2304 * This quirk is set by V4L2 for backwards compatibility reasons.
2305 */
2306 if (q->quirk_poll_must_check_waiting_for_buffers &&
2307 q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
2308 return POLLERR;
2309
2310 /*
2301 * For output streams you can call write() as long as there are fewer 2311 * For output streams you can call write() as long as there are fewer
2302 * buffers queued than there are buffers available. 2312 * buffers queued than there are buffers available.
2303 */ 2313 */
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
49 vec = frame_vector_create(nr); 49 vec = frame_vector_create(nr);
50 if (!vec) 50 if (!vec)
51 return ERR_PTR(-ENOMEM); 51 return ERR_PTR(-ENOMEM);
52 ret = get_vaddr_frames(start, nr, write, 1, vec); 52 ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
53 if (ret < 0) 53 if (ret < 0)
54 goto out_destroy; 54 goto out_destroy;
55 /* We accept only complete set of PFNs */ 55 /* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..7f366f1b0377 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
74 return 0; 74 return 0;
75} 75}
76 76
77static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
78{
79 return __verify_planes_array(vb, pb);
80}
81
77/** 82/**
78 * __verify_length() - Verify that the bytesused value for each plane fits in 83 * __verify_length() - Verify that the bytesused value for each plane fits in
79 * the plane length and that the data offset doesn't exceed the bytesused value. 84 * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
437} 442}
438 443
439static const struct vb2_buf_ops v4l2_buf_ops = { 444static const struct vb2_buf_ops v4l2_buf_ops = {
445 .verify_planes_array = __verify_planes_array_core,
440 .fill_user_buffer = __fill_v4l2_buffer, 446 .fill_user_buffer = __fill_v4l2_buffer,
441 .fill_vb2_buffer = __fill_vb2_buffer, 447 .fill_vb2_buffer = __fill_vb2_buffer,
442 .copy_timestamp = __copy_timestamp, 448 .copy_timestamp = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
765 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); 771 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
766 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) 772 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
767 == V4L2_BUF_FLAG_TIMESTAMP_COPY; 773 == V4L2_BUF_FLAG_TIMESTAMP_COPY;
774 /*
775 * For compatibility with vb1: if QBUF hasn't been called yet, then
776 * return POLLERR as well. This only affects capture queues, output
777 * queues will always initialize waiting_for_buffers to false.
778 */
779 q->quirk_poll_must_check_waiting_for_buffers = true;
768 780
769 return vb2_core_queue_init(q); 781 return vb2_core_queue_init(q);
770} 782}
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
818 poll_wait(file, &fh->wait, wait); 830 poll_wait(file, &fh->wait, wait);
819 } 831 }
820 832
821 /*
822 * For compatibility with vb1: if QBUF hasn't been called yet, then
823 * return POLLERR as well. This only affects capture queues, output
824 * queues will always initialize waiting_for_buffers to false.
825 */
826 if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
827 return POLLERR;
828
829 return res | vb2_core_poll(q, file, wait); 833 return res | vb2_core_poll(q, file, wait);
830} 834}
831EXPORT_SYMBOL_GPL(vb2_poll); 835EXPORT_SYMBOL_GPL(vb2_poll);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f280500..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); 223 cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ 224 flush_work(&ctx->fault_work); /* Only needed for dedicated process */
225 225
226 /*
227 * Wait until no further interrupts are presented by the PSL
228 * for this context.
229 */
230 if (cxl_ops->irq_wait)
231 cxl_ops->irq_wait(ctx);
232
226 /* release the reference to the group leader and mm handling pid */ 233 /* release the reference to the group leader and mm handling pid */
227 put_pid(ctx->pid); 234 put_pid(ctx->pid);
228 put_pid(ctx->glpid); 235 put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf7806e..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ 274#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ 275#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ 276#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
277#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
277/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ 278/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
278#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ 279#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
279#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ 280#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
855 u64 dsisr, u64 errstat); 856 u64 dsisr, u64 errstat);
856 irqreturn_t (*psl_interrupt)(int irq, void *data); 857 irqreturn_t (*psl_interrupt)(int irq, void *data);
857 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); 858 int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
859 void (*irq_wait)(struct cxl_context *ctx);
858 int (*attach_process)(struct cxl_context *ctx, bool kernel, 860 int (*attach_process)(struct cxl_context *ctx, bool kernel,
859 u64 wed, u64 amr); 861 u64 wed, u64 amr);
860 int (*detach_process)(struct cxl_context *ctx); 862 int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc41a2c..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
203void cxl_unmap_irq(unsigned int virq, void *cookie) 203void cxl_unmap_irq(unsigned int virq, void *cookie)
204{ 204{
205 free_irq(virq, cookie); 205 free_irq(virq, cookie);
206 irq_dispose_mapping(virq);
207} 206}
208 207
209int cxl_register_one_irq(struct cxl *adapter, 208int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/delay.h>
17#include <asm/synch.h> 18#include <asm/synch.h>
18#include <misc/cxl-base.h> 19#include <misc/cxl-base.h>
19 20
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
797 return fail_psl_irq(afu, &irq_info); 798 return fail_psl_irq(afu, &irq_info);
798} 799}
799 800
801void native_irq_wait(struct cxl_context *ctx)
802{
803 u64 dsisr;
804 int timeout = 1000;
805 int ph;
806
807 /*
808 * Wait until no further interrupts are presented by the PSL
809 * for this context.
810 */
811 while (timeout--) {
812 ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
813 if (ph != ctx->pe)
814 return;
815 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
816 if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
817 return;
818 /*
819 * We are waiting for the workqueue to process our
820 * irq, so need to let that run here.
821 */
822 msleep(1);
823 }
824
825 dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
826 " DSISR %016llx!\n", ph, dsisr);
827 return;
828}
829
800static irqreturn_t native_slice_irq_err(int irq, void *data) 830static irqreturn_t native_slice_irq_err(int irq, void *data)
801{ 831{
802 struct cxl_afu *afu = data; 832 struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
1076 .handle_psl_slice_error = native_handle_psl_slice_error, 1106 .handle_psl_slice_error = native_handle_psl_slice_error,
1077 .psl_interrupt = NULL, 1107 .psl_interrupt = NULL,
1078 .ack_irq = native_ack_irq, 1108 .ack_irq = native_ack_irq,
1109 .irq_wait = native_irq_wait,
1079 .attach_process = native_attach_process, 1110 .attach_process = native_attach_process,
1080 .detach_process = native_detach_process, 1111 .detach_process = native_detach_process,
1081 .support_attributes = native_support_attributes, 1112 .support_attributes = native_support_attributes,
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8354cb..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
97config MMC_SDHCI_ACPI 97config MMC_SDHCI_ACPI
98 tristate "SDHCI support for ACPI enumerated SDHCI controllers" 98 tristate "SDHCI support for ACPI enumerated SDHCI controllers"
99 depends on MMC_SDHCI && ACPI 99 depends on MMC_SDHCI && ACPI
100 select IOSF_MBI if X86
100 help 101 help
101 This selects support for ACPI enumerated SDHCI controllers, 102 This selects support for ACPI enumerated SDHCI controllers,
102 identified by ACPI Compatibility ID PNP0D40 or specific 103 identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41c6d58..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
41#include <linux/mmc/pm.h> 41#include <linux/mmc/pm.h>
42#include <linux/mmc/slot-gpio.h> 42#include <linux/mmc/slot-gpio.h>
43 43
44#ifdef CONFIG_X86
45#include <asm/cpu_device_id.h>
46#include <asm/iosf_mbi.h>
47#endif
48
44#include "sdhci.h" 49#include "sdhci.h"
45 50
46enum { 51enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
116 .ops = &sdhci_acpi_ops_int, 121 .ops = &sdhci_acpi_ops_int,
117}; 122};
118 123
124#ifdef CONFIG_X86
125
126static bool sdhci_acpi_byt(void)
127{
128 static const struct x86_cpu_id byt[] = {
129 { X86_VENDOR_INTEL, 6, 0x37 },
130 {}
131 };
132
133 return x86_match_cpu(byt);
134}
135
136#define BYT_IOSF_SCCEP 0x63
137#define BYT_IOSF_OCP_NETCTRL0 0x1078
138#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
139
140static void sdhci_acpi_byt_setting(struct device *dev)
141{
142 u32 val = 0;
143
144 if (!sdhci_acpi_byt())
145 return;
146
147 if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
148 &val)) {
149 dev_err(dev, "%s read error\n", __func__);
150 return;
151 }
152
153 if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
154 return;
155
156 val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
157
158 if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
159 val)) {
160 dev_err(dev, "%s write error\n", __func__);
161 return;
162 }
163
164 dev_dbg(dev, "%s completed\n", __func__);
165}
166
167static bool sdhci_acpi_byt_defer(struct device *dev)
168{
169 if (!sdhci_acpi_byt())
170 return false;
171
172 if (!iosf_mbi_available())
173 return true;
174
175 sdhci_acpi_byt_setting(dev);
176
177 return false;
178}
179
180#else
181
182static inline void sdhci_acpi_byt_setting(struct device *dev)
183{
184}
185
186static inline bool sdhci_acpi_byt_defer(struct device *dev)
187{
188 return false;
189}
190
191#endif
192
119static int bxt_get_cd(struct mmc_host *mmc) 193static int bxt_get_cd(struct mmc_host *mmc)
120{ 194{
121 int gpio_cd = mmc_gpio_get_cd(mmc); 195 int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
322 if (acpi_bus_get_status(device) || !device->status.present) 396 if (acpi_bus_get_status(device) || !device->status.present)
323 return -ENODEV; 397 return -ENODEV;
324 398
399 if (sdhci_acpi_byt_defer(dev))
400 return -EPROBE_DEFER;
401
325 hid = acpi_device_hid(device); 402 hid = acpi_device_hid(device);
326 uid = device->pnp.unique_id; 403 uid = device->pnp.unique_id;
327 404
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
447{ 524{
448 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 525 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
449 526
527 sdhci_acpi_byt_setting(&c->pdev->dev);
528
450 return sdhci_resume_host(c->host); 529 return sdhci_resume_host(c->host);
451} 530}
452 531
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
470{ 549{
471 struct sdhci_acpi_host *c = dev_get_drvdata(dev); 550 struct sdhci_acpi_host *c = dev_get_drvdata(dev);
472 551
552 sdhci_acpi_byt_setting(&c->pdev->dev);
553
473 return sdhci_runtime_resume_host(c->host); 554 return sdhci_runtime_resume_host(c->host);
474} 555}
475 556
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a413848c..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1129 MMC_CAP_1_8V_DDR | 1129 MMC_CAP_1_8V_DDR |
1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1130 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1131 1131
1132 /* TODO MMC DDR is not working on A80 */
1133 if (of_device_is_compatible(pdev->dev.of_node,
1134 "allwinner,sun9i-a80-mmc"))
1135 mmc->caps &= ~MMC_CAP_1_8V_DDR;
1136
1132 ret = mmc_of_parse(mmc); 1137 ret = mmc_of_parse(mmc);
1133 if (ret) 1138 if (ret)
1134 goto error_free_dma; 1139 goto error_free_dma;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a1ba62b7da2..befd67df08e1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -62,9 +62,8 @@ config DUMMY
62 this device is consigned into oblivion) with a configurable IP 62 this device is consigned into oblivion) with a configurable IP
63 address. It is most commonly used in order to make your currently 63 address. It is most commonly used in order to make your currently
64 inactive SLIP address seem like a real address for local programs. 64 inactive SLIP address seem like a real address for local programs.
65 If you use SLIP or PPP, you might want to say Y here. Since this 65 If you use SLIP or PPP, you might want to say Y here. It won't
66 thing often comes in handy, the default is Y. It won't enlarge your 66 enlarge your kernel. What a deal. Read about it in the Network
67 kernel either. What a deal. Read about it in the Network
68 Administrator's Guide, available from 67 Administrator's Guide, available from
69 <http://www.tldp.org/docs.html#guide>. 68 <http://www.tldp.org/docs.html#guide>.
70 69
@@ -195,6 +194,7 @@ config GENEVE
195 194
196config MACSEC 195config MACSEC
197 tristate "IEEE 802.1AE MAC-level encryption (MACsec)" 196 tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
197 select CRYPTO
198 select CRYPTO_AES 198 select CRYPTO_AES
199 select CRYPTO_GCM 199 select CRYPTO_GCM
200 ---help--- 200 ---help---
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 50454be86570..a2904029cccc 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2181 struct net_device *bridge) 2181 struct net_device *bridge)
2182{ 2182{
2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2183 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2184 u16 fid;
2185 int i, err; 2184 int i, err;
2186 2185
2187 mutex_lock(&ps->smi_mutex); 2186 mutex_lock(&ps->smi_mutex);
2188 2187
2189 /* Get or create the bridge FID and assign it to the port */
2190 for (i = 0; i < ps->num_ports; ++i)
2191 if (ps->ports[i].bridge_dev == bridge)
2192 break;
2193
2194 if (i < ps->num_ports)
2195 err = _mv88e6xxx_port_fid_get(ds, i, &fid);
2196 else
2197 err = _mv88e6xxx_fid_new(ds, &fid);
2198 if (err)
2199 goto unlock;
2200
2201 err = _mv88e6xxx_port_fid_set(ds, port, fid);
2202 if (err)
2203 goto unlock;
2204
2205 /* Assign the bridge and remap each port's VLANTable */ 2188 /* Assign the bridge and remap each port's VLANTable */
2206 ps->ports[port].bridge_dev = bridge; 2189 ps->ports[port].bridge_dev = bridge;
2207 2190
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
2213 } 2196 }
2214 } 2197 }
2215 2198
2216unlock:
2217 mutex_unlock(&ps->smi_mutex); 2199 mutex_unlock(&ps->smi_mutex);
2218 2200
2219 return err; 2201 return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
2223{ 2205{
2224 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 2206 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2225 struct net_device *bridge = ps->ports[port].bridge_dev; 2207 struct net_device *bridge = ps->ports[port].bridge_dev;
2226 u16 fid;
2227 int i; 2208 int i;
2228 2209
2229 mutex_lock(&ps->smi_mutex); 2210 mutex_lock(&ps->smi_mutex);
2230 2211
2231 /* Give the port a fresh Filtering Information Database */
2232 if (_mv88e6xxx_fid_new(ds, &fid) ||
2233 _mv88e6xxx_port_fid_set(ds, port, fid))
2234 netdev_warn(ds->ports[port], "failed to assign a new FID\n");
2235
2236 /* Unassign the bridge and remap each port's VLANTable */ 2212 /* Unassign the bridge and remap each port's VLANTable */
2237 ps->ports[port].bridge_dev = NULL; 2213 ps->ports[port].bridge_dev = NULL;
2238 2214
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2476 * the other bits clear. 2452 * the other bits clear.
2477 */ 2453 */
2478 reg = 1 << port; 2454 reg = 1 << port;
2479 /* Disable learning for DSA and CPU ports */ 2455 /* Disable learning for CPU port */
2480 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) 2456 if (dsa_is_cpu_port(ds, port))
2481 reg = PORT_ASSOC_VECTOR_LOCKED_PORT; 2457 reg = 0;
2482 2458
2483 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg); 2459 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
2484 if (ret) 2460 if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2558 if (ret) 2534 if (ret)
2559 goto abort; 2535 goto abort;
2560 2536
2561 /* Port based VLAN map: give each port its own address 2537 /* Port based VLAN map: give each port the same default address
2562 * database, and allow bidirectional communication between the 2538 * database, and allow bidirectional communication between the
2563 * CPU and DSA port(s), and the other ports. 2539 * CPU and DSA port(s), and the other ports.
2564 */ 2540 */
2565 ret = _mv88e6xxx_port_fid_set(ds, port, port + 1); 2541 ret = _mv88e6xxx_port_fid_set(ds, port, 0);
2566 if (ret) 2542 if (ret)
2567 goto abort; 2543 goto abort;
2568 2544
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 8f76f4558a88..2ff465848b65 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1412 1412
1413 err = -EIO; 1413 err = -EIO;
1414 1414
1415 netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; 1415 netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 1416 netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
1417 1417
1418 /* Init PHY as early as possible due to power saving issue */ 1418 /* Init PHY as early as possible due to power saving issue */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 99b30a952b38..38db2e4d7d54 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac); 1572 dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1573 } 1573 }
1574 1574
1575 /* This (reset &) enable is not preset in specs or reference driver but
1576 * Broadcom does it in arch PCI code when enabling fake PCI device.
1577 */
1578 bcma_core_enable(core, 0);
1579
1575 /* Allocation and references */ 1580 /* Allocation and references */
1576 net_dev = alloc_etherdev(sizeof(*bgmac)); 1581 net_dev = alloc_etherdev(sizeof(*bgmac));
1577 if (!net_dev) 1582 if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4fbb093e0d84..9a03c142b742 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -199,9 +199,9 @@
199#define BGMAC_CMDCFG_TAI 0x00000200 199#define BGMAC_CMDCFG_TAI 0x00000200
200#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ 200#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
201#define BGMAC_CMDCFG_HD_SHIFT 10 201#define BGMAC_CMDCFG_HD_SHIFT 10
202#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */ 202#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
203#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */ 203#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
204#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) 204#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
205#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ 205#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
206#define BGMAC_CMDCFG_AE 0x00400000 206#define BGMAC_CMDCFG_AE 0x00400000
207#define BGMAC_CMDCFG_CFE 0x00800000 207#define BGMAC_CMDCFG_CFE 0x00800000
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index cf6445d148ca..44ad1490b472 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
878 else 878 else
879 p = (char *)priv; 879 p = (char *)priv;
880 p += s->stat_offset; 880 p += s->stat_offset;
881 data[i] = *(u32 *)p; 881 if (sizeof(unsigned long) != sizeof(u32) &&
882 s->stat_sizeof == sizeof(unsigned long))
883 data[i] = *(unsigned long *)p;
884 else
885 data[i] = *(u32 *)p;
882 } 886 }
883} 887}
884 888
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 967951582e03..d20539a6d162 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
1011 } 1011 }
1012 1012
1013 lmac++; 1013 lmac++;
1014 if (lmac == MAX_LMAC_PER_BGX) 1014 if (lmac == MAX_LMAC_PER_BGX) {
1015 of_node_put(node);
1015 break; 1016 break;
1017 }
1016 } 1018 }
1017 of_node_put(node);
1018 return 0; 1019 return 0;
1019 1020
1020defer: 1021defer:
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 984a3cc26f86..326d4009525e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1451 unsigned int mmd, unsigned int reg, u16 *valp); 1451 unsigned int mmd, unsigned int reg, u16 *valp);
1452int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 1452int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1453 unsigned int mmd, unsigned int reg, u16 val); 1453 unsigned int mmd, unsigned int reg, u16 val);
1454int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
1455 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1456 unsigned int fl0id, unsigned int fl1id);
1454int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1457int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1455 unsigned int vf, unsigned int iqtype, unsigned int iqid, 1458 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1456 unsigned int fl0id, unsigned int fl1id); 1459 unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 13b144bcf725..6278e5a74b74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2981void t4_free_sge_resources(struct adapter *adap) 2981void t4_free_sge_resources(struct adapter *adap)
2982{ 2982{
2983 int i; 2983 int i;
2984 struct sge_eth_rxq *eq = adap->sge.ethrxq; 2984 struct sge_eth_rxq *eq;
2985 struct sge_eth_txq *etq = adap->sge.ethtxq; 2985 struct sge_eth_txq *etq;
2986
2987 /* stop all Rx queues in order to start them draining */
2988 for (i = 0; i < adap->sge.ethqsets; i++) {
2989 eq = &adap->sge.ethrxq[i];
2990 if (eq->rspq.desc)
2991 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
2992 FW_IQ_TYPE_FL_INT_CAP,
2993 eq->rspq.cntxt_id,
2994 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
2995 0xffff);
2996 }
2986 2997
2987 /* clean up Ethernet Tx/Rx queues */ 2998 /* clean up Ethernet Tx/Rx queues */
2988 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { 2999 for (i = 0; i < adap->sge.ethqsets; i++) {
3000 eq = &adap->sge.ethrxq[i];
2989 if (eq->rspq.desc) 3001 if (eq->rspq.desc)
2990 free_rspq_fl(adap, &eq->rspq, 3002 free_rspq_fl(adap, &eq->rspq,
2991 eq->fl.size ? &eq->fl : NULL); 3003 eq->fl.size ? &eq->fl : NULL);
3004
3005 etq = &adap->sge.ethtxq[i];
2992 if (etq->q.desc) { 3006 if (etq->q.desc) {
2993 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, 3007 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
2994 etq->q.cntxt_id); 3008 etq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cc1736bece0f..71586a3e0f61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
2557} 2557}
2558 2558
2559#define EEPROM_STAT_ADDR 0x7bfc 2559#define EEPROM_STAT_ADDR 0x7bfc
2560#define VPD_SIZE 0x800
2560#define VPD_BASE 0x400 2561#define VPD_BASE 0x400
2561#define VPD_BASE_OLD 0 2562#define VPD_BASE_OLD 0
2562#define VPD_LEN 1024 2563#define VPD_LEN 1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2594 if (!vpd) 2595 if (!vpd)
2595 return -ENOMEM; 2596 return -ENOMEM;
2596 2597
2598 /* We have two VPD data structures stored in the adapter VPD area.
2599 * By default, Linux calculates the size of the VPD area by traversing
2600 * the first VPD area at offset 0x0, so we need to tell the OS what
2601 * our real VPD size is.
2602 */
2603 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2604 if (ret < 0)
2605 goto out;
2606
2597 /* Card information normally starts at VPD_BASE but early cards had 2607 /* Card information normally starts at VPD_BASE but early cards had
2598 * it at 0. 2608 * it at 0.
2599 */ 2609 */
@@ -6940,6 +6950,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
6940} 6950}
6941 6951
6942/** 6952/**
6953 * t4_iq_stop - stop an ingress queue and its FLs
6954 * @adap: the adapter
6955 * @mbox: mailbox to use for the FW command
6956 * @pf: the PF owning the queues
6957 * @vf: the VF owning the queues
6958 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
6959 * @iqid: ingress queue id
6960 * @fl0id: FL0 queue id or 0xffff if no attached FL0
6961 * @fl1id: FL1 queue id or 0xffff if no attached FL1
6962 *
6963 * Stops an ingress queue and its associated FLs, if any. This causes
6964 * any current or future data/messages destined for these queues to be
6965 * tossed.
6966 */
6967int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
6968 unsigned int vf, unsigned int iqtype, unsigned int iqid,
6969 unsigned int fl0id, unsigned int fl1id)
6970{
6971 struct fw_iq_cmd c;
6972
6973 memset(&c, 0, sizeof(c));
6974 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
6975 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
6976 FW_IQ_CMD_VFN_V(vf));
6977 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
6978 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
6979 c.iqid = cpu_to_be16(iqid);
6980 c.fl0id = cpu_to_be16(fl0id);
6981 c.fl1id = cpu_to_be16(fl1id);
6982 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6983}
6984
6985/**
6943 * t4_iq_free - free an ingress queue and its FLs 6986 * t4_iq_free - free an ingress queue and its FLs
6944 * @adap: the adapter 6987 * @adap: the adapter
6945 * @mbox: mailbox to use for the FW command 6988 * @mbox: mailbox to use for the FW command
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 62ccebc5f728..8cf943db5662 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1223 if (err) 1223 if (err)
1224 return err; 1224 return err;
1225 1225
1226 /* verify upper 16 bits are zero */
1227 if (vid >> 16)
1228 return FM10K_ERR_PARAM;
1229
1230 set = !(vid & FM10K_VLAN_CLEAR); 1226 set = !(vid & FM10K_VLAN_CLEAR);
1231 vid &= ~FM10K_VLAN_CLEAR; 1227 vid &= ~FM10K_VLAN_CLEAR;
1232 1228
1233 err = fm10k_iov_select_vid(vf_info, (u16)vid); 1229 /* if the length field has been set, this is a multi-bit
1234 if (err < 0) 1230 * update request. For multi-bit requests, simply disallow
1235 return err; 1231 * them when the pf_vid has been set. In this case, the PF
1232 * should have already cleared the VLAN_TABLE, and if we
1233 * allowed them, it could allow a rogue VF to receive traffic
1234 * on a VLAN it was not assigned. In the single-bit case, we
1235 * need to modify requests for VLAN 0 to use the default PF or
1236 * SW vid when assigned.
1237 */
1236 1238
1237 vid = err; 1239 if (vid >> 16) {
1240 /* prevent multi-bit requests when PF has
1241 * administratively set the VLAN for this VF
1242 */
1243 if (vf_info->pf_vid)
1244 return FM10K_ERR_PARAM;
1245 } else {
1246 err = fm10k_iov_select_vid(vf_info, (u16)vid);
1247 if (err < 0)
1248 return err;
1249
1250 vid = err;
1251 }
1238 1252
1239 /* update VSI info for VF in regards to VLAN table */ 1253 /* update VSI info for VF in regards to VLAN table */
1240 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); 1254 err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 084d0ab316b7..6a49b7ae511c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2594} 2594}
2595 2595
2596/** 2596/**
2597 * __i40e_chk_linearize - Check if there are more than 8 fragments per packet 2597 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2598 * @skb: send buffer 2598 * @skb: send buffer
2599 * 2599 *
2600 * Note: Our HW can't scatter-gather more than 8 fragments to build 2600 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2601 * a packet on the wire and so we need to figure out the cases where we 2601 * and so we need to figure out the cases where we need to linearize the skb.
2602 * need to linearize the skb. 2602 *
2603 * For TSO we need to count the TSO header and segment payload separately.
2604 * As such we need to check cases where we have 7 fragments or more as we
2605 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2606 * the segment payload in the first descriptor, and another 7 for the
2607 * fragments.
2603 **/ 2608 **/
2604bool __i40e_chk_linearize(struct sk_buff *skb) 2609bool __i40e_chk_linearize(struct sk_buff *skb)
2605{ 2610{
2606 const struct skb_frag_struct *frag, *stale; 2611 const struct skb_frag_struct *frag, *stale;
2607 int gso_size, nr_frags, sum; 2612 int nr_frags, sum;
2608
2609 /* check to see if TSO is enabled, if so we may get a repreive */
2610 gso_size = skb_shinfo(skb)->gso_size;
2611 if (unlikely(!gso_size))
2612 return true;
2613 2613
2614 /* no need to check if number of frags is less than 8 */ 2614 /* no need to check if number of frags is less than 7 */
2615 nr_frags = skb_shinfo(skb)->nr_frags; 2615 nr_frags = skb_shinfo(skb)->nr_frags;
2616 if (nr_frags < I40E_MAX_BUFFER_TXD) 2616 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
2617 return false; 2617 return false;
2618 2618
2619 /* We need to walk through the list and validate that each group 2619 /* We need to walk through the list and validate that each group
2620 * of 6 fragments totals at least gso_size. However we don't need 2620 * of 6 fragments totals at least gso_size. However we don't need
2621 * to perform such validation on the first or last 6 since the first 2621 * to perform such validation on the last 6 since the last 6 cannot
2622 * 6 cannot inherit any data from a descriptor before them, and the 2622 * inherit any data from a descriptor after them.
2623 * last 6 cannot inherit any data from a descriptor after them.
2624 */ 2623 */
2625 nr_frags -= I40E_MAX_BUFFER_TXD - 1; 2624 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
2626 frag = &skb_shinfo(skb)->frags[0]; 2625 frag = &skb_shinfo(skb)->frags[0];
2627 2626
2628 /* Initialize size to the negative value of gso_size minus 1. We 2627 /* Initialize size to the negative value of gso_size minus 1. We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
2631 * descriptors for a single transmit as the header and previous 2630 * descriptors for a single transmit as the header and previous
2632 * fragment are already consuming 2 descriptors. 2631 * fragment are already consuming 2 descriptors.
2633 */ 2632 */
2634 sum = 1 - gso_size; 2633 sum = 1 - skb_shinfo(skb)->gso_size;
2635 2634
2636 /* Add size of frags 1 through 5 to create our initial sum */ 2635 /* Add size of frags 0 through 4 to create our initial sum */
2637 sum += skb_frag_size(++frag); 2636 sum += skb_frag_size(frag++);
2638 sum += skb_frag_size(++frag); 2637 sum += skb_frag_size(frag++);
2639 sum += skb_frag_size(++frag); 2638 sum += skb_frag_size(frag++);
2640 sum += skb_frag_size(++frag); 2639 sum += skb_frag_size(frag++);
2641 sum += skb_frag_size(++frag); 2640 sum += skb_frag_size(frag++);
2642 2641
2643 /* Walk through fragments adding latest fragment, testing it, and 2642 /* Walk through fragments adding latest fragment, testing it, and
2644 * then removing stale fragments from the sum. 2643 * then removing stale fragments from the sum.
2645 */ 2644 */
2646 stale = &skb_shinfo(skb)->frags[0]; 2645 stale = &skb_shinfo(skb)->frags[0];
2647 for (;;) { 2646 for (;;) {
2648 sum += skb_frag_size(++frag); 2647 sum += skb_frag_size(frag++);
2649 2648
2650 /* if sum is negative we failed to make sufficient progress */ 2649 /* if sum is negative we failed to make sufficient progress */
2651 if (sum < 0) 2650 if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
2655 if (!--nr_frags) 2654 if (!--nr_frags)
2656 break; 2655 break;
2657 2656
2658 sum -= skb_frag_size(++stale); 2657 sum -= skb_frag_size(stale++);
2659 } 2658 }
2660 2659
2661 return false; 2660 return false;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index cdd5dc00aec5..a9bd70537d65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
413 **/ 413 **/
414static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 414static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
415{ 415{
416 /* we can only support up to 8 data buffers for a single send */ 416 /* Both TSO and single send will work if count is less than 8 */
417 if (likely(count <= I40E_MAX_BUFFER_TXD)) 417 if (likely(count < I40E_MAX_BUFFER_TXD))
418 return false; 418 return false;
419 419
420 return __i40e_chk_linearize(skb); 420 if (skb_is_gso(skb))
421 return __i40e_chk_linearize(skb);
422
423 /* we can support up to 8 data buffers for a single send */
424 return count != I40E_MAX_BUFFER_TXD;
421} 425}
422#endif /* _I40E_TXRX_H_ */ 426#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ebcc25c05796..cea97daa844c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1796} 1796}
1797 1797
1798/** 1798/**
1799 * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet 1799 * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
1800 * @skb: send buffer 1800 * @skb: send buffer
1801 * 1801 *
1802 * Note: Our HW can't scatter-gather more than 8 fragments to build 1802 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
1803 * a packet on the wire and so we need to figure out the cases where we 1803 * and so we need to figure out the cases where we need to linearize the skb.
1804 * need to linearize the skb. 1804 *
1805 * For TSO we need to count the TSO header and segment payload separately.
1806 * As such we need to check cases where we have 7 fragments or more as we
1807 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1808 * the segment payload in the first descriptor, and another 7 for the
1809 * fragments.
1805 **/ 1810 **/
1806bool __i40evf_chk_linearize(struct sk_buff *skb) 1811bool __i40evf_chk_linearize(struct sk_buff *skb)
1807{ 1812{
1808 const struct skb_frag_struct *frag, *stale; 1813 const struct skb_frag_struct *frag, *stale;
1809 int gso_size, nr_frags, sum; 1814 int nr_frags, sum;
1810
1811 /* check to see if TSO is enabled, if so we may get a repreive */
1812 gso_size = skb_shinfo(skb)->gso_size;
1813 if (unlikely(!gso_size))
1814 return true;
1815 1815
1816 /* no need to check if number of frags is less than 8 */ 1816 /* no need to check if number of frags is less than 7 */
1817 nr_frags = skb_shinfo(skb)->nr_frags; 1817 nr_frags = skb_shinfo(skb)->nr_frags;
1818 if (nr_frags < I40E_MAX_BUFFER_TXD) 1818 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
1819 return false; 1819 return false;
1820 1820
1821 /* We need to walk through the list and validate that each group 1821 /* We need to walk through the list and validate that each group
1822 * of 6 fragments totals at least gso_size. However we don't need 1822 * of 6 fragments totals at least gso_size. However we don't need
1823 * to perform such validation on the first or last 6 since the first 1823 * to perform such validation on the last 6 since the last 6 cannot
1824 * 6 cannot inherit any data from a descriptor before them, and the 1824 * inherit any data from a descriptor after them.
1825 * last 6 cannot inherit any data from a descriptor after them.
1826 */ 1825 */
1827 nr_frags -= I40E_MAX_BUFFER_TXD - 1; 1826 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
1828 frag = &skb_shinfo(skb)->frags[0]; 1827 frag = &skb_shinfo(skb)->frags[0];
1829 1828
1830 /* Initialize size to the negative value of gso_size minus 1. We 1829 /* Initialize size to the negative value of gso_size minus 1. We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
1833 * descriptors for a single transmit as the header and previous 1832 * descriptors for a single transmit as the header and previous
1834 * fragment are already consuming 2 descriptors. 1833 * fragment are already consuming 2 descriptors.
1835 */ 1834 */
1836 sum = 1 - gso_size; 1835 sum = 1 - skb_shinfo(skb)->gso_size;
1837 1836
1838 /* Add size of frags 1 through 5 to create our initial sum */ 1837 /* Add size of frags 0 through 4 to create our initial sum */
1839 sum += skb_frag_size(++frag); 1838 sum += skb_frag_size(frag++);
1840 sum += skb_frag_size(++frag); 1839 sum += skb_frag_size(frag++);
1841 sum += skb_frag_size(++frag); 1840 sum += skb_frag_size(frag++);
1842 sum += skb_frag_size(++frag); 1841 sum += skb_frag_size(frag++);
1843 sum += skb_frag_size(++frag); 1842 sum += skb_frag_size(frag++);
1844 1843
1845 /* Walk through fragments adding latest fragment, testing it, and 1844 /* Walk through fragments adding latest fragment, testing it, and
1846 * then removing stale fragments from the sum. 1845 * then removing stale fragments from the sum.
1847 */ 1846 */
1848 stale = &skb_shinfo(skb)->frags[0]; 1847 stale = &skb_shinfo(skb)->frags[0];
1849 for (;;) { 1848 for (;;) {
1850 sum += skb_frag_size(++frag); 1849 sum += skb_frag_size(frag++);
1851 1850
1852 /* if sum is negative we failed to make sufficient progress */ 1851 /* if sum is negative we failed to make sufficient progress */
1853 if (sum < 0) 1852 if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
1857 if (!--nr_frags) 1856 if (!--nr_frags)
1858 break; 1857 break;
1859 1858
1860 sum -= skb_frag_size(++stale); 1859 sum -= skb_frag_size(stale++);
1861 } 1860 }
1862 1861
1863 return false; 1862 return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index c1dd8c5c9666..0429553fe887 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
395 **/ 395 **/
396static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) 396static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
397{ 397{
398 /* we can only support up to 8 data buffers for a single send */ 398 /* Both TSO and single send will work if count is less than 8 */
399 if (likely(count <= I40E_MAX_BUFFER_TXD)) 399 if (likely(count < I40E_MAX_BUFFER_TXD))
400 return false; 400 return false;
401 401
402 return __i40evf_chk_linearize(skb); 402 if (skb_is_gso(skb))
403 return __i40evf_chk_linearize(skb);
404
405 /* we can support up to 8 data buffers for a single send */
406 return count != I40E_MAX_BUFFER_TXD;
403} 407}
404#endif /* _I40E_TXRX_H_ */ 408#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f69584a9b47f..c761194bb323 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
337 case ETH_SS_STATS: 337 case ETH_SS_STATS:
338 return bitmap_iterator_count(&it) + 338 return bitmap_iterator_count(&it) +
339 (priv->tx_ring_num * 2) + 339 (priv->tx_ring_num * 2) +
340 (priv->rx_ring_num * 2); 340 (priv->rx_ring_num * 3);
341 case ETH_SS_TEST: 341 case ETH_SS_TEST:
342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags 342 return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; 343 & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
404 for (i = 0; i < priv->rx_ring_num; i++) { 404 for (i = 0; i < priv->rx_ring_num; i++) {
405 data[index++] = priv->rx_ring[i]->packets; 405 data[index++] = priv->rx_ring[i]->packets;
406 data[index++] = priv->rx_ring[i]->bytes; 406 data[index++] = priv->rx_ring[i]->bytes;
407 data[index++] = priv->rx_ring[i]->dropped;
407 } 408 }
408 spin_unlock_bh(&priv->stats_lock); 409 spin_unlock_bh(&priv->stats_lock);
409 410
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
477 "rx%d_packets", i); 478 "rx%d_packets", i);
478 sprintf(data + (index++) * ETH_GSTRING_LEN, 479 sprintf(data + (index++) * ETH_GSTRING_LEN,
479 "rx%d_bytes", i); 480 "rx%d_bytes", i);
481 sprintf(data + (index++) * ETH_GSTRING_LEN,
482 "rx%d_dropped", i);
480 } 483 }
481 break; 484 break;
482 case ETH_SS_PRIV_FLAGS: 485 case ETH_SS_PRIV_FLAGS:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 3904b5fc0b7c..20b6c2e678b8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
158 u64 in_mod = reset << 8 | port; 158 u64 in_mod = reset << 8 | port;
159 int err; 159 int err;
160 int i, counter_index; 160 int i, counter_index;
161 unsigned long sw_rx_dropped = 0;
161 162
162 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); 163 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
163 if (IS_ERR(mailbox)) 164 if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
180 for (i = 0; i < priv->rx_ring_num; i++) { 181 for (i = 0; i < priv->rx_ring_num; i++) {
181 stats->rx_packets += priv->rx_ring[i]->packets; 182 stats->rx_packets += priv->rx_ring[i]->packets;
182 stats->rx_bytes += priv->rx_ring[i]->bytes; 183 stats->rx_bytes += priv->rx_ring[i]->bytes;
184 sw_rx_dropped += priv->rx_ring[i]->dropped;
183 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; 185 priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
184 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; 186 priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
185 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; 187 priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
236 &mlx4_en_stats->MCAST_prio_1, 238 &mlx4_en_stats->MCAST_prio_1,
237 NUM_PRIORITIES); 239 NUM_PRIORITIES);
238 stats->collisions = 0; 240 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 241 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
242 sw_rx_dropped;
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 243 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = 0; 244 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 245 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 86bcfe510e4e..b723e3bcab39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
61 gfp_t gfp = _gfp; 61 gfp_t gfp = _gfp;
62 62
63 if (order) 63 if (order)
64 gfp |= __GFP_COMP | __GFP_NOWARN; 64 gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
65 page = alloc_pages(gfp, order); 65 page = alloc_pages(gfp, order);
66 if (likely(page)) 66 if (likely(page))
67 break; 67 break;
@@ -126,7 +126,9 @@ out:
126 dma_unmap_page(priv->ddev, page_alloc[i].dma, 126 dma_unmap_page(priv->ddev, page_alloc[i].dma,
127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE); 127 page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
128 page = page_alloc[i].page; 128 page = page_alloc[i].page;
129 set_page_count(page, 1); 129 /* Revert changes done by mlx4_alloc_pages */
130 page_ref_sub(page, page_alloc[i].page_size /
131 priv->frag_info[i].frag_stride - 1);
130 put_page(page); 132 put_page(page);
131 } 133 }
132 } 134 }
@@ -176,7 +178,9 @@ out:
176 dma_unmap_page(priv->ddev, page_alloc->dma, 178 dma_unmap_page(priv->ddev, page_alloc->dma,
177 page_alloc->page_size, PCI_DMA_FROMDEVICE); 179 page_alloc->page_size, PCI_DMA_FROMDEVICE);
178 page = page_alloc->page; 180 page = page_alloc->page;
179 set_page_count(page, 1); 181 /* Revert changes done by mlx4_alloc_pages */
182 page_ref_sub(page, page_alloc->page_size /
183 priv->frag_info[i].frag_stride - 1);
180 put_page(page); 184 put_page(page);
181 page_alloc->page = NULL; 185 page_alloc->page = NULL;
182 } 186 }
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
939 /* GRO not possible, complete processing here */ 943 /* GRO not possible, complete processing here */
940 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); 944 skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
941 if (!skb) { 945 if (!skb) {
942 priv->stats.rx_dropped++; 946 ring->dropped++;
943 goto next; 947 goto next;
944 } 948 }
945 949
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c0d7b7296236..a386f047c1af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
405 u32 packets = 0; 405 u32 packets = 0;
406 u32 bytes = 0; 406 u32 bytes = 0;
407 int factor = priv->cqe_factor; 407 int factor = priv->cqe_factor;
408 u64 timestamp = 0;
409 int done = 0; 408 int done = 0;
410 int budget = priv->tx_work_limit; 409 int budget = priv->tx_work_limit;
411 u32 last_nr_txbb; 410 u32 last_nr_txbb;
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
445 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; 444 new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
446 445
447 do { 446 do {
447 u64 timestamp = 0;
448
448 txbbs_skipped += last_nr_txbb; 449 txbbs_skipped += last_nr_txbb;
449 ring_index = (ring_index + last_nr_txbb) & size_mask; 450 ring_index = (ring_index + last_nr_txbb) & size_mask;
450 if (ring->tx_info[ring_index].ts_requested) 451
452 if (unlikely(ring->tx_info[ring_index].ts_requested))
451 timestamp = mlx4_en_get_cqe_ts(cqe); 453 timestamp = mlx4_en_get_cqe_ts(cqe);
452 454
453 /* free next descriptor */ 455 /* free next descriptor */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 358f7230da58..12c77a70abdb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
3172 return 0; 3172 return 0;
3173} 3173}
3174 3174
3175static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3176{
3177 struct pci_dev *pdev = dev->persist->pdev;
3178 int err = 0;
3179
3180 mutex_lock(&dev->persist->pci_status_mutex);
3181 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3182 err = pci_enable_device(pdev);
3183 if (!err)
3184 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3185 }
3186 mutex_unlock(&dev->persist->pci_status_mutex);
3187
3188 return err;
3189}
3190
3191static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3192{
3193 struct pci_dev *pdev = dev->persist->pdev;
3194
3195 mutex_lock(&dev->persist->pci_status_mutex);
3196 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3197 pci_disable_device(pdev);
3198 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3199 }
3200 mutex_unlock(&dev->persist->pci_status_mutex);
3201}
3202
3175static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3203static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3176 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3204 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3177 int reset_flow) 3205 int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
3582 3610
3583 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3611 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
3584 3612
3585 err = pci_enable_device(pdev); 3613 err = mlx4_pci_enable_device(&priv->dev);
3586 if (err) { 3614 if (err) {
3587 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3615 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
3588 return err; 3616 return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
3715 pci_release_regions(pdev); 3743 pci_release_regions(pdev);
3716 3744
3717err_disable_pdev: 3745err_disable_pdev:
3718 pci_disable_device(pdev); 3746 mlx4_pci_disable_device(&priv->dev);
3719 pci_set_drvdata(pdev, NULL); 3747 pci_set_drvdata(pdev, NULL);
3720 return err; 3748 return err;
3721} 3749}
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
3775 priv->pci_dev_data = id->driver_data; 3803 priv->pci_dev_data = id->driver_data;
3776 mutex_init(&dev->persist->device_state_mutex); 3804 mutex_init(&dev->persist->device_state_mutex);
3777 mutex_init(&dev->persist->interface_state_mutex); 3805 mutex_init(&dev->persist->interface_state_mutex);
3806 mutex_init(&dev->persist->pci_status_mutex);
3778 3807
3779 ret = devlink_register(devlink, &pdev->dev); 3808 ret = devlink_register(devlink, &pdev->dev);
3780 if (ret) 3809 if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
3923 } 3952 }
3924 3953
3925 pci_release_regions(pdev); 3954 pci_release_regions(pdev);
3926 pci_disable_device(pdev); 3955 mlx4_pci_disable_device(dev);
3927 devlink_unregister(devlink); 3956 devlink_unregister(devlink);
3928 kfree(dev->persist); 3957 kfree(dev->persist);
3929 devlink_free(devlink); 3958 devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
4042 if (state == pci_channel_io_perm_failure) 4071 if (state == pci_channel_io_perm_failure)
4043 return PCI_ERS_RESULT_DISCONNECT; 4072 return PCI_ERS_RESULT_DISCONNECT;
4044 4073
4045 pci_disable_device(pdev); 4074 mlx4_pci_disable_device(persist->dev);
4046 return PCI_ERS_RESULT_NEED_RESET; 4075 return PCI_ERS_RESULT_NEED_RESET;
4047} 4076}
4048 4077
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
4050{ 4079{
4051 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 4080 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4052 struct mlx4_dev *dev = persist->dev; 4081 struct mlx4_dev *dev = persist->dev;
4053 struct mlx4_priv *priv = mlx4_priv(dev); 4082 int err;
4054 int ret;
4055 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4056 int total_vfs;
4057 4083
4058 mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); 4084 mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
4059 ret = pci_enable_device(pdev); 4085 err = mlx4_pci_enable_device(dev);
4060 if (ret) { 4086 if (err) {
4061 mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); 4087 mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
4062 return PCI_ERS_RESULT_DISCONNECT; 4088 return PCI_ERS_RESULT_DISCONNECT;
4063 } 4089 }
4064 4090
4065 pci_set_master(pdev); 4091 pci_set_master(pdev);
4066 pci_restore_state(pdev); 4092 pci_restore_state(pdev);
4067 pci_save_state(pdev); 4093 pci_save_state(pdev);
4094 return PCI_ERS_RESULT_RECOVERED;
4095}
4068 4096
4097static void mlx4_pci_resume(struct pci_dev *pdev)
4098{
4099 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
4100 struct mlx4_dev *dev = persist->dev;
4101 struct mlx4_priv *priv = mlx4_priv(dev);
4102 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
4103 int total_vfs;
4104 int err;
4105
4106 mlx4_err(dev, "%s was called\n", __func__);
4069 total_vfs = dev->persist->num_vfs; 4107 total_vfs = dev->persist->num_vfs;
4070 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); 4108 memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
4071 4109
4072 mutex_lock(&persist->interface_state_mutex); 4110 mutex_lock(&persist->interface_state_mutex);
4073 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { 4111 if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
4074 ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, 4112 err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
4075 priv, 1); 4113 priv, 1);
4076 if (ret) { 4114 if (err) {
4077 mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", 4115 mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
4078 __func__, ret); 4116 __func__, err);
4079 goto end; 4117 goto end;
4080 } 4118 }
4081 4119
4082 ret = restore_current_port_types(dev, dev->persist-> 4120 err = restore_current_port_types(dev, dev->persist->
4083 curr_port_type, dev->persist-> 4121 curr_port_type, dev->persist->
4084 curr_port_poss_type); 4122 curr_port_poss_type);
4085 if (ret) 4123 if (err)
4086 mlx4_err(dev, "could not restore original port types (%d)\n", ret); 4124 mlx4_err(dev, "could not restore original port types (%d)\n", err);
4087 } 4125 }
4088end: 4126end:
4089 mutex_unlock(&persist->interface_state_mutex); 4127 mutex_unlock(&persist->interface_state_mutex);
4090 4128
4091 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
4092} 4129}
4093 4130
4094static void mlx4_shutdown(struct pci_dev *pdev) 4131static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
4105static const struct pci_error_handlers mlx4_err_handler = { 4142static const struct pci_error_handlers mlx4_err_handler = {
4106 .error_detected = mlx4_pci_err_detected, 4143 .error_detected = mlx4_pci_err_detected,
4107 .slot_reset = mlx4_pci_slot_reset, 4144 .slot_reset = mlx4_pci_slot_reset,
4145 .resume = mlx4_pci_resume,
4108}; 4146};
4109 4147
4110static struct pci_driver mlx4_driver = { 4148static struct pci_driver mlx4_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index ef9683101ead..c9d7fc5159f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; 586 struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
587 int init_port_ref[MLX4_MAX_PORTS + 1]; 587 int init_port_ref[MLX4_MAX_PORTS + 1];
588 u16 max_mtu[MLX4_MAX_PORTS + 1]; 588 u16 max_mtu[MLX4_MAX_PORTS + 1];
589 u8 pptx;
590 u8 pprx;
589 int disable_mcast_ref[MLX4_MAX_PORTS + 1]; 591 int disable_mcast_ref[MLX4_MAX_PORTS + 1];
590 struct mlx4_resource_tracker res_tracker; 592 struct mlx4_resource_tracker res_tracker;
591 struct workqueue_struct *comm_wq; 593 struct workqueue_struct *comm_wq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d12ab6a73344..63b1aeae2c03 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
323 unsigned long csum_ok; 323 unsigned long csum_ok;
324 unsigned long csum_none; 324 unsigned long csum_none;
325 unsigned long csum_complete; 325 unsigned long csum_complete;
326 unsigned long dropped;
326 int hwtstamp_rx_filter; 327 int hwtstamp_rx_filter;
327 cpumask_var_t affinity_mask; 328 cpumask_var_t affinity_mask;
328}; 329};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 211c65087997..087b23b320cb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
1317 } 1317 }
1318 1318
1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 1319 gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
1320 /* Slave cannot change Global Pause configuration */
1321 if (slave != mlx4_master_func_num(dev) &&
1322 ((gen_context->pptx != master->pptx) ||
1323 (gen_context->pprx != master->pprx))) {
1324 gen_context->pptx = master->pptx;
1325 gen_context->pprx = master->pprx;
1326 mlx4_warn(dev,
1327 "denying Global Pause change for slave:%d\n",
1328 slave);
1329 } else {
1330 master->pptx = gen_context->pptx;
1331 master->pprx = gen_context->pprx;
1332 }
1320 break; 1333 break;
1321 case MLX4_SET_PORT_GID_TABLE: 1334 case MLX4_SET_PORT_GID_TABLE:
1322 /* change to MULTIPLE entries: number of guest's gids 1335 /* change to MULTIPLE entries: number of guest's gids
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 879e6276c473..e80ce94b5dcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -609,7 +609,7 @@ enum mlx5e_link_mode {
609 MLX5E_100GBASE_KR4 = 22, 609 MLX5E_100GBASE_KR4 = 22,
610 MLX5E_100GBASE_LR4 = 23, 610 MLX5E_100GBASE_LR4 = 23,
611 MLX5E_100BASE_TX = 24, 611 MLX5E_100BASE_TX = 24,
612 MLX5E_100BASE_T = 25, 612 MLX5E_1000BASE_T = 25,
613 MLX5E_10GBASE_T = 26, 613 MLX5E_10GBASE_T = 26,
614 MLX5E_25GBASE_CR = 27, 614 MLX5E_25GBASE_CR = 27,
615 MLX5E_25GBASE_KR = 28, 615 MLX5E_25GBASE_KR = 28,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 68834b715f6c..3476ab844634 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -138,10 +138,10 @@ static const struct {
138 [MLX5E_100BASE_TX] = { 138 [MLX5E_100BASE_TX] = {
139 .speed = 100, 139 .speed = 100,
140 }, 140 },
141 [MLX5E_100BASE_T] = { 141 [MLX5E_1000BASE_T] = {
142 .supported = SUPPORTED_100baseT_Full, 142 .supported = SUPPORTED_1000baseT_Full,
143 .advertised = ADVERTISED_100baseT_Full, 143 .advertised = ADVERTISED_1000baseT_Full,
144 .speed = 100, 144 .speed = 1000,
145 }, 145 },
146 [MLX5E_10GBASE_T] = { 146 [MLX5E_10GBASE_T] = {
147 .supported = SUPPORTED_10000baseT_Full, 147 .supported = SUPPORTED_10000baseT_Full,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e0adb604f461..67d548b70e14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1404,24 +1404,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
1404 return 0; 1404 return 0;
1405} 1405}
1406 1406
1407static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1407static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
1408{ 1408{
1409 struct mlx5e_priv *priv = netdev_priv(netdev);
1410 struct mlx5_core_dev *mdev = priv->mdev; 1409 struct mlx5_core_dev *mdev = priv->mdev;
1411 int hw_mtu; 1410 u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
1412 int err; 1411 int err;
1413 1412
1414 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); 1413 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
1415 if (err) 1414 if (err)
1416 return err; 1415 return err;
1417 1416
1418 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 1417 /* Update vport context MTU */
1418 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
1419 return 0;
1420}
1419 1421
1420 if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) 1422static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
1421 netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", 1423{
1422 __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); 1424 struct mlx5_core_dev *mdev = priv->mdev;
1425 u16 hw_mtu = 0;
1426 int err;
1423 1427
1424 netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); 1428 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
1429 if (err || !hw_mtu) /* fallback to port oper mtu */
1430 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
1431
1432 *mtu = MLX5E_HW2SW_MTU(hw_mtu);
1433}
1434
1435static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1436{
1437 struct mlx5e_priv *priv = netdev_priv(netdev);
1438 u16 mtu;
1439 int err;
1440
1441 err = mlx5e_set_mtu(priv, netdev->mtu);
1442 if (err)
1443 return err;
1444
1445 mlx5e_query_mtu(priv, &mtu);
1446 if (mtu != netdev->mtu)
1447 netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
1448 __func__, mtu, netdev->mtu);
1449
1450 netdev->mtu = mtu;
1425 return 0; 1451 return 0;
1426} 1452}
1427 1453
@@ -1999,22 +2025,27 @@ static int mlx5e_set_features(struct net_device *netdev,
1999 return err; 2025 return err;
2000} 2026}
2001 2027
2028#define MXL5_HW_MIN_MTU 64
2029#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
2030
2002static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) 2031static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
2003{ 2032{
2004 struct mlx5e_priv *priv = netdev_priv(netdev); 2033 struct mlx5e_priv *priv = netdev_priv(netdev);
2005 struct mlx5_core_dev *mdev = priv->mdev; 2034 struct mlx5_core_dev *mdev = priv->mdev;
2006 bool was_opened; 2035 bool was_opened;
2007 int max_mtu; 2036 u16 max_mtu;
2037 u16 min_mtu;
2008 int err = 0; 2038 int err = 0;
2009 2039
2010 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 2040 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
2011 2041
2012 max_mtu = MLX5E_HW2SW_MTU(max_mtu); 2042 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
2043 min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
2013 2044
2014 if (new_mtu > max_mtu) { 2045 if (new_mtu > max_mtu || new_mtu < min_mtu) {
2015 netdev_err(netdev, 2046 netdev_err(netdev,
2016 "%s: Bad MTU (%d) > (%d) Max\n", 2047 "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
2017 __func__, new_mtu, max_mtu); 2048 __func__, new_mtu, min_mtu, max_mtu);
2018 return -EINVAL; 2049 return -EINVAL;
2019 } 2050 }
2020 2051
@@ -2602,7 +2633,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
2602 schedule_work(&priv->set_rx_mode_work); 2633 schedule_work(&priv->set_rx_mode_work);
2603 mlx5e_disable_async_events(priv); 2634 mlx5e_disable_async_events(priv);
2604 flush_scheduled_work(); 2635 flush_scheduled_work();
2605 unregister_netdev(netdev); 2636 if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
2637 netif_device_detach(netdev);
2638 mutex_lock(&priv->state_lock);
2639 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
2640 mlx5e_close_locked(netdev);
2641 mutex_unlock(&priv->state_lock);
2642 } else {
2643 unregister_netdev(netdev);
2644 }
2645
2606 mlx5e_tc_cleanup(priv); 2646 mlx5e_tc_cleanup(priv);
2607 mlx5e_vxlan_cleanup(priv); 2647 mlx5e_vxlan_cleanup(priv);
2608 mlx5e_destroy_flow_tables(priv); 2648 mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2655,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
2615 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 2655 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
2616 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 2656 mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
2617 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 2657 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
2618 free_netdev(netdev); 2658
2659 if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
2660 free_netdev(netdev);
2619} 2661}
2620 2662
2621static void *mlx5e_get_netdev(void *vpriv) 2663static void *mlx5e_get_netdev(void *vpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5121be4675d1..89cce97d46c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1065,33 +1065,6 @@ unlock_fg:
1065 return rule; 1065 return rule;
1066} 1066}
1067 1067
1068static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
1069 u8 match_criteria_enable,
1070 u32 *match_criteria,
1071 u32 *match_value,
1072 u8 action,
1073 u32 flow_tag,
1074 struct mlx5_flow_destination *dest)
1075{
1076 struct mlx5_flow_rule *rule;
1077 struct mlx5_flow_group *g;
1078
1079 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1080 if (IS_ERR(g))
1081 return (void *)g;
1082
1083 rule = add_rule_fg(g, match_value,
1084 action, flow_tag, dest);
1085 if (IS_ERR(rule)) {
1086 /* Remove assumes refcount > 0 and autogroup creates a group
1087 * with a refcount = 0.
1088 */
1089 tree_get_node(&g->node);
1090 tree_remove_node(&g->node);
1091 }
1092 return rule;
1093}
1094
1095static struct mlx5_flow_rule * 1068static struct mlx5_flow_rule *
1096_mlx5_add_flow_rule(struct mlx5_flow_table *ft, 1069_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1097 u8 match_criteria_enable, 1070 u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
1119 goto unlock; 1092 goto unlock;
1120 } 1093 }
1121 1094
1122 rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, 1095 g = create_autogroup(ft, match_criteria_enable, match_criteria);
1123 match_value, action, flow_tag, dest); 1096 if (IS_ERR(g)) {
1097 rule = (void *)g;
1098 goto unlock;
1099 }
1100
1101 rule = add_rule_fg(g, match_value,
1102 action, flow_tag, dest);
1103 if (IS_ERR(rule)) {
1104 /* Remove assumes refcount > 0 and autogroup creates a group
1105 * with a refcount = 0.
1106 */
1107 unlock_ref_node(&ft->node);
1108 tree_get_node(&g->node);
1109 tree_remove_node(&g->node);
1110 return rule;
1111 }
1124unlock: 1112unlock:
1125 unlock_ref_node(&ft->node); 1113 unlock_ref_node(&ft->node);
1126 return rule; 1114 return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
1288{ 1276{
1289 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; 1277 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
1290 int prio; 1278 int prio;
1291 static struct fs_prio *fs_prio; 1279 struct fs_prio *fs_prio;
1292 struct mlx5_flow_namespace *ns; 1280 struct mlx5_flow_namespace *ns;
1293 1281
1294 if (!root_ns) 1282 if (!root_ns)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f3b2fae4991..6892746fd10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
966 int err; 966 int err;
967 967
968 mutex_lock(&dev->intf_state_mutex); 968 mutex_lock(&dev->intf_state_mutex);
969 if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { 969 if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", 970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
971 __func__); 971 __func__);
972 goto out; 972 goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1133 if (err) 1133 if (err)
1134 pr_info("failed request module on %s\n", MLX5_IB_MOD); 1134 pr_info("failed request module on %s\n", MLX5_IB_MOD);
1135 1135
1136 dev->interface_state = MLX5_INTERFACE_STATE_UP; 1136 clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1137 set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1137out: 1138out:
1138 mutex_unlock(&dev->intf_state_mutex); 1139 mutex_unlock(&dev->intf_state_mutex);
1139 1140
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1207 } 1208 }
1208 1209
1209 mutex_lock(&dev->intf_state_mutex); 1210 mutex_lock(&dev->intf_state_mutex);
1210 if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { 1211 if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
1211 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", 1212 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
1212 __func__); 1213 __func__);
1213 goto out; 1214 goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
1241 mlx5_cmd_cleanup(dev); 1242 mlx5_cmd_cleanup(dev);
1242 1243
1243out: 1244out:
1244 dev->interface_state = MLX5_INTERFACE_STATE_DOWN; 1245 clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
1246 set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
1245 mutex_unlock(&dev->intf_state_mutex); 1247 mutex_unlock(&dev->intf_state_mutex);
1246 return err; 1248 return err;
1247} 1249}
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
1452 .resume = mlx5_pci_resume 1454 .resume = mlx5_pci_resume
1453}; 1455};
1454 1456
1457static void shutdown(struct pci_dev *pdev)
1458{
1459 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1460 struct mlx5_priv *priv = &dev->priv;
1461
1462 dev_info(&pdev->dev, "Shutdown was called\n");
1463 /* Notify mlx5 clients that the kernel is being shut down */
1464 set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
1465 mlx5_unload_one(dev, priv);
1466 mlx5_pci_disable_device(dev);
1467}
1468
1455static const struct pci_device_id mlx5_core_pci_table[] = { 1469static const struct pci_device_id mlx5_core_pci_table[] = {
1456 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ 1470 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
1457 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ 1471 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
1459 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ 1473 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
1460 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ 1474 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
1461 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ 1475 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
1476 { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
1477 { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
1462 { 0, } 1478 { 0, }
1463}; 1479};
1464 1480
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
1469 .id_table = mlx5_core_pci_table, 1485 .id_table = mlx5_core_pci_table,
1470 .probe = init_one, 1486 .probe = init_one,
1471 .remove = remove_one, 1487 .remove = remove_one,
1488 .shutdown = shutdown,
1472 .err_handler = &mlx5_err_handler, 1489 .err_handler = &mlx5_err_handler,
1473 .sriov_configure = mlx5_core_sriov_configure, 1490 .sriov_configure = mlx5_core_sriov_configure,
1474}; 1491};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ae378c575deb..53cc1e2c693b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
247} 247}
248EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); 248EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
249 249
250static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, 250static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
251 int *max_mtu, int *oper_mtu, u8 port) 251 u16 *max_mtu, u16 *oper_mtu, u8 port)
252{ 252{
253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; 254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); 268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
269} 269}
270 270
271int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) 271int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
272{ 272{
273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; 274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
283} 283}
284EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); 284EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
285 285
286void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, 286void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
287 u8 port) 287 u8 port)
288{ 288{
289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); 289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
290} 290}
291EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); 291EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
292 292
293void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 293void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
294 u8 port) 294 u8 port)
295{ 295{
296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); 296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bd518405859e..b69dadcfb897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
196} 196}
197EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); 197EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
198 198
199int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
200{
201 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
202 u32 *out;
203 int err;
204
205 out = mlx5_vzalloc(outlen);
206 if (!out)
207 return -ENOMEM;
208
209 err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
210 if (!err)
211 *mtu = MLX5_GET(query_nic_vport_context_out, out,
212 nic_vport_context.mtu);
213
214 kvfree(out);
215 return err;
216}
217EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
218
219int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
220{
221 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
222 void *in;
223 int err;
224
225 in = mlx5_vzalloc(inlen);
226 if (!in)
227 return -ENOMEM;
228
229 MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
230 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
231
232 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
233
234 kvfree(in);
235 return err;
236}
237EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
238
199int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, 239int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
200 u32 vport, 240 u32 vport,
201 enum mlx5_list_type list_type, 241 enum mlx5_list_type list_type,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 518af329502d..7869465435fa 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
750 return false; 750 return false;
751} 751}
752 752
753static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
754{
755 qed_chain_consume(&rxq->rx_bd_ring);
756 rxq->sw_rx_cons++;
757}
758
753/* This function reuses the buffer(from an offset) from 759/* This function reuses the buffer(from an offset) from
754 * consumer index to producer index in the bd ring 760 * consumer index to producer index in the bd ring
755 */ 761 */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
773 curr_cons->data = NULL; 779 curr_cons->data = NULL;
774} 780}
775 781
782/* In case of allocation failures reuse buffers
783 * from consumer index to produce buffers for firmware
784 */
785static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
786 struct qede_dev *edev, u8 count)
787{
788 struct sw_rx_data *curr_cons;
789
790 for (; count > 0; count--) {
791 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
792 qede_reuse_page(edev, rxq, curr_cons);
793 qede_rx_bd_ring_consume(rxq);
794 }
795}
796
776static inline int qede_realloc_rx_buffer(struct qede_dev *edev, 797static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
777 struct qede_rx_queue *rxq, 798 struct qede_rx_queue *rxq,
778 struct sw_rx_data *curr_cons) 799 struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
781 curr_cons->page_offset += rxq->rx_buf_seg_size; 802 curr_cons->page_offset += rxq->rx_buf_seg_size;
782 803
783 if (curr_cons->page_offset == PAGE_SIZE) { 804 if (curr_cons->page_offset == PAGE_SIZE) {
784 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) 805 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
806 /* Since we failed to allocate new buffer
807 * current buffer can be used again.
808 */
809 curr_cons->page_offset -= rxq->rx_buf_seg_size;
810
785 return -ENOMEM; 811 return -ENOMEM;
812 }
786 813
787 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, 814 dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
788 PAGE_SIZE, DMA_FROM_DEVICE); 815 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
901 len_on_bd); 928 len_on_bd);
902 929
903 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { 930 if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
904 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 931 /* Incr page ref count to reuse on allocation failure
932 * so that it doesn't get freed while freeing SKB.
933 */
934 atomic_inc(&current_bd->data->_count);
905 goto out; 935 goto out;
906 } 936 }
907 937
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
915 return 0; 945 return 0;
916 946
917out: 947out:
948 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
949 qede_recycle_rx_bd_ring(rxq, edev, 1);
918 return -ENOMEM; 950 return -ENOMEM;
919} 951}
920 952
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
966 tpa_info->skb = netdev_alloc_skb(edev->ndev, 998 tpa_info->skb = netdev_alloc_skb(edev->ndev,
967 le16_to_cpu(cqe->len_on_first_bd)); 999 le16_to_cpu(cqe->len_on_first_bd));
968 if (unlikely(!tpa_info->skb)) { 1000 if (unlikely(!tpa_info->skb)) {
1001 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
969 tpa_info->agg_state = QEDE_AGG_STATE_ERROR; 1002 tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
970 return; 1003 goto cons_buf;
971 } 1004 }
972 1005
973 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); 1006 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
990 /* This is needed in order to enable forwarding support */ 1023 /* This is needed in order to enable forwarding support */
991 qede_set_gro_params(edev, tpa_info->skb, cqe); 1024 qede_set_gro_params(edev, tpa_info->skb, cqe);
992 1025
1026cons_buf: /* We still need to handle bd_len_list to consume buffers */
993 if (likely(cqe->ext_bd_len_list[0])) 1027 if (likely(cqe->ext_bd_len_list[0]))
994 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, 1028 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
995 le16_to_cpu(cqe->ext_bd_len_list[0])); 1029 le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
1007 const struct iphdr *iph = ip_hdr(skb); 1041 const struct iphdr *iph = ip_hdr(skb);
1008 struct tcphdr *th; 1042 struct tcphdr *th;
1009 1043
1010 skb_set_network_header(skb, 0);
1011 skb_set_transport_header(skb, sizeof(struct iphdr)); 1044 skb_set_transport_header(skb, sizeof(struct iphdr));
1012 th = tcp_hdr(skb); 1045 th = tcp_hdr(skb);
1013 1046
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
1022 struct ipv6hdr *iph = ipv6_hdr(skb); 1055 struct ipv6hdr *iph = ipv6_hdr(skb);
1023 struct tcphdr *th; 1056 struct tcphdr *th;
1024 1057
1025 skb_set_network_header(skb, 0);
1026 skb_set_transport_header(skb, sizeof(struct ipv6hdr)); 1058 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1027 th = tcp_hdr(skb); 1059 th = tcp_hdr(skb);
1028 1060
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
1037 struct sk_buff *skb, 1069 struct sk_buff *skb,
1038 u16 vlan_tag) 1070 u16 vlan_tag)
1039{ 1071{
1072 /* FW can send a single MTU sized packet from gro flow
1073 * due to aggregation timeout/last segment etc. which
1074 * is not expected to be a gro packet. If a skb has zero
1075 * frags then simply push it in the stack as non gso skb.
1076 */
1077 if (unlikely(!skb->data_len)) {
1078 skb_shinfo(skb)->gso_type = 0;
1079 skb_shinfo(skb)->gso_size = 0;
1080 goto send_skb;
1081 }
1082
1040#ifdef CONFIG_INET 1083#ifdef CONFIG_INET
1041 if (skb_shinfo(skb)->gso_size) { 1084 if (skb_shinfo(skb)->gso_size) {
1085 skb_set_network_header(skb, 0);
1086
1042 switch (skb->protocol) { 1087 switch (skb->protocol) {
1043 case htons(ETH_P_IP): 1088 case htons(ETH_P_IP):
1044 qede_gro_ip_csum(skb); 1089 qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
1053 } 1098 }
1054 } 1099 }
1055#endif 1100#endif
1101
1102send_skb:
1056 skb_record_rx_queue(skb, fp->rss_id); 1103 skb_record_rx_queue(skb, fp->rss_id);
1057 qede_skb_receive(edev, fp, skb, vlan_tag); 1104 qede_skb_receive(edev, fp, skb, vlan_tag);
1058} 1105}
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1244 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", 1291 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
1245 sw_comp_cons, parse_flag); 1292 sw_comp_cons, parse_flag);
1246 rxq->rx_hw_errors++; 1293 rxq->rx_hw_errors++;
1247 qede_reuse_page(edev, rxq, sw_rx_data); 1294 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1248 goto next_rx; 1295 goto next_cqe;
1249 } 1296 }
1250 1297
1251 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); 1298 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1252 if (unlikely(!skb)) { 1299 if (unlikely(!skb)) {
1253 DP_NOTICE(edev, 1300 DP_NOTICE(edev,
1254 "Build_skb failed, dropping incoming packet\n"); 1301 "Build_skb failed, dropping incoming packet\n");
1255 qede_reuse_page(edev, rxq, sw_rx_data); 1302 qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
1256 rxq->rx_alloc_errors++; 1303 rxq->rx_alloc_errors++;
1257 goto next_rx; 1304 goto next_cqe;
1258 } 1305 }
1259 1306
1260 /* Copy data into SKB */ 1307 /* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1288 if (unlikely(qede_realloc_rx_buffer(edev, rxq, 1335 if (unlikely(qede_realloc_rx_buffer(edev, rxq,
1289 sw_rx_data))) { 1336 sw_rx_data))) {
1290 DP_ERR(edev, "Failed to allocate rx buffer\n"); 1337 DP_ERR(edev, "Failed to allocate rx buffer\n");
1338 /* Incr page ref count to reuse on allocation
1339 * failure so that it doesn't get freed while
1340 * freeing SKB.
1341 */
1342
1343 atomic_inc(&sw_rx_data->data->_count);
1291 rxq->rx_alloc_errors++; 1344 rxq->rx_alloc_errors++;
1345 qede_recycle_rx_bd_ring(rxq, edev,
1346 fp_cqe->bd_num);
1347 dev_kfree_skb_any(skb);
1292 goto next_cqe; 1348 goto next_cqe;
1293 } 1349 }
1294 } 1350 }
1295 1351
1352 qede_rx_bd_ring_consume(rxq);
1353
1296 if (fp_cqe->bd_num != 1) { 1354 if (fp_cqe->bd_num != 1) {
1297 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); 1355 u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
1298 u8 num_frags; 1356 u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1303 num_frags--) { 1361 num_frags--) {
1304 u16 cur_size = pkt_len > rxq->rx_buf_size ? 1362 u16 cur_size = pkt_len > rxq->rx_buf_size ?
1305 rxq->rx_buf_size : pkt_len; 1363 rxq->rx_buf_size : pkt_len;
1364 if (unlikely(!cur_size)) {
1365 DP_ERR(edev,
1366 "Still got %d BDs for mapping jumbo, but length became 0\n",
1367 num_frags);
1368 qede_recycle_rx_bd_ring(rxq, edev,
1369 num_frags);
1370 dev_kfree_skb_any(skb);
1371 goto next_cqe;
1372 }
1306 1373
1307 WARN_ONCE(!cur_size, 1374 if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
1308 "Still got %d BDs for mapping jumbo, but length became 0\n", 1375 qede_recycle_rx_bd_ring(rxq, edev,
1309 num_frags); 1376 num_frags);
1310 1377 dev_kfree_skb_any(skb);
1311 if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
1312 goto next_cqe; 1378 goto next_cqe;
1379 }
1313 1380
1314 rxq->sw_rx_cons++;
1315 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; 1381 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1316 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; 1382 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1317 qed_chain_consume(&rxq->rx_bd_ring); 1383 qede_rx_bd_ring_consume(rxq);
1384
1318 dma_unmap_page(&edev->pdev->dev, 1385 dma_unmap_page(&edev->pdev->dev,
1319 sw_rx_data->mapping, 1386 sw_rx_data->mapping,
1320 PAGE_SIZE, DMA_FROM_DEVICE); 1387 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1330 pkt_len -= cur_size; 1397 pkt_len -= cur_size;
1331 } 1398 }
1332 1399
1333 if (pkt_len) 1400 if (unlikely(pkt_len))
1334 DP_ERR(edev, 1401 DP_ERR(edev,
1335 "Mapped all BDs of jumbo, but still have %d bytes\n", 1402 "Mapped all BDs of jumbo, but still have %d bytes\n",
1336 pkt_len); 1403 pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
1349 skb_record_rx_queue(skb, fp->rss_id); 1416 skb_record_rx_queue(skb, fp->rss_id);
1350 1417
1351 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); 1418 qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
1352
1353 qed_chain_consume(&rxq->rx_bd_ring);
1354next_rx:
1355 rxq->sw_rx_cons++;
1356next_rx_only: 1419next_rx_only:
1357 rx_pkt++; 1420 rx_pkt++;
1358 1421
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
2257 struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; 2320 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
2258 struct sw_rx_data *replace_buf = &tpa_info->replace_buf; 2321 struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
2259 2322
2260 if (replace_buf) { 2323 if (replace_buf->data) {
2261 dma_unmap_page(&edev->pdev->dev, 2324 dma_unmap_page(&edev->pdev->dev,
2262 dma_unmap_addr(replace_buf, mapping), 2325 dma_unmap_addr(replace_buf, mapping),
2263 PAGE_SIZE, DMA_FROM_DEVICE); 2326 PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
2377static int qede_alloc_mem_rxq(struct qede_dev *edev, 2440static int qede_alloc_mem_rxq(struct qede_dev *edev,
2378 struct qede_rx_queue *rxq) 2441 struct qede_rx_queue *rxq)
2379{ 2442{
2380 int i, rc, size, num_allocated; 2443 int i, rc, size;
2381 2444
2382 rxq->num_rx_buffers = edev->q_num_rx_buffers; 2445 rxq->num_rx_buffers = edev->q_num_rx_buffers;
2383 2446
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2394 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); 2457 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
2395 if (!rxq->sw_rx_ring) { 2458 if (!rxq->sw_rx_ring) {
2396 DP_ERR(edev, "Rx buffers ring allocation failed\n"); 2459 DP_ERR(edev, "Rx buffers ring allocation failed\n");
2460 rc = -ENOMEM;
2397 goto err; 2461 goto err;
2398 } 2462 }
2399 2463
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
2421 /* Allocate buffers for the Rx ring */ 2485 /* Allocate buffers for the Rx ring */
2422 for (i = 0; i < rxq->num_rx_buffers; i++) { 2486 for (i = 0; i < rxq->num_rx_buffers; i++) {
2423 rc = qede_alloc_rx_buffer(edev, rxq); 2487 rc = qede_alloc_rx_buffer(edev, rxq);
2424 if (rc) 2488 if (rc) {
2425 break; 2489 DP_ERR(edev,
2426 } 2490 "Rx buffers allocation failed at index %d\n", i);
2427 num_allocated = i; 2491 goto err;
2428 if (!num_allocated) { 2492 }
2429 DP_ERR(edev, "Rx buffers allocation failed\n");
2430 goto err;
2431 } else if (num_allocated < rxq->num_rx_buffers) {
2432 DP_NOTICE(edev,
2433 "Allocated less buffers than desired (%d allocated)\n",
2434 num_allocated);
2435 } 2493 }
2436 2494
2437 qede_alloc_sge_mem(edev, rxq); 2495 rc = qede_alloc_sge_mem(edev, rxq);
2438
2439 return 0;
2440
2441err: 2496err:
2442 qede_free_mem_rxq(edev, rxq); 2497 return rc;
2443 return -ENOMEM;
2444} 2498}
2445 2499
2446static void qede_free_mem_txq(struct qede_dev *edev, 2500static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
2523 } 2577 }
2524 2578
2525 return 0; 2579 return 0;
2526
2527err: 2580err:
2528 qede_free_mem_fp(edev, fp); 2581 return rc;
2529 return -ENOMEM;
2530} 2582}
2531 2583
2532static void qede_free_mem_load(struct qede_dev *edev) 2584static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
2549 struct qede_fastpath *fp = &edev->fp_array[rss_id]; 2601 struct qede_fastpath *fp = &edev->fp_array[rss_id];
2550 2602
2551 rc = qede_alloc_mem_fp(edev, fp); 2603 rc = qede_alloc_mem_fp(edev, fp);
2552 if (rc) 2604 if (rc) {
2553 break;
2554 }
2555
2556 if (rss_id != QEDE_RSS_CNT(edev)) {
2557 /* Failed allocating memory for all the queues */
2558 if (!rss_id) {
2559 DP_ERR(edev, 2605 DP_ERR(edev,
2560 "Failed to allocate memory for the leading queue\n"); 2606 "Failed to allocate memory for fastpath - rss id = %d\n",
2561 rc = -ENOMEM; 2607 rss_id);
2562 } else { 2608 qede_free_mem_load(edev);
2563 DP_NOTICE(edev, 2609 return rc;
2564 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
2565 QEDE_RSS_CNT(edev), rss_id);
2566 } 2610 }
2567 edev->num_rss = rss_id;
2568 } 2611 }
2569 2612
2570 return 0; 2613 return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 55007f1e6bbc..caf6ddb7ea76 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
37 37
38#define _QLCNIC_LINUX_MAJOR 5 38#define _QLCNIC_LINUX_MAJOR 5
39#define _QLCNIC_LINUX_MINOR 3 39#define _QLCNIC_LINUX_MINOR 3
40#define _QLCNIC_LINUX_SUBVERSION 63 40#define _QLCNIC_LINUX_SUBVERSION 64
41#define QLCNIC_LINUX_VERSIONID "5.3.63" 41#define QLCNIC_LINUX_VERSIONID "5.3.64"
42#define QLCNIC_DRV_IDC_VER 0x01 42#define QLCNIC_DRV_IDC_VER 0x01
43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 43#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 087e14a3fba7..9e2a0bd8f5a8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
1691 rate = clk_get_rate(clk); 1691 rate = clk_get_rate(clk);
1692 clk_put(clk); 1692 clk_put(clk);
1693 1693
1694 if (!rate)
1695 return -EINVAL;
1696
1694 inc = 1000000000ULL << 20; 1697 inc = 1000000000ULL << 20;
1695 do_div(inc, rate); 1698 do_div(inc, rate);
1696 1699
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 004e2d7560fd..ceea74cc2229 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
2194 __func__); 2194 __func__);
2195 return ret; 2195 return ret;
2196 } 2196 }
2197 ret = sh_eth_dev_init(ndev, false); 2197 ret = sh_eth_dev_init(ndev, true);
2198 if (ret < 0) { 2198 if (ret < 0) {
2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", 2199 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2200 __func__); 2200 __func__);
2201 return ret; 2201 return ret;
2202 } 2202 }
2203 2203
2204 mdp->irq_enabled = true;
2205 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
2206 /* Setting the Rx mode will start the Rx process. */
2207 sh_eth_write(ndev, EDRRR_R, EDRRR);
2208 netif_device_attach(ndev); 2204 netif_device_attach(ndev);
2209 } 2205 }
2210 2206
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index f0d797ab74d8..afb90d129cb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -34,6 +34,9 @@
34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 34#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 35#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
36 36
37#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
38#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
39
37#define EMAC_SPLITTER_CTRL_REG 0x0 40#define EMAC_SPLITTER_CTRL_REG 0x0
38#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 41#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
39#define EMAC_SPLITTER_CTRL_SPEED_10 0x2 42#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
@@ -46,7 +49,6 @@ struct socfpga_dwmac {
46 u32 reg_shift; 49 u32 reg_shift;
47 struct device *dev; 50 struct device *dev;
48 struct regmap *sys_mgr_base_addr; 51 struct regmap *sys_mgr_base_addr;
49 struct reset_control *stmmac_rst;
50 void __iomem *splitter_base; 52 void __iomem *splitter_base;
51 bool f2h_ptp_ref_clk; 53 bool f2h_ptp_ref_clk;
52}; 54};
@@ -89,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
89 struct device_node *np_splitter; 91 struct device_node *np_splitter;
90 struct resource res_splitter; 92 struct resource res_splitter;
91 93
92 dwmac->stmmac_rst = devm_reset_control_get(dev,
93 STMMAC_RESOURCE_NAME);
94 if (IS_ERR(dwmac->stmmac_rst)) {
95 dev_info(dev, "Could not get reset control!\n");
96 if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
97 return -EPROBE_DEFER;
98 dwmac->stmmac_rst = NULL;
99 }
100
101 dwmac->interface = of_get_phy_mode(np); 94 dwmac->interface = of_get_phy_mode(np);
102 95
103 sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); 96 sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -148,7 +141,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
148 int phymode = dwmac->interface; 141 int phymode = dwmac->interface;
149 u32 reg_offset = dwmac->reg_offset; 142 u32 reg_offset = dwmac->reg_offset;
150 u32 reg_shift = dwmac->reg_shift; 143 u32 reg_shift = dwmac->reg_shift;
151 u32 ctrl, val; 144 u32 ctrl, val, module;
152 145
153 switch (phymode) { 146 switch (phymode) {
154 case PHY_INTERFACE_MODE_RGMII: 147 case PHY_INTERFACE_MODE_RGMII:
@@ -175,39 +168,39 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
175 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); 168 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
176 ctrl |= val << reg_shift; 169 ctrl |= val << reg_shift;
177 170
178 if (dwmac->f2h_ptp_ref_clk) 171 if (dwmac->f2h_ptp_ref_clk) {
179 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); 172 ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
180 else 173 regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
174 &module);
175 module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
176 regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
177 module);
178 } else {
181 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); 179 ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
180 }
182 181
183 regmap_write(sys_mgr_base_addr, reg_offset, ctrl); 182 regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
184 return 0;
185}
186
187static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
188{
189 struct socfpga_dwmac *dwmac = priv;
190 183
191 /* On socfpga platform exit, assert and hold reset to the 184 return 0;
192 * enet controller - the default state after a hard reset.
193 */
194 if (dwmac->stmmac_rst)
195 reset_control_assert(dwmac->stmmac_rst);
196} 185}
197 186
198static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) 187static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
199{ 188{
200 struct socfpga_dwmac *dwmac = priv; 189 struct socfpga_dwmac *dwmac = priv;
201 struct net_device *ndev = platform_get_drvdata(pdev); 190 struct net_device *ndev = platform_get_drvdata(pdev);
202 struct stmmac_priv *stpriv = NULL; 191 struct stmmac_priv *stpriv = NULL;
203 int ret = 0; 192 int ret = 0;
204 193
205 if (ndev) 194 if (!ndev)
206 stpriv = netdev_priv(ndev); 195 return -EINVAL;
196
197 stpriv = netdev_priv(ndev);
198 if (!stpriv)
199 return -EINVAL;
207 200
208 /* Assert reset to the enet controller before changing the phy mode */ 201 /* Assert reset to the enet controller before changing the phy mode */
209 if (dwmac->stmmac_rst) 202 if (stpriv->stmmac_rst)
210 reset_control_assert(dwmac->stmmac_rst); 203 reset_control_assert(stpriv->stmmac_rst);
211 204
212 /* Setup the phy mode in the system manager registers according to 205 /* Setup the phy mode in the system manager registers according to
213 * devicetree configuration 206 * devicetree configuration
@@ -217,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
217 /* Deassert reset for the phy configuration to be sampled by 210 /* Deassert reset for the phy configuration to be sampled by
218 * the enet controller, and operation to start in requested mode 211 * the enet controller, and operation to start in requested mode
219 */ 212 */
220 if (dwmac->stmmac_rst) 213 if (stpriv->stmmac_rst)
221 reset_control_deassert(dwmac->stmmac_rst); 214 reset_control_deassert(stpriv->stmmac_rst);
222 215
223 /* Before the enet controller is suspended, the phy is suspended. 216 /* Before the enet controller is suspended, the phy is suspended.
224 * This causes the phy clock to be gated. The enet controller is 217 * This causes the phy clock to be gated. The enet controller is
@@ -235,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
235 * control register 0, and can be modified by the phy driver 228 * control register 0, and can be modified by the phy driver
236 * framework. 229 * framework.
237 */ 230 */
238 if (stpriv && stpriv->phydev) 231 if (stpriv->phydev)
239 phy_resume(stpriv->phydev); 232 phy_resume(stpriv->phydev);
240 233
241 return ret; 234 return ret;
@@ -275,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
275 268
276 plat_dat->bsp_priv = dwmac; 269 plat_dat->bsp_priv = dwmac;
277 plat_dat->init = socfpga_dwmac_init; 270 plat_dat->init = socfpga_dwmac_init;
278 plat_dat->exit = socfpga_dwmac_exit;
279 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; 271 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
280 272
281 ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); 273 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
282 if (ret) 274 if (!ret)
283 return ret; 275 ret = socfpga_dwmac_init(pdev, dwmac);
284 276
285 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 277 return ret;
286} 278}
287 279
288static const struct of_device_id socfpga_dwmac_match[] = { 280static const struct of_device_id socfpga_dwmac_match[] = {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 42fdfd4d9d4f..bbb77cd8ad67 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1251 int i, ret; 1251 int i, ret;
1252 u32 reg; 1252 u32 reg;
1253 1253
1254 pm_runtime_get_sync(&priv->pdev->dev);
1255
1254 if (!cpsw_common_res_usage_state(priv)) 1256 if (!cpsw_common_res_usage_state(priv))
1255 cpsw_intr_disable(priv); 1257 cpsw_intr_disable(priv);
1256 netif_carrier_off(ndev); 1258 netif_carrier_off(ndev);
1257 1259
1258 pm_runtime_get_sync(&priv->pdev->dev);
1259
1260 reg = priv->version; 1260 reg = priv->version;
1261 1261
1262 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 1262 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 5d9abedd6b75..58d58f002559 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
1878 pdata->hw_ram_addr = auxdata->hw_ram_addr; 1878 pdata->hw_ram_addr = auxdata->hw_ram_addr;
1879 } 1879 }
1880 1880
1881 pdev->dev.platform_data = pdata;
1882
1883 return pdata; 1881 return pdata;
1884} 1882}
1885 1883
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
2101 cpdma_ctlr_destroy(priv->dma); 2099 cpdma_ctlr_destroy(priv->dma);
2102 2100
2103 unregister_netdev(ndev); 2101 unregister_netdev(ndev);
2102 pm_runtime_disable(&pdev->dev);
2104 free_netdev(ndev); 2103 free_netdev(ndev);
2105 2104
2106 return 0; 2105 return 0;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 84d3e5ca8817..c6385617bfb2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
880 macsec_skb_cb(skb)->valid = false; 880 macsec_skb_cb(skb)->valid = false;
881 skb = skb_share_check(skb, GFP_ATOMIC); 881 skb = skb_share_check(skb, GFP_ATOMIC);
882 if (!skb) 882 if (!skb)
883 return NULL; 883 return ERR_PTR(-ENOMEM);
884 884
885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); 885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
886 if (!req) { 886 if (!req) {
887 kfree_skb(skb); 887 kfree_skb(skb);
888 return NULL; 888 return ERR_PTR(-ENOMEM);
889 } 889 }
890 890
891 hdr = (struct macsec_eth_header *)skb->data; 891 hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
905 skb = skb_unshare(skb, GFP_ATOMIC); 905 skb = skb_unshare(skb, GFP_ATOMIC);
906 if (!skb) { 906 if (!skb) {
907 aead_request_free(req); 907 aead_request_free(req);
908 return NULL; 908 return ERR_PTR(-ENOMEM);
909 } 909 }
910 } else { 910 } else {
911 /* integrity only: all headers + data authenticated */ 911 /* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
921 dev_hold(dev); 921 dev_hold(dev);
922 ret = crypto_aead_decrypt(req); 922 ret = crypto_aead_decrypt(req);
923 if (ret == -EINPROGRESS) { 923 if (ret == -EINPROGRESS) {
924 return NULL; 924 return ERR_PTR(ret);
925 } else if (ret != 0) { 925 } else if (ret != 0) {
926 /* decryption/authentication failed 926 /* decryption/authentication failed
927 * 10.6 if validateFrames is disabled, deliver anyway 927 * 10.6 if validateFrames is disabled, deliver anyway
928 */ 928 */
929 if (ret != -EBADMSG) { 929 if (ret != -EBADMSG) {
930 kfree_skb(skb); 930 kfree_skb(skb);
931 skb = NULL; 931 skb = ERR_PTR(ret);
932 } 932 }
933 } else { 933 } else {
934 macsec_skb_cb(skb)->valid = true; 934 macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED)
1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
1148 1148
1149 if (!skb) { 1149 if (IS_ERR(skb)) {
1150 macsec_rxsa_put(rx_sa); 1150 /* the decrypt callback needs the reference */
1151 if (PTR_ERR(skb) != -EINPROGRESS)
1152 macsec_rxsa_put(rx_sa);
1151 rcu_read_unlock(); 1153 rcu_read_unlock();
1152 *pskb = NULL; 1154 *pskb = NULL;
1153 return RX_HANDLER_CONSUMED; 1155 return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@ deliver:
1161 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1163 macsec_extra_len(macsec_skb_cb(skb)->has_sci));
1162 macsec_reset_skb(skb, secy->netdev); 1164 macsec_reset_skb(skb, secy->netdev);
1163 1165
1164 macsec_rxsa_put(rx_sa); 1166 if (rx_sa)
1167 macsec_rxsa_put(rx_sa);
1165 count_rx(dev, skb->len); 1168 count_rx(dev, skb->len);
1166 1169
1167 rcu_read_unlock(); 1170 rcu_read_unlock();
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
1622 } 1625 }
1623 1626
1624 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1627 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
1625 if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, 1628 if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1626 secy->icv_len)) { 1629 secy->key_len, secy->icv_len)) {
1630 kfree(rx_sa);
1627 rtnl_unlock(); 1631 rtnl_unlock();
1628 return -ENOMEM; 1632 return -ENOMEM;
1629 } 1633 }
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1768 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1772 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
1769 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1773 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
1770 secy->key_len, secy->icv_len)) { 1774 secy->key_len, secy->icv_len)) {
1775 kfree(tx_sa);
1771 rtnl_unlock(); 1776 rtnl_unlock();
1772 return -ENOMEM; 1777 return -ENOMEM;
1773 } 1778 }
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
2227 return 1; 2232 return 1;
2228 2233
2229 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || 2234 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
2230 nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 2235 nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
2236 MACSEC_DEFAULT_CIPHER_ID) ||
2231 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2237 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
2232 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2238 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
2233 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || 2239 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2268 if (!hdr) 2274 if (!hdr)
2269 return -EMSGSIZE; 2275 return -EMSGSIZE;
2270 2276
2271 rtnl_lock(); 2277 genl_dump_check_consistent(cb, hdr, &macsec_fam);
2272 2278
2273 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2279 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
2274 goto nla_put_failure; 2280 goto nla_put_failure;
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
2429 2435
2430 nla_nest_end(skb, rxsc_list); 2436 nla_nest_end(skb, rxsc_list);
2431 2437
2432 rtnl_unlock();
2433
2434 genlmsg_end(skb, hdr); 2438 genlmsg_end(skb, hdr);
2435 2439
2436 return 0; 2440 return 0;
2437 2441
2438nla_put_failure: 2442nla_put_failure:
2439 rtnl_unlock();
2440 genlmsg_cancel(skb, hdr); 2443 genlmsg_cancel(skb, hdr);
2441 return -EMSGSIZE; 2444 return -EMSGSIZE;
2442} 2445}
2443 2446
2447static int macsec_generation = 1; /* protected by RTNL */
2448
2444static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2449static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2445{ 2450{
2446 struct net *net = sock_net(skb->sk); 2451 struct net *net = sock_net(skb->sk);
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
2450 dev_idx = cb->args[0]; 2455 dev_idx = cb->args[0];
2451 2456
2452 d = 0; 2457 d = 0;
2458 rtnl_lock();
2459
2460 cb->seq = macsec_generation;
2461
2453 for_each_netdev(net, dev) { 2462 for_each_netdev(net, dev) {
2454 struct macsec_secy *secy; 2463 struct macsec_secy *secy;
2455 2464
@@ -2467,6 +2476,7 @@ next:
2467 } 2476 }
2468 2477
2469done: 2478done:
2479 rtnl_unlock();
2470 cb->args[0] = d; 2480 cb->args[0] = d;
2471 return skb->len; 2481 return skb->len;
2472} 2482}
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
2920 struct net_device *real_dev = macsec->real_dev; 2930 struct net_device *real_dev = macsec->real_dev;
2921 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 2931 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
2922 2932
2933 macsec_generation++;
2934
2923 unregister_netdevice_queue(dev, head); 2935 unregister_netdevice_queue(dev, head);
2924 list_del_rcu(&macsec->secys); 2936 list_del_rcu(&macsec->secys);
2925 if (list_empty(&rxd->secys)) 2937 if (list_empty(&rxd->secys)) {
2926 netdev_rx_handler_unregister(real_dev); 2938 netdev_rx_handler_unregister(real_dev);
2939 kfree(rxd);
2940 }
2927 2941
2928 macsec_del_dev(macsec); 2942 macsec_del_dev(macsec);
2929} 2943}
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev,
2945 2959
2946 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 2960 err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
2947 rxd); 2961 rxd);
2948 if (err < 0) 2962 if (err < 0) {
2963 kfree(rxd);
2949 return err; 2964 return err;
2965 }
2950 } 2966 }
2951 2967
2952 list_add_tail_rcu(&macsec->secys, &rxd->secys); 2968 list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3066 if (err < 0) 3082 if (err < 0)
3067 goto del_dev; 3083 goto del_dev;
3068 3084
3085 macsec_generation++;
3086
3069 dev_hold(real_dev); 3087 dev_hold(real_dev);
3070 3088
3071 return 0; 3089 return 0;
@@ -3079,7 +3097,7 @@ unregister:
3079 3097
3080static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) 3098static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3081{ 3099{
3082 u64 csid = DEFAULT_CIPHER_ID; 3100 u64 csid = MACSEC_DEFAULT_CIPHER_ID;
3083 u8 icv_len = DEFAULT_ICV_LEN; 3101 u8 icv_len = DEFAULT_ICV_LEN;
3084 int flag; 3102 int flag;
3085 bool es, scb, sci; 3103 bool es, scb, sci;
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3094 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3112 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
3095 3113
3096 switch (csid) { 3114 switch (csid) {
3097 case DEFAULT_CIPHER_ID: 3115 case MACSEC_DEFAULT_CIPHER_ID:
3098 case DEFAULT_CIPHER_ALT: 3116 case MACSEC_DEFAULT_CIPHER_ALT:
3099 if (icv_len < MACSEC_MIN_ICV_LEN || 3117 if (icv_len < MACSEC_MIN_ICV_LEN ||
3100 icv_len > MACSEC_MAX_ICV_LEN) 3118 icv_len > MACSEC_MAX_ICV_LEN)
3101 return -EINVAL; 3119 return -EINVAL;
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
3129 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3147 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
3130 return -EINVAL; 3148 return -EINVAL;
3131 3149
3132 if ((data[IFLA_MACSEC_PROTECT] && 3150 if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
3133 nla_get_u8(data[IFLA_MACSEC_PROTECT])) && 3151 nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
3134 !data[IFLA_MACSEC_WINDOW]) 3152 !data[IFLA_MACSEC_WINDOW])
3135 return -EINVAL; 3153 return -EINVAL;
3136 3154
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb,
3168 3186
3169 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || 3187 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
3170 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3188 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
3171 nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 3189 nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
3190 MACSEC_DEFAULT_CIPHER_ID) ||
3172 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3191 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
3173 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3192 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
3174 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || 3193 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index b5d50d458728..93ffedfa2994 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
441 return -ENOMEM; 441 return -ENOMEM;
442 442
443 mutex_init(&ks->lock); 443 mutex_init(&ks->lock);
444 ks->spi = spi_dev_get(spi); 444 ks->spi = spi;
445 ks->chip = &ks8995_chip[variant]; 445 ks->chip = &ks8995_chip[variant];
446 446
447 if (ks->spi->dev.of_node) { 447 if (ks->spi->dev.of_node) {
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bdd83d95ec0a..96a5028621c8 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 617 { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
618 .driver_info = (unsigned long)&cdc_mbim_info, 618 .driver_info = (unsigned long)&cdc_mbim_info,
619 }, 619 },
620 /* Huawei E3372 fails unless NDP comes after the IP packets */ 620
621 { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 621 /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
622 * (12d1:157d), are known to fail unless the NDP is placed
623 * after the IP packets. Applying the quirk to all Huawei
624 * devices is broader than necessary, but harmless.
625 */
626 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
622 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, 627 .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
623 }, 628 },
624 /* default entry */ 629 /* default entry */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b2348f67b00a..db8022ae415b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1152 union Vmxnet3_GenericDesc *gdesc) 1152 union Vmxnet3_GenericDesc *gdesc)
1153{ 1153{
1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) { 1154 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1155 /* typical case: TCP/UDP over IP and both csums are correct */ 1155 if (gdesc->rcd.v4 &&
1156 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) == 1156 (le32_to_cpu(gdesc->dword[3]) &
1157 VMXNET3_RCD_CSUM_OK) { 1157 VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(gdesc->rcd.frg);
1161 } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
1162 (1 << VMXNET3_RCD_TUC_SHIFT))) {
1158 skb->ip_summed = CHECKSUM_UNNECESSARY; 1163 skb->ip_summed = CHECKSUM_UNNECESSARY;
1159 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1164 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
1160 BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6));
1161 BUG_ON(gdesc->rcd.frg); 1165 BUG_ON(gdesc->rcd.frg);
1162 } else { 1166 } else {
1163 if (gdesc->rcd.csum) { 1167 if (gdesc->rcd.csum) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 729c344e6774..c4825392d64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9a9fabb900c1..8a8f1e58b415 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -60,41 +60,6 @@ struct pcpu_dstats {
60 struct u64_stats_sync syncp; 60 struct u64_stats_sync syncp;
61}; 61};
62 62
63static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
64{
65 return dst;
66}
67
68static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
69{
70 return ip_local_out(net, sk, skb);
71}
72
73static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
74{
75 /* TO-DO: return max ethernet size? */
76 return dst->dev->mtu;
77}
78
79static void vrf_dst_destroy(struct dst_entry *dst)
80{
81 /* our dst lives forever - or until the device is closed */
82}
83
84static unsigned int vrf_default_advmss(const struct dst_entry *dst)
85{
86 return 65535 - 40;
87}
88
89static struct dst_ops vrf_dst_ops = {
90 .family = AF_INET,
91 .local_out = vrf_ip_local_out,
92 .check = vrf_ip_check,
93 .mtu = vrf_v4_mtu,
94 .destroy = vrf_dst_destroy,
95 .default_advmss = vrf_default_advmss,
96};
97
98/* neighbor handling is done with actual device; do not want 63/* neighbor handling is done with actual device; do not want
99 * to flip skb->dev for those ndisc packets. This really fails 64 * to flip skb->dev for those ndisc packets. This really fails
100 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is 65 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
349} 314}
350 315
351#if IS_ENABLED(CONFIG_IPV6) 316#if IS_ENABLED(CONFIG_IPV6)
352static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
353{
354 return dst;
355}
356
357static struct dst_ops vrf_dst_ops6 = {
358 .family = AF_INET6,
359 .local_out = ip6_local_out,
360 .check = vrf_ip6_check,
361 .mtu = vrf_v4_mtu,
362 .destroy = vrf_dst_destroy,
363 .default_advmss = vrf_default_advmss,
364};
365
366static int init_dst_ops6_kmem_cachep(void)
367{
368 vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
369 sizeof(struct rt6_info),
370 0,
371 SLAB_HWCACHE_ALIGN,
372 NULL);
373
374 if (!vrf_dst_ops6.kmem_cachep)
375 return -ENOMEM;
376
377 return 0;
378}
379
380static void free_dst_ops6_kmem_cachep(void)
381{
382 kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
383}
384
385static int vrf_input6(struct sk_buff *skb)
386{
387 skb->dev->stats.rx_errors++;
388 kfree_skb(skb);
389 return 0;
390}
391
392/* modelled after ip6_finish_output2 */ 317/* modelled after ip6_finish_output2 */
393static int vrf_finish_output6(struct net *net, struct sock *sk, 318static int vrf_finish_output6(struct net *net, struct sock *sk,
394 struct sk_buff *skb) 319 struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
429 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 354 !(IP6CB(skb)->flags & IP6SKB_REROUTED));
430} 355}
431 356
432static void vrf_rt6_destroy(struct net_vrf *vrf) 357static void vrf_rt6_release(struct net_vrf *vrf)
433{ 358{
434 dst_destroy(&vrf->rt6->dst); 359 dst_release(&vrf->rt6->dst);
435 free_percpu(vrf->rt6->rt6i_pcpu);
436 vrf->rt6 = NULL; 360 vrf->rt6 = NULL;
437} 361}
438 362
439static int vrf_rt6_create(struct net_device *dev) 363static int vrf_rt6_create(struct net_device *dev)
440{ 364{
441 struct net_vrf *vrf = netdev_priv(dev); 365 struct net_vrf *vrf = netdev_priv(dev);
442 struct dst_entry *dst; 366 struct net *net = dev_net(dev);
443 struct rt6_info *rt6; 367 struct rt6_info *rt6;
444 int cpu;
445 int rc = -ENOMEM; 368 int rc = -ENOMEM;
446 369
447 rt6 = dst_alloc(&vrf_dst_ops6, dev, 0, 370 rt6 = ip6_dst_alloc(net, dev,
448 DST_OBSOLETE_NONE, 371 DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
449 (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
450 if (!rt6) 372 if (!rt6)
451 goto out; 373 goto out;
452 374
453 dst = &rt6->dst;
454
455 rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
456 if (!rt6->rt6i_pcpu) {
457 dst_destroy(dst);
458 goto out;
459 }
460 for_each_possible_cpu(cpu) {
461 struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
462 *p = NULL;
463 }
464
465 memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
466
467 INIT_LIST_HEAD(&rt6->rt6i_siblings);
468 INIT_LIST_HEAD(&rt6->rt6i_uncached);
469
470 rt6->dst.input = vrf_input6;
471 rt6->dst.output = vrf_output6; 375 rt6->dst.output = vrf_output6;
472 376 rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
473 rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id); 377 dst_hold(&rt6->dst);
474
475 atomic_set(&rt6->dst.__refcnt, 2);
476
477 vrf->rt6 = rt6; 378 vrf->rt6 = rt6;
478 rc = 0; 379 rc = 0;
479out: 380out:
480 return rc; 381 return rc;
481} 382}
482#else 383#else
483static int init_dst_ops6_kmem_cachep(void) 384static void vrf_rt6_release(struct net_vrf *vrf)
484{
485 return 0;
486}
487
488static void free_dst_ops6_kmem_cachep(void)
489{
490}
491
492static void vrf_rt6_destroy(struct net_vrf *vrf)
493{ 385{
494} 386}
495 387
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
557 !(IPCB(skb)->flags & IPSKB_REROUTED)); 449 !(IPCB(skb)->flags & IPSKB_REROUTED));
558} 450}
559 451
560static void vrf_rtable_destroy(struct net_vrf *vrf) 452static void vrf_rtable_release(struct net_vrf *vrf)
561{ 453{
562 struct dst_entry *dst = (struct dst_entry *)vrf->rth; 454 struct dst_entry *dst = (struct dst_entry *)vrf->rth;
563 455
564 dst_destroy(dst); 456 dst_release(dst);
565 vrf->rth = NULL; 457 vrf->rth = NULL;
566} 458}
567 459
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
570 struct net_vrf *vrf = netdev_priv(dev); 462 struct net_vrf *vrf = netdev_priv(dev);
571 struct rtable *rth; 463 struct rtable *rth;
572 464
573 rth = dst_alloc(&vrf_dst_ops, dev, 2, 465 rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
574 DST_OBSOLETE_NONE,
575 (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
576 if (rth) { 466 if (rth) {
577 rth->dst.output = vrf_output; 467 rth->dst.output = vrf_output;
578 rth->rt_genid = rt_genid_ipv4(dev_net(dev));
579 rth->rt_flags = 0;
580 rth->rt_type = RTN_UNICAST;
581 rth->rt_is_input = 0;
582 rth->rt_iif = 0;
583 rth->rt_pmtu = 0;
584 rth->rt_gateway = 0;
585 rth->rt_uses_gateway = 0;
586 rth->rt_table_id = vrf->tb_id; 468 rth->rt_table_id = vrf->tb_id;
587 INIT_LIST_HEAD(&rth->rt_uncached);
588 rth->rt_uncached_list = NULL;
589 } 469 }
590 470
591 return rth; 471 return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
673 struct net_device *port_dev; 553 struct net_device *port_dev;
674 struct list_head *iter; 554 struct list_head *iter;
675 555
676 vrf_rtable_destroy(vrf); 556 vrf_rtable_release(vrf);
677 vrf_rt6_destroy(vrf); 557 vrf_rt6_release(vrf);
678 558
679 netdev_for_each_lower_dev(dev, port_dev, iter) 559 netdev_for_each_lower_dev(dev, port_dev, iter)
680 vrf_del_slave(dev, port_dev); 560 vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
704 return 0; 584 return 0;
705 585
706out_rth: 586out_rth:
707 vrf_rtable_destroy(vrf); 587 vrf_rtable_release(vrf);
708out_stats: 588out_stats:
709 free_percpu(dev->dstats); 589 free_percpu(dev->dstats);
710 dev->dstats = NULL; 590 dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
737 struct net_vrf *vrf = netdev_priv(dev); 617 struct net_vrf *vrf = netdev_priv(dev);
738 618
739 rth = vrf->rth; 619 rth = vrf->rth;
740 atomic_inc(&rth->dst.__refcnt); 620 dst_hold(&rth->dst);
741 } 621 }
742 622
743 return rth; 623 return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
788 struct net_vrf *vrf = netdev_priv(dev); 668 struct net_vrf *vrf = netdev_priv(dev);
789 669
790 rt = vrf->rt6; 670 rt = vrf->rt6;
791 atomic_inc(&rt->dst.__refcnt); 671 dst_hold(&rt->dst);
792 } 672 }
793 673
794 return (struct dst_entry *)rt; 674 return (struct dst_entry *)rt;
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
946{ 826{
947 int rc; 827 int rc;
948 828
949 vrf_dst_ops.kmem_cachep =
950 kmem_cache_create("vrf_ip_dst_cache",
951 sizeof(struct rtable), 0,
952 SLAB_HWCACHE_ALIGN,
953 NULL);
954
955 if (!vrf_dst_ops.kmem_cachep)
956 return -ENOMEM;
957
958 rc = init_dst_ops6_kmem_cachep();
959 if (rc != 0)
960 goto error2;
961
962 register_netdevice_notifier(&vrf_notifier_block); 829 register_netdevice_notifier(&vrf_notifier_block);
963 830
964 rc = rtnl_link_register(&vrf_link_ops); 831 rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
969 836
970error: 837error:
971 unregister_netdevice_notifier(&vrf_notifier_block); 838 unregister_netdevice_notifier(&vrf_notifier_block);
972 free_dst_ops6_kmem_cachep();
973error2:
974 kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
975 return rc; 839 return rc;
976} 840}
977 841
978static void __exit vrf_cleanup_module(void)
979{
980 rtnl_link_unregister(&vrf_link_ops);
981 unregister_netdevice_notifier(&vrf_notifier_block);
982 kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
983 free_dst_ops6_kmem_cachep();
984}
985
986module_init(vrf_init_module); 842module_init(vrf_init_module);
987module_exit(vrf_cleanup_module);
988MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); 843MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
989MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); 844MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
990MODULE_LICENSE("GPL"); 845MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 72380af9dc52..b0603e796ad8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
5680 INIT_WORK(&wl->firmware_load, b43_request_firmware); 5680 INIT_WORK(&wl->firmware_load, b43_request_firmware);
5681 schedule_work(&wl->firmware_load); 5681 schedule_work(&wl->firmware_load);
5682 5682
5683bcma_out:
5684 return err; 5683 return err;
5685 5684
5686bcma_err_wireless_exit: 5685bcma_err_wireless_exit:
5687 ieee80211_free_hw(wl->hw); 5686 ieee80211_free_hw(wl->hw);
5687bcma_out:
5688 kfree(dev);
5688 return err; 5689 return err;
5689} 5690}
5690 5691
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
5712 b43_rng_exit(wl); 5713 b43_rng_exit(wl);
5713 5714
5714 b43_leds_unregister(wl); 5715 b43_leds_unregister(wl);
5715
5716 ieee80211_free_hw(wl->hw); 5716 ieee80211_free_hw(wl->hw);
5717 kfree(wldev->dev);
5717} 5718}
5718 5719
5719static struct bcma_driver b43_bcma_driver = { 5720static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5796 5797
5797 b43_leds_unregister(wl); 5798 b43_leds_unregister(wl);
5798 b43_wireless_exit(dev, wl); 5799 b43_wireless_exit(dev, wl);
5800 kfree(dev);
5799} 5801}
5800 5802
5801static struct ssb_driver b43_ssb_driver = { 5803static struct ssb_driver b43_ssb_driver = {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 76e649c680a1..a50f4df7eae7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1147 /* the fw is stopped, the aux sta is dead: clean up driver state */ 1147 /* the fw is stopped, the aux sta is dead: clean up driver state */
1148 iwl_mvm_del_aux_sta(mvm); 1148 iwl_mvm_del_aux_sta(mvm);
1149 1149
1150 iwl_free_fw_paging(mvm);
1151
1150 /* 1152 /*
1151 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete() 1153 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1152 * won't be called in this case). 1154 * won't be called in this case).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5e8ab796d5bc..d278399097dc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 761 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
762 kfree(mvm->nvm_sections[i].data); 762 kfree(mvm->nvm_sections[i].data);
763 763
764 iwl_free_fw_paging(mvm);
765
766 iwl_mvm_tof_clean(mvm); 764 iwl_mvm_tof_clean(mvm);
767 765
768 ieee80211_free_hw(mvm->hw); 766 ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index eb39c7e09781..b2b79354d5c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
732 */ 732 */
733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); 733 val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
734 if (val & (BIT(1) | BIT(17))) { 734 if (val & (BIT(1) | BIT(17))) {
735 IWL_INFO(trans, 735 IWL_DEBUG_INFO(trans,
736 "can't access the RSA semaphore it is write protected\n"); 736 "can't access the RSA semaphore it is write protected\n");
737 return 0; 737 return 0;
738 } 738 }
739 739
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 95dcbff4673b..6a8245c4ea48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) 2488 for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p]; 2489 rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
2490 2490
2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, 2491 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n", 2492 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
2493 rtldm->thermalvalue, thermal_value); 2493 rtldm->thermalvalue, thermal_value);
2494 /*Record last Power Tracking Thermal Value*/ 2494 /*Record last Power Tracking Thermal Value*/
2495 rtldm->thermalvalue = thermal_value; 2495 rtldm->thermalvalue = thermal_value;
2496 } 2496 }
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 01b9d0a00abc..d11cdbb8fba3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
275} 275}
276EXPORT_SYMBOL(pci_write_vpd); 276EXPORT_SYMBOL(pci_write_vpd);
277 277
278/**
279 * pci_set_vpd_size - Set size of Vital Product Data space
280 * @dev: pci device struct
281 * @len: size of vpd space
282 */
283int pci_set_vpd_size(struct pci_dev *dev, size_t len)
284{
285 if (!dev->vpd || !dev->vpd->ops)
286 return -ENODEV;
287 return dev->vpd->ops->set_size(dev, len);
288}
289EXPORT_SYMBOL(pci_set_vpd_size);
290
278#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1) 291#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
279 292
280/** 293/**
@@ -498,9 +511,23 @@ out:
498 return ret ? ret : count; 511 return ret ? ret : count;
499} 512}
500 513
514static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
515{
516 struct pci_vpd *vpd = dev->vpd;
517
518 if (len == 0 || len > PCI_VPD_MAX_SIZE)
519 return -EIO;
520
521 vpd->valid = 1;
522 vpd->len = len;
523
524 return 0;
525}
526
501static const struct pci_vpd_ops pci_vpd_ops = { 527static const struct pci_vpd_ops pci_vpd_ops = {
502 .read = pci_vpd_read, 528 .read = pci_vpd_read,
503 .write = pci_vpd_write, 529 .write = pci_vpd_write,
530 .set_size = pci_vpd_set_size,
504}; 531};
505 532
506static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, 533static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
533 return ret; 560 return ret;
534} 561}
535 562
563static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
564{
565 struct pci_dev *tdev = pci_get_slot(dev->bus,
566 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
567 int ret;
568
569 if (!tdev)
570 return -ENODEV;
571
572 ret = pci_set_vpd_size(tdev, len);
573 pci_dev_put(tdev);
574 return ret;
575}
576
536static const struct pci_vpd_ops pci_vpd_f0_ops = { 577static const struct pci_vpd_ops pci_vpd_f0_ops = {
537 .read = pci_vpd_f0_read, 578 .read = pci_vpd_f0_read,
538 .write = pci_vpd_f0_write, 579 .write = pci_vpd_f0_write,
580 .set_size = pci_vpd_f0_set_size,
539}; 581};
540 582
541int pci_vpd_init(struct pci_dev *dev) 583int pci_vpd_init(struct pci_dev *dev)
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index eb5a2755a164..2f817fa4c661 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -32,7 +32,7 @@
32#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) 32#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
33 33
34struct imx6_pcie { 34struct imx6_pcie {
35 struct gpio_desc *reset_gpio; 35 int reset_gpio;
36 struct clk *pcie_bus; 36 struct clk *pcie_bus;
37 struct clk *pcie_phy; 37 struct clk *pcie_phy;
38 struct clk *pcie; 38 struct clk *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
309 usleep_range(200, 500); 309 usleep_range(200, 500);
310 310
311 /* Some boards don't have PCIe reset GPIO. */ 311 /* Some boards don't have PCIe reset GPIO. */
312 if (imx6_pcie->reset_gpio) { 312 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
313 gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0); 313 gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
314 msleep(100); 314 msleep(100);
315 gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1); 315 gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
316 } 316 }
317 return 0; 317 return 0;
318 318
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
523{ 523{
524 struct imx6_pcie *imx6_pcie; 524 struct imx6_pcie *imx6_pcie;
525 struct pcie_port *pp; 525 struct pcie_port *pp;
526 struct device_node *np = pdev->dev.of_node;
526 struct resource *dbi_base; 527 struct resource *dbi_base;
527 struct device_node *node = pdev->dev.of_node; 528 struct device_node *node = pdev->dev.of_node;
528 int ret; 529 int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
544 return PTR_ERR(pp->dbi_base); 545 return PTR_ERR(pp->dbi_base);
545 546
546 /* Fetch GPIOs */ 547 /* Fetch GPIOs */
547 imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", 548 imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
548 GPIOD_OUT_LOW); 549 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
550 ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
551 GPIOF_OUT_INIT_LOW, "PCIe reset");
552 if (ret) {
553 dev_err(&pdev->dev, "unable to get reset gpio\n");
554 return ret;
555 }
556 }
549 557
550 /* Fetch clocks */ 558 /* Fetch clocks */
551 imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); 559 imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d0fb93481573..a814bbb80fcb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
97struct pci_vpd_ops { 97struct pci_vpd_ops {
98 ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 98 ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
99 ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 99 ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
100 int (*set_size)(struct pci_dev *dev, size_t len);
100}; 101};
101 102
102struct pci_vpd { 103struct pci_vpd {
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 32346b5a8a11..f70090897fdf 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
737 break; 737 break;
738 case CPU_PM_EXIT: 738 case CPU_PM_EXIT:
739 case CPU_PM_ENTER_FAILED: 739 case CPU_PM_ENTER_FAILED:
740 /* Restore and enable the counter */ 740 /*
741 armpmu_start(event, PERF_EF_RELOAD); 741 * Restore and enable the counter.
742 * armpmu_start() indirectly calls
743 *
744 * perf_event_update_userpage()
745 *
746 * that requires RCU read locking to be functional,
747 * wrap the call within RCU_NONIDLE to make the
748 * RCU subsystem aware this cpu is not idle from
749 * an RCU perspective for the armpmu_start() call
750 * duration.
751 */
752 RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
742 break; 753 break;
743 default: 754 default:
744 break; 755 break;
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 77e2d02e6bee..793ecb6d87bc 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
86 if (!np) 86 if (!np)
87 return -ENODEV; 87 return -ENODEV;
88 88
89 if (!dev->parent || !dev->parent->of_node)
90 return -ENODEV;
91
89 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 92 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
90 if (IS_ERR(dp)) 93 if (IS_ERR(dp))
91 return -ENOMEM; 94 return -ENOMEM;
@@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
104 return ret; 107 return ret;
105 } 108 }
106 109
107 dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); 110 dp->grf = syscon_node_to_regmap(dev->parent->of_node);
108 if (IS_ERR(dp->grf)) { 111 if (IS_ERR(dp->grf)) {
109 dev_err(dev, "rk3288-dp needs rockchip,grf property\n"); 112 dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
110 return PTR_ERR(dp->grf); 113 return PTR_ERR(dp->grf);
111 } 114 }
112 115
diff --git a/drivers/phy/phy-rockchip-emmc.c b/drivers/phy/phy-rockchip-emmc.c
index 887b4c27195f..6ebcf3e41c46 100644
--- a/drivers/phy/phy-rockchip-emmc.c
+++ b/drivers/phy/phy-rockchip-emmc.c
@@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
176 struct regmap *grf; 176 struct regmap *grf;
177 unsigned int reg_offset; 177 unsigned int reg_offset;
178 178
179 grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 179 if (!dev->parent || !dev->parent->of_node)
180 return -ENODEV;
181
182 grf = syscon_node_to_regmap(dev->parent->of_node);
180 if (IS_ERR(grf)) { 183 if (IS_ERR(grf)) {
181 dev_err(dev, "Missing rockchip,grf property\n"); 184 dev_err(dev, "Missing rockchip,grf property\n");
182 return PTR_ERR(grf); 185 return PTR_ERR(grf);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index debe1219d76d..fc8cbf611723 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -2,6 +2,7 @@ config PINCTRL_IMX
2 bool 2 bool
3 select PINMUX 3 select PINMUX
4 select PINCONF 4 select PINCONF
5 select REGMAP
5 6
6config PINCTRL_IMX1_CORE 7config PINCTRL_IMX1_CORE
7 bool 8 bool
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 2bbe6f7964a7..6ab8c3ccdeea 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
1004 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent); 1004 struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
1005 int eint_num, virq, eint_offset; 1005 int eint_num, virq, eint_offset;
1006 unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc; 1006 unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
1007 static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256}; 1007 static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
1008 128000, 256000};
1008 const struct mtk_desc_pin *pin; 1009 const struct mtk_desc_pin *pin;
1009 struct irq_data *d; 1010 struct irq_data *d;
1010 1011
@@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
1022 if (!mtk_eint_can_en_debounce(pctl, eint_num)) 1023 if (!mtk_eint_can_en_debounce(pctl, eint_num))
1023 return -ENOSYS; 1024 return -ENOSYS;
1024 1025
1025 dbnc = ARRAY_SIZE(dbnc_arr); 1026 dbnc = ARRAY_SIZE(debounce_time);
1026 for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) { 1027 for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
1027 if (debounce <= dbnc_arr[i]) { 1028 if (debounce <= debounce_time[i]) {
1028 dbnc = i; 1029 dbnc = i;
1029 break; 1030 break;
1030 } 1031 }
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index fb126d56ad40..cf9bafa10acf 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
1280 1280
1281 /* Parse pins in each row from LSB */ 1281 /* Parse pins in each row from LSB */
1282 while (mask) { 1282 while (mask) {
1283 bit_pos = ffs(mask); 1283 bit_pos = __ffs(mask);
1284 pin_num_from_lsb = bit_pos / pcs->bits_per_pin; 1284 pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
1285 mask_pos = ((pcs->fmask) << (bit_pos - 1)); 1285 mask_pos = ((pcs->fmask) << bit_pos);
1286 val_pos = val & mask_pos; 1286 val_pos = val & mask_pos;
1287 submask = mask & mask_pos; 1287 submask = mask & mask_pos;
1288 1288
@@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
1852 ret = of_property_read_u32(np, "pinctrl-single,function-mask", 1852 ret = of_property_read_u32(np, "pinctrl-single,function-mask",
1853 &pcs->fmask); 1853 &pcs->fmask);
1854 if (!ret) { 1854 if (!ret) {
1855 pcs->fshift = ffs(pcs->fmask) - 1; 1855 pcs->fshift = __ffs(pcs->fmask);
1856 pcs->fmax = pcs->fmask >> pcs->fshift; 1856 pcs->fmax = pcs->fmask >> pcs->fshift;
1857 } else { 1857 } else {
1858 /* If mask property doesn't exist, function mux is invalid. */ 1858 /* If mask property doesn't exist, function mux is invalid. */
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 10ce6cba4455..09356684c32f 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
127 arg0.integer.value = reg; 127 arg0.integer.value = reg;
128 128
129 status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret); 129 status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
130 if (ACPI_FAILURE(status))
131 return -EINVAL;
130 *ret = lret; 132 *ret = lret;
131 return (status != AE_OK) ? -EINVAL : 0; 133 return 0;
132} 134}
133 135
134/** 136/**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
173DEFINE_CONV(normal, 1, 2, 3); 175DEFINE_CONV(normal, 1, 2, 3);
174DEFINE_CONV(y_inverted, 1, -2, 3); 176DEFINE_CONV(y_inverted, 1, -2, 3);
175DEFINE_CONV(x_inverted, -1, 2, 3); 177DEFINE_CONV(x_inverted, -1, 2, 3);
178DEFINE_CONV(x_inverted_usd, -1, 2, -3);
176DEFINE_CONV(z_inverted, 1, 2, -3); 179DEFINE_CONV(z_inverted, 1, 2, -3);
177DEFINE_CONV(xy_swap, 2, 1, 3); 180DEFINE_CONV(xy_swap, 2, 1, 3);
178DEFINE_CONV(xy_rotated_left, -2, 1, 3); 181DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
236 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), 239 AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
237 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), 240 AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
238 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), 241 AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
242 AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
239 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 243 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
240 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 244 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
241 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), 245 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index f93abc8c1424..a818db6aa08f 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
91} 91}
92 92
93static const struct dev_pm_ops intel_hid_pl_pm_ops = { 93static const struct dev_pm_ops intel_hid_pl_pm_ops = {
94 .freeze = intel_hid_pl_suspend_handler,
95 .restore = intel_hid_pl_resume_handler,
94 .suspend = intel_hid_pl_suspend_handler, 96 .suspend = intel_hid_pl_suspend_handler,
95 .resume = intel_hid_pl_resume_handler, 97 .resume = intel_hid_pl_resume_handler,
96}; 98};
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 3fb1d85c70a8..6f497e80c9df 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
687 ipcdev.acpi_io_size = size; 687 ipcdev.acpi_io_size = size;
688 dev_info(&pdev->dev, "io res: %pR\n", res); 688 dev_info(&pdev->dev, "io res: %pR\n", res);
689 689
690 /* This is index 0 to cover BIOS data register */
691 punit_res = punit_res_array; 690 punit_res = punit_res_array;
691 /* This is index 0 to cover BIOS data register */
692 res = platform_get_resource(pdev, IORESOURCE_MEM, 692 res = platform_get_resource(pdev, IORESOURCE_MEM,
693 PLAT_RESOURCE_BIOS_DATA_INDEX); 693 PLAT_RESOURCE_BIOS_DATA_INDEX);
694 if (!res) { 694 if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
698 *punit_res = *res; 698 *punit_res = *res;
699 dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res); 699 dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
700 700
701 /* This is index 1 to cover BIOS interface register */
701 res = platform_get_resource(pdev, IORESOURCE_MEM, 702 res = platform_get_resource(pdev, IORESOURCE_MEM,
702 PLAT_RESOURCE_BIOS_IFACE_INDEX); 703 PLAT_RESOURCE_BIOS_IFACE_INDEX);
703 if (!res) { 704 if (!res) {
704 dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n"); 705 dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
705 return -ENXIO; 706 return -ENXIO;
706 } 707 }
707 /* This is index 1 to cover BIOS interface register */
708 *++punit_res = *res; 708 *++punit_res = *res;
709 dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res); 709 dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
710 710
711 /* This is index 2 to cover ISP data register, optional */
711 res = platform_get_resource(pdev, IORESOURCE_MEM, 712 res = platform_get_resource(pdev, IORESOURCE_MEM,
712 PLAT_RESOURCE_ISP_DATA_INDEX); 713 PLAT_RESOURCE_ISP_DATA_INDEX);
713 if (!res) { 714 ++punit_res;
714 dev_err(&pdev->dev, "Failed to get res of punit ISP data\n"); 715 if (res) {
715 return -ENXIO; 716 *punit_res = *res;
717 dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
716 } 718 }
717 /* This is index 2 to cover ISP data register */
718 *++punit_res = *res;
719 dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
720 719
720 /* This is index 3 to cover ISP interface register, optional */
721 res = platform_get_resource(pdev, IORESOURCE_MEM, 721 res = platform_get_resource(pdev, IORESOURCE_MEM,
722 PLAT_RESOURCE_ISP_IFACE_INDEX); 722 PLAT_RESOURCE_ISP_IFACE_INDEX);
723 if (!res) { 723 ++punit_res;
724 dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n"); 724 if (res) {
725 return -ENXIO; 725 *punit_res = *res;
726 dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
726 } 727 }
727 /* This is index 3 to cover ISP interface register */
728 *++punit_res = *res;
729 dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
730 728
729 /* This is index 4 to cover GTD data register, optional */
731 res = platform_get_resource(pdev, IORESOURCE_MEM, 730 res = platform_get_resource(pdev, IORESOURCE_MEM,
732 PLAT_RESOURCE_GTD_DATA_INDEX); 731 PLAT_RESOURCE_GTD_DATA_INDEX);
733 if (!res) { 732 ++punit_res;
734 dev_err(&pdev->dev, "Failed to get res of punit GTD data\n"); 733 if (res) {
735 return -ENXIO; 734 *punit_res = *res;
735 dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
736 } 736 }
737 /* This is index 4 to cover GTD data register */
738 *++punit_res = *res;
739 dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
740 737
738 /* This is index 5 to cover GTD interface register, optional */
741 res = platform_get_resource(pdev, IORESOURCE_MEM, 739 res = platform_get_resource(pdev, IORESOURCE_MEM,
742 PLAT_RESOURCE_GTD_IFACE_INDEX); 740 PLAT_RESOURCE_GTD_IFACE_INDEX);
743 if (!res) { 741 ++punit_res;
744 dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n"); 742 if (res) {
745 return -ENXIO; 743 *punit_res = *res;
744 dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
746 } 745 }
747 /* This is index 5 to cover GTD interface register */
748 *++punit_res = *res;
749 dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
750 746
751 res = platform_get_resource(pdev, IORESOURCE_MEM, 747 res = platform_get_resource(pdev, IORESOURCE_MEM,
752 PLAT_RESOURCE_IPC_INDEX); 748 PLAT_RESOURCE_IPC_INDEX);
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index bd875409a02d..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
227 struct resource *res; 227 struct resource *res;
228 void __iomem *addr; 228 void __iomem *addr;
229 229
230 /*
231 * The following resources are required
232 * - BIOS_IPC BASE_DATA
233 * - BIOS_IPC BASE_IFACE
234 */
230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 235 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
231 addr = devm_ioremap_resource(&pdev->dev, res); 236 addr = devm_ioremap_resource(&pdev->dev, res);
232 if (IS_ERR(addr)) 237 if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
239 return PTR_ERR(addr); 244 return PTR_ERR(addr);
240 punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr; 245 punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
241 246
247 /*
248 * The following resources are optional
249 * - ISPDRIVER_IPC BASE_DATA
250 * - ISPDRIVER_IPC BASE_IFACE
251 * - GTDRIVER_IPC BASE_DATA
252 * - GTDRIVER_IPC BASE_IFACE
253 */
242 res = platform_get_resource(pdev, IORESOURCE_MEM, 2); 254 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
243 addr = devm_ioremap_resource(&pdev->dev, res); 255 if (res) {
244 if (IS_ERR(addr)) 256 addr = devm_ioremap_resource(&pdev->dev, res);
245 return PTR_ERR(addr); 257 if (!IS_ERR(addr))
246 punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; 258 punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
259 }
247 260
248 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 261 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
249 addr = devm_ioremap_resource(&pdev->dev, res); 262 if (res) {
250 if (IS_ERR(addr)) 263 addr = devm_ioremap_resource(&pdev->dev, res);
251 return PTR_ERR(addr); 264 if (!IS_ERR(addr))
252 punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; 265 punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
266 }
253 267
254 res = platform_get_resource(pdev, IORESOURCE_MEM, 4); 268 res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
255 addr = devm_ioremap_resource(&pdev->dev, res); 269 if (res) {
256 if (IS_ERR(addr)) 270 addr = devm_ioremap_resource(&pdev->dev, res);
257 return PTR_ERR(addr); 271 if (!IS_ERR(addr))
258 punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; 272 punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
273 }
259 274
260 res = platform_get_resource(pdev, IORESOURCE_MEM, 5); 275 res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
261 addr = devm_ioremap_resource(&pdev->dev, res); 276 if (res) {
262 if (IS_ERR(addr)) 277 addr = devm_ioremap_resource(&pdev->dev, res);
263 return PTR_ERR(addr); 278 if (!IS_ERR(addr))
264 punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; 279 punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
280 }
265 281
266 return 0; 282 return 0;
267} 283}
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 397119f83e82..781bd10ca7ac 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
659static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period) 659static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
660{ 660{
661 u32 telem_ctrl = 0; 661 u32 telem_ctrl = 0;
662 int ret; 662 int ret = 0;
663 663
664 mutex_lock(&(telm_conf->telem_lock)); 664 mutex_lock(&(telm_conf->telem_lock));
665 if (ioss_period) { 665 if (ioss_period) {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e305ab541a22..9255ff3ee81a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
7972 fan_update_desired_level(s); 7972 fan_update_desired_level(s);
7973 mutex_unlock(&fan_mutex); 7973 mutex_unlock(&fan_mutex);
7974 7974
7975 if (rc)
7976 return rc;
7975 if (status) 7977 if (status)
7976 *status = s; 7978 *status = s;
7977 7979
7978 return rc; 7980 return 0;
7979} 7981}
7980 7982
7981static int fan_get_speed(unsigned int *speed) 7983static int fan_get_speed(unsigned int *speed)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a76a862..01e12d221a8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
135/* Field definitions */ 135/* Field definitions */
136#define HCI_ACCEL_MASK 0x7fff 136#define HCI_ACCEL_MASK 0x7fff
137#define HCI_HOTKEY_DISABLE 0x0b 137#define HCI_HOTKEY_DISABLE 0x0b
138#define HCI_HOTKEY_ENABLE 0x01 138#define HCI_HOTKEY_ENABLE 0x09
139#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10 139#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
140#define HCI_LCD_BRIGHTNESS_BITS 3 140#define HCI_LCD_BRIGHTNESS_BITS 3
141#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS) 141#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5d4d91846357..96168b819044 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2669,9 +2669,9 @@ static int __init mport_init(void)
2669 2669
2670 /* Create device class needed by udev */ 2670 /* Create device class needed by udev */
2671 dev_class = class_create(THIS_MODULE, DRV_NAME); 2671 dev_class = class_create(THIS_MODULE, DRV_NAME);
2672 if (!dev_class) { 2672 if (IS_ERR(dev_class)) {
2673 rmcd_error("Unable to create " DRV_NAME " class"); 2673 rmcd_error("Unable to create " DRV_NAME " class");
2674 return -EINVAL; 2674 return PTR_ERR(dev_class);
2675 } 2675 }
2676 2676
2677 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); 2677 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b2156ee5bae1..ecb7dbae9be9 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -863,7 +863,7 @@ out:
863 * A user-initiated temperature conversion is not started by this function, 863 * A user-initiated temperature conversion is not started by this function,
864 * so the temperature is updated once every 64 seconds. 864 * so the temperature is updated once every 64 seconds.
865 */ 865 */
866static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC) 866static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
867{ 867{
868 struct ds1307 *ds1307 = dev_get_drvdata(dev); 868 struct ds1307 *ds1307 = dev_get_drvdata(dev);
869 u8 temp_buf[2]; 869 u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
892 struct device_attribute *attr, char *buf) 892 struct device_attribute *attr, char *buf)
893{ 893{
894 int ret; 894 int ret;
895 s16 temp; 895 s32 temp;
896 896
897 ret = ds3231_hwmon_read_temp(dev, &temp); 897 ret = ds3231_hwmon_read_temp(dev, &temp);
898 if (ret) 898 if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
1531 return PTR_ERR(ds1307->rtc); 1531 return PTR_ERR(ds1307->rtc);
1532 } 1532 }
1533 1533
1534 if (ds1307_can_wakeup_device) { 1534 if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
1535 /* Disable request for an IRQ */ 1535 /* Disable request for an IRQ */
1536 want_irq = false; 1536 want_irq = false;
1537 dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n"); 1537 dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1bce9cf51b1e..b83908670a9a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
756 blk_cleanup_queue(dev_info->dcssblk_queue); 756 blk_cleanup_queue(dev_info->dcssblk_queue);
757 dev_info->gd->queue = NULL; 757 dev_info->gd->queue = NULL;
758 put_disk(dev_info->gd); 758 put_disk(dev_info->gd);
759 device_unregister(&dev_info->dev);
760 759
761 /* unload all related segments */ 760 /* unload all related segments */
762 list_for_each_entry(entry, &dev_info->seg_list, lh) 761 list_for_each_entry(entry, &dev_info->seg_list, lh)
763 segment_unload(entry->segment_name); 762 segment_unload(entry->segment_name);
764 763
765 put_device(&dev_info->dev);
766 up_write(&dcssblk_devices_sem); 764 up_write(&dcssblk_devices_sem);
767 765
766 device_unregister(&dev_info->dev);
767 put_device(&dev_info->dev);
768
768 rc = count; 769 rc = count;
769out_buf: 770out_buf:
770 kfree(local_buf); 771 kfree(local_buf);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 75d9896deccb..e6f54d3b8969 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
303 if (req->cmd_type != REQ_TYPE_FS) { 303 if (req->cmd_type != REQ_TYPE_FS) {
304 blk_start_request(req); 304 blk_start_request(req);
305 blk_dump_rq_flags(req, KMSG_COMPONENT " bad request"); 305 blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
306 blk_end_request_all(req, -EIO); 306 __blk_end_request_all(req, -EIO);
307 continue; 307 continue;
308 } 308 }
309 309
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
56{ 56{
57 struct sclp_ctl_sccb ctl_sccb; 57 struct sclp_ctl_sccb ctl_sccb;
58 struct sccb_header *sccb; 58 struct sccb_header *sccb;
59 unsigned long copied;
59 int rc; 60 int rc;
60 61
61 if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) 62 if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
65 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 66 sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
66 if (!sccb) 67 if (!sccb)
67 return -ENOMEM; 68 return -ENOMEM;
68 if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) { 69 copied = PAGE_SIZE -
70 copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
71 if (offsetof(struct sccb_header, length) +
72 sizeof(sccb->length) > copied || sccb->length > copied) {
69 rc = -EFAULT; 73 rc = -EFAULT;
70 goto out_free; 74 goto out_free;
71 } 75 }
72 if (sccb->length > PAGE_SIZE || sccb->length < 8) 76 if (sccb->length < 8) {
73 return -EINVAL; 77 rc = -EINVAL;
74 if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
75 rc = -EFAULT;
76 goto out_free; 78 goto out_free;
77 } 79 }
78 rc = sclp_sync_request(ctl_sccb.cmdw, sccb); 80 rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f3bb7af4e984..ead83a24bcd1 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
688{ 688{
689 struct flowi6 fl; 689 struct flowi6 fl;
690 690
691 memset(&fl, 0, sizeof(fl));
691 if (saddr) 692 if (saddr)
692 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); 693 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
693 if (daddr) 694 if (daddr)
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 57e781c71e67..837effe19907 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
491 genpd->dev_ops.active_wakeup = scpsys_active_wakeup; 491 genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
492 492
493 /* 493 /*
494 * With CONFIG_PM disabled turn on all domains to make the 494 * Initially turn on all domains to make the domains usable
495 * hardware usable. 495 * with !CONFIG_PM and to get the hardware in sync with the
496 * software. The unused domains will be switched off during
497 * late_init time.
496 */ 498 */
497 if (!IS_ENABLED(CONFIG_PM)) 499 genpd->power_on(genpd);
498 genpd->power_on(genpd);
499 500
500 pm_genpd_init(genpd, NULL, true); 501 pm_genpd_init(genpd, NULL, false);
501 } 502 }
502 503
503 /* 504 /*
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04028a3..be72a8e5f221 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
172static int vpfe_update_pipe_state(struct vpfe_video_device *video) 172static int vpfe_update_pipe_state(struct vpfe_video_device *video)
173{ 173{
174 struct vpfe_pipeline *pipe = &video->pipe; 174 struct vpfe_pipeline *pipe = &video->pipe;
175 int ret;
175 176
176 if (vpfe_prepare_pipeline(video)) 177 ret = vpfe_prepare_pipeline(video);
177 return vpfe_prepare_pipeline(video); 178 if (ret)
179 return ret;
178 180
179 /* 181 /*
180 * Find out if there is any input video 182 * Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
182 */ 184 */
183 if (pipe->input_num == 0) { 185 if (pipe->input_num == 0) {
184 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; 186 pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
185 if (vpfe_update_current_ext_subdev(video)) { 187 ret = vpfe_update_current_ext_subdev(video);
188 if (ret) {
186 pr_err("Invalid external subdev\n"); 189 pr_err("Invalid external subdev\n");
187 return vpfe_update_current_ext_subdev(video); 190 return ret;
188 } 191 }
189 } else { 192 } else {
190 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; 193 pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
667 struct v4l2_subdev *subdev; 670 struct v4l2_subdev *subdev;
668 struct v4l2_format format; 671 struct v4l2_format format;
669 struct media_pad *remote; 672 struct media_pad *remote;
673 int ret;
670 674
671 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); 675 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
672 676
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
695 sd_fmt.pad = remote->index; 699 sd_fmt.pad = remote->index;
696 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; 700 sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
697 /* get output format of remote subdev */ 701 /* get output format of remote subdev */
698 if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) { 702 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
703 if (ret) {
699 v4l2_err(&vpfe_dev->v4l2_dev, 704 v4l2_err(&vpfe_dev->v4l2_dev,
700 "invalid remote subdev for video node\n"); 705 "invalid remote subdev for video node\n");
701 return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); 706 return ret;
702 } 707 }
703 /* convert to pix format */ 708 /* convert to pix format */
704 mbus.code = sd_fmt.format.code; 709 mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
725 struct vpfe_video_device *video = video_drvdata(file); 730 struct vpfe_video_device *video = video_drvdata(file);
726 struct vpfe_device *vpfe_dev = video->vpfe_dev; 731 struct vpfe_device *vpfe_dev = video->vpfe_dev;
727 struct v4l2_format format; 732 struct v4l2_format format;
733 int ret;
728 734
729 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); 735 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
730 /* If streaming is started, return error */ 736 /* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
733 return -EBUSY; 739 return -EBUSY;
734 } 740 }
735 /* get adjacent subdev's output pad format */ 741 /* get adjacent subdev's output pad format */
736 if (__vpfe_video_get_format(video, &format)) 742 ret = __vpfe_video_get_format(video, &format);
737 return __vpfe_video_get_format(video, &format); 743 if (ret)
744 return ret;
738 *fmt = format; 745 *fmt = format;
739 video->fmt = *fmt; 746 video->fmt = *fmt;
740 return 0; 747 return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
757 struct vpfe_video_device *video = video_drvdata(file); 764 struct vpfe_video_device *video = video_drvdata(file);
758 struct vpfe_device *vpfe_dev = video->vpfe_dev; 765 struct vpfe_device *vpfe_dev = video->vpfe_dev;
759 struct v4l2_format format; 766 struct v4l2_format format;
767 int ret;
760 768
761 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); 769 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
762 /* get adjacent subdev's output pad format */ 770 /* get adjacent subdev's output pad format */
763 if (__vpfe_video_get_format(video, &format)) 771 ret = __vpfe_video_get_format(video, &format);
764 return __vpfe_video_get_format(video, &format); 772 if (ret)
773 return ret;
765 774
766 *fmt = format; 775 *fmt = format;
767 return 0; 776 return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
838 847
839 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); 848 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
840 849
841 if (mutex_lock_interruptible(&video->lock)) 850 ret = mutex_lock_interruptible(&video->lock);
842 return mutex_lock_interruptible(&video->lock); 851 if (ret)
852 return ret;
843 /* 853 /*
844 * If streaming is started return device busy 854 * If streaming is started return device busy
845 * error 855 * error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
940 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); 950 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
941 951
942 /* Call decoder driver function to set the standard */ 952 /* Call decoder driver function to set the standard */
943 if (mutex_lock_interruptible(&video->lock)) 953 ret = mutex_lock_interruptible(&video->lock);
944 return mutex_lock_interruptible(&video->lock); 954 if (ret)
955 return ret;
945 sdinfo = video->current_ext_subdev; 956 sdinfo = video->current_ext_subdev;
946 /* If streaming is started, return device busy error */ 957 /* If streaming is started, return device busy error */
947 if (video->started) { 958 if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1327 return -EINVAL; 1338 return -EINVAL;
1328 } 1339 }
1329 1340
1330 if (mutex_lock_interruptible(&video->lock)) 1341 ret = mutex_lock_interruptible(&video->lock);
1331 return mutex_lock_interruptible(&video->lock); 1342 if (ret)
1343 return ret;
1332 1344
1333 if (video->io_usrs != 0) { 1345 if (video->io_usrs != 0) {
1334 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); 1346 v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
1354 q->buf_struct_size = sizeof(struct vpfe_cap_buffer); 1366 q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
1355 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1367 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1356 1368
1357 if (vb2_queue_init(q)) { 1369 ret = vb2_queue_init(q);
1370 if (ret) {
1358 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); 1371 v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
1359 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); 1372 vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
1360 return vb2_queue_init(q); 1373 return ret;
1361 } 1374 }
1362 1375
1363 fh->io_allowed = 1; 1376 fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
1533 return -EINVAL; 1546 return -EINVAL;
1534 } 1547 }
1535 1548
1536 if (mutex_lock_interruptible(&video->lock)) 1549 ret = mutex_lock_interruptible(&video->lock);
1537 return mutex_lock_interruptible(&video->lock); 1550 if (ret)
1551 return ret;
1538 1552
1539 vpfe_stop_capture(video); 1553 vpfe_stop_capture(video);
1540 ret = vb2_streamoff(&video->buffer_queue, buf_type); 1554 ret = vb2_streamoff(&video->buffer_queue, buf_type);
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
3- Remove unneeded file entries in sysfs 3- Remove unneeded file entries in sysfs
4- Remove software processing of IB protocol and place in library for use 4- Remove software processing of IB protocol and place in library for use
5 by qib, ipath (if still present), hfi1, and eventually soft-roce 5 by qib, ipath (if still present), hfi1, and eventually soft-roce
6 6- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5fb6c1..c1c5bf82addb 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
49#include <linux/vmalloc.h> 49#include <linux/vmalloc.h>
50#include <linux/io.h> 50#include <linux/io.h>
51 51
52#include <rdma/ib.h>
53
52#include "hfi.h" 54#include "hfi.h"
53#include "pio.h" 55#include "pio.h"
54#include "device.h" 56#include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
190 int uctxt_required = 1; 192 int uctxt_required = 1;
191 int must_be_root = 0; 193 int must_be_root = 0;
192 194
195 /* FIXME: This interface cannot continue out of staging */
196 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
197 return -EACCES;
198
193 if (count < sizeof(cmd)) { 199 if (count < sizeof(cmd)) {
194 ret = -EINVAL; 200 ret = -EINVAL;
195 goto bail; 201 goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
791 spin_unlock_irqrestore(&dd->uctxt_lock, flags); 797 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
792 798
793 dd->rcd[uctxt->ctxt] = NULL; 799 dd->rcd[uctxt->ctxt] = NULL;
800
801 hfi1_user_exp_rcv_free(fdata);
802 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
803
794 uctxt->rcvwait_to = 0; 804 uctxt->rcvwait_to = 0;
795 uctxt->piowait_to = 0; 805 uctxt->piowait_to = 0;
796 uctxt->rcvnowait = 0; 806 uctxt->rcvnowait = 0;
797 uctxt->pionowait = 0; 807 uctxt->pionowait = 0;
798 uctxt->event_flags = 0; 808 uctxt->event_flags = 0;
799 809
800 hfi1_user_exp_rcv_free(fdata);
801 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
802
803 hfi1_stats.sps_ctxts--; 810 hfi1_stats.sps_ctxts--;
804 if (++dd->freectxts == dd->num_user_contexts) 811 if (++dd->freectxts == dd->num_user_contexts)
805 aspm_enable_all(dd); 812 aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
1127 1134
1128static int user_init(struct file *fp) 1135static int user_init(struct file *fp)
1129{ 1136{
1130 int ret;
1131 unsigned int rcvctrl_ops = 0; 1137 unsigned int rcvctrl_ops = 0;
1132 struct hfi1_filedata *fd = fp->private_data; 1138 struct hfi1_filedata *fd = fp->private_data;
1133 struct hfi1_ctxtdata *uctxt = fd->uctxt; 1139 struct hfi1_ctxtdata *uctxt = fd->uctxt;
1134 1140
1135 /* make sure that the context has already been setup */ 1141 /* make sure that the context has already been setup */
1136 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) { 1142 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
1137 ret = -EFAULT; 1143 return -EFAULT;
1138 goto done;
1139 }
1140
1141 /*
1142 * Subctxts don't need to initialize anything since master
1143 * has done it.
1144 */
1145 if (fd->subctxt) {
1146 ret = wait_event_interruptible(uctxt->wait, !test_bit(
1147 HFI1_CTXT_MASTER_UNINIT,
1148 &uctxt->event_flags));
1149 goto expected;
1150 }
1151 1144
1152 /* initialize poll variables... */ 1145 /* initialize poll variables... */
1153 uctxt->urgent = 0; 1146 uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
1202 wake_up(&uctxt->wait); 1195 wake_up(&uctxt->wait);
1203 } 1196 }
1204 1197
1205expected: 1198 return 0;
1206 /*
1207 * Expected receive has to be setup for all processes (including
1208 * shared contexts). However, it has to be done after the master
1209 * context has been fully configured as it depends on the
1210 * eager/expected split of the RcvArray entries.
1211 * Setting it up here ensures that the subcontexts will be waiting
1212 * (due to the above wait_event_interruptible() until the master
1213 * is setup.
1214 */
1215 ret = hfi1_user_exp_rcv_init(fp);
1216done:
1217 return ret;
1218} 1199}
1219 1200
1220static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) 1201static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
1261 int ret = 0; 1242 int ret = 0;
1262 1243
1263 /* 1244 /*
1264 * Context should be set up only once (including allocation and 1245 * Context should be set up only once, including allocation and
1265 * programming of eager buffers. This is done if context sharing 1246 * programming of eager buffers. This is done if context sharing
1266 * is not requested or by the master process. 1247 * is not requested or by the master process.
1267 */ 1248 */
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp)
1282 if (ret) 1263 if (ret)
1283 goto done; 1264 goto done;
1284 } 1265 }
1266 } else {
1267 ret = wait_event_interruptible(uctxt->wait, !test_bit(
1268 HFI1_CTXT_MASTER_UNINIT,
1269 &uctxt->event_flags));
1270 if (ret)
1271 goto done;
1285 } 1272 }
1273
1286 ret = hfi1_user_sdma_alloc_queues(uctxt, fp); 1274 ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
1287 if (ret) 1275 if (ret)
1288 goto done; 1276 goto done;
1277 /*
1278 * Expected receive has to be setup for all processes (including
1279 * shared contexts). However, it has to be done after the master
1280 * context has been fully configured as it depends on the
1281 * eager/expected split of the RcvArray entries.
1282 * Setting it up here ensures that the subcontexts will be waiting
1283 * (due to the above wait_event_interruptible() until the master
1284 * is setup.
1285 */
1286 ret = hfi1_user_exp_rcv_init(fp);
1287 if (ret)
1288 goto done;
1289 1289
1290 set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); 1290 set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
1291done: 1291done:
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1565{ 1565{
1566 struct hfi1_devdata *dd = filp->private_data; 1566 struct hfi1_devdata *dd = filp->private_data;
1567 1567
1568 switch (whence) { 1568 return fixed_size_llseek(filp, offset, whence,
1569 case SEEK_SET: 1569 (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
1570 break;
1571 case SEEK_CUR:
1572 offset += filp->f_pos;
1573 break;
1574 case SEEK_END:
1575 offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
1576 offset;
1577 break;
1578 default:
1579 return -EINVAL;
1580 }
1581
1582 if (offset < 0)
1583 return -EINVAL;
1584
1585 if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
1586 return -EINVAL;
1587
1588 filp->f_pos = offset;
1589
1590 return filp->f_pos;
1591} 1570}
1592 1571
1593/* NOTE: assumes unsigned long is 8 bytes */ 1572/* NOTE: assumes unsigned long is 8 bytes */
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad0164ea9a..b3f0682a36c9 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
71 struct mm_struct *, 71 struct mm_struct *,
72 unsigned long, unsigned long); 72 unsigned long, unsigned long);
73static void mmu_notifier_mem_invalidate(struct mmu_notifier *, 73static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
74 struct mm_struct *,
74 unsigned long, unsigned long); 75 unsigned long, unsigned long);
75static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, 76static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
76 unsigned long, unsigned long); 77 unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
137 rbnode = rb_entry(node, struct mmu_rb_node, node); 138 rbnode = rb_entry(node, struct mmu_rb_node, node);
138 rb_erase(node, root); 139 rb_erase(node, root);
139 if (handler->ops->remove) 140 if (handler->ops->remove)
140 handler->ops->remove(root, rbnode, false); 141 handler->ops->remove(root, rbnode, NULL);
141 } 142 }
142 } 143 }
143 144
@@ -176,7 +177,7 @@ unlock:
176 return ret; 177 return ret;
177} 178}
178 179
179/* Caller must host handler lock */ 180/* Caller must hold handler lock */
180static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, 181static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
181 unsigned long addr, 182 unsigned long addr,
182 unsigned long len) 183 unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
200 return node; 201 return node;
201} 202}
202 203
204/* Caller must *not* hold handler lock. */
203static void __mmu_rb_remove(struct mmu_rb_handler *handler, 205static void __mmu_rb_remove(struct mmu_rb_handler *handler,
204 struct mmu_rb_node *node, bool arg) 206 struct mmu_rb_node *node, struct mm_struct *mm)
205{ 207{
208 unsigned long flags;
209
206 /* Validity of handler and node pointers has been checked by caller. */ 210 /* Validity of handler and node pointers has been checked by caller. */
207 hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, 211 hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
208 node->len); 212 node->len);
213 spin_lock_irqsave(&handler->lock, flags);
209 __mmu_int_rb_remove(node, handler->root); 214 __mmu_int_rb_remove(node, handler->root);
215 spin_unlock_irqrestore(&handler->lock, flags);
216
210 if (handler->ops->remove) 217 if (handler->ops->remove)
211 handler->ops->remove(handler->root, node, arg); 218 handler->ops->remove(handler->root, node, mm);
212} 219}
213 220
214struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, 221struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
231void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) 238void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
232{ 239{
233 struct mmu_rb_handler *handler = find_mmu_handler(root); 240 struct mmu_rb_handler *handler = find_mmu_handler(root);
234 unsigned long flags;
235 241
236 if (!handler || !node) 242 if (!handler || !node)
237 return; 243 return;
238 244
239 spin_lock_irqsave(&handler->lock, flags); 245 __mmu_rb_remove(handler, node, NULL);
240 __mmu_rb_remove(handler, node, false);
241 spin_unlock_irqrestore(&handler->lock, flags);
242} 246}
243 247
244static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) 248static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
260static inline void mmu_notifier_page(struct mmu_notifier *mn, 264static inline void mmu_notifier_page(struct mmu_notifier *mn,
261 struct mm_struct *mm, unsigned long addr) 265 struct mm_struct *mm, unsigned long addr)
262{ 266{
263 mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE); 267 mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
264} 268}
265 269
266static inline void mmu_notifier_range_start(struct mmu_notifier *mn, 270static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
268 unsigned long start, 272 unsigned long start,
269 unsigned long end) 273 unsigned long end)
270{ 274{
271 mmu_notifier_mem_invalidate(mn, start, end); 275 mmu_notifier_mem_invalidate(mn, mm, start, end);
272} 276}
273 277
274static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, 278static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
279 struct mm_struct *mm,
275 unsigned long start, unsigned long end) 280 unsigned long start, unsigned long end)
276{ 281{
277 struct mmu_rb_handler *handler = 282 struct mmu_rb_handler *handler =
278 container_of(mn, struct mmu_rb_handler, mn); 283 container_of(mn, struct mmu_rb_handler, mn);
279 struct rb_root *root = handler->root; 284 struct rb_root *root = handler->root;
280 struct mmu_rb_node *node; 285 struct mmu_rb_node *node, *ptr = NULL;
281 unsigned long flags; 286 unsigned long flags;
282 287
283 spin_lock_irqsave(&handler->lock, flags); 288 spin_lock_irqsave(&handler->lock, flags);
284 for (node = __mmu_int_rb_iter_first(root, start, end - 1); node; 289 for (node = __mmu_int_rb_iter_first(root, start, end - 1);
285 node = __mmu_int_rb_iter_next(node, start, end - 1)) { 290 node; node = ptr) {
291 /* Guard against node removal. */
292 ptr = __mmu_int_rb_iter_next(node, start, end - 1);
286 hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", 293 hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
287 node->addr, node->len); 294 node->addr, node->len);
288 if (handler->ops->invalidate(root, node)) 295 if (handler->ops->invalidate(root, node)) {
289 __mmu_rb_remove(handler, node, true); 296 spin_unlock_irqrestore(&handler->lock, flags);
297 __mmu_rb_remove(handler, node, mm);
298 spin_lock_irqsave(&handler->lock, flags);
299 }
290 } 300 }
291 spin_unlock_irqrestore(&handler->lock, flags); 301 spin_unlock_irqrestore(&handler->lock, flags);
292} 302}
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fdb8a18..19a306e83c7d 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@ struct mmu_rb_node {
59struct mmu_rb_ops { 59struct mmu_rb_ops {
60 bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); 60 bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
61 int (*insert)(struct rb_root *, struct mmu_rb_node *); 61 int (*insert)(struct rb_root *, struct mmu_rb_node *);
62 void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); 62 void (*remove)(struct rb_root *, struct mmu_rb_node *,
63 struct mm_struct *);
63 int (*invalidate)(struct rb_root *, struct mmu_rb_node *); 64 int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
64}; 65};
65 66
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad28019b..dc9119e1b458 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
519 * do the flush work until that QP's 519 * do the flush work until that QP's
520 * sdma work has finished. 520 * sdma work has finished.
521 */ 521 */
522 spin_lock(&qp->s_lock);
522 if (qp->s_flags & RVT_S_WAIT_DMA) { 523 if (qp->s_flags & RVT_S_WAIT_DMA) {
523 qp->s_flags &= ~RVT_S_WAIT_DMA; 524 qp->s_flags &= ~RVT_S_WAIT_DMA;
524 hfi1_schedule_send(qp); 525 hfi1_schedule_send(qp);
525 } 526 }
527 spin_unlock(&qp->s_lock);
526} 528}
527 529
528/** 530/**
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e095df8d..8bd56d5c783d 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
87static int set_rcvarray_entry(struct file *, unsigned long, u32, 87static int set_rcvarray_entry(struct file *, unsigned long, u32,
88 struct tid_group *, struct page **, unsigned); 88 struct tid_group *, struct page **, unsigned);
89static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); 89static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
90static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 90static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
91 struct mm_struct *);
91static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 92static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
92static int program_rcvarray(struct file *, unsigned long, struct tid_group *, 93static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
93 struct tid_pageset *, unsigned, u16, struct page **, 94 struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
254 struct hfi1_ctxtdata *uctxt = fd->uctxt; 255 struct hfi1_ctxtdata *uctxt = fd->uctxt;
255 struct tid_group *grp, *gptr; 256 struct tid_group *grp, *gptr;
256 257
258 if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
259 return 0;
257 /* 260 /*
258 * The notifier would have been removed when the process'es mm 261 * The notifier would have been removed when the process'es mm
259 * was freed. 262 * was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
899 if (!node || node->rcventry != (uctxt->expected_base + rcventry)) 902 if (!node || node->rcventry != (uctxt->expected_base + rcventry))
900 return -EBADF; 903 return -EBADF;
901 if (HFI1_CAP_IS_USET(TID_UNMAP)) 904 if (HFI1_CAP_IS_USET(TID_UNMAP))
902 mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false); 905 mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
903 else 906 else
904 hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); 907 hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
905 908
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
965 continue; 968 continue;
966 if (HFI1_CAP_IS_USET(TID_UNMAP)) 969 if (HFI1_CAP_IS_USET(TID_UNMAP))
967 mmu_rb_remove(&fd->tid_rb_root, 970 mmu_rb_remove(&fd->tid_rb_root,
968 &node->mmu, false); 971 &node->mmu, NULL);
969 else 972 else
970 hfi1_mmu_rb_remove(&fd->tid_rb_root, 973 hfi1_mmu_rb_remove(&fd->tid_rb_root,
971 &node->mmu); 974 &node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
1032} 1035}
1033 1036
1034static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, 1037static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
1035 bool notifier) 1038 struct mm_struct *mm)
1036{ 1039{
1037 struct hfi1_filedata *fdata = 1040 struct hfi1_filedata *fdata =
1038 container_of(root, struct hfi1_filedata, tid_rb_root); 1041 container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a42000f..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
278static void user_sdma_free_request(struct user_sdma_request *, bool); 278static void user_sdma_free_request(struct user_sdma_request *, bool);
279static int pin_vector_pages(struct user_sdma_request *, 279static int pin_vector_pages(struct user_sdma_request *,
280 struct user_sdma_iovec *); 280 struct user_sdma_iovec *);
281static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); 281static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
282 unsigned);
282static int check_header_template(struct user_sdma_request *, 283static int check_header_template(struct user_sdma_request *,
283 struct hfi1_pkt_header *, u32, u32); 284 struct hfi1_pkt_header *, u32, u32);
284static int set_txreq_header(struct user_sdma_request *, 285static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
299static void activate_packet_queue(struct iowait *, int); 300static void activate_packet_queue(struct iowait *, int);
300static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); 301static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
301static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); 302static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
302static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); 303static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
304 struct mm_struct *);
303static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); 305static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
304 306
305static struct mmu_rb_ops sdma_rb_ops = { 307static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
1063 rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root, 1065 rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
1064 (unsigned long)iovec->iov.iov_base, 1066 (unsigned long)iovec->iov.iov_base,
1065 iovec->iov.iov_len); 1067 iovec->iov.iov_len);
1066 if (rb_node) 1068 if (rb_node && !IS_ERR(rb_node))
1067 node = container_of(rb_node, struct sdma_mmu_node, rb); 1069 node = container_of(rb_node, struct sdma_mmu_node, rb);
1070 else
1071 rb_node = NULL;
1068 1072
1069 if (!node) { 1073 if (!node) {
1070 node = kzalloc(sizeof(*node), GFP_KERNEL); 1074 node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
1107 goto bail; 1111 goto bail;
1108 } 1112 }
1109 if (pinned != npages) { 1113 if (pinned != npages) {
1110 unpin_vector_pages(current->mm, pages, pinned); 1114 unpin_vector_pages(current->mm, pages, node->npages,
1115 pinned);
1111 ret = -EFAULT; 1116 ret = -EFAULT;
1112 goto bail; 1117 goto bail;
1113 } 1118 }
@@ -1147,9 +1152,9 @@ bail:
1147} 1152}
1148 1153
1149static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, 1154static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
1150 unsigned npages) 1155 unsigned start, unsigned npages)
1151{ 1156{
1152 hfi1_release_user_pages(mm, pages, npages, 0); 1157 hfi1_release_user_pages(mm, pages + start, npages, 0);
1153 kfree(pages); 1158 kfree(pages);
1154} 1159}
1155 1160
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1502 &req->pq->sdma_rb_root, 1507 &req->pq->sdma_rb_root,
1503 (unsigned long)req->iovs[i].iov.iov_base, 1508 (unsigned long)req->iovs[i].iov.iov_base,
1504 req->iovs[i].iov.iov_len); 1509 req->iovs[i].iov.iov_len);
1505 if (!mnode) 1510 if (!mnode || IS_ERR(mnode))
1506 continue; 1511 continue;
1507 1512
1508 node = container_of(mnode, struct sdma_mmu_node, rb); 1513 node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
1547} 1552}
1548 1553
1549static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, 1554static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
1550 bool notifier) 1555 struct mm_struct *mm)
1551{ 1556{
1552 struct sdma_mmu_node *node = 1557 struct sdma_mmu_node *node =
1553 container_of(mnode, struct sdma_mmu_node, rb); 1558 container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
1557 node->pq->n_locked -= node->npages; 1562 node->pq->n_locked -= node->npages;
1558 spin_unlock(&node->pq->evict_lock); 1563 spin_unlock(&node->pq->evict_lock);
1559 1564
1560 unpin_vector_pages(notifier ? NULL : current->mm, node->pages, 1565 /*
1566 * If mm is set, we are being called by the MMU notifier and we
1567 * should not pass a mm_struct to unpin_vector_page(). This is to
1568 * prevent a deadlock when hfi1_release_user_pages() attempts to
1569 * take the mmap_sem, which the MMU notifier has already taken.
1570 */
1571 unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
1561 node->npages); 1572 node->npages);
1562 /* 1573 /*
1563 * If called by the MMU notifier, we have to adjust the pinned 1574 * If called by the MMU notifier, we have to adjust the pinned
1564 * page count ourselves. 1575 * page count ourselves.
1565 */ 1576 */
1566 if (notifier) 1577 if (mm)
1567 current->mm->pinned_vm -= node->npages; 1578 mm->pinned_vm -= node->npages;
1568 kfree(node); 1579 kfree(node);
1569} 1580}
1570 1581
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index c37eedc35a24..3c3dc4a3d52c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -376,6 +376,8 @@ config MTK_THERMAL
376 tristate "Temperature sensor driver for mediatek SoCs" 376 tristate "Temperature sensor driver for mediatek SoCs"
377 depends on ARCH_MEDIATEK || COMPILE_TEST 377 depends on ARCH_MEDIATEK || COMPILE_TEST
378 depends on HAS_IOMEM 378 depends on HAS_IOMEM
379 depends on NVMEM || NVMEM=n
380 depends on RESET_CONTROLLER
379 default y 381 default y
380 help 382 help
381 Enable this option if you want to have support for thermal management 383 Enable this option if you want to have support for thermal management
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..5e820b541506 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
68 * Every step equals (1 * 200) / 255 celsius, and finally 68 * Every step equals (1 * 200) / 255 celsius, and finally
69 * need convert to millicelsius. 69 * need convert to millicelsius.
70 */ 70 */
71 return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000; 71 return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
72} 72}
73 73
74static inline long _temp_to_step(long temp) 74static inline long _temp_to_step(long temp)
75{ 75{
76 return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200); 76 return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
77} 77}
78 78
79static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, 79static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 3d93b1c07cee..507632b9648e 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -27,7 +27,6 @@
27#include <linux/thermal.h> 27#include <linux/thermal.h>
28#include <linux/reset.h> 28#include <linux/reset.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/nvmem-consumer.h>
31 30
32/* AUXADC Registers */ 31/* AUXADC Registers */
33#define AUXADC_CON0_V 0x000 32#define AUXADC_CON0_V 0x000
@@ -619,7 +618,7 @@ static struct platform_driver mtk_thermal_driver = {
619 618
620module_platform_driver(mtk_thermal_driver); 619module_platform_driver(mtk_thermal_driver);
621 620
622MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); 621MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
623MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>"); 622MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
624MODULE_DESCRIPTION("Mediatek thermal driver"); 623MODULE_DESCRIPTION("Mediatek thermal driver");
625MODULE_LICENSE("GPL v2"); 624MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 49ac23d3e776..d8ec44b194d6 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
803 * otherwise, it returns a corresponding ERR_PTR(). Caller must 803 * otherwise, it returns a corresponding ERR_PTR(). Caller must
804 * check the return value with help of IS_ERR() helper. 804 * check the return value with help of IS_ERR() helper.
805 */ 805 */
806static struct __thermal_zone * 806static struct __thermal_zone
807thermal_of_build_thermal_zone(struct device_node *np) 807__init *thermal_of_build_thermal_zone(struct device_node *np)
808{ 808{
809 struct device_node *child = NULL, *gchild; 809 struct device_node *child = NULL, *gchild;
810 struct __thermal_zone *tz; 810 struct __thermal_zone *tz;
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 1246aa6fcab0..2f1a863a8e15 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
301 capped_extra_power = 0; 301 capped_extra_power = 0;
302 extra_power = 0; 302 extra_power = 0;
303 for (i = 0; i < num_actors; i++) { 303 for (i = 0; i < num_actors; i++) {
304 u64 req_range = req_power[i] * power_range; 304 u64 req_range = (u64)req_power[i] * power_range;
305 305
306 granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range, 306 granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
307 total_req_power); 307 total_req_power);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d4b54653ecf8..5133cd1e10b7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
688{ 688{
689 struct thermal_zone_device *tz = to_thermal_zone(dev); 689 struct thermal_zone_device *tz = to_thermal_zone(dev);
690 int trip, ret; 690 int trip, ret;
691 unsigned long temperature; 691 int temperature;
692 692
693 if (!tz->ops->set_trip_temp) 693 if (!tz->ops->set_trip_temp)
694 return -EPERM; 694 return -EPERM;
@@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
696 if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip)) 696 if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
697 return -EINVAL; 697 return -EINVAL;
698 698
699 if (kstrtoul(buf, 10, &temperature)) 699 if (kstrtoint(buf, 10, &temperature))
700 return -EINVAL; 700 return -EINVAL;
701 701
702 ret = tz->ops->set_trip_temp(tz, trip, temperature); 702 ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
899{ 899{
900 struct thermal_zone_device *tz = to_thermal_zone(dev); 900 struct thermal_zone_device *tz = to_thermal_zone(dev);
901 int ret = 0; 901 int ret = 0;
902 unsigned long temperature; 902 int temperature;
903 903
904 if (kstrtoul(buf, 10, &temperature)) 904 if (kstrtoint(buf, 10, &temperature))
905 return -EINVAL; 905 return -EINVAL;
906 906
907 if (!tz->ops->set_emul_temp) { 907 if (!tz->ops->set_emul_temp) {
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
959 struct thermal_zone_device *tz = to_thermal_zone(dev); \ 959 struct thermal_zone_device *tz = to_thermal_zone(dev); \
960 \ 960 \
961 if (tz->tzp) \ 961 if (tz->tzp) \
962 return sprintf(buf, "%u\n", tz->tzp->name); \ 962 return sprintf(buf, "%d\n", tz->tzp->name); \
963 else \ 963 else \
964 return -EIO; \ 964 return -EIO; \
965 } \ 965 } \
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e16a49b507ef..cf0dc51a2690 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
626 */ 626 */
627 627
628static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, 628static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
629 struct inode *ptm_inode, int idx) 629 struct file *file, int idx)
630{ 630{
631 /* Master must be open via /dev/ptmx */ 631 /* Master must be open via /dev/ptmx */
632 return ERR_PTR(-EIO); 632 return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
642 */ 642 */
643 643
644static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, 644static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
645 struct inode *pts_inode, int idx) 645 struct file *file, int idx)
646{ 646{
647 struct tty_struct *tty; 647 struct tty_struct *tty;
648 648
649 mutex_lock(&devpts_mutex); 649 mutex_lock(&devpts_mutex);
650 tty = devpts_get_priv(pts_inode); 650 tty = devpts_get_priv(file->f_path.dentry);
651 mutex_unlock(&devpts_mutex); 651 mutex_unlock(&devpts_mutex);
652 /* Master must be open before slave */ 652 /* Master must be open before slave */
653 if (!tty) 653 if (!tty)
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
663/* this is called once with whichever end is closed last */ 663/* this is called once with whichever end is closed last */
664static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) 664static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
665{ 665{
666 struct inode *ptmx_inode; 666 struct pts_fs_info *fsi;
667 667
668 if (tty->driver->subtype == PTY_TYPE_MASTER) 668 if (tty->driver->subtype == PTY_TYPE_MASTER)
669 ptmx_inode = tty->driver_data; 669 fsi = tty->driver_data;
670 else 670 else
671 ptmx_inode = tty->link->driver_data; 671 fsi = tty->link->driver_data;
672 devpts_kill_index(ptmx_inode, tty->index); 672 devpts_kill_index(fsi, tty->index);
673 devpts_del_ref(ptmx_inode); 673 devpts_put_ref(fsi);
674} 674}
675 675
676static const struct tty_operations ptm_unix98_ops = { 676static const struct tty_operations ptm_unix98_ops = {
@@ -720,8 +720,9 @@ static const struct tty_operations pty_unix98_ops = {
720 720
721static int ptmx_open(struct inode *inode, struct file *filp) 721static int ptmx_open(struct inode *inode, struct file *filp)
722{ 722{
723 struct pts_fs_info *fsi;
723 struct tty_struct *tty; 724 struct tty_struct *tty;
724 struct inode *slave_inode; 725 struct dentry *dentry;
725 int retval; 726 int retval;
726 int index; 727 int index;
727 728
@@ -734,54 +735,46 @@ static int ptmx_open(struct inode *inode, struct file *filp)
734 if (retval) 735 if (retval)
735 return retval; 736 return retval;
736 737
738 fsi = devpts_get_ref(inode, filp);
739 retval = -ENODEV;
740 if (!fsi)
741 goto out_free_file;
742
737 /* find a device that is not in use. */ 743 /* find a device that is not in use. */
738 mutex_lock(&devpts_mutex); 744 mutex_lock(&devpts_mutex);
739 index = devpts_new_index(inode); 745 index = devpts_new_index(fsi);
740 if (index < 0) {
741 retval = index;
742 mutex_unlock(&devpts_mutex);
743 goto err_file;
744 }
745
746 mutex_unlock(&devpts_mutex); 746 mutex_unlock(&devpts_mutex);
747 747
748 mutex_lock(&tty_mutex); 748 retval = index;
749 tty = tty_init_dev(ptm_driver, index); 749 if (index < 0)
750 goto out_put_ref;
750 751
751 if (IS_ERR(tty)) {
752 retval = PTR_ERR(tty);
753 goto out;
754 }
755 752
753 mutex_lock(&tty_mutex);
754 tty = tty_init_dev(ptm_driver, index);
756 /* The tty returned here is locked so we can safely 755 /* The tty returned here is locked so we can safely
757 drop the mutex */ 756 drop the mutex */
758 mutex_unlock(&tty_mutex); 757 mutex_unlock(&tty_mutex);
759 758
760 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 759 retval = PTR_ERR(tty);
761 tty->driver_data = inode; 760 if (IS_ERR(tty))
761 goto out;
762 762
763 /* 763 /*
764 * In the case where all references to ptmx inode are dropped and we 764 * From here on out, the tty is "live", and the index and
765 * still have /dev/tty opened pointing to the master/slave pair (ptmx 765 * fsi will be killed/put by the tty_release()
766 * is closed/released before /dev/tty), we must make sure that the inode
767 * is still valid when we call the final pty_unix98_shutdown, thus we
768 * hold an additional reference to the ptmx inode. For the same /dev/tty
769 * last close case, we also need to make sure the super_block isn't
770 * destroyed (devpts instance unmounted), before /dev/tty is closed and
771 * on its release devpts_kill_index is called.
772 */ 766 */
773 devpts_add_ref(inode); 767 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
768 tty->driver_data = fsi;
774 769
775 tty_add_file(tty, filp); 770 tty_add_file(tty, filp);
776 771
777 slave_inode = devpts_pty_new(inode, 772 dentry = devpts_pty_new(fsi, index, tty->link);
778 MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, 773 if (IS_ERR(dentry)) {
779 tty->link); 774 retval = PTR_ERR(dentry);
780 if (IS_ERR(slave_inode)) {
781 retval = PTR_ERR(slave_inode);
782 goto err_release; 775 goto err_release;
783 } 776 }
784 tty->link->driver_data = slave_inode; 777 tty->link->driver_data = dentry;
785 778
786 retval = ptm_driver->ops->open(tty, filp); 779 retval = ptm_driver->ops->open(tty, filp);
787 if (retval) 780 if (retval)
@@ -793,12 +786,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
793 return 0; 786 return 0;
794err_release: 787err_release:
795 tty_unlock(tty); 788 tty_unlock(tty);
789 // This will also put-ref the fsi
796 tty_release(inode, filp); 790 tty_release(inode, filp);
797 return retval; 791 return retval;
798out: 792out:
799 mutex_unlock(&tty_mutex); 793 devpts_kill_index(fsi, index);
800 devpts_kill_index(inode, index); 794out_put_ref:
801err_file: 795 devpts_put_ref(fsi);
796out_free_file:
802 tty_free_file(filp); 797 tty_free_file(filp);
803 return retval; 798 return retval;
804} 799}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e213da01a3d7..00ad2637b08c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
1403 /* 1403 /*
1404 * Empty the RX FIFO, we are not interested in anything 1404 * Empty the RX FIFO, we are not interested in anything
1405 * received during the half-duplex transmission. 1405 * received during the half-duplex transmission.
1406 * Enable previously disabled RX interrupts.
1406 */ 1407 */
1407 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) 1408 if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
1408 serial8250_clear_fifos(p); 1409 serial8250_clear_fifos(p);
1410
1411 serial8250_rpm_get(p);
1412
1413 p->ier |= UART_IER_RLSI | UART_IER_RDI;
1414 serial_port_out(&p->port, UART_IER, p->ier);
1415
1416 serial8250_rpm_put(p);
1417 }
1409} 1418}
1410 1419
1411static void serial8250_em485_handle_stop_tx(unsigned long arg) 1420static void serial8250_em485_handle_stop_tx(unsigned long arg)
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 64742a086ae3..4d7cb9c04fce 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -324,7 +324,6 @@ config SERIAL_8250_EM
324config SERIAL_8250_RT288X 324config SERIAL_8250_RT288X
325 bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support" 325 bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
326 depends on SERIAL_8250 326 depends on SERIAL_8250
327 depends on MIPS || COMPILE_TEST
328 default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620 327 default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
329 help 328 help
330 Selecting this option will add support for the alternate register 329 Selecting this option will add support for the alternate register
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index c9fdfc8bf47f..d08baa668d5d 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
72 iowrite32be(val, addr); 72 iowrite32be(val, addr);
73} 73}
74 74
75static const struct uartlite_reg_ops uartlite_be = { 75static struct uartlite_reg_ops uartlite_be = {
76 .in = uartlite_inbe32, 76 .in = uartlite_inbe32,
77 .out = uartlite_outbe32, 77 .out = uartlite_outbe32,
78}; 78};
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
87 iowrite32(val, addr); 87 iowrite32(val, addr);
88} 88}
89 89
90static const struct uartlite_reg_ops uartlite_le = { 90static struct uartlite_reg_ops uartlite_le = {
91 .in = uartlite_inle32, 91 .in = uartlite_inle32,
92 .out = uartlite_outle32, 92 .out = uartlite_outle32,
93}; 93};
94 94
95static inline u32 uart_in32(u32 offset, struct uart_port *port) 95static inline u32 uart_in32(u32 offset, struct uart_port *port)
96{ 96{
97 const struct uartlite_reg_ops *reg_ops = port->private_data; 97 struct uartlite_reg_ops *reg_ops = port->private_data;
98 98
99 return reg_ops->in(port->membase + offset); 99 return reg_ops->in(port->membase + offset);
100} 100}
101 101
102static inline void uart_out32(u32 val, u32 offset, struct uart_port *port) 102static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
103{ 103{
104 const struct uartlite_reg_ops *reg_ops = port->private_data; 104 struct uartlite_reg_ops *reg_ops = port->private_data;
105 105
106 reg_ops->out(val, port->membase + offset); 106 reg_ops->out(val, port->membase + offset);
107} 107}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9b04d72e752e..24d5491ef0da 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
1367 * Locking: tty_mutex must be held. If the tty is found, bump the tty kref. 1367 * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
1368 */ 1368 */
1369static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, 1369static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
1370 struct inode *inode, int idx) 1370 struct file *file, int idx)
1371{ 1371{
1372 struct tty_struct *tty; 1372 struct tty_struct *tty;
1373 1373
1374 if (driver->ops->lookup) 1374 if (driver->ops->lookup)
1375 tty = driver->ops->lookup(driver, inode, idx); 1375 tty = driver->ops->lookup(driver, file, idx);
1376 else 1376 else
1377 tty = driver->ttys[idx]; 1377 tty = driver->ttys[idx];
1378 1378
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
2040 } 2040 }
2041 2041
2042 /* check whether we're reopening an existing tty */ 2042 /* check whether we're reopening an existing tty */
2043 tty = tty_driver_lookup_tty(driver, inode, index); 2043 tty = tty_driver_lookup_tty(driver, filp, index);
2044 if (IS_ERR(tty)) { 2044 if (IS_ERR(tty)) {
2045 mutex_unlock(&tty_mutex); 2045 mutex_unlock(&tty_mutex);
2046 goto out; 2046 goto out;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fa20f5a99d12..34277ced26bd 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1150,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
1150 phy_exit(dwc->usb2_generic_phy); 1150 phy_exit(dwc->usb2_generic_phy);
1151 phy_exit(dwc->usb3_generic_phy); 1151 phy_exit(dwc->usb3_generic_phy);
1152 1152
1153 usb_phy_set_suspend(dwc->usb2_phy, 1);
1154 usb_phy_set_suspend(dwc->usb3_phy, 1);
1155 WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
1156 WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
1157
1153 pinctrl_pm_select_sleep_state(dev); 1158 pinctrl_pm_select_sleep_state(dev);
1154 1159
1155 return 0; 1160 return 0;
@@ -1163,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
1163 1168
1164 pinctrl_pm_select_default_state(dev); 1169 pinctrl_pm_select_default_state(dev);
1165 1170
1171 usb_phy_set_suspend(dwc->usb2_phy, 0);
1172 usb_phy_set_suspend(dwc->usb3_phy, 0);
1173 ret = phy_power_on(dwc->usb2_generic_phy);
1174 if (ret < 0)
1175 return ret;
1176
1177 ret = phy_power_on(dwc->usb3_generic_phy);
1178 if (ret < 0)
1179 goto err_usb2phy_power;
1180
1166 usb_phy_init(dwc->usb3_phy); 1181 usb_phy_init(dwc->usb3_phy);
1167 usb_phy_init(dwc->usb2_phy); 1182 usb_phy_init(dwc->usb2_phy);
1168 ret = phy_init(dwc->usb2_generic_phy); 1183 ret = phy_init(dwc->usb2_generic_phy);
1169 if (ret < 0) 1184 if (ret < 0)
1170 return ret; 1185 goto err_usb3phy_power;
1171 1186
1172 ret = phy_init(dwc->usb3_generic_phy); 1187 ret = phy_init(dwc->usb3_generic_phy);
1173 if (ret < 0) 1188 if (ret < 0)
@@ -1200,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
1200err_usb2phy_init: 1215err_usb2phy_init:
1201 phy_exit(dwc->usb2_generic_phy); 1216 phy_exit(dwc->usb2_generic_phy);
1202 1217
1218err_usb3phy_power:
1219 phy_power_off(dwc->usb3_generic_phy);
1220
1221err_usb2phy_power:
1222 phy_power_off(dwc->usb2_generic_phy);
1223
1203 return ret; 1224 return ret;
1204} 1225}
1205 1226
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9ac37fe1b6a7..cebf9e38b60a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
645 file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset); 645 file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
646 if (!file) { 646 if (!file) {
647 ret = -ENOMEM; 647 ret = -ENOMEM;
648 goto err1; 648 goto err2;
649 } 649 }
650 650
651 if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) { 651 if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
653 dwc, &dwc3_mode_fops); 653 dwc, &dwc3_mode_fops);
654 if (!file) { 654 if (!file) {
655 ret = -ENOMEM; 655 ret = -ENOMEM;
656 goto err1; 656 goto err2;
657 } 657 }
658 } 658 }
659 659
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
663 dwc, &dwc3_testmode_fops); 663 dwc, &dwc3_testmode_fops);
664 if (!file) { 664 if (!file) {
665 ret = -ENOMEM; 665 ret = -ENOMEM;
666 goto err1; 666 goto err2;
667 } 667 }
668 668
669 file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, 669 file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
670 dwc, &dwc3_link_state_fops); 670 dwc, &dwc3_link_state_fops);
671 if (!file) { 671 if (!file) {
672 ret = -ENOMEM; 672 ret = -ENOMEM;
673 goto err1; 673 goto err2;
674 } 674 }
675 } 675 }
676 676
677 return 0; 677 return 0;
678 678
679err2:
680 kfree(dwc->regset);
681
679err1: 682err1:
680 debugfs_remove_recursive(root); 683 debugfs_remove_recursive(root);
681 684
@@ -686,5 +689,5 @@ err0:
686void dwc3_debugfs_exit(struct dwc3 *dwc) 689void dwc3_debugfs_exit(struct dwc3 *dwc)
687{ 690{
688 debugfs_remove_recursive(dwc->root); 691 debugfs_remove_recursive(dwc->root);
689 dwc->root = NULL; 692 kfree(dwc->regset);
690} 693}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 22e9606d8e08..55da2c7f727f 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
496 ret = pm_runtime_get_sync(dev); 496 ret = pm_runtime_get_sync(dev);
497 if (ret < 0) { 497 if (ret < 0) {
498 dev_err(dev, "get_sync failed with err %d\n", ret); 498 dev_err(dev, "get_sync failed with err %d\n", ret);
499 goto err0; 499 goto err1;
500 } 500 }
501 501
502 dwc3_omap_map_offset(omap); 502 dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
516 516
517 ret = dwc3_omap_extcon_register(omap); 517 ret = dwc3_omap_extcon_register(omap);
518 if (ret < 0) 518 if (ret < 0)
519 goto err2; 519 goto err1;
520 520
521 ret = of_platform_populate(node, NULL, NULL, dev); 521 ret = of_platform_populate(node, NULL, NULL, dev);
522 if (ret) { 522 if (ret) {
523 dev_err(&pdev->dev, "failed to create dwc3 core\n"); 523 dev_err(&pdev->dev, "failed to create dwc3 core\n");
524 goto err3; 524 goto err2;
525 } 525 }
526 526
527 dwc3_omap_enable_irqs(omap); 527 dwc3_omap_enable_irqs(omap);
528 528
529 return 0; 529 return 0;
530 530
531err3: 531err2:
532 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb); 532 extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
533 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb); 533 extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
534err2:
535 dwc3_omap_disable_irqs(omap);
536 534
537err1: 535err1:
538 pm_runtime_put_sync(dev); 536 pm_runtime_put_sync(dev);
539
540err0:
541 pm_runtime_disable(dev); 537 pm_runtime_disable(dev);
542 538
543 return ret; 539 return ret;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index d54a028cdfeb..8e4a1b195e9b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2936,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
2936 2936
2937int dwc3_gadget_suspend(struct dwc3 *dwc) 2937int dwc3_gadget_suspend(struct dwc3 *dwc)
2938{ 2938{
2939 if (!dwc->gadget_driver)
2940 return 0;
2941
2939 if (dwc->pullups_connected) { 2942 if (dwc->pullups_connected) {
2940 dwc3_gadget_disable_irq(dwc); 2943 dwc3_gadget_disable_irq(dwc);
2941 dwc3_gadget_run_stop(dwc, true, true); 2944 dwc3_gadget_run_stop(dwc, true, true);
@@ -2954,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
2954 struct dwc3_ep *dep; 2957 struct dwc3_ep *dep;
2955 int ret; 2958 int ret;
2956 2959
2960 if (!dwc->gadget_driver)
2961 return 0;
2962
2957 /* Start with SuperSpeed Default */ 2963 /* Start with SuperSpeed Default */
2958 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 2964 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2959 2965
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index de9ffd60fcfa..524e233d48de 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -651,6 +651,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
651 ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1); 651 ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
652 ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; 652 ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
653 ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE; 653 ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
654 ssp_cap->bReserved = 0;
655 ssp_cap->wReserved = 0;
654 656
655 /* SSAC = 1 (2 attributes) */ 657 /* SSAC = 1 (2 attributes) */
656 ssp_cap->bmAttributes = cpu_to_le32(1); 658 ssp_cap->bmAttributes = cpu_to_le32(1);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e21ca2bd6839..15b648cbc75c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
646 work); 646 work);
647 int ret = io_data->req->status ? io_data->req->status : 647 int ret = io_data->req->status ? io_data->req->status :
648 io_data->req->actual; 648 io_data->req->actual;
649 bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
649 650
650 if (io_data->read && ret > 0) { 651 if (io_data->read && ret > 0) {
651 use_mm(io_data->mm); 652 use_mm(io_data->mm);
@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
657 658
658 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); 659 io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
659 660
660 if (io_data->ffs->ffs_eventfd && 661 if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
661 !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
662 eventfd_signal(io_data->ffs->ffs_eventfd, 1); 662 eventfd_signal(io_data->ffs->ffs_eventfd, 1);
663 663
664 usb_ep_free_request(io_data->ep, io_data->req); 664 usb_ep_free_request(io_data->ep, io_data->req);
665 665
666 io_data->kiocb->private = NULL;
667 if (io_data->read) 666 if (io_data->read)
668 kfree(io_data->to_free); 667 kfree(io_data->to_free);
669 kfree(io_data->buf); 668 kfree(io_data->buf);
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index fe274b5851c7..93e66a9148b9 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
440 fb->off_ienb = CLCD_PL111_IENB; 440 fb->off_ienb = CLCD_PL111_IENB;
441 fb->off_cntl = CLCD_PL111_CNTL; 441 fb->off_cntl = CLCD_PL111_CNTL;
442 } else { 442 } else {
443#ifdef CONFIG_ARCH_VERSATILE 443 if (of_machine_is_compatible("arm,versatile-ab") ||
444 fb->off_ienb = CLCD_PL111_IENB; 444 of_machine_is_compatible("arm,versatile-pb")) {
445 fb->off_cntl = CLCD_PL111_CNTL; 445 fb->off_ienb = CLCD_PL111_IENB;
446#else 446 fb->off_cntl = CLCD_PL111_CNTL;
447 fb->off_ienb = CLCD_PL110_IENB; 447 } else {
448 fb->off_cntl = CLCD_PL110_CNTL; 448 fb->off_ienb = CLCD_PL110_IENB;
449#endif 449 fb->off_cntl = CLCD_PL110_CNTL;
450 }
450 } 451 }
451 452
452 fb->clk = clk_get(&fb->dev->dev, NULL); 453 fb->clk = clk_get(&fb->dev->dev, NULL);
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index abfd1f6e3327..1954ec913ce5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, 200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
201 char *desc, struct gpio_desc **gpiod) 201 char *desc, struct gpio_desc **gpiod)
202{ 202{
203 struct gpio_desc *gd;
204 int r; 203 int r;
205 204
206 *gpiod = NULL;
207
208 r = devm_gpio_request_one(dev, gpio, flags, desc); 205 r = devm_gpio_request_one(dev, gpio, flags, desc);
209 if (r) 206 if (r) {
207 *gpiod = NULL;
210 return r == -ENOENT ? 0 : r; 208 return r == -ENOENT ? 0 : r;
209 }
211 210
212 gd = gpio_to_desc(gpio); 211 *gpiod = gpio_to_desc(gpio);
213 if (IS_ERR(gd))
214 return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
215 212
216 *gpiod = gd;
217 return 0; 213 return 0;
218} 214}
219 215
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 541ead4d8965..85b8517f17a0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
386 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 386 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
387 if (atomic_dec_and_test(&s->s_ref)) { 387 if (atomic_dec_and_test(&s->s_ref)) {
388 if (s->s_auth.authorizer) 388 if (s->s_auth.authorizer)
389 ceph_auth_destroy_authorizer( 389 ceph_auth_destroy_authorizer(s->s_auth.authorizer);
390 s->s_mdsc->fsc->client->monc.auth,
391 s->s_auth.authorizer);
392 kfree(s); 390 kfree(s);
393 } 391 }
394} 392}
@@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3900 struct ceph_auth_handshake *auth = &s->s_auth; 3898 struct ceph_auth_handshake *auth = &s->s_auth;
3901 3899
3902 if (force_new && auth->authorizer) { 3900 if (force_new && auth->authorizer) {
3903 ceph_auth_destroy_authorizer(ac, auth->authorizer); 3901 ceph_auth_destroy_authorizer(auth->authorizer);
3904 auth->authorizer = NULL; 3902 auth->authorizer = NULL;
3905 } 3903 }
3906 if (!auth->authorizer) { 3904 if (!auth->authorizer) {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 655f21f99160..0b2954d7172d 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -128,6 +128,7 @@ static const match_table_t tokens = {
128struct pts_fs_info { 128struct pts_fs_info {
129 struct ida allocated_ptys; 129 struct ida allocated_ptys;
130 struct pts_mount_opts mount_opts; 130 struct pts_mount_opts mount_opts;
131 struct super_block *sb;
131 struct dentry *ptmx_dentry; 132 struct dentry *ptmx_dentry;
132}; 133};
133 134
@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = {
358 .show_options = devpts_show_options, 359 .show_options = devpts_show_options,
359}; 360};
360 361
361static void *new_pts_fs_info(void) 362static void *new_pts_fs_info(struct super_block *sb)
362{ 363{
363 struct pts_fs_info *fsi; 364 struct pts_fs_info *fsi;
364 365
@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void)
369 ida_init(&fsi->allocated_ptys); 370 ida_init(&fsi->allocated_ptys);
370 fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; 371 fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
371 fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; 372 fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
373 fsi->sb = sb;
372 374
373 return fsi; 375 return fsi;
374} 376}
@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
384 s->s_op = &devpts_sops; 386 s->s_op = &devpts_sops;
385 s->s_time_gran = 1; 387 s->s_time_gran = 1;
386 388
387 s->s_fs_info = new_pts_fs_info(); 389 s->s_fs_info = new_pts_fs_info(s);
388 if (!s->s_fs_info) 390 if (!s->s_fs_info)
389 goto fail; 391 goto fail;
390 392
@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = {
524 * to the System V naming convention 526 * to the System V naming convention
525 */ 527 */
526 528
527int devpts_new_index(struct inode *ptmx_inode) 529int devpts_new_index(struct pts_fs_info *fsi)
528{ 530{
529 struct super_block *sb = pts_sb_from_inode(ptmx_inode);
530 struct pts_fs_info *fsi;
531 int index; 531 int index;
532 int ida_ret; 532 int ida_ret;
533 533
534 if (!sb) 534 if (!fsi)
535 return -ENODEV; 535 return -ENODEV;
536 536
537 fsi = DEVPTS_SB(sb);
538retry: 537retry:
539 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) 538 if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
540 return -ENOMEM; 539 return -ENOMEM;
@@ -564,11 +563,8 @@ retry:
564 return index; 563 return index;
565} 564}
566 565
567void devpts_kill_index(struct inode *ptmx_inode, int idx) 566void devpts_kill_index(struct pts_fs_info *fsi, int idx)
568{ 567{
569 struct super_block *sb = pts_sb_from_inode(ptmx_inode);
570 struct pts_fs_info *fsi = DEVPTS_SB(sb);
571
572 mutex_lock(&allocated_ptys_lock); 568 mutex_lock(&allocated_ptys_lock);
573 ida_remove(&fsi->allocated_ptys, idx); 569 ida_remove(&fsi->allocated_ptys, idx);
574 pty_count--; 570 pty_count--;
@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
578/* 574/*
579 * pty code needs to hold extra references in case of last /dev/tty close 575 * pty code needs to hold extra references in case of last /dev/tty close
580 */ 576 */
581 577struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
582void devpts_add_ref(struct inode *ptmx_inode)
583{ 578{
584 struct super_block *sb = pts_sb_from_inode(ptmx_inode); 579 struct super_block *sb;
580 struct pts_fs_info *fsi;
581
582 sb = pts_sb_from_inode(ptmx_inode);
583 if (!sb)
584 return NULL;
585 fsi = DEVPTS_SB(sb);
586 if (!fsi)
587 return NULL;
585 588
586 atomic_inc(&sb->s_active); 589 atomic_inc(&sb->s_active);
587 ihold(ptmx_inode); 590 return fsi;
588} 591}
589 592
590void devpts_del_ref(struct inode *ptmx_inode) 593void devpts_put_ref(struct pts_fs_info *fsi)
591{ 594{
592 struct super_block *sb = pts_sb_from_inode(ptmx_inode); 595 deactivate_super(fsi->sb);
593
594 iput(ptmx_inode);
595 deactivate_super(sb);
596} 596}
597 597
598/** 598/**
@@ -604,22 +604,20 @@ void devpts_del_ref(struct inode *ptmx_inode)
604 * 604 *
605 * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. 605 * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
606 */ 606 */
607struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 607struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
608 void *priv)
609{ 608{
610 struct dentry *dentry; 609 struct dentry *dentry;
611 struct super_block *sb = pts_sb_from_inode(ptmx_inode); 610 struct super_block *sb;
612 struct inode *inode; 611 struct inode *inode;
613 struct dentry *root; 612 struct dentry *root;
614 struct pts_fs_info *fsi;
615 struct pts_mount_opts *opts; 613 struct pts_mount_opts *opts;
616 char s[12]; 614 char s[12];
617 615
618 if (!sb) 616 if (!fsi)
619 return ERR_PTR(-ENODEV); 617 return ERR_PTR(-ENODEV);
620 618
619 sb = fsi->sb;
621 root = sb->s_root; 620 root = sb->s_root;
622 fsi = DEVPTS_SB(sb);
623 opts = &fsi->mount_opts; 621 opts = &fsi->mount_opts;
624 622
625 inode = new_inode(sb); 623 inode = new_inode(sb);
@@ -630,25 +628,21 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
630 inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); 628 inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
631 inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); 629 inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
632 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; 630 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
633 init_special_inode(inode, S_IFCHR|opts->mode, device); 631 init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
634 inode->i_private = priv;
635 632
636 sprintf(s, "%d", index); 633 sprintf(s, "%d", index);
637 634
638 inode_lock(d_inode(root));
639
640 dentry = d_alloc_name(root, s); 635 dentry = d_alloc_name(root, s);
641 if (dentry) { 636 if (dentry) {
637 dentry->d_fsdata = priv;
642 d_add(dentry, inode); 638 d_add(dentry, inode);
643 fsnotify_create(d_inode(root), dentry); 639 fsnotify_create(d_inode(root), dentry);
644 } else { 640 } else {
645 iput(inode); 641 iput(inode);
646 inode = ERR_PTR(-ENOMEM); 642 dentry = ERR_PTR(-ENOMEM);
647 } 643 }
648 644
649 inode_unlock(d_inode(root)); 645 return dentry;
650
651 return inode;
652} 646}
653 647
654/** 648/**
@@ -657,24 +651,10 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
657 * 651 *
658 * Returns whatever was passed as priv in devpts_pty_new for a given inode. 652 * Returns whatever was passed as priv in devpts_pty_new for a given inode.
659 */ 653 */
660void *devpts_get_priv(struct inode *pts_inode) 654void *devpts_get_priv(struct dentry *dentry)
661{ 655{
662 struct dentry *dentry; 656 WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
663 void *priv = NULL; 657 return dentry->d_fsdata;
664
665 BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
666
667 /* Ensure dentry has not been deleted by devpts_pty_kill() */
668 dentry = d_find_alias(pts_inode);
669 if (!dentry)
670 return NULL;
671
672 if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
673 priv = pts_inode->i_private;
674
675 dput(dentry);
676
677 return priv;
678} 658}
679 659
680/** 660/**
@@ -683,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode)
683 * 663 *
684 * This is an inverse operation of devpts_pty_new. 664 * This is an inverse operation of devpts_pty_new.
685 */ 665 */
686void devpts_pty_kill(struct inode *inode) 666void devpts_pty_kill(struct dentry *dentry)
687{ 667{
688 struct super_block *sb = pts_sb_from_inode(inode); 668 WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
689 struct dentry *root = sb->s_root;
690 struct dentry *dentry;
691 669
692 BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); 670 dentry->d_fsdata = NULL;
693 671 drop_nlink(dentry->d_inode);
694 inode_lock(d_inode(root));
695
696 dentry = d_find_alias(inode);
697
698 drop_nlink(inode);
699 d_delete(dentry); 672 d_delete(dentry);
700 dput(dentry); /* d_alloc_name() in devpts_pty_new() */ 673 dput(dentry); /* d_alloc_name() in devpts_pty_new() */
701 dput(dentry); /* d_find_alias above */
702
703 inode_unlock(d_inode(root));
704} 674}
705 675
706static int __init init_devpts_fs(void) 676static int __init init_devpts_fs(void)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9aed6e202201..13719d3f35f8 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
2455 2455
2456 spin_unlock(&dlm->spinlock); 2456 spin_unlock(&dlm->spinlock);
2457 2457
2458 ret = 0;
2459
2458done: 2460done:
2459 dlm_put(dlm); 2461 dlm_put(dlm);
2460 return ret; 2462 return ret;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 229cb546bee0..541583510cfb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1518 return page; 1518 return page;
1519} 1519}
1520 1520
1521#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1522static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1523 struct vm_area_struct *vma,
1524 unsigned long addr)
1525{
1526 struct page *page;
1527 int nid;
1528
1529 if (!pmd_present(pmd))
1530 return NULL;
1531
1532 page = vm_normal_page_pmd(vma, addr, pmd);
1533 if (!page)
1534 return NULL;
1535
1536 if (PageReserved(page))
1537 return NULL;
1538
1539 nid = page_to_nid(page);
1540 if (!node_isset(nid, node_states[N_MEMORY]))
1541 return NULL;
1542
1543 return page;
1544}
1545#endif
1546
1521static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 1547static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1522 unsigned long end, struct mm_walk *walk) 1548 unsigned long end, struct mm_walk *walk)
1523{ 1549{
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1527 pte_t *orig_pte; 1553 pte_t *orig_pte;
1528 pte_t *pte; 1554 pte_t *pte;
1529 1555
1556#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1530 ptl = pmd_trans_huge_lock(pmd, vma); 1557 ptl = pmd_trans_huge_lock(pmd, vma);
1531 if (ptl) { 1558 if (ptl) {
1532 pte_t huge_pte = *(pte_t *)pmd;
1533 struct page *page; 1559 struct page *page;
1534 1560
1535 page = can_gather_numa_stats(huge_pte, vma, addr); 1561 page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1536 if (page) 1562 if (page)
1537 gather_stats(page, md, pte_dirty(huge_pte), 1563 gather_stats(page, md, pmd_dirty(*pmd),
1538 HPAGE_PMD_SIZE/PAGE_SIZE); 1564 HPAGE_PMD_SIZE/PAGE_SIZE);
1539 spin_unlock(ptl); 1565 spin_unlock(ptl);
1540 return 0; 1566 return 0;
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1542 1568
1543 if (pmd_trans_unstable(pmd)) 1569 if (pmd_trans_unstable(pmd))
1544 return 0; 1570 return 0;
1571#endif
1545 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 1572 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1546 do { 1573 do {
1547 struct page *page = can_gather_numa_stats(*pte, vma, addr); 1574 struct page *page = can_gather_numa_stats(*pte, vma, addr);
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index e56272c919b5..bf2d34c9d804 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
108 u32 val; 108 u32 val;
109 109
110 preempt_disable(); 110 preempt_disable();
111 if (unlikely(get_user(val, uaddr) != 0)) 111 if (unlikely(get_user(val, uaddr) != 0)) {
112 preempt_enable();
112 return -EFAULT; 113 return -EFAULT;
114 }
113 115
114 if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) 116 if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
117 preempt_enable();
115 return -EFAULT; 118 return -EFAULT;
119 }
116 120
117 *uval = val; 121 *uval = val;
118 preempt_enable(); 122 preempt_enable();
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 461a0558bca4..cebecff536a3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
39{ 39{
40#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) 40#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
41 return false; 41 return false;
42#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
43 return false;
42#else 44#else
43 return true; 45 return true;
44#endif 46#endif
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
12 */ 12 */
13 13
14struct ceph_auth_client; 14struct ceph_auth_client;
15struct ceph_authorizer;
16struct ceph_msg; 15struct ceph_msg;
17 16
17struct ceph_authorizer {
18 void (*destroy)(struct ceph_authorizer *);
19};
20
18struct ceph_auth_handshake { 21struct ceph_auth_handshake {
19 struct ceph_authorizer *authorizer; 22 struct ceph_authorizer *authorizer;
20 void *authorizer_buf; 23 void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
62 struct ceph_auth_handshake *auth); 65 struct ceph_auth_handshake *auth);
63 int (*verify_authorizer_reply)(struct ceph_auth_client *ac, 66 int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
64 struct ceph_authorizer *a, size_t len); 67 struct ceph_authorizer *a, size_t len);
65 void (*destroy_authorizer)(struct ceph_auth_client *ac,
66 struct ceph_authorizer *a);
67 void (*invalidate_authorizer)(struct ceph_auth_client *ac, 68 void (*invalidate_authorizer)(struct ceph_auth_client *ac,
68 int peer_type); 69 int peer_type);
69 70
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
112extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, 113extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
113 int peer_type, 114 int peer_type,
114 struct ceph_auth_handshake *auth); 115 struct ceph_auth_handshake *auth);
115extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 116void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
116 struct ceph_authorizer *a);
117extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, 117extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
118 int peer_type, 118 int peer_type,
119 struct ceph_auth_handshake *a); 119 struct ceph_auth_handshake *a);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df806710..cbf460927c42 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@ struct ceph_msg;
16struct ceph_snap_context; 16struct ceph_snap_context;
17struct ceph_osd_request; 17struct ceph_osd_request;
18struct ceph_osd_client; 18struct ceph_osd_client;
19struct ceph_authorizer;
20 19
21/* 20/*
22 * completion callback for async writepages 21 * completion callback for async writepages
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5bc799..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@ struct cgroup_subsys {
444 int (*can_attach)(struct cgroup_taskset *tset); 444 int (*can_attach)(struct cgroup_taskset *tset);
445 void (*cancel_attach)(struct cgroup_taskset *tset); 445 void (*cancel_attach)(struct cgroup_taskset *tset);
446 void (*attach)(struct cgroup_taskset *tset); 446 void (*attach)(struct cgroup_taskset *tset);
447 void (*post_attach)(void);
447 int (*can_fork)(struct task_struct *task); 448 int (*can_fork)(struct task_struct *task);
448 void (*cancel_fork)(struct task_struct *task); 449 void (*cancel_fork)(struct task_struct *task);
449 void (*fork)(struct task_struct *task); 450 void (*fork)(struct task_struct *task);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
137 task_unlock(current); 137 task_unlock(current);
138} 138}
139 139
140extern void cpuset_post_attach_flush(void);
141
142#else /* !CONFIG_CPUSETS */ 140#else /* !CONFIG_CPUSETS */
143 141
144static inline bool cpusets_enabled(void) { return false; } 142static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
245 return false; 243 return false;
246} 244}
247 245
248static inline void cpuset_post_attach_flush(void)
249{
250}
251
252#endif /* !CONFIG_CPUSETS */ 246#endif /* !CONFIG_CPUSETS */
253 247
254#endif /* _LINUX_CPUSET_H */ 248#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index e0ee0b3000b2..5871f292b596 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -15,37 +15,23 @@
15 15
16#include <linux/errno.h> 16#include <linux/errno.h>
17 17
18struct pts_fs_info;
19
18#ifdef CONFIG_UNIX98_PTYS 20#ifdef CONFIG_UNIX98_PTYS
19 21
20int devpts_new_index(struct inode *ptmx_inode); 22/* Look up a pts fs info and get a ref to it */
21void devpts_kill_index(struct inode *ptmx_inode, int idx); 23struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
22void devpts_add_ref(struct inode *ptmx_inode); 24void devpts_put_ref(struct pts_fs_info *);
23void devpts_del_ref(struct inode *ptmx_inode); 25
26int devpts_new_index(struct pts_fs_info *);
27void devpts_kill_index(struct pts_fs_info *, int);
28
24/* mknod in devpts */ 29/* mknod in devpts */
25struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, 30struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
26 void *priv);
27/* get private structure */ 31/* get private structure */
28void *devpts_get_priv(struct inode *pts_inode); 32void *devpts_get_priv(struct dentry *);
29/* unlink */ 33/* unlink */
30void devpts_pty_kill(struct inode *inode); 34void devpts_pty_kill(struct dentry *);
31
32#else
33
34/* Dummy stubs in the no-pty case */
35static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
36static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
37static inline void devpts_add_ref(struct inode *ptmx_inode) { }
38static inline void devpts_del_ref(struct inode *ptmx_inode) { }
39static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
40 dev_t device, int index, void *priv)
41{
42 return ERR_PTR(-EINVAL);
43}
44static inline void *devpts_get_priv(struct inode *pts_inode)
45{
46 return NULL;
47}
48static inline void devpts_pty_kill(struct inode *inode) { }
49 35
50#endif 36#endif
51 37
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7008623e24b1..d7b9e5346fba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
152} 152}
153 153
154struct page *get_huge_zero_page(void); 154struct page *get_huge_zero_page(void);
155void put_huge_zero_page(void);
155 156
156#else /* CONFIG_TRANSPARENT_HUGEPAGE */ 157#else /* CONFIG_TRANSPARENT_HUGEPAGE */
157#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) 158#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page)
208 return false; 209 return false;
209} 210}
210 211
212static inline void put_huge_zero_page(void)
213{
214 BUILD_BUG();
215}
211 216
212static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, 217static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
213 unsigned long addr, pmd_t *pmd, int flags) 218 unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..d10ef06971b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
196 * We record lock dependency chains, so that we can cache them: 196 * We record lock dependency chains, so that we can cache them:
197 */ 197 */
198struct lock_chain { 198struct lock_chain {
199 u8 irq_context; 199 /* see BUILD_BUG_ON()s in lookup_chain_cache() */
200 u8 depth; 200 unsigned int irq_context : 2,
201 u16 base; 201 depth : 6,
202 base : 24;
203 /* 4 byte hole */
202 struct hlist_node entry; 204 struct hlist_node entry;
203 u64 chain_key; 205 u64 chain_key;
204}; 206};
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8541a913f6a3..d1f904c8b2cb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
828 u8 n_ports; 828 u8 n_ports;
829}; 829};
830 830
831enum mlx4_pci_status {
832 MLX4_PCI_STATUS_DISABLED,
833 MLX4_PCI_STATUS_ENABLED,
834};
835
831struct mlx4_dev_persistent { 836struct mlx4_dev_persistent {
832 struct pci_dev *pdev; 837 struct pci_dev *pdev;
833 struct mlx4_dev *dev; 838 struct mlx4_dev *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
841 u8 state; 846 u8 state;
842 struct mutex interface_state_mutex; /* protect SW state */ 847 struct mutex interface_state_mutex; /* protect SW state */
843 u8 interface_state; 848 u8 interface_state;
849 struct mutex pci_status_mutex; /* sync pci state */
850 enum mlx4_pci_status pci_status;
844}; 851};
845 852
846struct mlx4_dev { 853struct mlx4_dev {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8156e3c9239c..b3575f392492 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -392,6 +392,17 @@ enum {
392 MLX5_CAP_OFF_CMDIF_CSUM = 46, 392 MLX5_CAP_OFF_CMDIF_CSUM = 46,
393}; 393};
394 394
395enum {
396 /*
397 * Max wqe size for rdma read is 512 bytes, so this
398 * limits our max_sge_rd as the wqe needs to fit:
399 * - ctrl segment (16 bytes)
400 * - rdma segment (16 bytes)
401 * - scatter elements (16 bytes each)
402 */
403 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
404};
405
395struct mlx5_inbox_hdr { 406struct mlx5_inbox_hdr {
396 __be16 opcode; 407 __be16 opcode;
397 u8 rsvd[4]; 408 u8 rsvd[4];
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index dcd5ac8d3b14..369c837d40f5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -519,8 +519,9 @@ enum mlx5_device_state {
519}; 519};
520 520
521enum mlx5_interface_state { 521enum mlx5_interface_state {
522 MLX5_INTERFACE_STATE_DOWN, 522 MLX5_INTERFACE_STATE_DOWN = BIT(0),
523 MLX5_INTERFACE_STATE_UP, 523 MLX5_INTERFACE_STATE_UP = BIT(1),
524 MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
524}; 525};
525 526
526enum mlx5_pci_status { 527enum mlx5_pci_status {
@@ -544,7 +545,7 @@ struct mlx5_core_dev {
544 enum mlx5_device_state state; 545 enum mlx5_device_state state;
545 /* sync interface state */ 546 /* sync interface state */
546 struct mutex intf_state_mutex; 547 struct mutex intf_state_mutex;
547 enum mlx5_interface_state interface_state; 548 unsigned long intf_state;
548 void (*event) (struct mlx5_core_dev *dev, 549 void (*event) (struct mlx5_core_dev *dev,
549 enum mlx5_dev_event event, 550 enum mlx5_dev_event event,
550 unsigned long param); 551 unsigned long param);
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index a1d145abd4eb..b30250ab7604 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
54int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, 54int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
55 enum mlx5_port_status *status); 55 enum mlx5_port_status *status);
56 56
57int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); 57int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
58void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); 58void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
59void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 59void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
60 u8 port); 60 u8 port);
61 61
62int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, 62int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index bd93e6323603..301da4a5e6bf 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
45 u16 vport, u8 *addr); 45 u16 vport, u8 *addr);
46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 46int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
47 u16 vport, u8 *addr); 47 u16 vport, u8 *addr);
48int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
49int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
48int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 50int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
49 u64 *system_image_guid); 51 u64 *system_image_guid);
50int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 52int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a55e5be0894f..864d7221de84 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page)
1031 page = compound_head(page); 1031 page = compound_head(page);
1032 if (atomic_read(compound_mapcount_ptr(page)) >= 0) 1032 if (atomic_read(compound_mapcount_ptr(page)) >= 0)
1033 return true; 1033 return true;
1034 if (PageHuge(page))
1035 return false;
1034 for (i = 0; i < hpage_nr_pages(page); i++) { 1036 for (i = 0; i < hpage_nr_pages(page); i++) {
1035 if (atomic_read(&page[i]._mapcount) >= 0) 1037 if (atomic_read(&page[i]._mapcount) >= 0)
1036 return true; 1038 return true;
@@ -1138,6 +1140,8 @@ struct zap_details {
1138 1140
1139struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 1141struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1140 pte_t pte); 1142 pte_t pte);
1143struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1144 pmd_t pmd);
1141 1145
1142int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1146int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1143 unsigned long size); 1147 unsigned long size);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 004b8133417d..932ec74909c6 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1111,6 +1111,7 @@ void pci_unlock_rescan_remove(void);
1111/* Vital product data routines */ 1111/* Vital product data routines */
1112ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1112ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1113ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1113ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1114int pci_set_vpd_size(struct pci_dev *dev, size_t len);
1114 1115
1115/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 1116/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1116resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); 1117resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index 1c33dd7da4a7..4ae95f7e8597 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
98 if (!is_a_nulls(first)) 98 if (!is_a_nulls(first))
99 first->pprev = &n->next; 99 first->pprev = &n->next;
100} 100}
101
102/**
103 * hlist_nulls_add_tail_rcu
104 * @n: the element to add to the hash list.
105 * @h: the list to add to.
106 *
107 * Description:
108 * Adds the specified element to the end of the specified hlist_nulls,
109 * while permitting racing traversals. NOTE: tail insertion requires
110 * list traversal.
111 *
112 * The caller must take whatever precautions are necessary
113 * (such as holding appropriate locks) to avoid racing
114 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
115 * or hlist_nulls_del_rcu(), running on this same list.
116 * However, it is perfectly legal to run concurrently with
117 * the _rcu list-traversal primitives, such as
118 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
119 * problems on Alpha CPUs. Regardless of the type of CPU, the
120 * list-traversal primitive must be guarded by rcu_read_lock().
121 */
122static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
123 struct hlist_nulls_head *h)
124{
125 struct hlist_nulls_node *i, *last = NULL;
126
127 for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
128 i = hlist_nulls_next_rcu(i))
129 last = i;
130
131 if (last) {
132 n->next = last->next;
133 n->pprev = &last->next;
134 rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
135 } else {
136 hlist_nulls_add_head_rcu(n, h);
137 }
138}
139
101/** 140/**
102 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type 141 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
103 * @tpos: the type * to use as a loop cursor. 142 * @tpos: the type * to use as a loop cursor.
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a55d0523f75d..1b8a5a7876ce 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -352,8 +352,8 @@ struct thermal_zone_of_device_ops {
352 352
353struct thermal_trip { 353struct thermal_trip {
354 struct device_node *np; 354 struct device_node *np;
355 unsigned long int temperature; 355 int temperature;
356 unsigned long int hysteresis; 356 int hysteresis;
357 enum thermal_trip_type type; 357 enum thermal_trip_type type;
358}; 358};
359 359
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 161052477f77..b742b5e47cc2 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,7 +7,7 @@
7 * defined; unless noted otherwise, they are optional, and can be 7 * defined; unless noted otherwise, they are optional, and can be
8 * filled in with a null pointer. 8 * filled in with a null pointer.
9 * 9 *
10 * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) 10 * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
11 * 11 *
12 * Return the tty device corresponding to idx, NULL if there is not 12 * Return the tty device corresponding to idx, NULL if there is not
13 * one currently in use and an ERR_PTR value on error. Called under 13 * one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
250 250
251struct tty_operations { 251struct tty_operations {
252 struct tty_struct * (*lookup)(struct tty_driver *driver, 252 struct tty_struct * (*lookup)(struct tty_driver *driver,
253 struct inode *inode, int idx); 253 struct file *filp, int idx);
254 int (*install)(struct tty_driver *driver, struct tty_struct *tty); 254 int (*install)(struct tty_driver *driver, struct tty_struct *tty);
255 void (*remove)(struct tty_driver *driver, struct tty_struct *tty); 255 void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
256 int (*open)(struct tty_struct * tty, struct file * filp); 256 int (*open)(struct tty_struct * tty, struct file * filp);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b6c2ba..88e3ab496e8f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@ struct vb2_ops {
375/** 375/**
376 * struct vb2_ops - driver-specific callbacks 376 * struct vb2_ops - driver-specific callbacks
377 * 377 *
378 * @verify_planes_array: Verify that a given user space structure contains
379 * enough planes for the buffer. This is called
380 * for each dequeued buffer.
378 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. 381 * @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
379 * For V4L2 this is a struct v4l2_buffer. 382 * For V4L2 this is a struct v4l2_buffer.
380 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. 383 * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
384 * the vb2_buffer struct. 387 * the vb2_buffer struct.
385 */ 388 */
386struct vb2_buf_ops { 389struct vb2_buf_ops {
390 int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
387 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); 391 void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
388 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, 392 int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
389 struct vb2_plane *planes); 393 struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
400 * @fileio_read_once: report EOF after reading the first buffer 404 * @fileio_read_once: report EOF after reading the first buffer
401 * @fileio_write_immediately: queue buffer after each write() call 405 * @fileio_write_immediately: queue buffer after each write() call
402 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver 406 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
407 * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
408 * has not been called. This is a vb1 idiom that has been adopted
409 * also by vb2.
403 * @lock: pointer to a mutex that protects the vb2_queue struct. The 410 * @lock: pointer to a mutex that protects the vb2_queue struct. The
404 * driver can set this to a mutex to let the v4l2 core serialize 411 * driver can set this to a mutex to let the v4l2 core serialize
405 * the queuing ioctls. If the driver wants to handle locking 412 * the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
463 unsigned fileio_read_once:1; 470 unsigned fileio_read_once:1;
464 unsigned fileio_write_immediately:1; 471 unsigned fileio_write_immediately:1;
465 unsigned allow_zero_bytesused:1; 472 unsigned allow_zero_bytesused:1;
473 unsigned quirk_poll_must_check_waiting_for_buffers:1;
466 474
467 struct mutex *lock; 475 struct mutex *lock;
468 void *owner; 476 void *owner;
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c0a92e2c286d..74c9693d4941 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -17,6 +17,7 @@
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <net/sock.h> 19#include <net/sock.h>
20#include <net/inet_sock.h>
20 21
21#ifdef CONFIG_CGROUP_NET_CLASSID 22#ifdef CONFIG_CGROUP_NET_CLASSID
22struct cgroup_cls_state { 23struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
63 * softirqs always disables bh. 64 * softirqs always disables bh.
64 */ 65 */
65 if (in_serving_softirq()) { 66 if (in_serving_softirq()) {
67 struct sock *sk = skb_to_full_sk(skb);
68
66 /* If there is an sock_cgroup_classid we'll use that. */ 69 /* If there is an sock_cgroup_classid we'll use that. */
67 if (!skb->sk) 70 if (!sk || !sk_fullsock(sk))
68 return 0; 71 return 0;
69 72
70 classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data); 73 classid = sock_cgroup_classid(&sk->sk_cgrp_data);
71 } 74 }
72 75
73 return classid; 76 return classid;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 295d291269e2..54c779416eec 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
101struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, 101struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
102 const struct in6_addr *addr, bool anycast); 102 const struct in6_addr *addr, bool anycast);
103 103
104struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
105 int flags);
106
104/* 107/*
105 * support functions for ND 108 * support functions for ND
106 * 109 *
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d0aeb97aec5d..1be050ada8c5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -959,6 +959,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
959int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 959int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
960int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, 960int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
961 int addr_len); 961 int addr_len);
962int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
963void ip6_datagram_release_cb(struct sock *sk);
962 964
963int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, 965int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
964 int *addr_len); 966 int *addr_len);
diff --git a/include/net/route.h b/include/net/route.h
index 9b0a523bb428..6de665bf1750 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
209void ip_rt_multicast_event(struct in_device *); 209void ip_rt_multicast_event(struct in_device *);
210int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 210int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
211void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 211void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
212struct rtable *rt_dst_alloc(struct net_device *dev,
213 unsigned int flags, u16 type,
214 bool nopolicy, bool noxfrm, bool will_cache);
212 215
213struct in_ifaddr; 216struct in_ifaddr;
214void fib_add_ifaddr(struct in_ifaddr *); 217void fib_add_ifaddr(struct in_ifaddr *);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6df1ce7a411c..5a404c354f4c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -847,6 +847,11 @@ struct sctp_transport {
847 */ 847 */
848 ktime_t last_time_heard; 848 ktime_t last_time_heard;
849 849
850 /* When was the last time that we sent a chunk using this
851 * transport? We use this to check for idle transports
852 */
853 unsigned long last_time_sent;
854
850 /* Last time(in jiffies) when cwnd is reduced due to the congestion 855 /* Last time(in jiffies) when cwnd is reduced due to the congestion
851 * indication based on ECNE chunk. 856 * indication based on ECNE chunk.
852 */ 857 */
@@ -952,7 +957,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
952 struct sctp_sock *); 957 struct sctp_sock *);
953void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); 958void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
954void sctp_transport_free(struct sctp_transport *); 959void sctp_transport_free(struct sctp_transport *);
955void sctp_transport_reset_timers(struct sctp_transport *); 960void sctp_transport_reset_t3_rtx(struct sctp_transport *);
961void sctp_transport_reset_hb_timer(struct sctp_transport *);
956int sctp_transport_hold(struct sctp_transport *); 962int sctp_transport_hold(struct sctp_transport *);
957void sctp_transport_put(struct sctp_transport *); 963void sctp_transport_put(struct sctp_transport *);
958void sctp_transport_update_rto(struct sctp_transport *, __u32); 964void sctp_transport_update_rto(struct sctp_transport *, __u32);
diff --git a/include/net/sock.h b/include/net/sock.h
index 255d3e03727b..121ffc115c4f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
630 630
631static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 631static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
632{ 632{
633 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 633 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
634 sk->sk_family == AF_INET6)
635 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
636 else
637 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
634} 638}
635 639
636static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 640static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d451122e8404..51d77b2ce2b2 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -54,6 +54,8 @@ struct switchdev_attr {
54 struct net_device *orig_dev; 54 struct net_device *orig_dev;
55 enum switchdev_attr_id id; 55 enum switchdev_attr_id id;
56 u32 flags; 56 u32 flags;
57 void *complete_priv;
58 void (*complete)(struct net_device *dev, int err, void *priv);
57 union { 59 union {
58 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ 60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
59 u8 stp_state; /* PORT_STP_STATE */ 61 u8 stp_state; /* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
75 struct net_device *orig_dev; 77 struct net_device *orig_dev;
76 enum switchdev_obj_id id; 78 enum switchdev_obj_id id;
77 u32 flags; 79 u32 flags;
80 void *complete_priv;
81 void (*complete)(struct net_device *dev, int err, void *priv);
78}; 82};
79 83
80/* SWITCHDEV_OBJ_ID_PORT_VLAN */ 84/* SWITCHDEV_OBJ_ID_PORT_VLAN */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b91370f61be6..6db10228113f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -552,6 +552,8 @@ void tcp_send_ack(struct sock *sk);
552void tcp_send_delayed_ack(struct sock *sk); 552void tcp_send_delayed_ack(struct sock *sk);
553void tcp_send_loss_probe(struct sock *sk); 553void tcp_send_loss_probe(struct sock *sk);
554bool tcp_schedule_loss_probe(struct sock *sk); 554bool tcp_schedule_loss_probe(struct sock *sk);
555void tcp_skb_collapse_tstamp(struct sk_buff *skb,
556 const struct sk_buff *next_skb);
555 557
556/* tcp_input.c */ 558/* tcp_input.c */
557void tcp_resume_early_retransmit(struct sock *sk); 559void tcp_resume_early_retransmit(struct sock *sk);
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
34#define _RDMA_IB_H 34#define _RDMA_IB_H
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/sched.h>
37 38
38struct ib_addr { 39struct ib_addr {
39 union { 40 union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
86 __u64 sib_scope_id; 87 __u64 sib_scope_id;
87}; 88};
88 89
90/*
91 * The IB interfaces that use write() as bi-directional ioctl() are
92 * fundamentally unsafe, since there are lots of ways to trigger "write()"
93 * calls from various contexts with elevated privileges. That includes the
94 * traditional suid executable error message writes, but also various kernel
95 * interfaces that can write to file descriptors.
96 *
97 * This function provides protection for the legacy API by restricting the
98 * calling context.
99 */
100static inline bool ib_safe_file_access(struct file *filp)
101{
102 return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
103}
104
89#endif /* _RDMA_IB_H */ 105#endif /* _RDMA_IB_H */
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fcb5829..f5842bcd9c94 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,7 +9,7 @@
9#ifdef CONFIG_SND_HDA_I915 9#ifdef CONFIG_SND_HDA_I915
10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); 10int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
11int snd_hdac_display_power(struct hdac_bus *bus, bool enable); 11int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
12int snd_hdac_get_display_clk(struct hdac_bus *bus); 12void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
13int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); 13int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
14int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, 14int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
15 bool *audio_enabled, char *buffer, int max_bytes); 15 bool *audio_enabled, char *buffer, int max_bytes);
@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
25{ 25{
26 return 0; 26 return 0;
27} 27}
28static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) 28static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
29{ 29{
30 return 0;
31} 30}
32static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, 31static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
33 int rate) 32 int rate)
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
index 2767c55a641e..ca64f0f50b45 100644
--- a/include/sound/hda_regmap.h
+++ b/include/sound/hda_regmap.h
@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
17 unsigned int verb); 17 unsigned int verb);
18int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 18int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
19 unsigned int *val); 19 unsigned int *val);
20int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
21 unsigned int reg, unsigned int *val);
20int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, 22int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
21 unsigned int val); 23 unsigned int val);
22int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg, 24int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 2622b33fb2ec..6e0f5f01734c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -717,9 +717,13 @@ __SYSCALL(__NR_membarrier, sys_membarrier)
717__SYSCALL(__NR_mlock2, sys_mlock2) 717__SYSCALL(__NR_mlock2, sys_mlock2)
718#define __NR_copy_file_range 285 718#define __NR_copy_file_range 285
719__SYSCALL(__NR_copy_file_range, sys_copy_file_range) 719__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
720#define __NR_preadv2 286
721__SYSCALL(__NR_preadv2, sys_preadv2)
722#define __NR_pwritev2 287
723__SYSCALL(__NR_pwritev2, sys_pwritev2)
720 724
721#undef __NR_syscalls 725#undef __NR_syscalls
722#define __NR_syscalls 286 726#define __NR_syscalls 288
723 727
724/* 728/*
725 * All syscalls below here should go away really, 729 * All syscalls below here should go away really,
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b71fd0b5cbad..813ffb2e22c9 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -96,6 +96,7 @@ header-y += cyclades.h
96header-y += cycx_cfm.h 96header-y += cycx_cfm.h
97header-y += dcbnl.h 97header-y += dcbnl.h
98header-y += dccp.h 98header-y += dccp.h
99header-y += devlink.h
99header-y += dlmconstants.h 100header-y += dlmconstants.h
100header-y += dlm_device.h 101header-y += dlm_device.h
101header-y += dlm.h 102header-y += dlm.h
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 26b0d1e3e3e7..4c58d9917aa4 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -19,8 +19,8 @@
19 19
20#define MACSEC_MAX_KEY_LEN 128 20#define MACSEC_MAX_KEY_LEN 128
21 21
22#define DEFAULT_CIPHER_ID 0x0080020001000001ULL 22#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
23#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 23#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
24 24
25#define MACSEC_MIN_ICV_LEN 8 25#define MACSEC_MIN_ICV_LEN 8
26#define MACSEC_MAX_ICV_LEN 32 26#define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
183 183
184#define V4L2_DV_BT_CEA_3840X2160P24 { \ 184#define V4L2_DV_BT_CEA_3840X2160P24 { \
185 .type = V4L2_DV_BT_656_1120, \ 185 .type = V4L2_DV_BT_656_1120, \
186 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 186 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
187 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
187 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \ 188 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
188 V4L2_DV_BT_STD_CEA861, \ 189 V4L2_DV_BT_STD_CEA861, \
189 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 190 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
191 192
192#define V4L2_DV_BT_CEA_3840X2160P25 { \ 193#define V4L2_DV_BT_CEA_3840X2160P25 { \
193 .type = V4L2_DV_BT_656_1120, \ 194 .type = V4L2_DV_BT_656_1120, \
194 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 195 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
196 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
195 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 197 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
196 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 198 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
197} 199}
198 200
199#define V4L2_DV_BT_CEA_3840X2160P30 { \ 201#define V4L2_DV_BT_CEA_3840X2160P30 { \
200 .type = V4L2_DV_BT_656_1120, \ 202 .type = V4L2_DV_BT_656_1120, \
201 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 203 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
204 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
202 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 205 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
203 V4L2_DV_BT_STD_CEA861, \ 206 V4L2_DV_BT_STD_CEA861, \
204 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 207 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
206 209
207#define V4L2_DV_BT_CEA_3840X2160P50 { \ 210#define V4L2_DV_BT_CEA_3840X2160P50 { \
208 .type = V4L2_DV_BT_656_1120, \ 211 .type = V4L2_DV_BT_656_1120, \
209 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 212 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
213 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
210 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ 214 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
211 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 215 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
212} 216}
213 217
214#define V4L2_DV_BT_CEA_3840X2160P60 { \ 218#define V4L2_DV_BT_CEA_3840X2160P60 { \
215 .type = V4L2_DV_BT_656_1120, \ 219 .type = V4L2_DV_BT_656_1120, \
216 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 220 V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
221 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
217 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ 222 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
218 V4L2_DV_BT_STD_CEA861, \ 223 V4L2_DV_BT_STD_CEA861, \
219 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 224 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
221 226
222#define V4L2_DV_BT_CEA_4096X2160P24 { \ 227#define V4L2_DV_BT_CEA_4096X2160P24 { \
223 .type = V4L2_DV_BT_656_1120, \ 228 .type = V4L2_DV_BT_656_1120, \
224 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 229 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
230 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
225 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \ 231 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
226 V4L2_DV_BT_STD_CEA861, \ 232 V4L2_DV_BT_STD_CEA861, \
227 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 233 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
229 235
230#define V4L2_DV_BT_CEA_4096X2160P25 { \ 236#define V4L2_DV_BT_CEA_4096X2160P25 { \
231 .type = V4L2_DV_BT_656_1120, \ 237 .type = V4L2_DV_BT_656_1120, \
232 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 238 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
239 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
233 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 240 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
234 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 241 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
235} 242}
236 243
237#define V4L2_DV_BT_CEA_4096X2160P30 { \ 244#define V4L2_DV_BT_CEA_4096X2160P30 { \
238 .type = V4L2_DV_BT_656_1120, \ 245 .type = V4L2_DV_BT_656_1120, \
239 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 246 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
247 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
240 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 248 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
241 V4L2_DV_BT_STD_CEA861, \ 249 V4L2_DV_BT_STD_CEA861, \
242 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 250 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
244 252
245#define V4L2_DV_BT_CEA_4096X2160P50 { \ 253#define V4L2_DV_BT_CEA_4096X2160P50 { \
246 .type = V4L2_DV_BT_656_1120, \ 254 .type = V4L2_DV_BT_656_1120, \
247 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 255 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
256 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
248 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ 257 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
249 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ 258 V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
250} 259}
251 260
252#define V4L2_DV_BT_CEA_4096X2160P60 { \ 261#define V4L2_DV_BT_CEA_4096X2160P60 { \
253 .type = V4L2_DV_BT_656_1120, \ 262 .type = V4L2_DV_BT_656_1120, \
254 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ 263 V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
264 V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
255 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ 265 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
256 V4L2_DV_BT_STD_CEA861, \ 266 V4L2_DV_BT_STD_CEA861, \
257 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ 267 V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e08f8e9b771..db2574e7b8b0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
1374 } 1374 }
1375 1375
1376 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 1376 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
1377 BPF_SIZE(insn->code) == BPF_DW ||
1377 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 1378 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
1378 verbose("BPF_LD_ABS uses reserved fields\n"); 1379 verbose("BPF_LD_ABS uses reserved fields\n");
1379 return -EINVAL; 1380 return -EINVAL;
@@ -2029,7 +2030,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2029 if (IS_ERR(map)) { 2030 if (IS_ERR(map)) {
2030 verbose("fd %d is not pointing to valid bpf_map\n", 2031 verbose("fd %d is not pointing to valid bpf_map\n",
2031 insn->imm); 2032 insn->imm);
2032 fdput(f);
2033 return PTR_ERR(map); 2033 return PTR_ERR(map);
2034 } 2034 }
2035 2035
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 671dc05c0b0f..909a7d31ffd3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2825 size_t nbytes, loff_t off, bool threadgroup) 2825 size_t nbytes, loff_t off, bool threadgroup)
2826{ 2826{
2827 struct task_struct *tsk; 2827 struct task_struct *tsk;
2828 struct cgroup_subsys *ss;
2828 struct cgroup *cgrp; 2829 struct cgroup *cgrp;
2829 pid_t pid; 2830 pid_t pid;
2830 int ret; 2831 int ssid, ret;
2831 2832
2832 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 2833 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2833 return -EINVAL; 2834 return -EINVAL;
@@ -2875,8 +2876,10 @@ out_unlock_rcu:
2875 rcu_read_unlock(); 2876 rcu_read_unlock();
2876out_unlock_threadgroup: 2877out_unlock_threadgroup:
2877 percpu_up_write(&cgroup_threadgroup_rwsem); 2878 percpu_up_write(&cgroup_threadgroup_rwsem);
2879 for_each_subsys(ss, ssid)
2880 if (ss->post_attach)
2881 ss->post_attach();
2878 cgroup_kn_unlock(of->kn); 2882 cgroup_kn_unlock(of->kn);
2879 cpuset_post_attach_flush();
2880 return ret ?: nbytes; 2883 return ret ?: nbytes;
2881} 2884}
2882 2885
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ea42e8da861..3e3f6e49eabb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -36,6 +36,7 @@
36 * @target: The target state 36 * @target: The target state
37 * @thread: Pointer to the hotplug thread 37 * @thread: Pointer to the hotplug thread
38 * @should_run: Thread should execute 38 * @should_run: Thread should execute
39 * @rollback: Perform a rollback
39 * @cb_stat: The state for a single callback (install/uninstall) 40 * @cb_stat: The state for a single callback (install/uninstall)
40 * @cb: Single callback function (install/uninstall) 41 * @cb: Single callback function (install/uninstall)
41 * @result: Result of the operation 42 * @result: Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state {
47#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
48 struct task_struct *thread; 49 struct task_struct *thread;
49 bool should_run; 50 bool should_run;
51 bool rollback;
50 enum cpuhp_state cb_state; 52 enum cpuhp_state cb_state;
51 int (*cb)(unsigned int cpu); 53 int (*cb)(unsigned int cpu);
52 int result; 54 int result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
301 return __cpu_notify(val, cpu, -1, NULL); 303 return __cpu_notify(val, cpu, -1, NULL);
302} 304}
303 305
306static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
307{
308 BUG_ON(cpu_notify(val, cpu));
309}
310
304/* Notifier wrappers for transitioning to state machine */ 311/* Notifier wrappers for transitioning to state machine */
305static int notify_prepare(unsigned int cpu) 312static int notify_prepare(unsigned int cpu)
306{ 313{
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
477 } else { 484 } else {
478 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); 485 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
479 } 486 }
487 } else if (st->rollback) {
488 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
489
490 undo_cpu_down(cpu, st, cpuhp_ap_states);
491 /*
492 * This is a momentary workaround to keep the notifier users
493 * happy. Will go away once we got rid of the notifiers.
494 */
495 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
496 st->rollback = false;
480 } else { 497 } else {
481 /* Cannot happen .... */ 498 /* Cannot happen .... */
482 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 499 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
636 read_unlock(&tasklist_lock); 653 read_unlock(&tasklist_lock);
637} 654}
638 655
639static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
640{
641 BUG_ON(cpu_notify(val, cpu));
642}
643
644static int notify_down_prepare(unsigned int cpu) 656static int notify_down_prepare(unsigned int cpu)
645{ 657{
646 int err, nr_calls = 0; 658 int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
721 */ 733 */
722 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu)); 734 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
723 if (err) { 735 if (err) {
724 /* CPU didn't die: tell everyone. Can't complain. */ 736 /* CPU refused to die */
725 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
726 irq_unlock_sparse(); 737 irq_unlock_sparse();
738 /* Unpark the hotplug thread so we can rollback there */
739 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
727 return err; 740 return err;
728 } 741 }
729 BUG_ON(cpu_online(cpu)); 742 BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
832 * to do the further cleanups. 845 * to do the further cleanups.
833 */ 846 */
834 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target); 847 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
848 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
849 st->target = prev_state;
850 st->rollback = true;
851 cpuhp_kick_ap_work(cpu);
852 }
835 853
836 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; 854 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
837out: 855out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1249 .name = "notify:online", 1267 .name = "notify:online",
1250 .startup = notify_online, 1268 .startup = notify_online,
1251 .teardown = notify_down_prepare, 1269 .teardown = notify_down_prepare,
1270 .skip_onerr = true,
1252 }, 1271 },
1253#endif 1272#endif
1254 /* 1273 /*
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 00ab5c2b7c5b..1902956baba1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -58,7 +58,6 @@
58#include <asm/uaccess.h> 58#include <asm/uaccess.h>
59#include <linux/atomic.h> 59#include <linux/atomic.h>
60#include <linux/mutex.h> 60#include <linux/mutex.h>
61#include <linux/workqueue.h>
62#include <linux/cgroup.h> 61#include <linux/cgroup.h>
63#include <linux/wait.h> 62#include <linux/wait.h>
64 63
@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1016 } 1015 }
1017} 1016}
1018 1017
1019void cpuset_post_attach_flush(void) 1018static void cpuset_post_attach(void)
1020{ 1019{
1021 flush_workqueue(cpuset_migrate_mm_wq); 1020 flush_workqueue(cpuset_migrate_mm_wq);
1022} 1021}
@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
2087 .can_attach = cpuset_can_attach, 2086 .can_attach = cpuset_can_attach,
2088 .cancel_attach = cpuset_cancel_attach, 2087 .cancel_attach = cpuset_cancel_attach,
2089 .attach = cpuset_attach, 2088 .attach = cpuset_attach,
2089 .post_attach = cpuset_post_attach,
2090 .bind = cpuset_bind, 2090 .bind = cpuset_bind,
2091 .legacy_cftypes = files, 2091 .legacy_cftypes = files,
2092 .early_init = true, 2092 .early_init = true,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 52bedc5a5aaa..4e2ebf6f2f1f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
412 if (ret || !write) 412 if (ret || !write)
413 return ret; 413 return ret;
414 414
415 if (sysctl_perf_cpu_time_max_percent == 100) { 415 if (sysctl_perf_cpu_time_max_percent == 100 ||
416 sysctl_perf_cpu_time_max_percent == 0) {
416 printk(KERN_WARNING 417 printk(KERN_WARNING
417 "perf: Dynamic interrupt throttling disabled, can hang your system!\n"); 418 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
418 WRITE_ONCE(perf_sample_allowed_ns, 0); 419 WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
1105 * function. 1106 * function.
1106 * 1107 *
1107 * Lock order: 1108 * Lock order:
1109 * cred_guard_mutex
1108 * task_struct::perf_event_mutex 1110 * task_struct::perf_event_mutex
1109 * perf_event_context::mutex 1111 * perf_event_context::mutex
1110 * perf_event::child_mutex; 1112 * perf_event::child_mutex;
@@ -3420,7 +3422,6 @@ static struct task_struct *
3420find_lively_task_by_vpid(pid_t vpid) 3422find_lively_task_by_vpid(pid_t vpid)
3421{ 3423{
3422 struct task_struct *task; 3424 struct task_struct *task;
3423 int err;
3424 3425
3425 rcu_read_lock(); 3426 rcu_read_lock();
3426 if (!vpid) 3427 if (!vpid)
@@ -3434,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
3434 if (!task) 3435 if (!task)
3435 return ERR_PTR(-ESRCH); 3436 return ERR_PTR(-ESRCH);
3436 3437
3437 /* Reuse ptrace permission checks for now. */
3438 err = -EACCES;
3439 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
3440 goto errout;
3441
3442 return task; 3438 return task;
3443errout:
3444 put_task_struct(task);
3445 return ERR_PTR(err);
3446
3447} 3439}
3448 3440
3449/* 3441/*
@@ -8413,6 +8405,24 @@ SYSCALL_DEFINE5(perf_event_open,
8413 8405
8414 get_online_cpus(); 8406 get_online_cpus();
8415 8407
8408 if (task) {
8409 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
8410 if (err)
8411 goto err_cpus;
8412
8413 /*
8414 * Reuse ptrace permission checks for now.
8415 *
8416 * We must hold cred_guard_mutex across this and any potential
8417 * perf_install_in_context() call for this new event to
8418 * serialize against exec() altering our credentials (and the
8419 * perf_event_exit_task() that could imply).
8420 */
8421 err = -EACCES;
8422 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
8423 goto err_cred;
8424 }
8425
8416 if (flags & PERF_FLAG_PID_CGROUP) 8426 if (flags & PERF_FLAG_PID_CGROUP)
8417 cgroup_fd = pid; 8427 cgroup_fd = pid;
8418 8428
@@ -8420,7 +8430,7 @@ SYSCALL_DEFINE5(perf_event_open,
8420 NULL, NULL, cgroup_fd); 8430 NULL, NULL, cgroup_fd);
8421 if (IS_ERR(event)) { 8431 if (IS_ERR(event)) {
8422 err = PTR_ERR(event); 8432 err = PTR_ERR(event);
8423 goto err_cpus; 8433 goto err_cred;
8424 } 8434 }
8425 8435
8426 if (is_sampling_event(event)) { 8436 if (is_sampling_event(event)) {
@@ -8479,11 +8489,6 @@ SYSCALL_DEFINE5(perf_event_open,
8479 goto err_context; 8489 goto err_context;
8480 } 8490 }
8481 8491
8482 if (task) {
8483 put_task_struct(task);
8484 task = NULL;
8485 }
8486
8487 /* 8492 /*
8488 * Look up the group leader (we will attach this event to it): 8493 * Look up the group leader (we will attach this event to it):
8489 */ 8494 */
@@ -8581,6 +8586,11 @@ SYSCALL_DEFINE5(perf_event_open,
8581 8586
8582 WARN_ON_ONCE(ctx->parent_ctx); 8587 WARN_ON_ONCE(ctx->parent_ctx);
8583 8588
8589 /*
8590 * This is the point on no return; we cannot fail hereafter. This is
8591 * where we start modifying current state.
8592 */
8593
8584 if (move_group) { 8594 if (move_group) {
8585 /* 8595 /*
8586 * See perf_event_ctx_lock() for comments on the details 8596 * See perf_event_ctx_lock() for comments on the details
@@ -8652,6 +8662,11 @@ SYSCALL_DEFINE5(perf_event_open,
8652 mutex_unlock(&gctx->mutex); 8662 mutex_unlock(&gctx->mutex);
8653 mutex_unlock(&ctx->mutex); 8663 mutex_unlock(&ctx->mutex);
8654 8664
8665 if (task) {
8666 mutex_unlock(&task->signal->cred_guard_mutex);
8667 put_task_struct(task);
8668 }
8669
8655 put_online_cpus(); 8670 put_online_cpus();
8656 8671
8657 mutex_lock(&current->perf_event_mutex); 8672 mutex_lock(&current->perf_event_mutex);
@@ -8684,6 +8699,9 @@ err_alloc:
8684 */ 8699 */
8685 if (!event_file) 8700 if (!event_file)
8686 free_event(event); 8701 free_event(event);
8702err_cred:
8703 if (task)
8704 mutex_unlock(&task->signal->cred_guard_mutex);
8687err_cpus: 8705err_cpus:
8688 put_online_cpus(); 8706 put_online_cpus();
8689err_task: 8707err_task:
@@ -8968,6 +8986,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
8968 8986
8969/* 8987/*
8970 * When a child task exits, feed back event values to parent events. 8988 * When a child task exits, feed back event values to parent events.
8989 *
8990 * Can be called with cred_guard_mutex held when called from
8991 * install_exec_creds().
8971 */ 8992 */
8972void perf_event_exit_task(struct task_struct *child) 8993void perf_event_exit_task(struct task_struct *child)
8973{ 8994{
diff --git a/kernel/futex.c b/kernel/futex.c
index a5d2e74c89e0..c20f06f38ef3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1295,10 +1295,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1295 if (unlikely(should_fail_futex(true))) 1295 if (unlikely(should_fail_futex(true)))
1296 ret = -EFAULT; 1296 ret = -EFAULT;
1297 1297
1298 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) 1298 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
1299 ret = -EFAULT; 1299 ret = -EFAULT;
1300 else if (curval != uval) 1300 } else if (curval != uval) {
1301 ret = -EINVAL; 1301 /*
1302 * If a unconditional UNLOCK_PI operation (user space did not
1303 * try the TID->0 transition) raced with a waiter setting the
1304 * FUTEX_WAITERS flag between get_user() and locking the hash
1305 * bucket lock, retry the operation.
1306 */
1307 if ((FUTEX_TID_MASK & curval) == uval)
1308 ret = -EAGAIN;
1309 else
1310 ret = -EINVAL;
1311 }
1302 if (ret) { 1312 if (ret) {
1303 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); 1313 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1304 return ret; 1314 return ret;
@@ -1525,8 +1535,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1525 if (likely(&hb1->chain != &hb2->chain)) { 1535 if (likely(&hb1->chain != &hb2->chain)) {
1526 plist_del(&q->list, &hb1->chain); 1536 plist_del(&q->list, &hb1->chain);
1527 hb_waiters_dec(hb1); 1537 hb_waiters_dec(hb1);
1528 plist_add(&q->list, &hb2->chain);
1529 hb_waiters_inc(hb2); 1538 hb_waiters_inc(hb2);
1539 plist_add(&q->list, &hb2->chain);
1530 q->lock_ptr = &hb2->lock; 1540 q->lock_ptr = &hb2->lock;
1531 } 1541 }
1532 get_futex_key_refs(key2); 1542 get_futex_key_refs(key2);
@@ -2623,6 +2633,15 @@ retry:
2623 if (ret == -EFAULT) 2633 if (ret == -EFAULT)
2624 goto pi_faulted; 2634 goto pi_faulted;
2625 /* 2635 /*
2636 * A unconditional UNLOCK_PI op raced against a waiter
2637 * setting the FUTEX_WAITERS bit. Try again.
2638 */
2639 if (ret == -EAGAIN) {
2640 spin_unlock(&hb->lock);
2641 put_futex_key(&key);
2642 goto retry;
2643 }
2644 /*
2626 * wake_futex_pi has detected invalid state. Tell user 2645 * wake_futex_pi has detected invalid state. Tell user
2627 * space. 2646 * space.
2628 */ 2647 */
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c37f34b00a11..14777af8e097 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -94,6 +94,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
94 data = irq_get_irq_data(virq + i); 94 data = irq_get_irq_data(virq + i);
95 cpumask_copy(data->common->affinity, dest); 95 cpumask_copy(data->common->affinity, dest);
96 data->common->ipi_offset = offset; 96 data->common->ipi_offset = offset;
97 irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
97 } 98 }
98 return virq; 99 return virq;
99 100
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3efbee0834a8..a02f2dddd1d7 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1,5 +1,6 @@
1#define pr_fmt(fmt) "kcov: " fmt 1#define pr_fmt(fmt) "kcov: " fmt
2 2
3#define DISABLE_BRANCH_PROFILING
3#include <linux/compiler.h> 4#include <linux/compiler.h>
4#include <linux/types.h> 5#include <linux/types.h>
5#include <linux/file.h> 6#include <linux/file.h>
@@ -43,7 +44,7 @@ struct kcov {
43 * Entry point from instrumented code. 44 * Entry point from instrumented code.
44 * This is called once per basic-block/edge. 45 * This is called once per basic-block/edge.
45 */ 46 */
46void __sanitizer_cov_trace_pc(void) 47void notrace __sanitizer_cov_trace_pc(void)
47{ 48{
48 struct task_struct *t; 49 struct task_struct *t;
49 enum kcov_mode mode; 50 enum kcov_mode mode;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8d34308ea449..1391d3ee3b86 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1415,6 +1415,9 @@ static int __init crash_save_vmcoreinfo_init(void)
1415 VMCOREINFO_OFFSET(page, lru); 1415 VMCOREINFO_OFFSET(page, lru);
1416 VMCOREINFO_OFFSET(page, _mapcount); 1416 VMCOREINFO_OFFSET(page, _mapcount);
1417 VMCOREINFO_OFFSET(page, private); 1417 VMCOREINFO_OFFSET(page, private);
1418 VMCOREINFO_OFFSET(page, compound_dtor);
1419 VMCOREINFO_OFFSET(page, compound_order);
1420 VMCOREINFO_OFFSET(page, compound_head);
1418 VMCOREINFO_OFFSET(pglist_data, node_zones); 1421 VMCOREINFO_OFFSET(pglist_data, node_zones);
1419 VMCOREINFO_OFFSET(pglist_data, nr_zones); 1422 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1420#ifdef CONFIG_FLAT_NODE_MEM_MAP 1423#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1447,8 +1450,8 @@ static int __init crash_save_vmcoreinfo_init(void)
1447#ifdef CONFIG_X86 1450#ifdef CONFIG_X86
1448 VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); 1451 VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
1449#endif 1452#endif
1450#ifdef CONFIG_HUGETLBFS 1453#ifdef CONFIG_HUGETLB_PAGE
1451 VMCOREINFO_SYMBOL(free_huge_page); 1454 VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
1452#endif 1455#endif
1453 1456
1454 arch_crash_save_vmcoreinfo(); 1457 arch_crash_save_vmcoreinfo();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ed9410936a22..78c1c0ee6dc1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2176,15 +2176,37 @@ cache_hit:
2176 chain->irq_context = hlock->irq_context; 2176 chain->irq_context = hlock->irq_context;
2177 i = get_first_held_lock(curr, hlock); 2177 i = get_first_held_lock(curr, hlock);
2178 chain->depth = curr->lockdep_depth + 1 - i; 2178 chain->depth = curr->lockdep_depth + 1 - i;
2179
2180 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2181 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
2182 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2183
2179 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2184 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2180 chain->base = nr_chain_hlocks; 2185 chain->base = nr_chain_hlocks;
2181 nr_chain_hlocks += chain->depth;
2182 for (j = 0; j < chain->depth - 1; j++, i++) { 2186 for (j = 0; j < chain->depth - 1; j++, i++) {
2183 int lock_id = curr->held_locks[i].class_idx - 1; 2187 int lock_id = curr->held_locks[i].class_idx - 1;
2184 chain_hlocks[chain->base + j] = lock_id; 2188 chain_hlocks[chain->base + j] = lock_id;
2185 } 2189 }
2186 chain_hlocks[chain->base + j] = class - lock_classes; 2190 chain_hlocks[chain->base + j] = class - lock_classes;
2187 } 2191 }
2192
2193 if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2194 nr_chain_hlocks += chain->depth;
2195
2196#ifdef CONFIG_DEBUG_LOCKDEP
2197 /*
2198 * Important for check_no_collision().
2199 */
2200 if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2201 if (debug_locks_off_graph_unlock())
2202 return 0;
2203
2204 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2205 dump_stack();
2206 return 0;
2207 }
2208#endif
2209
2188 hlist_add_head_rcu(&chain->entry, hash_head); 2210 hlist_add_head_rcu(&chain->entry, hash_head);
2189 debug_atomic_inc(chain_lookup_misses); 2211 debug_atomic_inc(chain_lookup_misses);
2190 inc_chains(); 2212 inc_chains();
@@ -2932,6 +2954,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2932 return 1; 2954 return 1;
2933} 2955}
2934 2956
2957static inline unsigned int task_irq_context(struct task_struct *task)
2958{
2959 return 2 * !!task->hardirq_context + !!task->softirq_context;
2960}
2961
2935static int separate_irq_context(struct task_struct *curr, 2962static int separate_irq_context(struct task_struct *curr,
2936 struct held_lock *hlock) 2963 struct held_lock *hlock)
2937{ 2964{
@@ -2940,8 +2967,6 @@ static int separate_irq_context(struct task_struct *curr,
2940 /* 2967 /*
2941 * Keep track of points where we cross into an interrupt context: 2968 * Keep track of points where we cross into an interrupt context:
2942 */ 2969 */
2943 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2944 curr->softirq_context;
2945 if (depth) { 2970 if (depth) {
2946 struct held_lock *prev_hlock; 2971 struct held_lock *prev_hlock;
2947 2972
@@ -2973,6 +2998,11 @@ static inline int mark_irqflags(struct task_struct *curr,
2973 return 1; 2998 return 1;
2974} 2999}
2975 3000
3001static inline unsigned int task_irq_context(struct task_struct *task)
3002{
3003 return 0;
3004}
3005
2976static inline int separate_irq_context(struct task_struct *curr, 3006static inline int separate_irq_context(struct task_struct *curr,
2977 struct held_lock *hlock) 3007 struct held_lock *hlock)
2978{ 3008{
@@ -3241,6 +3271,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3241 hlock->acquire_ip = ip; 3271 hlock->acquire_ip = ip;
3242 hlock->instance = lock; 3272 hlock->instance = lock;
3243 hlock->nest_lock = nest_lock; 3273 hlock->nest_lock = nest_lock;
3274 hlock->irq_context = task_irq_context(curr);
3244 hlock->trylock = trylock; 3275 hlock->trylock = trylock;
3245 hlock->read = read; 3276 hlock->read = read;
3246 hlock->check = check; 3277 hlock->check = check;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dbb61a302548..a0f61effad25 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v)
141 int i; 141 int i;
142 142
143 if (v == SEQ_START_TOKEN) { 143 if (v == SEQ_START_TOKEN) {
144 if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
145 seq_printf(m, "(buggered) ");
144 seq_printf(m, "all lock chains:\n"); 146 seq_printf(m, "all lock chains:\n");
145 return 0; 147 return 0;
146 } 148 }
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index eb2a2c9bc3fc..d734b7502001 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -136,10 +136,12 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
136 } 136 }
137 137
138 if (counter == qstat_pv_hash_hops) { 138 if (counter == qstat_pv_hash_hops) {
139 u64 frac; 139 u64 frac = 0;
140 140
141 frac = 100ULL * do_div(stat, kicks); 141 if (kicks) {
142 frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); 142 frac = 100ULL * do_div(stat, kicks);
143 frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
144 }
143 145
144 /* 146 /*
145 * Return a X.XX decimal number 147 * Return a X.XX decimal number
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..3bfdff06eea7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
666 */ 666 */
667 smp_wmb(); 667 smp_wmb();
668 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); 668 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
669 /*
670 * The following mb guarantees that previous clear of a PENDING bit
671 * will not be reordered with any speculative LOADS or STORES from
672 * work->current_func, which is executed afterwards. This possible
673 * reordering can lead to a missed execution on attempt to qeueue
674 * the same @work. E.g. consider this case:
675 *
676 * CPU#0 CPU#1
677 * ---------------------------- --------------------------------
678 *
679 * 1 STORE event_indicated
680 * 2 queue_work_on() {
681 * 3 test_and_set_bit(PENDING)
682 * 4 } set_..._and_clear_pending() {
683 * 5 set_work_data() # clear bit
684 * 6 smp_mb()
685 * 7 work->current_func() {
686 * 8 LOAD event_indicated
687 * }
688 *
689 * Without an explicit full barrier speculative LOAD on line 8 can
690 * be executed before CPU#0 does STORE on line 1. If that happens,
691 * CPU#0 observes the PENDING bit is still set and new execution of
692 * a @work is not queued in a hope, that CPU#1 will eventually
693 * finish the queued @work. Meanwhile CPU#1 does not see
694 * event_indicated is set, because speculative LOAD was executed
695 * before actual STORE.
696 */
697 smp_mb();
669} 698}
670 699
671static void clear_work_data(struct work_struct *work) 700static void clear_work_data(struct work_struct *work)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 654c9d87e83a..9e0b0315a724 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -210,10 +210,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
210 goto fast_exit; 210 goto fast_exit;
211 211
212 hash = hash_stack(trace->entries, trace->nr_entries); 212 hash = hash_stack(trace->entries, trace->nr_entries);
213 /* Bad luck, we won't store this stack. */
214 if (hash == 0)
215 goto exit;
216
217 bucket = &stack_table[hash & STACK_HASH_MASK]; 213 bucket = &stack_table[hash & STACK_HASH_MASK];
218 214
219 /* 215 /*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86f9f8b82f8e..df67b53ae3c5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@ retry:
232 return READ_ONCE(huge_zero_page); 232 return READ_ONCE(huge_zero_page);
233} 233}
234 234
235static void put_huge_zero_page(void) 235void put_huge_zero_page(void)
236{ 236{
237 /* 237 /*
238 * Counter should never go to zero here. Only shrinker can put 238 * Counter should never go to zero here. Only shrinker can put
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1684 if (vma_is_dax(vma)) { 1684 if (vma_is_dax(vma)) {
1685 spin_unlock(ptl); 1685 spin_unlock(ptl);
1686 if (is_huge_zero_pmd(orig_pmd)) 1686 if (is_huge_zero_pmd(orig_pmd))
1687 put_huge_zero_page(); 1687 tlb_remove_page(tlb, pmd_page(orig_pmd));
1688 } else if (is_huge_zero_pmd(orig_pmd)) { 1688 } else if (is_huge_zero_pmd(orig_pmd)) {
1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); 1689 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1690 atomic_long_dec(&tlb->mm->nr_ptes); 1690 atomic_long_dec(&tlb->mm->nr_ptes);
1691 spin_unlock(ptl); 1691 spin_unlock(ptl);
1692 put_huge_zero_page(); 1692 tlb_remove_page(tlb, pmd_page(orig_pmd));
1693 } else { 1693 } else {
1694 struct page *page = pmd_page(orig_pmd); 1694 struct page *page = pmd_page(orig_pmd);
1695 page_remove_rmap(page, true); 1695 page_remove_rmap(page, true);
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1960 * page fault if needed. 1960 * page fault if needed.
1961 */ 1961 */
1962 return 0; 1962 return 0;
1963 if (vma->vm_ops) 1963 if (vma->vm_ops || (vm_flags & VM_NO_THP))
1964 /* khugepaged not yet working on file or special mappings */ 1964 /* khugepaged not yet working on file or special mappings */
1965 return 0; 1965 return 0;
1966 VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
1967 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 1966 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1968 hend = vma->vm_end & HPAGE_PMD_MASK; 1967 hend = vma->vm_end & HPAGE_PMD_MASK;
1969 if (hstart < hend) 1968 if (hstart < hend)
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
2352 return false; 2351 return false;
2353 if (is_vma_temporary_stack(vma)) 2352 if (is_vma_temporary_stack(vma))
2354 return false; 2353 return false;
2355 VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2354 return !(vma->vm_flags & VM_NO_THP);
2356 return true;
2357} 2355}
2358 2356
2359static void collapse_huge_page(struct mm_struct *mm, 2357static void collapse_huge_page(struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36db05fa8acb..fe787f5c41bd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
207/* "mc" and its members are protected by cgroup_mutex */ 207/* "mc" and its members are protected by cgroup_mutex */
208static struct move_charge_struct { 208static struct move_charge_struct {
209 spinlock_t lock; /* for from, to */ 209 spinlock_t lock; /* for from, to */
210 struct mm_struct *mm;
210 struct mem_cgroup *from; 211 struct mem_cgroup *from;
211 struct mem_cgroup *to; 212 struct mem_cgroup *to;
212 unsigned long flags; 213 unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
4667 4668
4668static void mem_cgroup_clear_mc(void) 4669static void mem_cgroup_clear_mc(void)
4669{ 4670{
4671 struct mm_struct *mm = mc.mm;
4672
4670 /* 4673 /*
4671 * we must clear moving_task before waking up waiters at the end of 4674 * we must clear moving_task before waking up waiters at the end of
4672 * task migration. 4675 * task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
4676 spin_lock(&mc.lock); 4679 spin_lock(&mc.lock);
4677 mc.from = NULL; 4680 mc.from = NULL;
4678 mc.to = NULL; 4681 mc.to = NULL;
4682 mc.mm = NULL;
4679 spin_unlock(&mc.lock); 4683 spin_unlock(&mc.lock);
4684
4685 mmput(mm);
4680} 4686}
4681 4687
4682static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4688static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4733 VM_BUG_ON(mc.moved_swap); 4739 VM_BUG_ON(mc.moved_swap);
4734 4740
4735 spin_lock(&mc.lock); 4741 spin_lock(&mc.lock);
4742 mc.mm = mm;
4736 mc.from = from; 4743 mc.from = from;
4737 mc.to = memcg; 4744 mc.to = memcg;
4738 mc.flags = move_flags; 4745 mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4742 ret = mem_cgroup_precharge_mc(mm); 4749 ret = mem_cgroup_precharge_mc(mm);
4743 if (ret) 4750 if (ret)
4744 mem_cgroup_clear_mc(); 4751 mem_cgroup_clear_mc();
4752 } else {
4753 mmput(mm);
4745 } 4754 }
4746 mmput(mm);
4747 return ret; 4755 return ret;
4748} 4756}
4749 4757
@@ -4852,11 +4860,11 @@ put: /* get_mctgt_type() gets the page */
4852 return ret; 4860 return ret;
4853} 4861}
4854 4862
4855static void mem_cgroup_move_charge(struct mm_struct *mm) 4863static void mem_cgroup_move_charge(void)
4856{ 4864{
4857 struct mm_walk mem_cgroup_move_charge_walk = { 4865 struct mm_walk mem_cgroup_move_charge_walk = {
4858 .pmd_entry = mem_cgroup_move_charge_pte_range, 4866 .pmd_entry = mem_cgroup_move_charge_pte_range,
4859 .mm = mm, 4867 .mm = mc.mm,
4860 }; 4868 };
4861 4869
4862 lru_add_drain_all(); 4870 lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
4868 atomic_inc(&mc.from->moving_account); 4876 atomic_inc(&mc.from->moving_account);
4869 synchronize_rcu(); 4877 synchronize_rcu();
4870retry: 4878retry:
4871 if (unlikely(!down_read_trylock(&mm->mmap_sem))) { 4879 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
4872 /* 4880 /*
4873 * Someone who are holding the mmap_sem might be waiting in 4881 * Someone who are holding the mmap_sem might be waiting in
4874 * waitq. So we cancel all extra charges, wake up all waiters, 4882 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
4885 * additional charge, the page walk just aborts. 4893 * additional charge, the page walk just aborts.
4886 */ 4894 */
4887 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); 4895 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4888 up_read(&mm->mmap_sem); 4896 up_read(&mc.mm->mmap_sem);
4889 atomic_dec(&mc.from->moving_account); 4897 atomic_dec(&mc.from->moving_account);
4890} 4898}
4891 4899
4892static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4900static void mem_cgroup_move_task(void)
4893{ 4901{
4894 struct cgroup_subsys_state *css; 4902 if (mc.to) {
4895 struct task_struct *p = cgroup_taskset_first(tset, &css); 4903 mem_cgroup_move_charge();
4896 struct mm_struct *mm = get_task_mm(p);
4897
4898 if (mm) {
4899 if (mc.to)
4900 mem_cgroup_move_charge(mm);
4901 mmput(mm);
4902 }
4903 if (mc.to)
4904 mem_cgroup_clear_mc(); 4904 mem_cgroup_clear_mc();
4905 }
4905} 4906}
4906#else /* !CONFIG_MMU */ 4907#else /* !CONFIG_MMU */
4907static int mem_cgroup_can_attach(struct cgroup_taskset *tset) 4908static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4911static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) 4912static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4912{ 4913{
4913} 4914}
4914static void mem_cgroup_move_task(struct cgroup_taskset *tset) 4915static void mem_cgroup_move_task(void)
4915{ 4916{
4916} 4917}
4917#endif 4918#endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
5195 .css_reset = mem_cgroup_css_reset, 5196 .css_reset = mem_cgroup_css_reset,
5196 .can_attach = mem_cgroup_can_attach, 5197 .can_attach = mem_cgroup_can_attach,
5197 .cancel_attach = mem_cgroup_cancel_attach, 5198 .cancel_attach = mem_cgroup_cancel_attach,
5198 .attach = mem_cgroup_move_task, 5199 .post_attach = mem_cgroup_move_task,
5199 .bind = mem_cgroup_bind, 5200 .bind = mem_cgroup_bind,
5200 .dfl_cftypes = memory_files, 5201 .dfl_cftypes = memory_files,
5201 .legacy_cftypes = mem_cgroup_legacy_files, 5202 .legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 78f5f2641b91..ca5acee53b7a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page)
888 } 888 }
889 } 889 }
890 890
891 return get_page_unless_zero(head); 891 if (get_page_unless_zero(head)) {
892 if (head == compound_head(page))
893 return 1;
894
895 pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
896 put_page(head);
897 }
898
899 return 0;
892} 900}
893EXPORT_SYMBOL_GPL(get_hwpoison_page); 901EXPORT_SYMBOL_GPL(get_hwpoison_page);
894 902
diff --git a/mm/memory.c b/mm/memory.c
index 93897f23cc11..305537fc8640 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
789 return pfn_to_page(pfn); 789 return pfn_to_page(pfn);
790} 790}
791 791
792#ifdef CONFIG_TRANSPARENT_HUGEPAGE
793struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
794 pmd_t pmd)
795{
796 unsigned long pfn = pmd_pfn(pmd);
797
798 /*
799 * There is no pmd_special() but there may be special pmds, e.g.
800 * in a direct-access (dax) mapping, so let's just replicate the
801 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
802 */
803 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
804 if (vma->vm_flags & VM_MIXEDMAP) {
805 if (!pfn_valid(pfn))
806 return NULL;
807 goto out;
808 } else {
809 unsigned long off;
810 off = (addr - vma->vm_start) >> PAGE_SHIFT;
811 if (pfn == vma->vm_pgoff + off)
812 return NULL;
813 if (!is_cow_mapping(vma->vm_flags))
814 return NULL;
815 }
816 }
817
818 if (is_zero_pfn(pfn))
819 return NULL;
820 if (unlikely(pfn > highest_memmap_pfn))
821 return NULL;
822
823 /*
824 * NOTE! We still have PageReserved() pages in the page tables.
825 * eg. VDSO mappings can cause them to exist.
826 */
827out:
828 return pfn_to_page(pfn);
829}
830#endif
831
792/* 832/*
793 * copy one vm_area from one task to the other. Assumes the page tables 833 * copy one vm_area from one task to the other. Assumes the page tables
794 * already present in the new task to be cleared in the whole range 834 * already present in the new task to be cleared in the whole range
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c822a7b27e0..f9dfb18a4eba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@ out:
975 dec_zone_page_state(page, NR_ISOLATED_ANON + 975 dec_zone_page_state(page, NR_ISOLATED_ANON +
976 page_is_file_cache(page)); 976 page_is_file_cache(page));
977 /* Soft-offlined page shouldn't go through lru cache list */ 977 /* Soft-offlined page shouldn't go through lru cache list */
978 if (reason == MR_MEMORY_FAILURE) { 978 if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
979 /*
980 * With this release, we free successfully migrated
981 * page and set PG_HWPoison on just freed page
982 * intentionally. Although it's rather weird, it's how
983 * HWPoison flag works at the moment.
984 */
979 put_page(page); 985 put_page(page);
980 if (!test_set_page_hwpoison(page)) 986 if (!test_set_page_hwpoison(page))
981 num_poisoned_pages_inc(); 987 num_poisoned_pages_inc();
diff --git a/mm/page_io.c b/mm/page_io.c
index cd92e3d67a32..985f23cfa79b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page)
353 353
354 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); 354 ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
355 if (!ret) { 355 if (!ret) {
356 swap_slot_free_notify(page); 356 if (trylock_page(page)) {
357 swap_slot_free_notify(page);
358 unlock_page(page);
359 }
360
357 count_vm_event(PSWPIN); 361 count_vm_event(PSWPIN);
358 return 0; 362 return 0;
359 } 363 }
diff --git a/mm/swap.c b/mm/swap.c
index a0bc206b4ac6..03aacbcb013f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold)
728 zone = NULL; 728 zone = NULL;
729 } 729 }
730 730
731 if (is_huge_zero_page(page)) {
732 put_huge_zero_page();
733 continue;
734 }
735
731 page = compound_head(page); 736 page = compound_head(page);
732 if (!put_page_testzero(page)) 737 if (!put_page_testzero(page))
733 continue; 738 continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b934223eaa45..142cb61f4822 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2553 sc->gfp_mask |= __GFP_HIGHMEM; 2553 sc->gfp_mask |= __GFP_HIGHMEM;
2554 2554
2555 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2555 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2556 requested_highidx, sc->nodemask) { 2556 gfp_zone(sc->gfp_mask), sc->nodemask) {
2557 enum zone_type classzone_idx; 2557 enum zone_type classzone_idx;
2558 2558
2559 if (!populated_zone(zone)) 2559 if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
3318 /* Try to sleep for a short interval */ 3318 /* Try to sleep for a short interval */
3319 if (prepare_kswapd_sleep(pgdat, order, remaining, 3319 if (prepare_kswapd_sleep(pgdat, order, remaining,
3320 balanced_classzone_idx)) { 3320 balanced_classzone_idx)) {
3321 /*
3322 * Compaction records what page blocks it recently failed to
3323 * isolate pages from and skips them in the future scanning.
3324 * When kswapd is going to sleep, it is reasonable to assume
3325 * that pages and compaction may succeed so reset the cache.
3326 */
3327 reset_isolation_suitable(pgdat);
3328
3329 /*
3330 * We have freed the memory, now we should compact it to make
3331 * allocation of the requested order possible.
3332 */
3333 wakeup_kcompactd(pgdat, order, classzone_idx);
3334
3321 remaining = schedule_timeout(HZ/10); 3335 remaining = schedule_timeout(HZ/10);
3322 finish_wait(&pgdat->kswapd_wait, &wait); 3336 finish_wait(&pgdat->kswapd_wait, &wait);
3323 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); 3337 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
3341 */ 3355 */
3342 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); 3356 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
3343 3357
3344 /*
3345 * Compaction records what page blocks it recently failed to
3346 * isolate pages from and skips them in the future scanning.
3347 * When kswapd is going to sleep, it is reasonable to assume
3348 * that pages and compaction may succeed so reset the cache.
3349 */
3350 reset_isolation_suitable(pgdat);
3351
3352 /*
3353 * We have freed the memory, now we should compact it to make
3354 * allocation of the requested order possible.
3355 */
3356 wakeup_kcompactd(pgdat, order, classzone_idx);
3357
3358 if (!kthread_should_stop()) 3358 if (!kthread_should_stop())
3359 schedule(); 3359 schedule();
3360 3360
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 253bc77eda3b..7dbc80d01eb0 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
61 e->flags |= MDB_FLAGS_OFFLOAD; 61 e->flags |= MDB_FLAGS_OFFLOAD;
62} 62}
63 63
64static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
65{
66 memset(ip, 0, sizeof(struct br_ip));
67 ip->vid = entry->vid;
68 ip->proto = entry->addr.proto;
69 if (ip->proto == htons(ETH_P_IP))
70 ip->u.ip4 = entry->addr.u.ip4;
71#if IS_ENABLED(CONFIG_IPV6)
72 else
73 ip->u.ip6 = entry->addr.u.ip6;
74#endif
75}
76
64static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, 77static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
65 struct net_device *dev) 78 struct net_device *dev)
66{ 79{
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
243 + nla_total_size(sizeof(struct br_mdb_entry)); 256 + nla_total_size(sizeof(struct br_mdb_entry));
244} 257}
245 258
246static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, 259struct br_mdb_complete_info {
247 int type, struct net_bridge_port_group *pg) 260 struct net_bridge_port *port;
261 struct br_ip ip;
262};
263
264static void br_mdb_complete(struct net_device *dev, int err, void *priv)
248{ 265{
266 struct br_mdb_complete_info *data = priv;
267 struct net_bridge_port_group __rcu **pp;
268 struct net_bridge_port_group *p;
269 struct net_bridge_mdb_htable *mdb;
270 struct net_bridge_mdb_entry *mp;
271 struct net_bridge_port *port = data->port;
272 struct net_bridge *br = port->br;
273
274 if (err)
275 goto err;
276
277 spin_lock_bh(&br->multicast_lock);
278 mdb = mlock_dereference(br->mdb, br);
279 mp = br_mdb_ip_get(mdb, &data->ip);
280 if (!mp)
281 goto out;
282 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
283 pp = &p->next) {
284 if (p->port != port)
285 continue;
286 p->flags |= MDB_PG_FLAGS_OFFLOAD;
287 }
288out:
289 spin_unlock_bh(&br->multicast_lock);
290err:
291 kfree(priv);
292}
293
294static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
295 struct br_mdb_entry *entry, int type)
296{
297 struct br_mdb_complete_info *complete_info;
249 struct switchdev_obj_port_mdb mdb = { 298 struct switchdev_obj_port_mdb mdb = {
250 .obj = { 299 .obj = {
251 .id = SWITCHDEV_OBJ_ID_PORT_MDB, 300 .id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
268 317
269 mdb.obj.orig_dev = port_dev; 318 mdb.obj.orig_dev = port_dev;
270 if (port_dev && type == RTM_NEWMDB) { 319 if (port_dev && type == RTM_NEWMDB) {
271 err = switchdev_port_obj_add(port_dev, &mdb.obj); 320 complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
272 if (!err && pg) 321 if (complete_info) {
273 pg->flags |= MDB_PG_FLAGS_OFFLOAD; 322 complete_info->port = p;
323 __mdb_entry_to_br_ip(entry, &complete_info->ip);
324 mdb.obj.complete_priv = complete_info;
325 mdb.obj.complete = br_mdb_complete;
326 switchdev_port_obj_add(port_dev, &mdb.obj);
327 }
274 } else if (port_dev && type == RTM_DELMDB) { 328 } else if (port_dev && type == RTM_DELMDB) {
275 switchdev_port_obj_del(port_dev, &mdb.obj); 329 switchdev_port_obj_del(port_dev, &mdb.obj);
276 } 330 }
@@ -291,21 +345,21 @@ errout:
291 rtnl_set_sk_err(net, RTNLGRP_MDB, err); 345 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
292} 346}
293 347
294void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 348void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
295 int type) 349 struct br_ip *group, int type, u8 flags)
296{ 350{
297 struct br_mdb_entry entry; 351 struct br_mdb_entry entry;
298 352
299 memset(&entry, 0, sizeof(entry)); 353 memset(&entry, 0, sizeof(entry));
300 entry.ifindex = pg->port->dev->ifindex; 354 entry.ifindex = port->dev->ifindex;
301 entry.addr.proto = pg->addr.proto; 355 entry.addr.proto = group->proto;
302 entry.addr.u.ip4 = pg->addr.u.ip4; 356 entry.addr.u.ip4 = group->u.ip4;
303#if IS_ENABLED(CONFIG_IPV6) 357#if IS_ENABLED(CONFIG_IPV6)
304 entry.addr.u.ip6 = pg->addr.u.ip6; 358 entry.addr.u.ip6 = group->u.ip6;
305#endif 359#endif
306 entry.vid = pg->addr.vid; 360 entry.vid = group->vid;
307 __mdb_entry_fill_flags(&entry, pg->flags); 361 __mdb_entry_fill_flags(&entry, flags);
308 __br_mdb_notify(dev, &entry, type, pg); 362 __br_mdb_notify(dev, port, &entry, type);
309} 363}
310 364
311static int nlmsg_populate_rtr_fill(struct sk_buff *skb, 365static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
450} 504}
451 505
452static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, 506static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
453 struct br_ip *group, unsigned char state, 507 struct br_ip *group, unsigned char state)
454 struct net_bridge_port_group **pg)
455{ 508{
456 struct net_bridge_mdb_entry *mp; 509 struct net_bridge_mdb_entry *mp;
457 struct net_bridge_port_group *p; 510 struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
482 if (unlikely(!p)) 535 if (unlikely(!p))
483 return -ENOMEM; 536 return -ENOMEM;
484 rcu_assign_pointer(*pp, p); 537 rcu_assign_pointer(*pp, p);
485 *pg = p;
486 if (state == MDB_TEMPORARY) 538 if (state == MDB_TEMPORARY)
487 mod_timer(&p->timer, now + br->multicast_membership_interval); 539 mod_timer(&p->timer, now + br->multicast_membership_interval);
488 540
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
490} 542}
491 543
492static int __br_mdb_add(struct net *net, struct net_bridge *br, 544static int __br_mdb_add(struct net *net, struct net_bridge *br,
493 struct br_mdb_entry *entry, 545 struct br_mdb_entry *entry)
494 struct net_bridge_port_group **pg)
495{ 546{
496 struct br_ip ip; 547 struct br_ip ip;
497 struct net_device *dev; 548 struct net_device *dev;
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
509 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 560 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
510 return -EINVAL; 561 return -EINVAL;
511 562
512 memset(&ip, 0, sizeof(ip)); 563 __mdb_entry_to_br_ip(entry, &ip);
513 ip.vid = entry->vid;
514 ip.proto = entry->addr.proto;
515 if (ip.proto == htons(ETH_P_IP))
516 ip.u.ip4 = entry->addr.u.ip4;
517#if IS_ENABLED(CONFIG_IPV6)
518 else
519 ip.u.ip6 = entry->addr.u.ip6;
520#endif
521 564
522 spin_lock_bh(&br->multicast_lock); 565 spin_lock_bh(&br->multicast_lock);
523 ret = br_mdb_add_group(br, p, &ip, entry->state, pg); 566 ret = br_mdb_add_group(br, p, &ip, entry->state);
524 spin_unlock_bh(&br->multicast_lock); 567 spin_unlock_bh(&br->multicast_lock);
525 return ret; 568 return ret;
526} 569}
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
528static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) 571static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
529{ 572{
530 struct net *net = sock_net(skb->sk); 573 struct net *net = sock_net(skb->sk);
531 struct net_bridge_port_group *pg;
532 struct net_bridge_vlan_group *vg; 574 struct net_bridge_vlan_group *vg;
533 struct net_device *dev, *pdev; 575 struct net_device *dev, *pdev;
534 struct br_mdb_entry *entry; 576 struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
558 if (br_vlan_enabled(br) && vg && entry->vid == 0) { 600 if (br_vlan_enabled(br) && vg && entry->vid == 0) {
559 list_for_each_entry(v, &vg->vlan_list, vlist) { 601 list_for_each_entry(v, &vg->vlan_list, vlist) {
560 entry->vid = v->vid; 602 entry->vid = v->vid;
561 err = __br_mdb_add(net, br, entry, &pg); 603 err = __br_mdb_add(net, br, entry);
562 if (err) 604 if (err)
563 break; 605 break;
564 __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 606 __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
565 } 607 }
566 } else { 608 } else {
567 err = __br_mdb_add(net, br, entry, &pg); 609 err = __br_mdb_add(net, br, entry);
568 if (!err) 610 if (!err)
569 __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 611 __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
570 } 612 }
571 613
572 return err; 614 return err;
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
584 if (!netif_running(br->dev) || br->multicast_disabled) 626 if (!netif_running(br->dev) || br->multicast_disabled)
585 return -EINVAL; 627 return -EINVAL;
586 628
587 memset(&ip, 0, sizeof(ip)); 629 __mdb_entry_to_br_ip(entry, &ip);
588 ip.vid = entry->vid;
589 ip.proto = entry->addr.proto;
590 if (ip.proto == htons(ETH_P_IP))
591 ip.u.ip4 = entry->addr.u.ip4;
592#if IS_ENABLED(CONFIG_IPV6)
593 else
594 ip.u.ip6 = entry->addr.u.ip6;
595#endif
596 630
597 spin_lock_bh(&br->multicast_lock); 631 spin_lock_bh(&br->multicast_lock);
598 mdb = mlock_dereference(br->mdb, br); 632 mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
662 entry->vid = v->vid; 696 entry->vid = v->vid;
663 err = __br_mdb_del(br, entry); 697 err = __br_mdb_del(br, entry);
664 if (!err) 698 if (!err)
665 __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 699 __br_mdb_notify(dev, p, entry, RTM_DELMDB);
666 } 700 }
667 } else { 701 } else {
668 err = __br_mdb_del(br, entry); 702 err = __br_mdb_del(br, entry);
669 if (!err) 703 if (!err)
670 __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 704 __br_mdb_notify(dev, p, entry, RTM_DELMDB);
671 } 705 }
672 706
673 return err; 707 return err;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a4c15df2b792..191ea66e4d92 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
283 rcu_assign_pointer(*pp, p->next); 283 rcu_assign_pointer(*pp, p->next);
284 hlist_del_init(&p->mglist); 284 hlist_del_init(&p->mglist);
285 del_timer(&p->timer); 285 del_timer(&p->timer);
286 br_mdb_notify(br->dev, p, RTM_DELMDB); 286 br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
287 p->flags);
287 call_rcu_bh(&p->rcu, br_multicast_free_pg); 288 call_rcu_bh(&p->rcu, br_multicast_free_pg);
288 289
289 if (!mp->ports && !mp->mglist && 290 if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
705 if (unlikely(!p)) 706 if (unlikely(!p))
706 goto err; 707 goto err;
707 rcu_assign_pointer(*pp, p); 708 rcu_assign_pointer(*pp, p);
708 br_mdb_notify(br->dev, p, RTM_NEWMDB); 709 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
709 710
710found: 711found:
711 mod_timer(&p->timer, now + br->multicast_membership_interval); 712 mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1461,7 +1462,8 @@ br_multicast_leave_group(struct net_bridge *br,
1461 hlist_del_init(&p->mglist); 1462 hlist_del_init(&p->mglist);
1462 del_timer(&p->timer); 1463 del_timer(&p->timer);
1463 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1464 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1464 br_mdb_notify(br->dev, p, RTM_DELMDB); 1465 br_mdb_notify(br->dev, port, group, RTM_DELMDB,
1466 p->flags);
1465 1467
1466 if (!mp->ports && !mp->mglist && 1468 if (!mp->ports && !mp->mglist &&
1467 netif_running(br->dev)) 1469 netif_running(br->dev))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1b5d145dfcbf..d9da857182ef 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -560,8 +560,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
560 unsigned char flags); 560 unsigned char flags);
561void br_mdb_init(void); 561void br_mdb_init(void);
562void br_mdb_uninit(void); 562void br_mdb_uninit(void);
563void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 563void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
564 int type); 564 struct br_ip *group, int type, u8 flags);
565void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, 565void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
566 int type); 566 int type);
567 567
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8570bc7744c2..5a61f35412a0 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
370 left - sizeof(struct ebt_entry_match) < m->match_size) 370 left - sizeof(struct ebt_entry_match) < m->match_size)
371 return -EINVAL; 371 return -EINVAL;
372 372
373 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0); 373 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
374 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
375 request_module("ebt_%s", m->u.name);
376 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
377 }
374 if (IS_ERR(match)) 378 if (IS_ERR(match))
375 return PTR_ERR(match); 379 return PTR_ERR(match);
376 m->u.match = match; 380 m->u.match = match;
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 6b923bcaa2a4..2bc5965fdd1e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
293} 293}
294EXPORT_SYMBOL(ceph_auth_create_authorizer); 294EXPORT_SYMBOL(ceph_auth_create_authorizer);
295 295
296void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, 296void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
297 struct ceph_authorizer *a)
298{ 297{
299 mutex_lock(&ac->mutex); 298 a->destroy(a);
300 if (ac->ops && ac->ops->destroy_authorizer)
301 ac->ops->destroy_authorizer(ac, a);
302 mutex_unlock(&ac->mutex);
303} 299}
304EXPORT_SYMBOL(ceph_auth_destroy_authorizer); 300EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
305 301
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 8c93fa8d81bc..5f836f02ae36 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac)
16 struct ceph_auth_none_info *xi = ac->private; 16 struct ceph_auth_none_info *xi = ac->private;
17 17
18 xi->starting = true; 18 xi->starting = true;
19 xi->built_authorizer = false;
20} 19}
21 20
22static void destroy(struct ceph_auth_client *ac) 21static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac)
39 return xi->starting; 38 return xi->starting;
40} 39}
41 40
41static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
42 struct ceph_none_authorizer *au)
43{
44 void *p = au->buf;
45 void *const end = p + sizeof(au->buf);
46 int ret;
47
48 ceph_encode_8_safe(&p, end, 1, e_range);
49 ret = ceph_entity_name_encode(ac->name, &p, end);
50 if (ret < 0)
51 return ret;
52
53 ceph_encode_64_safe(&p, end, ac->global_id, e_range);
54 au->buf_len = p - (void *)au->buf;
55 dout("%s built authorizer len %d\n", __func__, au->buf_len);
56 return 0;
57
58e_range:
59 return -ERANGE;
60}
61
42static int build_request(struct ceph_auth_client *ac, void *buf, void *end) 62static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
43{ 63{
44 return 0; 64 return 0;
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
57 return result; 77 return result;
58} 78}
59 79
80static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
81{
82 kfree(a);
83}
84
60/* 85/*
61 * build an 'authorizer' with our entity_name and global_id. we can 86 * build an 'authorizer' with our entity_name and global_id. it is
62 * reuse a single static copy since it is identical for all services 87 * identical for all services we connect to.
63 * we connect to.
64 */ 88 */
65static int ceph_auth_none_create_authorizer( 89static int ceph_auth_none_create_authorizer(
66 struct ceph_auth_client *ac, int peer_type, 90 struct ceph_auth_client *ac, int peer_type,
67 struct ceph_auth_handshake *auth) 91 struct ceph_auth_handshake *auth)
68{ 92{
69 struct ceph_auth_none_info *ai = ac->private; 93 struct ceph_none_authorizer *au;
70 struct ceph_none_authorizer *au = &ai->au;
71 void *p, *end;
72 int ret; 94 int ret;
73 95
74 if (!ai->built_authorizer) { 96 au = kmalloc(sizeof(*au), GFP_NOFS);
75 p = au->buf; 97 if (!au)
76 end = p + sizeof(au->buf); 98 return -ENOMEM;
77 ceph_encode_8(&p, 1); 99
78 ret = ceph_entity_name_encode(ac->name, &p, end - 8); 100 au->base.destroy = ceph_auth_none_destroy_authorizer;
79 if (ret < 0) 101
80 goto bad; 102 ret = ceph_auth_none_build_authorizer(ac, au);
81 ceph_decode_need(&p, end, sizeof(u64), bad2); 103 if (ret) {
82 ceph_encode_64(&p, ac->global_id); 104 kfree(au);
83 au->buf_len = p - (void *)au->buf; 105 return ret;
84 ai->built_authorizer = true;
85 dout("built authorizer len %d\n", au->buf_len);
86 } 106 }
87 107
88 auth->authorizer = (struct ceph_authorizer *) au; 108 auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer(
92 auth->authorizer_reply_buf_len = sizeof (au->reply_buf); 112 auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
93 113
94 return 0; 114 return 0;
95
96bad2:
97 ret = -ERANGE;
98bad:
99 return ret;
100}
101
102static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
103 struct ceph_authorizer *a)
104{
105 /* nothing to do */
106} 115}
107 116
108static const struct ceph_auth_client_ops ceph_auth_none_ops = { 117static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
114 .build_request = build_request, 123 .build_request = build_request,
115 .handle_reply = handle_reply, 124 .handle_reply = handle_reply,
116 .create_authorizer = ceph_auth_none_create_authorizer, 125 .create_authorizer = ceph_auth_none_create_authorizer,
117 .destroy_authorizer = ceph_auth_none_destroy_authorizer,
118}; 126};
119 127
120int ceph_auth_none_init(struct ceph_auth_client *ac) 128int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac)
127 return -ENOMEM; 135 return -ENOMEM;
128 136
129 xi->starting = true; 137 xi->starting = true;
130 xi->built_authorizer = false;
131 138
132 ac->protocol = CEPH_AUTH_NONE; 139 ac->protocol = CEPH_AUTH_NONE;
133 ac->private = xi; 140 ac->private = xi;
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 059a3ce4b53f..62021535ae4a 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14struct ceph_none_authorizer { 14struct ceph_none_authorizer {
15 struct ceph_authorizer base;
15 char buf[128]; 16 char buf[128];
16 int buf_len; 17 int buf_len;
17 char reply_buf[0]; 18 char reply_buf[0];
@@ -19,8 +20,6 @@ struct ceph_none_authorizer {
19 20
20struct ceph_auth_none_info { 21struct ceph_auth_none_info {
21 bool starting; 22 bool starting;
22 bool built_authorizer;
23 struct ceph_none_authorizer au; /* we only need one; it's static */
24}; 23};
25 24
26int ceph_auth_none_init(struct ceph_auth_client *ac); 25int ceph_auth_none_init(struct ceph_auth_client *ac);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9e43a315e662..a0905f04bd13 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
565 return -EAGAIN; 565 return -EAGAIN;
566} 566}
567 567
568static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
569{
570 struct ceph_x_authorizer *au = (void *)a;
571
572 ceph_x_authorizer_cleanup(au);
573 kfree(au);
574}
575
568static int ceph_x_create_authorizer( 576static int ceph_x_create_authorizer(
569 struct ceph_auth_client *ac, int peer_type, 577 struct ceph_auth_client *ac, int peer_type,
570 struct ceph_auth_handshake *auth) 578 struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer(
581 if (!au) 589 if (!au)
582 return -ENOMEM; 590 return -ENOMEM;
583 591
592 au->base.destroy = ceph_x_destroy_authorizer;
593
584 ret = ceph_x_build_authorizer(ac, th, au); 594 ret = ceph_x_build_authorizer(ac, th, au);
585 if (ret) { 595 if (ret) {
586 kfree(au); 596 kfree(au);
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
643 return ret; 653 return ret;
644} 654}
645 655
646static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
647 struct ceph_authorizer *a)
648{
649 struct ceph_x_authorizer *au = (void *)a;
650
651 ceph_x_authorizer_cleanup(au);
652 kfree(au);
653}
654
655
656static void ceph_x_reset(struct ceph_auth_client *ac) 656static void ceph_x_reset(struct ceph_auth_client *ac)
657{ 657{
658 struct ceph_x_info *xi = ac->private; 658 struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
770 .create_authorizer = ceph_x_create_authorizer, 770 .create_authorizer = ceph_x_create_authorizer,
771 .update_authorizer = ceph_x_update_authorizer, 771 .update_authorizer = ceph_x_update_authorizer,
772 .verify_authorizer_reply = ceph_x_verify_authorizer_reply, 772 .verify_authorizer_reply = ceph_x_verify_authorizer_reply,
773 .destroy_authorizer = ceph_x_destroy_authorizer,
774 .invalidate_authorizer = ceph_x_invalidate_authorizer, 773 .invalidate_authorizer = ceph_x_invalidate_authorizer,
775 .reset = ceph_x_reset, 774 .reset = ceph_x_reset,
776 .destroy = ceph_x_destroy, 775 .destroy = ceph_x_destroy,
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 40b1a3cf7397..21a5af904bae 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
26 26
27 27
28struct ceph_x_authorizer { 28struct ceph_x_authorizer {
29 struct ceph_authorizer base;
29 struct ceph_crypto_key session_key; 30 struct ceph_crypto_key session_key;
30 struct ceph_buffer *buf; 31 struct ceph_buffer *buf;
31 unsigned int service; 32 unsigned int service;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 32355d9d0103..40a53a70efdf 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd)
1087 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), 1087 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
1088 atomic_read(&osd->o_ref) - 1); 1088 atomic_read(&osd->o_ref) - 1);
1089 if (atomic_dec_and_test(&osd->o_ref)) { 1089 if (atomic_dec_and_test(&osd->o_ref)) {
1090 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
1091
1092 if (osd->o_auth.authorizer) 1090 if (osd->o_auth.authorizer)
1093 ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); 1091 ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1094 kfree(osd); 1092 kfree(osd);
1095 } 1093 }
1096} 1094}
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
2984 struct ceph_auth_handshake *auth = &o->o_auth; 2982 struct ceph_auth_handshake *auth = &o->o_auth;
2985 2983
2986 if (force_new && auth->authorizer) { 2984 if (force_new && auth->authorizer) {
2987 ceph_auth_destroy_authorizer(ac, auth->authorizer); 2985 ceph_auth_destroy_authorizer(auth->authorizer);
2988 auth->authorizer = NULL; 2986 auth->authorizer = NULL;
2989 } 2987 }
2990 if (!auth->authorizer) { 2988 if (!auth->authorizer) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04c2d1c8c87..e561f9f07d6d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
4502 __skb_push(skb, offset); 4502 __skb_push(skb, offset);
4503 err = __vlan_insert_tag(skb, skb->vlan_proto, 4503 err = __vlan_insert_tag(skb, skb->vlan_proto,
4504 skb_vlan_tag_get(skb)); 4504 skb_vlan_tag_get(skb));
4505 if (err) 4505 if (err) {
4506 __skb_pull(skb, offset);
4506 return err; 4507 return err;
4508 }
4509
4507 skb->protocol = skb->vlan_proto; 4510 skb->protocol = skb->vlan_proto;
4508 skb->mac_len += VLAN_HLEN; 4511 skb->mac_len += VLAN_HLEN;
4509 __skb_pull(skb, offset);
4510 4512
4511 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4513 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4514 __skb_pull(skb, offset);
4512 } 4515 }
4513 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 4516 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4514 return 0; 4517 return 0;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 607a14f20d88..b1dc096d22f8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1034,10 +1034,13 @@ source_ok:
1034 if (!fld.daddr) { 1034 if (!fld.daddr) {
1035 fld.daddr = fld.saddr; 1035 fld.daddr = fld.saddr;
1036 1036
1037 err = -EADDRNOTAVAIL;
1038 if (dev_out) 1037 if (dev_out)
1039 dev_put(dev_out); 1038 dev_put(dev_out);
1039 err = -EINVAL;
1040 dev_out = init_net.loopback_dev; 1040 dev_out = init_net.loopback_dev;
1041 if (!dev_out->dn_ptr)
1042 goto out;
1043 err = -EADDRNOTAVAIL;
1041 dev_hold(dev_out); 1044 dev_hold(dev_out);
1042 if (!fld.daddr) { 1045 if (!fld.daddr) {
1043 fld.daddr = 1046 fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
1110 if (dev_out == NULL) 1113 if (dev_out == NULL)
1111 goto out; 1114 goto out;
1112 dn_db = rcu_dereference_raw(dev_out->dn_ptr); 1115 dn_db = rcu_dereference_raw(dev_out->dn_ptr);
1116 if (!dn_db)
1117 goto e_inval;
1113 /* Possible improvement - check all devices for local addr */ 1118 /* Possible improvement - check all devices for local addr */
1114 if (dn_dev_islocal(dev_out, fld.daddr)) { 1119 if (dn_dev_islocal(dev_out, fld.daddr)) {
1115 dev_put(dev_out); 1120 dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
1151 dev_put(dev_out); 1156 dev_put(dev_out);
1152 dev_out = init_net.loopback_dev; 1157 dev_out = init_net.loopback_dev;
1153 dev_hold(dev_out); 1158 dev_hold(dev_out);
1159 if (!dev_out->dn_ptr)
1160 goto e_inval;
1154 fld.flowidn_oif = dev_out->ifindex; 1161 fld.flowidn_oif = dev_out->ifindex;
1155 if (res.fi) 1162 if (res.fi)
1156 dn_fib_info_put(res.fi); 1163 dn_fib_info_put(res.fi);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 8a9246deccfe..63566ec54794 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
904 if (ifa->ifa_flags & IFA_F_SECONDARY) { 904 if (ifa->ifa_flags & IFA_F_SECONDARY) {
905 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 905 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
906 if (!prim) { 906 if (!prim) {
907 pr_warn("%s: bug: prim == NULL\n", __func__); 907 /* if the device has been deleted, we don't perform
908 * address promotion
909 */
910 if (!in_dev->dead)
911 pr_warn("%s: bug: prim == NULL\n", __func__);
908 return; 912 return;
909 } 913 }
910 if (iprim && iprim != prim) { 914 if (iprim && iprim != prim) {
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index dd8c80dc32a2..8f8713b4388f 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
81 return ret; 81 return ret;
82 } 82 }
83 83
84 ret = arptable_filter_table_init(&init_net);
85 if (ret) {
86 unregister_pernet_subsys(&arptable_filter_net_ops);
87 kfree(arpfilter_ops);
88 }
89
84 return ret; 90 return ret;
85} 91}
86 92
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 02c62299d717..60398a9370e7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1438#endif 1438#endif
1439} 1439}
1440 1440
1441static struct rtable *rt_dst_alloc(struct net_device *dev, 1441struct rtable *rt_dst_alloc(struct net_device *dev,
1442 unsigned int flags, u16 type, 1442 unsigned int flags, u16 type,
1443 bool nopolicy, bool noxfrm, bool will_cache) 1443 bool nopolicy, bool noxfrm, bool will_cache)
1444{ 1444{
1445 struct rtable *rt; 1445 struct rtable *rt;
1446 1446
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
1468 1468
1469 return rt; 1469 return rt;
1470} 1470}
1471EXPORT_SYMBOL(rt_dst_alloc);
1471 1472
1472/* called in rcu_read_lock() section */ 1473/* called in rcu_read_lock() section */
1473static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, 1474static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2045 */ 2046 */
2046 if (fi && res->prefixlen < 4) 2047 if (fi && res->prefixlen < 4)
2047 fi = NULL; 2048 fi = NULL;
2049 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2050 (orig_oif != dev_out->ifindex)) {
2051 /* For local routes that require a particular output interface
2052 * we do not want to cache the result. Caching the result
2053 * causes incorrect behaviour when there are multiple source
2054 * addresses on the interface, the end result being that if the
2055 * intended recipient is waiting on that interface for the
2056 * packet he won't receive it because it will be delivered on
2057 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2058 * be set to the loopback interface as well.
2059 */
2060 fi = NULL;
2048 } 2061 }
2049 2062
2050 fnhe = NULL; 2063 fnhe = NULL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e6e65f79ade8..c124c3c12f7c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1309 if (skb == tcp_highest_sack(sk)) 1309 if (skb == tcp_highest_sack(sk))
1310 tcp_advance_highest_sack(sk, skb); 1310 tcp_advance_highest_sack(sk, skb);
1311 1311
1312 tcp_skb_collapse_tstamp(prev, skb);
1312 tcp_unlink_write_queue(skb, sk); 1313 tcp_unlink_write_queue(skb, sk);
1313 sk_wmem_free_skb(sk, skb); 1314 sk_wmem_free_skb(sk, skb);
1314 1315
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3098 3099
3099 shinfo = skb_shinfo(skb); 3100 shinfo = skb_shinfo(skb);
3100 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) && 3101 if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
3101 between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1)) 3102 !before(shinfo->tskey, prior_snd_una) &&
3103 before(shinfo->tskey, tcp_sk(sk)->snd_una))
3102 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); 3104 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
3103} 3105}
3104 3106
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d2dc015cd19..441ae9da3a23 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
2441 return window; 2441 return window;
2442} 2442}
2443 2443
2444void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2445 const struct sk_buff *next_skb)
2446{
2447 const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
2448 u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2449
2450 if (unlikely(tsflags)) {
2451 struct skb_shared_info *shinfo = skb_shinfo(skb);
2452
2453 shinfo->tx_flags |= tsflags;
2454 shinfo->tskey = next_shinfo->tskey;
2455 }
2456}
2457
2444/* Collapses two adjacent SKB's during retransmission. */ 2458/* Collapses two adjacent SKB's during retransmission. */
2445static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 2459static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2446{ 2460{
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2484 2498
2485 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2499 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2486 2500
2501 tcp_skb_collapse_tstamp(skb, next_skb);
2502
2487 sk_wmem_free_skb(sk, next_skb); 2503 sk_wmem_free_skb(sk, next_skb);
2488} 2504}
2489 2505
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 08eed5e16df0..a2e7f55a1f61 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -339,8 +339,13 @@ found:
339 339
340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); 340 hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
341 spin_lock(&hslot2->lock); 341 spin_lock(&hslot2->lock);
342 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, 342 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
343 &hslot2->head); 343 sk->sk_family == AF_INET6)
344 hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
345 &hslot2->head);
346 else
347 hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
348 &hslot2->head);
344 hslot2->count++; 349 hslot2->count++;
345 spin_unlock(&hslot2->lock); 350 spin_unlock(&hslot2->lock);
346 } 351 }
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 27aed1afcf81..8ec4b3089e20 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3176,35 +3176,9 @@ static void addrconf_gre_config(struct net_device *dev)
3176} 3176}
3177#endif 3177#endif
3178 3178
3179#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
3180/* If the host route is cached on the addr struct make sure it is associated
3181 * with the proper table. e.g., enslavement can change and if so the cached
3182 * host route needs to move to the new table.
3183 */
3184static void l3mdev_check_host_rt(struct inet6_dev *idev,
3185 struct inet6_ifaddr *ifp)
3186{
3187 if (ifp->rt) {
3188 u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
3189
3190 if (tb_id != ifp->rt->rt6i_table->tb6_id) {
3191 ip6_del_rt(ifp->rt);
3192 ifp->rt = NULL;
3193 }
3194 }
3195}
3196#else
3197static void l3mdev_check_host_rt(struct inet6_dev *idev,
3198 struct inet6_ifaddr *ifp)
3199{
3200}
3201#endif
3202
3203static int fixup_permanent_addr(struct inet6_dev *idev, 3179static int fixup_permanent_addr(struct inet6_dev *idev,
3204 struct inet6_ifaddr *ifp) 3180 struct inet6_ifaddr *ifp)
3205{ 3181{
3206 l3mdev_check_host_rt(idev, ifp);
3207
3208 if (!ifp->rt) { 3182 if (!ifp->rt) {
3209 struct rt6_info *rt; 3183 struct rt6_info *rt;
3210 3184
@@ -3255,6 +3229,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3255 void *ptr) 3229 void *ptr)
3256{ 3230{
3257 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3231 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3232 struct netdev_notifier_changeupper_info *info;
3258 struct inet6_dev *idev = __in6_dev_get(dev); 3233 struct inet6_dev *idev = __in6_dev_get(dev);
3259 int run_pending = 0; 3234 int run_pending = 0;
3260 int err; 3235 int err;
@@ -3303,6 +3278,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3303 break; 3278 break;
3304 3279
3305 if (event == NETDEV_UP) { 3280 if (event == NETDEV_UP) {
3281 /* restore routes for permanent addresses */
3282 addrconf_permanent_addr(dev);
3283
3306 if (!addrconf_qdisc_ok(dev)) { 3284 if (!addrconf_qdisc_ok(dev)) {
3307 /* device is not ready yet. */ 3285 /* device is not ready yet. */
3308 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", 3286 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3336,9 +3314,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3336 run_pending = 1; 3314 run_pending = 1;
3337 } 3315 }
3338 3316
3339 /* restore routes for permanent addresses */
3340 addrconf_permanent_addr(dev);
3341
3342 switch (dev->type) { 3317 switch (dev->type) {
3343#if IS_ENABLED(CONFIG_IPV6_SIT) 3318#if IS_ENABLED(CONFIG_IPV6_SIT)
3344 case ARPHRD_SIT: 3319 case ARPHRD_SIT:
@@ -3413,6 +3388,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
3413 if (idev) 3388 if (idev)
3414 addrconf_type_change(dev, event); 3389 addrconf_type_change(dev, event);
3415 break; 3390 break;
3391
3392 case NETDEV_CHANGEUPPER:
3393 info = ptr;
3394
3395 /* flush all routes if dev is linked to or unlinked from
3396 * an L3 master device (e.g., VRF)
3397 */
3398 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3399 addrconf_ifdown(dev, 0);
3416 } 3400 }
3417 3401
3418 return NOTIFY_OK; 3402 return NOTIFY_OK;
@@ -3438,6 +3422,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
3438 ipv6_mc_unmap(idev); 3422 ipv6_mc_unmap(idev);
3439} 3423}
3440 3424
3425static bool addr_is_local(const struct in6_addr *addr)
3426{
3427 return ipv6_addr_type(addr) &
3428 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3429}
3430
3441static int addrconf_ifdown(struct net_device *dev, int how) 3431static int addrconf_ifdown(struct net_device *dev, int how)
3442{ 3432{
3443 struct net *net = dev_net(dev); 3433 struct net *net = dev_net(dev);
@@ -3495,7 +3485,8 @@ restart:
3495 * address is retained on a down event 3485 * address is retained on a down event
3496 */ 3486 */
3497 if (!keep_addr || 3487 if (!keep_addr ||
3498 !(ifa->flags & IFA_F_PERMANENT)) { 3488 !(ifa->flags & IFA_F_PERMANENT) ||
3489 addr_is_local(&ifa->addr)) {
3499 hlist_del_init_rcu(&ifa->addr_lst); 3490 hlist_del_init_rcu(&ifa->addr_lst);
3500 goto restart; 3491 goto restart;
3501 } 3492 }
@@ -3539,17 +3530,23 @@ restart:
3539 3530
3540 INIT_LIST_HEAD(&del_list); 3531 INIT_LIST_HEAD(&del_list);
3541 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { 3532 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3533 struct rt6_info *rt = NULL;
3534
3542 addrconf_del_dad_work(ifa); 3535 addrconf_del_dad_work(ifa);
3543 3536
3544 write_unlock_bh(&idev->lock); 3537 write_unlock_bh(&idev->lock);
3545 spin_lock_bh(&ifa->lock); 3538 spin_lock_bh(&ifa->lock);
3546 3539
3547 if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) { 3540 if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3541 !addr_is_local(&ifa->addr)) {
3548 /* set state to skip the notifier below */ 3542 /* set state to skip the notifier below */
3549 state = INET6_IFADDR_STATE_DEAD; 3543 state = INET6_IFADDR_STATE_DEAD;
3550 ifa->state = 0; 3544 ifa->state = 0;
3551 if (!(ifa->flags & IFA_F_NODAD)) 3545 if (!(ifa->flags & IFA_F_NODAD))
3552 ifa->flags |= IFA_F_TENTATIVE; 3546 ifa->flags |= IFA_F_TENTATIVE;
3547
3548 rt = ifa->rt;
3549 ifa->rt = NULL;
3553 } else { 3550 } else {
3554 state = ifa->state; 3551 state = ifa->state;
3555 ifa->state = INET6_IFADDR_STATE_DEAD; 3552 ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3560,6 +3557,9 @@ restart:
3560 3557
3561 spin_unlock_bh(&ifa->lock); 3558 spin_unlock_bh(&ifa->lock);
3562 3559
3560 if (rt)
3561 ip6_del_rt(rt);
3562
3563 if (state != INET6_IFADDR_STATE_DEAD) { 3563 if (state != INET6_IFADDR_STATE_DEAD) {
3564 __ipv6_ifa_notify(RTM_DELADDR, ifa); 3564 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3565 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); 3565 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -5325,10 +5325,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5325 if (rt) 5325 if (rt)
5326 ip6_del_rt(rt); 5326 ip6_del_rt(rt);
5327 } 5327 }
5328 dst_hold(&ifp->rt->dst); 5328 if (ifp->rt) {
5329 5329 dst_hold(&ifp->rt->dst);
5330 ip6_del_rt(ifp->rt); 5330 ip6_del_rt(ifp->rt);
5331 5331 }
5332 rt_genid_bump_ipv6(net); 5332 rt_genid_bump_ipv6(net);
5333 break; 5333 break;
5334 } 5334 }
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 428162155280..9dd3882fe6bf 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
41} 41}
42 42
43static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
44{
45 struct inet_sock *inet = inet_sk(sk);
46 struct ipv6_pinfo *np = inet6_sk(sk);
47
48 memset(fl6, 0, sizeof(*fl6));
49 fl6->flowi6_proto = sk->sk_protocol;
50 fl6->daddr = sk->sk_v6_daddr;
51 fl6->saddr = np->saddr;
52 fl6->flowi6_oif = sk->sk_bound_dev_if;
53 fl6->flowi6_mark = sk->sk_mark;
54 fl6->fl6_dport = inet->inet_dport;
55 fl6->fl6_sport = inet->inet_sport;
56 fl6->flowlabel = np->flow_label;
57
58 if (!fl6->flowi6_oif)
59 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
60
61 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
62 fl6->flowi6_oif = np->mcast_oif;
63
64 security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
65}
66
67int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
68{
69 struct ip6_flowlabel *flowlabel = NULL;
70 struct in6_addr *final_p, final;
71 struct ipv6_txoptions *opt;
72 struct dst_entry *dst;
73 struct inet_sock *inet = inet_sk(sk);
74 struct ipv6_pinfo *np = inet6_sk(sk);
75 struct flowi6 fl6;
76 int err = 0;
77
78 if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
79 flowlabel = fl6_sock_lookup(sk, np->flow_label);
80 if (!flowlabel)
81 return -EINVAL;
82 }
83 ip6_datagram_flow_key_init(&fl6, sk);
84
85 rcu_read_lock();
86 opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
87 final_p = fl6_update_dst(&fl6, opt, &final);
88 rcu_read_unlock();
89
90 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
91 if (IS_ERR(dst)) {
92 err = PTR_ERR(dst);
93 goto out;
94 }
95
96 if (fix_sk_saddr) {
97 if (ipv6_addr_any(&np->saddr))
98 np->saddr = fl6.saddr;
99
100 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
101 sk->sk_v6_rcv_saddr = fl6.saddr;
102 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
103 if (sk->sk_prot->rehash)
104 sk->sk_prot->rehash(sk);
105 }
106 }
107
108 ip6_dst_store(sk, dst,
109 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
110 &sk->sk_v6_daddr : NULL,
111#ifdef CONFIG_IPV6_SUBTREES
112 ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
113 &np->saddr :
114#endif
115 NULL);
116
117out:
118 fl6_sock_release(flowlabel);
119 return err;
120}
121
122void ip6_datagram_release_cb(struct sock *sk)
123{
124 struct dst_entry *dst;
125
126 if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
127 return;
128
129 rcu_read_lock();
130 dst = __sk_dst_get(sk);
131 if (!dst || !dst->obsolete ||
132 dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
133 rcu_read_unlock();
134 return;
135 }
136 rcu_read_unlock();
137
138 ip6_datagram_dst_update(sk, false);
139}
140EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
141
43static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 142static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
44{ 143{
45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 144 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
46 struct inet_sock *inet = inet_sk(sk); 145 struct inet_sock *inet = inet_sk(sk);
47 struct ipv6_pinfo *np = inet6_sk(sk); 146 struct ipv6_pinfo *np = inet6_sk(sk);
48 struct in6_addr *daddr, *final_p, final; 147 struct in6_addr *daddr;
49 struct dst_entry *dst;
50 struct flowi6 fl6;
51 struct ip6_flowlabel *flowlabel = NULL;
52 struct ipv6_txoptions *opt;
53 int addr_type; 148 int addr_type;
54 int err; 149 int err;
150 __be32 fl6_flowlabel = 0;
55 151
56 if (usin->sin6_family == AF_INET) { 152 if (usin->sin6_family == AF_INET) {
57 if (__ipv6_only_sock(sk)) 153 if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
66 if (usin->sin6_family != AF_INET6) 162 if (usin->sin6_family != AF_INET6)
67 return -EAFNOSUPPORT; 163 return -EAFNOSUPPORT;
68 164
69 memset(&fl6, 0, sizeof(fl6)); 165 if (np->sndflow)
70 if (np->sndflow) { 166 fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
71 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
72 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
73 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
74 if (!flowlabel)
75 return -EINVAL;
76 }
77 }
78 167
79 addr_type = ipv6_addr_type(&usin->sin6_addr); 168 addr_type = ipv6_addr_type(&usin->sin6_addr);
80 169
@@ -145,7 +234,7 @@ ipv4_connected:
145 } 234 }
146 235
147 sk->sk_v6_daddr = *daddr; 236 sk->sk_v6_daddr = *daddr;
148 np->flow_label = fl6.flowlabel; 237 np->flow_label = fl6_flowlabel;
149 238
150 inet->inet_dport = usin->sin6_port; 239 inet->inet_dport = usin->sin6_port;
151 240
@@ -154,59 +243,13 @@ ipv4_connected:
154 * destination cache for it. 243 * destination cache for it.
155 */ 244 */
156 245
157 fl6.flowi6_proto = sk->sk_protocol; 246 err = ip6_datagram_dst_update(sk, true);
158 fl6.daddr = sk->sk_v6_daddr; 247 if (err)
159 fl6.saddr = np->saddr;
160 fl6.flowi6_oif = sk->sk_bound_dev_if;
161 fl6.flowi6_mark = sk->sk_mark;
162 fl6.fl6_dport = inet->inet_dport;
163 fl6.fl6_sport = inet->inet_sport;
164
165 if (!fl6.flowi6_oif)
166 fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
167
168 if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
169 fl6.flowi6_oif = np->mcast_oif;
170
171 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
172
173 rcu_read_lock();
174 opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
175 final_p = fl6_update_dst(&fl6, opt, &final);
176 rcu_read_unlock();
177
178 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
179 err = 0;
180 if (IS_ERR(dst)) {
181 err = PTR_ERR(dst);
182 goto out; 248 goto out;
183 }
184
185 /* source address lookup done in ip6_dst_lookup */
186
187 if (ipv6_addr_any(&np->saddr))
188 np->saddr = fl6.saddr;
189
190 if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
191 sk->sk_v6_rcv_saddr = fl6.saddr;
192 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
193 if (sk->sk_prot->rehash)
194 sk->sk_prot->rehash(sk);
195 }
196
197 ip6_dst_store(sk, dst,
198 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
199 &sk->sk_v6_daddr : NULL,
200#ifdef CONFIG_IPV6_SUBTREES
201 ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
202 &np->saddr :
203#endif
204 NULL);
205 249
206 sk->sk_state = TCP_ESTABLISHED; 250 sk->sk_state = TCP_ESTABLISHED;
207 sk_set_txhash(sk); 251 sk_set_txhash(sk);
208out: 252out:
209 fl6_sock_release(flowlabel);
210 return err; 253 return err;
211} 254}
212 255
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ed446639219c..d916d6ab9ad2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
338 return rt; 338 return rt;
339} 339}
340 340
341static struct rt6_info *ip6_dst_alloc(struct net *net, 341struct rt6_info *ip6_dst_alloc(struct net *net,
342 struct net_device *dev, 342 struct net_device *dev,
343 int flags) 343 int flags)
344{ 344{
345 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); 345 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
346 346
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
364 364
365 return rt; 365 return rt;
366} 366}
367EXPORT_SYMBOL(ip6_dst_alloc);
367 368
368static void ip6_dst_destroy(struct dst_entry *dst) 369static void ip6_dst_destroy(struct dst_entry *dst)
369{ 370{
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1417 1418
1418void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu) 1419void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1419{ 1420{
1421 struct dst_entry *dst;
1422
1420 ip6_update_pmtu(skb, sock_net(sk), mtu, 1423 ip6_update_pmtu(skb, sock_net(sk), mtu,
1421 sk->sk_bound_dev_if, sk->sk_mark); 1424 sk->sk_bound_dev_if, sk->sk_mark);
1425
1426 dst = __sk_dst_get(sk);
1427 if (!dst || !dst->obsolete ||
1428 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
1429 return;
1430
1431 bh_lock_sock(sk);
1432 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
1433 ip6_datagram_dst_update(sk, false);
1434 bh_unlock_sock(sk);
1422} 1435}
1423EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu); 1436EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1424 1437
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8125931106be..6bc5c664fa46 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
1539 .sendmsg = udpv6_sendmsg, 1539 .sendmsg = udpv6_sendmsg,
1540 .recvmsg = udpv6_recvmsg, 1540 .recvmsg = udpv6_recvmsg,
1541 .backlog_rcv = __udpv6_queue_rcv_skb, 1541 .backlog_rcv = __udpv6_queue_rcv_skb,
1542 .release_cb = ip6_datagram_release_cb,
1542 .hash = udp_lib_hash, 1543 .hash = udp_lib_hash,
1543 .unhash = udp_lib_unhash, 1544 .unhash = udp_lib_unhash,
1544 .rehash = udp_v6_rehash, 1545 .rehash = udp_v6_rehash,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 278f3b9356ef..7cc1d9c22a9f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
410 length--; 410 length--;
411 continue; 411 continue;
412 default: 412 default:
413 if (length < 2)
414 return;
413 opsize=*ptr++; 415 opsize=*ptr++;
414 if (opsize < 2) /* "silly options" */ 416 if (opsize < 2) /* "silly options" */
415 return; 417 return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
470 length--; 472 length--;
471 continue; 473 continue;
472 default: 474 default:
475 if (length < 2)
476 return;
473 opsize = *ptr++; 477 opsize = *ptr++;
474 if (opsize < 2) /* "silly options" */ 478 if (opsize < 2) /* "silly options" */
475 return; 479 return;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 215fc08c02ab..330ebd600f25 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
688 688
689 skb_queue_purge(&sk->sk_write_queue); 689 skb_queue_purge(&sk->sk_write_queue);
690 690
691 if (nlk->portid) { 691 if (nlk->portid && nlk->bound) {
692 struct netlink_notify n = { 692 struct netlink_notify n = {
693 .net = sock_net(sk), 693 .net = sock_net(sk),
694 .protocol = sk->sk_protocol, 694 .protocol = sk->sk_protocol,
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index e9dd47b2a85b..879185fe183f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
461 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); 461 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
462 462
463 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { 463 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
464 set_ipv6_addr(skb, key->ipv6_proto, saddr, masked, 464 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
465 true); 465 true);
466 memcpy(&flow_key->ipv6.addr.src, masked, 466 memcpy(&flow_key->ipv6.addr.src, masked,
467 sizeof(flow_key->ipv6.addr.src)); 467 sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
483 NULL, &flags) 483 NULL, &flags)
484 != NEXTHDR_ROUTING); 484 != NEXTHDR_ROUTING);
485 485
486 set_ipv6_addr(skb, key->ipv6_proto, daddr, masked, 486 set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
487 recalc_csum); 487 recalc_csum);
488 memcpy(&flow_key->ipv6.addr.dst, masked, 488 memcpy(&flow_key->ipv6.addr.dst, masked,
489 sizeof(flow_key->ipv6.addr.dst)); 489 sizeof(flow_key->ipv6.addr.dst));
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 1b9d286756be..b5fea1101faa 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
367 } else if (key->eth.type == htons(ETH_P_IPV6)) { 367 } else if (key->eth.type == htons(ETH_P_IPV6)) {
368 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; 368 enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
369 369
370 skb_orphan(skb);
370 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 371 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
371 err = nf_ct_frag6_gather(net, skb, user); 372 err = nf_ct_frag6_gather(net, skb, user);
372 if (err) 373 if (err)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f12c17f355d9..18d0becbc46d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3521 i->ifindex = mreq->mr_ifindex; 3521 i->ifindex = mreq->mr_ifindex;
3522 i->alen = mreq->mr_alen; 3522 i->alen = mreq->mr_alen;
3523 memcpy(i->addr, mreq->mr_address, i->alen); 3523 memcpy(i->addr, mreq->mr_address, i->alen);
3524 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3524 i->count = 1; 3525 i->count = 1;
3525 i->next = po->mclist; 3526 i->next = po->mclist;
3526 po->mclist = i; 3527 po->mclist = i;
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8246fd..6641bcf7c185 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 299 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 300 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
301 301
302 __set_bit_le(off, (void *)map->m_page_addrs[i]); 302 set_bit_le(off, (void *)map->m_page_addrs[i]);
303} 303}
304 304
305void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) 305void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS; 313 i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS; 314 off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
315 315
316 __clear_bit_le(off, (void *)map->m_page_addrs[i]); 316 clear_bit_le(off, (void *)map->m_page_addrs[i]);
317} 317}
318 318
319static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) 319static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 8764970f0c24..310cabce2311 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
194 dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version); 194 dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
195 dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version); 195 dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
196 dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS); 196 dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
197 dp->dp_ack_seq = rds_ib_piggyb_ack(ic); 197 dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
198 198
199 /* Advertise flow control */ 199 /* Advertise flow control */
200 if (ic->i_flowctl) { 200 if (ic->i_flowctl) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f18c35024207..80742edea96f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
159 if (validate) 159 if (validate)
160 skb = validate_xmit_skb_list(skb, dev); 160 skb = validate_xmit_skb_list(skb, dev);
161 161
162 if (skb) { 162 if (likely(skb)) {
163 HARD_TX_LOCK(dev, txq, smp_processor_id()); 163 HARD_TX_LOCK(dev, txq, smp_processor_id());
164 if (!netif_xmit_frozen_or_stopped(txq)) 164 if (!netif_xmit_frozen_or_stopped(txq))
165 skb = dev_hard_start_xmit(skb, dev, txq, &ret); 165 skb = dev_hard_start_xmit(skb, dev, txq, &ret);
166 166
167 HARD_TX_UNLOCK(dev, txq); 167 HARD_TX_UNLOCK(dev, txq);
168 } else {
169 spin_lock(root_lock);
170 return qdisc_qlen(q);
168 } 171 }
169 spin_lock(root_lock); 172 spin_lock(root_lock);
170 173
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 8d3d3625130e..084718f9b3da 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
866 * sender MUST assure that at least one T3-rtx 866 * sender MUST assure that at least one T3-rtx
867 * timer is running. 867 * timer is running.
868 */ 868 */
869 if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) 869 if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
870 sctp_transport_reset_timers(transport); 870 sctp_transport_reset_t3_rtx(transport);
871 transport->last_time_sent = jiffies;
872 }
871 } 873 }
872 break; 874 break;
873 875
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
924 error = sctp_outq_flush_rtx(q, packet, 926 error = sctp_outq_flush_rtx(q, packet,
925 rtx_timeout, &start_timer); 927 rtx_timeout, &start_timer);
926 928
927 if (start_timer) 929 if (start_timer) {
928 sctp_transport_reset_timers(transport); 930 sctp_transport_reset_t3_rtx(transport);
931 transport->last_time_sent = jiffies;
932 }
929 933
930 /* This can happen on COOKIE-ECHO resend. Only 934 /* This can happen on COOKIE-ECHO resend. Only
931 * one chunk can get bundled with a COOKIE-ECHO. 935 * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1062 list_add_tail(&chunk->transmitted_list, 1066 list_add_tail(&chunk->transmitted_list,
1063 &transport->transmitted); 1067 &transport->transmitted);
1064 1068
1065 sctp_transport_reset_timers(transport); 1069 sctp_transport_reset_t3_rtx(transport);
1070 transport->last_time_sent = jiffies;
1066 1071
1067 /* Only let one DATA chunk get bundled with a 1072 /* Only let one DATA chunk get bundled with a
1068 * COOKIE-ECHO chunk. 1073 * COOKIE-ECHO chunk.
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7f0bf798205b..56f364d8f932 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
3080 return SCTP_ERROR_RSRC_LOW; 3080 return SCTP_ERROR_RSRC_LOW;
3081 3081
3082 /* Start the heartbeat timer. */ 3082 /* Start the heartbeat timer. */
3083 if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) 3083 sctp_transport_reset_hb_timer(peer);
3084 sctp_transport_hold(peer);
3085 asoc->new_transport = peer; 3084 asoc->new_transport = peer;
3086 break; 3085 break;
3087 case SCTP_PARAM_DEL_IP: 3086 case SCTP_PARAM_DEL_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 7fe56d0acabf..41b081a64752 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
69 sctp_cmd_seq_t *commands, 69 sctp_cmd_seq_t *commands,
70 gfp_t gfp); 70 gfp_t gfp);
71 71
72static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
73 struct sctp_transport *t);
74/******************************************************************** 72/********************************************************************
75 * Helper functions 73 * Helper functions
76 ********************************************************************/ 74 ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
367 struct sctp_association *asoc = transport->asoc; 365 struct sctp_association *asoc = transport->asoc;
368 struct sock *sk = asoc->base.sk; 366 struct sock *sk = asoc->base.sk;
369 struct net *net = sock_net(sk); 367 struct net *net = sock_net(sk);
368 u32 elapsed, timeout;
370 369
371 bh_lock_sock(sk); 370 bh_lock_sock(sk);
372 if (sock_owned_by_user(sk)) { 371 if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
378 goto out_unlock; 377 goto out_unlock;
379 } 378 }
380 379
380 /* Check if we should still send the heartbeat or reschedule */
381 elapsed = jiffies - transport->last_time_sent;
382 timeout = sctp_transport_timeout(transport);
383 if (elapsed < timeout) {
384 elapsed = timeout - elapsed;
385 if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
386 sctp_transport_hold(transport);
387 goto out_unlock;
388 }
389
381 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, 390 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
382 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), 391 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
383 asoc->state, asoc->ep, asoc, 392 asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
507 0); 516 0);
508 517
509 /* Update the hb timer to resend a heartbeat every rto */ 518 /* Update the hb timer to resend a heartbeat every rto */
510 sctp_cmd_hb_timer_update(commands, transport); 519 sctp_transport_reset_hb_timer(transport);
511 } 520 }
512 521
513 if (transport->state != SCTP_INACTIVE && 522 if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
634 * hold a reference on the transport to make sure none of 643 * hold a reference on the transport to make sure none of
635 * the needed data structures go away. 644 * the needed data structures go away.
636 */ 645 */
637 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { 646 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
638 647 sctp_transport_reset_hb_timer(t);
639 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
640 sctp_transport_hold(t);
641 }
642} 648}
643 649
644static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, 650static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
669} 675}
670 676
671 677
672/* Helper function to update the heartbeat timer. */
673static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
674 struct sctp_transport *t)
675{
676 /* Update the heartbeat timer. */
677 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
678 sctp_transport_hold(t);
679}
680
681/* Helper function to handle the reception of an HEARTBEAT ACK. */ 678/* Helper function to handle the reception of an HEARTBEAT ACK. */
682static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, 679static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
683 struct sctp_association *asoc, 680 struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
742 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); 739 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
743 740
744 /* Update the heartbeat timer. */ 741 /* Update the heartbeat timer. */
745 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) 742 sctp_transport_reset_hb_timer(t);
746 sctp_transport_hold(t);
747 743
748 if (was_unconfirmed && asoc->peer.transport_count == 1) 744 if (was_unconfirmed && asoc->peer.transport_count == 1)
749 sctp_transport_immediate_rtx(t); 745 sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1614 1610
1615 case SCTP_CMD_HB_TIMER_UPDATE: 1611 case SCTP_CMD_HB_TIMER_UPDATE:
1616 t = cmd->obj.transport; 1612 t = cmd->obj.transport;
1617 sctp_cmd_hb_timer_update(commands, t); 1613 sctp_transport_reset_hb_timer(t);
1618 break; 1614 break;
1619 1615
1620 case SCTP_CMD_HB_TIMERS_STOP: 1616 case SCTP_CMD_HB_TIMERS_STOP:
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 9b6b48c7524e..81b86678be4d 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
183/* Start T3_rtx timer if it is not already running and update the heartbeat 183/* Start T3_rtx timer if it is not already running and update the heartbeat
184 * timer. This routine is called every time a DATA chunk is sent. 184 * timer. This routine is called every time a DATA chunk is sent.
185 */ 185 */
186void sctp_transport_reset_timers(struct sctp_transport *transport) 186void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
187{ 187{
188 /* RFC 2960 6.3.2 Retransmission Timer Rules 188 /* RFC 2960 6.3.2 Retransmission Timer Rules
189 * 189 *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
197 if (!mod_timer(&transport->T3_rtx_timer, 197 if (!mod_timer(&transport->T3_rtx_timer,
198 jiffies + transport->rto)) 198 jiffies + transport->rto))
199 sctp_transport_hold(transport); 199 sctp_transport_hold(transport);
200}
201
202void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
203{
204 unsigned long expires;
200 205
201 /* When a data chunk is sent, reset the heartbeat interval. */ 206 /* When a data chunk is sent, reset the heartbeat interval. */
202 if (!mod_timer(&transport->hb_timer, 207 expires = jiffies + sctp_transport_timeout(transport);
203 sctp_transport_timeout(transport))) 208 if (time_before(transport->hb_timer.expires, expires) &&
204 sctp_transport_hold(transport); 209 !mod_timer(&transport->hb_timer,
210 expires + prandom_u32_max(transport->rto)))
211 sctp_transport_hold(transport);
205} 212}
206 213
207/* This transport has been assigned to an association. 214/* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
595unsigned long sctp_transport_timeout(struct sctp_transport *trans) 602unsigned long sctp_transport_timeout(struct sctp_transport *trans)
596{ 603{
597 /* RTO + timer slack +/- 50% of RTO */ 604 /* RTO + timer slack +/- 50% of RTO */
598 unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto); 605 unsigned long timeout = trans->rto >> 1;
599 606
600 if (trans->state != SCTP_UNCONFIRMED && 607 if (trans->state != SCTP_UNCONFIRMED &&
601 trans->state != SCTP_PF) 608 trans->state != SCTP_PF)
602 timeout += trans->hbinterval; 609 timeout += trans->hbinterval;
603 610
604 return timeout + jiffies; 611 return timeout;
605} 612}
606 613
607/* Reset transport variables to their initial values */ 614/* Reset transport variables to their initial values */
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 2b9b98f1c2ff..b7e01d88bdc5 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev,
305 if (err && err != -EOPNOTSUPP) 305 if (err && err != -EOPNOTSUPP)
306 netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", 306 netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
307 err, attr->id); 307 err, attr->id);
308 if (attr->complete)
309 attr->complete(dev, err, attr->complete_priv);
308} 310}
309 311
310static int switchdev_port_attr_set_defer(struct net_device *dev, 312static int switchdev_port_attr_set_defer(struct net_device *dev,
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
434 if (err && err != -EOPNOTSUPP) 436 if (err && err != -EOPNOTSUPP)
435 netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", 437 netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
436 err, obj->id); 438 err, obj->id);
439 if (obj->complete)
440 obj->complete(dev, err, obj->complete_priv);
437} 441}
438 442
439static int switchdev_port_obj_add_defer(struct net_device *dev, 443static int switchdev_port_obj_add_defer(struct net_device *dev,
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev,
502 if (err && err != -EOPNOTSUPP) 506 if (err && err != -EOPNOTSUPP)
503 netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", 507 netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
504 err, obj->id); 508 err, obj->id);
509 if (obj->complete)
510 obj->complete(dev, err, obj->complete_priv);
505} 511}
506 512
507static int switchdev_port_obj_del_defer(struct net_device *dev, 513static int switchdev_port_obj_del_defer(struct net_device *dev,
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a842870c52..e2bdb07a49a2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
69 if (err) 69 if (err)
70 goto out_nametbl; 70 goto out_nametbl;
71 71
72 INIT_LIST_HEAD(&tn->dist_queue);
72 err = tipc_topsrv_start(net); 73 err = tipc_topsrv_start(net);
73 if (err) 74 if (err)
74 goto out_subscr; 75 goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5504d63503df..eff58dc53aa1 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@ struct tipc_net {
103 spinlock_t nametbl_lock; 103 spinlock_t nametbl_lock;
104 struct name_table *nametbl; 104 struct name_table *nametbl;
105 105
106 /* Name dist queue */
107 struct list_head dist_queue;
108
106 /* Topology subscription server */ 109 /* Topology subscription server */
107 struct tipc_server *topsrv; 110 struct tipc_server *topsrv;
108 atomic_t subscription_count; 111 atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ebe9d0ff6e9e..6b626a64b517 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
40 40
41int sysctl_tipc_named_timeout __read_mostly = 2000; 41int sysctl_tipc_named_timeout __read_mostly = 2000;
42 42
43/**
44 * struct tipc_dist_queue - queue holding deferred name table updates
45 */
46static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
47
48struct distr_queue_item { 43struct distr_queue_item {
49 struct distr_item i; 44 struct distr_item i;
50 u32 dtype; 45 u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
229 kfree_rcu(p, rcu); 224 kfree_rcu(p, rcu);
230} 225}
231 226
227/**
228 * tipc_dist_queue_purge - remove deferred updates from a node that went down
229 */
230static void tipc_dist_queue_purge(struct net *net, u32 addr)
231{
232 struct tipc_net *tn = net_generic(net, tipc_net_id);
233 struct distr_queue_item *e, *tmp;
234
235 spin_lock_bh(&tn->nametbl_lock);
236 list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
237 if (e->node != addr)
238 continue;
239 list_del(&e->next);
240 kfree(e);
241 }
242 spin_unlock_bh(&tn->nametbl_lock);
243}
244
232void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) 245void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
233{ 246{
234 struct publication *publ, *tmp; 247 struct publication *publ, *tmp;
235 248
236 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) 249 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
237 tipc_publ_purge(net, publ, addr); 250 tipc_publ_purge(net, publ, addr);
251 tipc_dist_queue_purge(net, addr);
238} 252}
239 253
240/** 254/**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
279 * tipc_named_add_backlog - add a failed name table update to the backlog 293 * tipc_named_add_backlog - add a failed name table update to the backlog
280 * 294 *
281 */ 295 */
282static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) 296static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
297 u32 type, u32 node)
283{ 298{
284 struct distr_queue_item *e; 299 struct distr_queue_item *e;
300 struct tipc_net *tn = net_generic(net, tipc_net_id);
285 unsigned long now = get_jiffies_64(); 301 unsigned long now = get_jiffies_64();
286 302
287 e = kzalloc(sizeof(*e), GFP_ATOMIC); 303 e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
291 e->node = node; 307 e->node = node;
292 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); 308 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
293 memcpy(e, i, sizeof(*i)); 309 memcpy(e, i, sizeof(*i));
294 list_add_tail(&e->next, &tipc_dist_queue); 310 list_add_tail(&e->next, &tn->dist_queue);
295} 311}
296 312
297/** 313/**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
301void tipc_named_process_backlog(struct net *net) 317void tipc_named_process_backlog(struct net *net)
302{ 318{
303 struct distr_queue_item *e, *tmp; 319 struct distr_queue_item *e, *tmp;
320 struct tipc_net *tn = net_generic(net, tipc_net_id);
304 char addr[16]; 321 char addr[16];
305 unsigned long now = get_jiffies_64(); 322 unsigned long now = get_jiffies_64();
306 323
307 list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { 324 list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
308 if (time_after(e->expires, now)) { 325 if (time_after(e->expires, now)) {
309 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) 326 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
310 continue; 327 continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
344 node = msg_orignode(msg); 361 node = msg_orignode(msg);
345 while (count--) { 362 while (count--) {
346 if (!tipc_update_nametbl(net, item, node, mtype)) 363 if (!tipc_update_nametbl(net, item, node, mtype))
347 tipc_named_add_backlog(item, mtype, node); 364 tipc_named_add_backlog(net, item, mtype, node);
348 item++; 365 item++;
349 } 366 }
350 kfree_skb(skb); 367 kfree_skb(skb);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 662bdd20a748..56214736fe88 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
1735 /* Retrieve the head sk_buff from the socket's receive queue. */ 1735 /* Retrieve the head sk_buff from the socket's receive queue. */
1736 err = 0; 1736 err = 0;
1737 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); 1737 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
1738 if (err)
1739 return err;
1740
1741 if (!skb) 1738 if (!skb)
1742 return -EAGAIN; 1739 return err;
1743 1740
1744 dg = (struct vmci_datagram *)skb->data; 1741 dg = (struct vmci_datagram *)skb->data;
1745 if (!dg) 1742 if (!dg)
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
2154 2151
2155MODULE_AUTHOR("VMware, Inc."); 2152MODULE_AUTHOR("VMware, Inc.");
2156MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); 2153MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2157MODULE_VERSION("1.0.3.0-k"); 2154MODULE_VERSION("1.0.4.0-k");
2158MODULE_LICENSE("GPL v2"); 2155MODULE_LICENSE("GPL v2");
2159MODULE_ALIAS("vmware_vsock"); 2156MODULE_ALIAS("vmware_vsock");
2160MODULE_ALIAS_NETPROTO(PF_VSOCK); 2157MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98c924260b3d..056a7307862b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
13216 struct wireless_dev *wdev; 13216 struct wireless_dev *wdev;
13217 struct cfg80211_beacon_registration *reg, *tmp; 13217 struct cfg80211_beacon_registration *reg, *tmp;
13218 13218
13219 if (state != NETLINK_URELEASE) 13219 if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
13220 return NOTIFY_DONE; 13220 return NOTIFY_DONE;
13221 13221
13222 rcu_read_lock(); 13222 rcu_read_lock();
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 023cc4cad5c1..626f3bb24c55 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -104,12 +104,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);
104 */ 104 */
105void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus) 105void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
106{ 106{
107 struct hdac_stream *s; 107 struct hdac_stream *s, *_s;
108 struct hdac_ext_stream *stream; 108 struct hdac_ext_stream *stream;
109 struct hdac_bus *bus = ebus_to_hbus(ebus); 109 struct hdac_bus *bus = ebus_to_hbus(ebus);
110 110
111 while (!list_empty(&bus->stream_list)) { 111 list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
112 s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
113 stream = stream_to_hdac_ext_stream(s); 112 stream = stream_to_hdac_ext_stream(s);
114 snd_hdac_ext_stream_decouple(ebus, stream, false); 113 snd_hdac_ext_stream_decouple(ebus, stream, false);
115 list_del(&s->list); 114 list_del(&s->list);
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index d1a4d6973330..03c9872c31cf 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -299,13 +299,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
299int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid, 299int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
300 int parm) 300 int parm)
301{ 301{
302 int val; 302 unsigned int cmd, val;
303 303
304 if (codec->regmap) 304 cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
305 regcache_cache_bypass(codec->regmap, true); 305 if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
306 val = snd_hdac_read_parm(codec, nid, parm); 306 return -1;
307 if (codec->regmap)
308 regcache_cache_bypass(codec->regmap, false);
309 return val; 307 return val;
310} 308}
311EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached); 309EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 54babe1c0b16..607bbeaebddf 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -20,6 +20,7 @@
20#include <sound/core.h> 20#include <sound/core.h>
21#include <sound/hdaudio.h> 21#include <sound/hdaudio.h>
22#include <sound/hda_i915.h> 22#include <sound/hda_i915.h>
23#include <sound/hda_register.h>
23 24
24static struct i915_audio_component *hdac_acomp; 25static struct i915_audio_component *hdac_acomp;
25 26
@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
97} 98}
98EXPORT_SYMBOL_GPL(snd_hdac_display_power); 99EXPORT_SYMBOL_GPL(snd_hdac_display_power);
99 100
101#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
102 ((pci)->device == 0x0c0c) || \
103 ((pci)->device == 0x0d0c) || \
104 ((pci)->device == 0x160c))
105
100/** 106/**
101 * snd_hdac_get_display_clk - Get CDCLK in kHz 107 * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
102 * @bus: HDA core bus 108 * @bus: HDA core bus
103 * 109 *
104 * This function is supposed to be used only by a HD-audio controller 110 * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
105 * driver that needs the interaction with i915 graphics. 111 * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
112 * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
113 * BCLK = CDCLK * M / N
114 * The values will be lost when the display power well is disabled and need to
115 * be restored to avoid abnormal playback speed.
106 * 116 *
107 * This function queries CDCLK value in kHz from the graphics driver and 117 * Call this function at initializing and changing power well, as well as
108 * returns the value. A negative code is returned in error. 118 * at ELD notifier for the hotplug.
109 */ 119 */
110int snd_hdac_get_display_clk(struct hdac_bus *bus) 120void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
111{ 121{
112 struct i915_audio_component *acomp = bus->audio_component; 122 struct i915_audio_component *acomp = bus->audio_component;
123 struct pci_dev *pci = to_pci_dev(bus->dev);
124 int cdclk_freq;
125 unsigned int bclk_m, bclk_n;
126
127 if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
128 return; /* only for i915 binding */
129 if (!CONTROLLER_IN_GPU(pci))
130 return; /* only HSW/BDW */
131
132 cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
133 switch (cdclk_freq) {
134 case 337500:
135 bclk_m = 16;
136 bclk_n = 225;
137 break;
138
139 case 450000:
140 default: /* default CDCLK 450MHz */
141 bclk_m = 4;
142 bclk_n = 75;
143 break;
144
145 case 540000:
146 bclk_m = 4;
147 bclk_n = 90;
148 break;
149
150 case 675000:
151 bclk_m = 8;
152 bclk_n = 225;
153 break;
154 }
113 155
114 if (!acomp || !acomp->ops) 156 snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
115 return -ENODEV; 157 snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
116
117 return acomp->ops->get_cdclk_freq(acomp->dev);
118} 158}
119EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); 159EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
120 160
121/* There is a fixed mapping between audio pin node and display port 161/* There is a fixed mapping between audio pin node and display port
122 * on current Intel platforms: 162 * on current Intel platforms:
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index bdbcd6b75ff6..87041ddd29cb 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -453,14 +453,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
453EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw); 453EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
454 454
455static int reg_raw_read(struct hdac_device *codec, unsigned int reg, 455static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
456 unsigned int *val) 456 unsigned int *val, bool uncached)
457{ 457{
458 if (!codec->regmap) 458 if (uncached || !codec->regmap)
459 return hda_reg_read(codec, reg, val); 459 return hda_reg_read(codec, reg, val);
460 else 460 else
461 return regmap_read(codec->regmap, reg, val); 461 return regmap_read(codec->regmap, reg, val);
462} 462}
463 463
464static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
465 unsigned int reg, unsigned int *val,
466 bool uncached)
467{
468 int err;
469
470 err = reg_raw_read(codec, reg, val, uncached);
471 if (err == -EAGAIN) {
472 err = snd_hdac_power_up_pm(codec);
473 if (!err)
474 err = reg_raw_read(codec, reg, val, uncached);
475 snd_hdac_power_down_pm(codec);
476 }
477 return err;
478}
479
464/** 480/**
465 * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt 481 * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
466 * @codec: the codec object 482 * @codec: the codec object
@@ -472,19 +488,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
472int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg, 488int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
473 unsigned int *val) 489 unsigned int *val)
474{ 490{
475 int err; 491 return __snd_hdac_regmap_read_raw(codec, reg, val, false);
476
477 err = reg_raw_read(codec, reg, val);
478 if (err == -EAGAIN) {
479 err = snd_hdac_power_up_pm(codec);
480 if (!err)
481 err = reg_raw_read(codec, reg, val);
482 snd_hdac_power_down_pm(codec);
483 }
484 return err;
485} 492}
486EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw); 493EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
487 494
495/* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the
496 * cache but always via hda verbs.
497 */
498int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
499 unsigned int reg, unsigned int *val)
500{
501 return __snd_hdac_regmap_read_raw(codec, reg, val, true);
502}
503
488/** 504/**
489 * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt 505 * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
490 * @codec: the codec object 506 * @codec: the codec object
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 7ca5b89f088a..dfaf1a93fb8a 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
826 bool allow_powerdown) 826 bool allow_powerdown)
827{ 827{
828 hda_nid_t nid, changed = 0; 828 hda_nid_t nid, changed = 0;
829 int i, state; 829 int i, state, power;
830 830
831 for (i = 0; i < path->depth; i++) { 831 for (i = 0; i < path->depth; i++) {
832 nid = path->path[i]; 832 nid = path->path[i];
@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
838 state = AC_PWRST_D0; 838 state = AC_PWRST_D0;
839 else 839 else
840 state = AC_PWRST_D3; 840 state = AC_PWRST_D3;
841 if (!snd_hda_check_power_state(codec, nid, state)) { 841 power = snd_hda_codec_read(codec, nid, 0,
842 AC_VERB_GET_POWER_STATE, 0);
843 if (power != (state | (state << 4))) {
842 snd_hda_codec_write(codec, nid, 0, 844 snd_hda_codec_write(codec, nid, 0,
843 AC_VERB_SET_POWER_STATE, state); 845 AC_VERB_SET_POWER_STATE, state);
844 changed = nid; 846 changed = nid;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b680be0e937d..9a0d1445ca5c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
857#define azx_del_card_list(chip) /* NOP */ 857#define azx_del_card_list(chip) /* NOP */
858#endif /* CONFIG_PM */ 858#endif /* CONFIG_PM */
859 859
860/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
861 * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
862 * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
863 * BCLK = CDCLK * M / N
864 * The values will be lost when the display power well is disabled and need to
865 * be restored to avoid abnormal playback speed.
866 */
867static void haswell_set_bclk(struct hda_intel *hda)
868{
869 struct azx *chip = &hda->chip;
870 int cdclk_freq;
871 unsigned int bclk_m, bclk_n;
872
873 if (!hda->need_i915_power)
874 return;
875
876 cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
877 switch (cdclk_freq) {
878 case 337500:
879 bclk_m = 16;
880 bclk_n = 225;
881 break;
882
883 case 450000:
884 default: /* default CDCLK 450MHz */
885 bclk_m = 4;
886 bclk_n = 75;
887 break;
888
889 case 540000:
890 bclk_m = 4;
891 bclk_n = 90;
892 break;
893
894 case 675000:
895 bclk_m = 8;
896 bclk_n = 225;
897 break;
898 }
899
900 azx_writew(chip, HSW_EM4, bclk_m);
901 azx_writew(chip, HSW_EM5, bclk_n);
902}
903
904#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) 860#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
905/* 861/*
906 * power management 862 * power management
@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
958 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 914 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
959 && hda->need_i915_power) { 915 && hda->need_i915_power) {
960 snd_hdac_display_power(azx_bus(chip), true); 916 snd_hdac_display_power(azx_bus(chip), true);
961 haswell_set_bclk(hda); 917 snd_hdac_i915_set_bclk(azx_bus(chip));
962 } 918 }
963 if (chip->msi) 919 if (chip->msi)
964 if (pci_enable_msi(pci) < 0) 920 if (pci_enable_msi(pci) < 0)
@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
1058 bus = azx_bus(chip); 1014 bus = azx_bus(chip);
1059 if (hda->need_i915_power) { 1015 if (hda->need_i915_power) {
1060 snd_hdac_display_power(bus, true); 1016 snd_hdac_display_power(bus, true);
1061 haswell_set_bclk(hda); 1017 snd_hdac_i915_set_bclk(bus);
1062 } else { 1018 } else {
1063 /* toggle codec wakeup bit for STATESTS read */ 1019 /* toggle codec wakeup bit for STATESTS read */
1064 snd_hdac_set_codec_wakeup(bus, true); 1020 snd_hdac_set_codec_wakeup(bus, true);
@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
1796 /* initialize chip */ 1752 /* initialize chip */
1797 azx_init_pci(chip); 1753 azx_init_pci(chip);
1798 1754
1799 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { 1755 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
1800 struct hda_intel *hda; 1756 snd_hdac_i915_set_bclk(bus);
1801
1802 hda = container_of(chip, struct hda_intel, chip);
1803 haswell_set_bclk(hda);
1804 }
1805 1757
1806 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0); 1758 hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
1807 1759
@@ -2232,6 +2184,9 @@ static const struct pci_device_id azx_ids[] = {
2232 /* Broxton-P(Apollolake) */ 2184 /* Broxton-P(Apollolake) */
2233 { PCI_DEVICE(0x8086, 0x5a98), 2185 { PCI_DEVICE(0x8086, 0x5a98),
2234 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, 2186 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
2187 /* Broxton-T */
2188 { PCI_DEVICE(0x8086, 0x1a98),
2189 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
2235 /* Haswell */ 2190 /* Haswell */
2236 { PCI_DEVICE(0x8086, 0x0a0c), 2191 { PCI_DEVICE(0x8086, 0x0a0c),
2237 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, 2192 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a47e8ae0eb30..80bbadc83721 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
361{ 361{
362 struct cs_spec *spec = codec->spec; 362 struct cs_spec *spec = codec->spec;
363 int err; 363 int err;
364 int i;
364 365
365 err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0); 366 err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
366 if (err < 0) 367 if (err < 0)
@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
370 if (err < 0) 371 if (err < 0)
371 return err; 372 return err;
372 373
374 /* keep the ADCs powered up when it's dynamically switchable */
375 if (spec->gen.dyn_adc_switch) {
376 unsigned int done = 0;
377 for (i = 0; i < spec->gen.input_mux.num_items; i++) {
378 int idx = spec->gen.dyn_adc_idx[i];
379 if (done & (1 << idx))
380 continue;
381 snd_hda_gen_fix_pin_power(codec,
382 spec->gen.adc_nids[idx]);
383 done |= 1 << idx;
384 }
385 }
386
373 return 0; 387 return 0;
374} 388}
375 389
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index c83c1a8d9742..1483f85999ec 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1858,6 +1858,8 @@ static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
1858 struct hdmi_spec *spec = codec->spec; 1858 struct hdmi_spec *spec = codec->spec;
1859 struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx); 1859 struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
1860 1860
1861 if (!per_pin)
1862 return;
1861 mutex_lock(&per_pin->lock); 1863 mutex_lock(&per_pin->lock);
1862 per_pin->chmap_set = true; 1864 per_pin->chmap_set = true;
1863 memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap)); 1865 memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
@@ -2230,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
2230 if (atomic_read(&(codec)->core.in_pm)) 2232 if (atomic_read(&(codec)->core.in_pm))
2231 return; 2233 return;
2232 2234
2235 snd_hdac_i915_set_bclk(&codec->bus->core);
2233 check_presence_and_report(codec, pin_nid); 2236 check_presence_and_report(codec, pin_nid);
2234} 2237}
2235 2238
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1402ba954b3d..ac4490a96863 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5449 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5449 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5450 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5450 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5451 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5451 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5452 SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5452 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK), 5453 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5453 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5454 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5454 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5455 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5583 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), 5584 SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
5584 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), 5585 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5585 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), 5586 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5587 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5586 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), 5588 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5587 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5589 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5588 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), 5590 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index c5194f5b150a..d7e71f309299 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
1341 } 1341 }
1342 1342
1343 pcxhr_msg_thread(mgr); 1343 pcxhr_msg_thread(mgr);
1344 mutex_unlock(&mgr->lock);
1344 return IRQ_HANDLED; 1345 return IRQ_HANDLED;
1345} 1346}
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 649e92a252ae..7ef3a0c16478 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -629,6 +629,7 @@ config SND_SOC_RT5514
629 629
630config SND_SOC_RT5616 630config SND_SOC_RT5616
631 tristate "Realtek RT5616 CODEC" 631 tristate "Realtek RT5616 CODEC"
632 depends on I2C
632 633
633config SND_SOC_RT5631 634config SND_SOC_RT5631
634 tristate "Realtek ALC5631/RT5631 CODEC" 635 tristate "Realtek ALC5631/RT5631 CODEC"
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 92d22a018d68..83959312f7a0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -249,6 +249,18 @@ int arizona_init_spk(struct snd_soc_codec *codec)
249} 249}
250EXPORT_SYMBOL_GPL(arizona_init_spk); 250EXPORT_SYMBOL_GPL(arizona_init_spk);
251 251
252int arizona_free_spk(struct snd_soc_codec *codec)
253{
254 struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
255 struct arizona *arizona = priv->arizona;
256
257 arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
258 arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
259
260 return 0;
261}
262EXPORT_SYMBOL_GPL(arizona_free_spk);
263
252static const struct snd_soc_dapm_route arizona_mono_routes[] = { 264static const struct snd_soc_dapm_route arizona_mono_routes[] = {
253 { "OUT1R", NULL, "OUT1L" }, 265 { "OUT1R", NULL, "OUT1L" },
254 { "OUT2R", NULL, "OUT2L" }, 266 { "OUT2R", NULL, "OUT2L" },
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 1ea8e4ecf8d4..ce0531b8c632 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -307,6 +307,8 @@ extern int arizona_init_spk(struct snd_soc_codec *codec);
307extern int arizona_init_gpio(struct snd_soc_codec *codec); 307extern int arizona_init_gpio(struct snd_soc_codec *codec);
308extern int arizona_init_mono(struct snd_soc_codec *codec); 308extern int arizona_init_mono(struct snd_soc_codec *codec);
309 309
310extern int arizona_free_spk(struct snd_soc_codec *codec);
311
310extern int arizona_init_dai(struct arizona_priv *priv, int dai); 312extern int arizona_init_dai(struct arizona_priv *priv, int dai);
311 313
312int arizona_set_output_mode(struct snd_soc_codec *codec, int output, 314int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 44c30fe3e315..287d13740be4 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -274,7 +274,9 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
274 if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0) 274 if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
275 pdata->sdout_share = val; 275 pdata->sdout_share = val;
276 276
277 of_property_read_u32(np, "cirrus,boost-manager", &val); 277 if (of_property_read_u32(np, "cirrus,boost-manager", &val))
278 val = -1u;
279
278 switch (val) { 280 switch (val) {
279 case CS35L32_BOOST_MGR_AUTO: 281 case CS35L32_BOOST_MGR_AUTO:
280 case CS35L32_BOOST_MGR_AUTO_AUDIO: 282 case CS35L32_BOOST_MGR_AUTO_AUDIO:
@@ -282,13 +284,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
282 case CS35L32_BOOST_MGR_FIXED: 284 case CS35L32_BOOST_MGR_FIXED:
283 pdata->boost_mng = val; 285 pdata->boost_mng = val;
284 break; 286 break;
287 case -1u:
285 default: 288 default:
286 dev_err(&i2c_client->dev, 289 dev_err(&i2c_client->dev,
287 "Wrong cirrus,boost-manager DT value %d\n", val); 290 "Wrong cirrus,boost-manager DT value %d\n", val);
288 pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS; 291 pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
289 } 292 }
290 293
291 of_property_read_u32(np, "cirrus,sdout-datacfg", &val); 294 if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
295 val = -1u;
292 switch (val) { 296 switch (val) {
293 case CS35L32_DATA_CFG_LR_VP: 297 case CS35L32_DATA_CFG_LR_VP:
294 case CS35L32_DATA_CFG_LR_STAT: 298 case CS35L32_DATA_CFG_LR_STAT:
@@ -296,13 +300,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
296 case CS35L32_DATA_CFG_LR_VPSTAT: 300 case CS35L32_DATA_CFG_LR_VPSTAT:
297 pdata->sdout_datacfg = val; 301 pdata->sdout_datacfg = val;
298 break; 302 break;
303 case -1u:
299 default: 304 default:
300 dev_err(&i2c_client->dev, 305 dev_err(&i2c_client->dev,
301 "Wrong cirrus,sdout-datacfg DT value %d\n", val); 306 "Wrong cirrus,sdout-datacfg DT value %d\n", val);
302 pdata->sdout_datacfg = CS35L32_DATA_CFG_LR; 307 pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
303 } 308 }
304 309
305 of_property_read_u32(np, "cirrus,battery-threshold", &val); 310 if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
311 val = -1u;
306 switch (val) { 312 switch (val) {
307 case CS35L32_BATT_THRESH_3_1V: 313 case CS35L32_BATT_THRESH_3_1V:
308 case CS35L32_BATT_THRESH_3_2V: 314 case CS35L32_BATT_THRESH_3_2V:
@@ -310,13 +316,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
310 case CS35L32_BATT_THRESH_3_4V: 316 case CS35L32_BATT_THRESH_3_4V:
311 pdata->batt_thresh = val; 317 pdata->batt_thresh = val;
312 break; 318 break;
319 case -1u:
313 default: 320 default:
314 dev_err(&i2c_client->dev, 321 dev_err(&i2c_client->dev,
315 "Wrong cirrus,battery-threshold DT value %d\n", val); 322 "Wrong cirrus,battery-threshold DT value %d\n", val);
316 pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V; 323 pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
317 } 324 }
318 325
319 of_property_read_u32(np, "cirrus,battery-recovery", &val); 326 if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
327 val = -1u;
320 switch (val) { 328 switch (val) {
321 case CS35L32_BATT_RECOV_3_1V: 329 case CS35L32_BATT_RECOV_3_1V:
322 case CS35L32_BATT_RECOV_3_2V: 330 case CS35L32_BATT_RECOV_3_2V:
@@ -326,6 +334,7 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
326 case CS35L32_BATT_RECOV_3_6V: 334 case CS35L32_BATT_RECOV_3_6V:
327 pdata->batt_recov = val; 335 pdata->batt_recov = val;
328 break; 336 break;
337 case -1u:
329 default: 338 default:
330 dev_err(&i2c_client->dev, 339 dev_err(&i2c_client->dev,
331 "Wrong cirrus,battery-recovery DT value %d\n", val); 340 "Wrong cirrus,battery-recovery DT value %d\n", val);
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 576087bda330..00e9b6fc1b5c 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1108,6 +1108,9 @@ static int cs47l24_codec_remove(struct snd_soc_codec *codec)
1108 priv->core.arizona->dapm = NULL; 1108 priv->core.arizona->dapm = NULL;
1109 1109
1110 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 1110 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
1111
1112 arizona_free_spk(codec);
1113
1111 return 0; 1114 return 0;
1112} 1115}
1113 1116
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 26f9459cb3bc..aaa038ffc8a5 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1420,32 +1420,39 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
1420} 1420}
1421 1421
1422#ifdef CONFIG_PM 1422#ifdef CONFIG_PM
1423static int hdmi_codec_resume(struct snd_soc_codec *codec) 1423static int hdmi_codec_prepare(struct device *dev)
1424{ 1424{
1425 struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec); 1425 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1426 struct hdac_device *hdac = &edev->hdac;
1427
1428 pm_runtime_get_sync(&edev->hdac.dev);
1429
1430 /*
1431 * Power down afg.
1432 * codec_read is preferred over codec_write to set the power state.
1433 * This way verb is send to set the power state and response
1434 * is received. So setting power state is ensured without using loop
1435 * to read the state.
1436 */
1437 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1438 AC_PWRST_D3);
1439
1440 return 0;
1441}
1442
1443static void hdmi_codec_complete(struct device *dev)
1444{
1445 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1426 struct hdac_hdmi_priv *hdmi = edev->private_data; 1446 struct hdac_hdmi_priv *hdmi = edev->private_data;
1427 struct hdac_hdmi_pin *pin; 1447 struct hdac_hdmi_pin *pin;
1428 struct hdac_device *hdac = &edev->hdac; 1448 struct hdac_device *hdac = &edev->hdac;
1429 struct hdac_bus *bus = hdac->bus;
1430 int err;
1431 unsigned long timeout;
1432
1433 hdac_hdmi_skl_enable_all_pins(&edev->hdac);
1434 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1435 1449
1436 /* Power up afg */ 1450 /* Power up afg */
1437 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) { 1451 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1438 1452 AC_PWRST_D0);
1439 snd_hdac_codec_write(hdac, hdac->afg, 0,
1440 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
1441 1453
1442 /* Wait till power state is set to D0 */ 1454 hdac_hdmi_skl_enable_all_pins(&edev->hdac);
1443 timeout = jiffies + msecs_to_jiffies(1000); 1455 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1444 while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
1445 && time_before(jiffies, timeout)) {
1446 msleep(50);
1447 }
1448 }
1449 1456
1450 /* 1457 /*
1451 * As the ELD notify callback request is not entertained while the 1458 * As the ELD notify callback request is not entertained while the
@@ -1455,28 +1462,16 @@ static int hdmi_codec_resume(struct snd_soc_codec *codec)
1455 list_for_each_entry(pin, &hdmi->pin_list, head) 1462 list_for_each_entry(pin, &hdmi->pin_list, head)
1456 hdac_hdmi_present_sense(pin, 1); 1463 hdac_hdmi_present_sense(pin, 1);
1457 1464
1458 /* 1465 pm_runtime_put_sync(&edev->hdac.dev);
1459 * Codec power is turned ON during controller resume.
1460 * Turn it OFF here
1461 */
1462 err = snd_hdac_display_power(bus, false);
1463 if (err < 0) {
1464 dev_err(bus->dev,
1465 "Cannot turn OFF display power on i915, err: %d\n",
1466 err);
1467 return err;
1468 }
1469
1470 return 0;
1471} 1466}
1472#else 1467#else
1473#define hdmi_codec_resume NULL 1468#define hdmi_codec_prepare NULL
1469#define hdmi_codec_complete NULL
1474#endif 1470#endif
1475 1471
1476static struct snd_soc_codec_driver hdmi_hda_codec = { 1472static struct snd_soc_codec_driver hdmi_hda_codec = {
1477 .probe = hdmi_codec_probe, 1473 .probe = hdmi_codec_probe,
1478 .remove = hdmi_codec_remove, 1474 .remove = hdmi_codec_remove,
1479 .resume = hdmi_codec_resume,
1480 .idle_bias_off = true, 1475 .idle_bias_off = true,
1481}; 1476};
1482 1477
@@ -1561,7 +1556,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
1561 struct hdac_ext_device *edev = to_hda_ext_device(dev); 1556 struct hdac_ext_device *edev = to_hda_ext_device(dev);
1562 struct hdac_device *hdac = &edev->hdac; 1557 struct hdac_device *hdac = &edev->hdac;
1563 struct hdac_bus *bus = hdac->bus; 1558 struct hdac_bus *bus = hdac->bus;
1564 unsigned long timeout;
1565 int err; 1559 int err;
1566 1560
1567 dev_dbg(dev, "Enter: %s\n", __func__); 1561 dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1570,20 +1564,15 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
1570 if (!bus) 1564 if (!bus)
1571 return 0; 1565 return 0;
1572 1566
1573 /* Power down afg */ 1567 /*
1574 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) { 1568 * Power down afg.
1575 snd_hdac_codec_write(hdac, hdac->afg, 0, 1569 * codec_read is preferred over codec_write to set the power state.
1576 AC_VERB_SET_POWER_STATE, AC_PWRST_D3); 1570 * This way verb is send to set the power state and response
1577 1571 * is received. So setting power state is ensured without using loop
1578 /* Wait till power state is set to D3 */ 1572 * to read the state.
1579 timeout = jiffies + msecs_to_jiffies(1000); 1573 */
1580 while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3) 1574 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1581 && time_before(jiffies, timeout)) { 1575 AC_PWRST_D3);
1582
1583 msleep(50);
1584 }
1585 }
1586
1587 err = snd_hdac_display_power(bus, false); 1576 err = snd_hdac_display_power(bus, false);
1588 if (err < 0) { 1577 if (err < 0) {
1589 dev_err(bus->dev, "Cannot turn on display power on i915\n"); 1578 dev_err(bus->dev, "Cannot turn on display power on i915\n");
@@ -1616,9 +1605,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
1616 hdac_hdmi_skl_enable_dp12(&edev->hdac); 1605 hdac_hdmi_skl_enable_dp12(&edev->hdac);
1617 1606
1618 /* Power up afg */ 1607 /* Power up afg */
1619 if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) 1608 snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
1620 snd_hdac_codec_write(hdac, hdac->afg, 0, 1609 AC_PWRST_D0);
1621 AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
1622 1610
1623 return 0; 1611 return 0;
1624} 1612}
@@ -1629,6 +1617,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
1629 1617
1630static const struct dev_pm_ops hdac_hdmi_pm = { 1618static const struct dev_pm_ops hdac_hdmi_pm = {
1631 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) 1619 SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
1620 .prepare = hdmi_codec_prepare,
1621 .complete = hdmi_codec_complete,
1632}; 1622};
1633 1623
1634static const struct hda_device_id hdmi_list[] = { 1624static const struct hda_device_id hdmi_list[] = {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 1c8729984c2b..683769f0f246 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -343,9 +343,12 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = {
343 SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL, 343 SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
344 0), 344 0),
345 345
346 /* ADC for button press detection */ 346 /* ADC for button press detection. A dapm supply widget is used to
347 SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL, 347 * prevent dapm_power_widgets keeping the codec at SND_SOC_BIAS_ON
348 NAU8825_SAR_ADC_EN_SFT, 0), 348 * during suspend.
349 */
350 SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
351 NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
349 352
350 SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0), 353 SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
351 SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0), 354 SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
@@ -607,6 +610,16 @@ static bool nau8825_is_jack_inserted(struct regmap *regmap)
607 610
608static void nau8825_restart_jack_detection(struct regmap *regmap) 611static void nau8825_restart_jack_detection(struct regmap *regmap)
609{ 612{
613 /* Chip needs one FSCLK cycle in order to generate interrupts,
614 * as we cannot guarantee one will be provided by the system. Turning
615 * master mode on then off enables us to generate that FSCLK cycle
616 * with a minimum of contention on the clock bus.
617 */
618 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
619 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
620 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
621 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
622
610 /* this will restart the entire jack detection process including MIC/GND 623 /* this will restart the entire jack detection process including MIC/GND
611 * switching and create interrupts. We have to go from 0 to 1 and back 624 * switching and create interrupts. We have to go from 0 to 1 and back
612 * to 0 to restart. 625 * to 0 to restart.
@@ -728,7 +741,10 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
728 struct regmap *regmap = nau8825->regmap; 741 struct regmap *regmap = nau8825->regmap;
729 int active_irq, clear_irq = 0, event = 0, event_mask = 0; 742 int active_irq, clear_irq = 0, event = 0, event_mask = 0;
730 743
731 regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq); 744 if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
745 dev_err(nau8825->dev, "failed to read irq status\n");
746 return IRQ_NONE;
747 }
732 748
733 if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) == 749 if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
734 NAU8825_JACK_EJECTION_DETECTED) { 750 NAU8825_JACK_EJECTION_DETECTED) {
@@ -1141,33 +1157,74 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
1141 return ret; 1157 return ret;
1142 } 1158 }
1143 } 1159 }
1144
1145 ret = regcache_sync(nau8825->regmap);
1146 if (ret) {
1147 dev_err(codec->dev,
1148 "Failed to sync cache: %d\n", ret);
1149 return ret;
1150 }
1151 } 1160 }
1152
1153 break; 1161 break;
1154 1162
1155 case SND_SOC_BIAS_OFF: 1163 case SND_SOC_BIAS_OFF:
1156 if (nau8825->mclk_freq) 1164 if (nau8825->mclk_freq)
1157 clk_disable_unprepare(nau8825->mclk); 1165 clk_disable_unprepare(nau8825->mclk);
1158
1159 regcache_mark_dirty(nau8825->regmap);
1160 break; 1166 break;
1161 } 1167 }
1162 return 0; 1168 return 0;
1163} 1169}
1164 1170
1171#ifdef CONFIG_PM
1172static int nau8825_suspend(struct snd_soc_codec *codec)
1173{
1174 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1175
1176 disable_irq(nau8825->irq);
1177 regcache_cache_only(nau8825->regmap, true);
1178 regcache_mark_dirty(nau8825->regmap);
1179
1180 return 0;
1181}
1182
1183static int nau8825_resume(struct snd_soc_codec *codec)
1184{
1185 struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
1186
1187 /* The chip may lose power and reset in S3. regcache_sync restores
1188 * register values including configurations for sysclk, irq, and
1189 * jack/button detection.
1190 */
1191 regcache_cache_only(nau8825->regmap, false);
1192 regcache_sync(nau8825->regmap);
1193
1194 /* Check the jack plug status directly. If the headset is unplugged
1195 * during S3 when the chip has no power, there will be no jack
1196 * detection irq even after the nau8825_restart_jack_detection below,
1197 * because the chip just thinks no headset has ever been plugged in.
1198 */
1199 if (!nau8825_is_jack_inserted(nau8825->regmap)) {
1200 nau8825_eject_jack(nau8825);
1201 snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
1202 }
1203
1204 enable_irq(nau8825->irq);
1205
1206 /* Run jack detection to check the type (OMTP or CTIA) of the headset
1207 * if there is one. This handles the case where a different type of
1208 * headset is plugged in during S3. This triggers an IRQ iff a headset
1209 * is already plugged in.
1210 */
1211 nau8825_restart_jack_detection(nau8825->regmap);
1212
1213 return 0;
1214}
1215#else
1216#define nau8825_suspend NULL
1217#define nau8825_resume NULL
1218#endif
1219
1165static struct snd_soc_codec_driver nau8825_codec_driver = { 1220static struct snd_soc_codec_driver nau8825_codec_driver = {
1166 .probe = nau8825_codec_probe, 1221 .probe = nau8825_codec_probe,
1167 .set_sysclk = nau8825_set_sysclk, 1222 .set_sysclk = nau8825_set_sysclk,
1168 .set_pll = nau8825_set_pll, 1223 .set_pll = nau8825_set_pll,
1169 .set_bias_level = nau8825_set_bias_level, 1224 .set_bias_level = nau8825_set_bias_level,
1170 .suspend_bias_off = true, 1225 .suspend_bias_off = true,
1226 .suspend = nau8825_suspend,
1227 .resume = nau8825_resume,
1171 1228
1172 .controls = nau8825_controls, 1229 .controls = nau8825_controls,
1173 .num_controls = ARRAY_SIZE(nau8825_controls), 1230 .num_controls = ARRAY_SIZE(nau8825_controls),
@@ -1277,16 +1334,6 @@ static int nau8825_setup_irq(struct nau8825 *nau8825)
1277 regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL, 1334 regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
1278 NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR); 1335 NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
1279 1336
1280 /* Chip needs one FSCLK cycle in order to generate interrupts,
1281 * as we cannot guarantee one will be provided by the system. Turning
1282 * master mode on then off enables us to generate that FSCLK cycle
1283 * with a minimum of contention on the clock bus.
1284 */
1285 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
1286 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
1287 regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
1288 NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
1289
1290 ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL, 1337 ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
1291 nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, 1338 nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1292 "nau8825", nau8825); 1339 "nau8825", nau8825);
@@ -1354,36 +1401,6 @@ static int nau8825_i2c_remove(struct i2c_client *client)
1354 return 0; 1401 return 0;
1355} 1402}
1356 1403
1357#ifdef CONFIG_PM_SLEEP
1358static int nau8825_suspend(struct device *dev)
1359{
1360 struct i2c_client *client = to_i2c_client(dev);
1361 struct nau8825 *nau8825 = dev_get_drvdata(dev);
1362
1363 disable_irq(client->irq);
1364 regcache_cache_only(nau8825->regmap, true);
1365 regcache_mark_dirty(nau8825->regmap);
1366
1367 return 0;
1368}
1369
1370static int nau8825_resume(struct device *dev)
1371{
1372 struct i2c_client *client = to_i2c_client(dev);
1373 struct nau8825 *nau8825 = dev_get_drvdata(dev);
1374
1375 regcache_cache_only(nau8825->regmap, false);
1376 regcache_sync(nau8825->regmap);
1377 enable_irq(client->irq);
1378
1379 return 0;
1380}
1381#endif
1382
1383static const struct dev_pm_ops nau8825_pm = {
1384 SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
1385};
1386
1387static const struct i2c_device_id nau8825_i2c_ids[] = { 1404static const struct i2c_device_id nau8825_i2c_ids[] = {
1388 { "nau8825", 0 }, 1405 { "nau8825", 0 },
1389 { } 1406 { }
@@ -1410,7 +1427,6 @@ static struct i2c_driver nau8825_driver = {
1410 .name = "nau8825", 1427 .name = "nau8825",
1411 .of_match_table = of_match_ptr(nau8825_of_ids), 1428 .of_match_table = of_match_ptr(nau8825_of_ids),
1412 .acpi_match_table = ACPI_PTR(nau8825_acpi_match), 1429 .acpi_match_table = ACPI_PTR(nau8825_acpi_match),
1413 .pm = &nau8825_pm,
1414 }, 1430 },
1415 .probe = nau8825_i2c_probe, 1431 .probe = nau8825_i2c_probe,
1416 .remove = nau8825_i2c_remove, 1432 .remove = nau8825_i2c_remove,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index e8b5ba04417a..09e8988bbb2d 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
359 359
360/* Interface data select */ 360/* Interface data select */
361static const char * const rt5640_data_select[] = { 361static const char * const rt5640_data_select[] = {
362 "Normal", "left copy to right", "right copy to left", "Swap"}; 362 "Normal", "Swap", "left copy to right", "right copy to left"};
363 363
364static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA, 364static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
365 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select); 365 RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 1761c3a98b76..58b664b06c16 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -443,39 +443,39 @@
443#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14) 443#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
444#define RT5640_IF1_DAC_SEL_SFT 14 444#define RT5640_IF1_DAC_SEL_SFT 14
445#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14) 445#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
446#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14) 446#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
447#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14) 447#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
448#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14) 448#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
449#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12) 449#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
450#define RT5640_IF1_ADC_SEL_SFT 12 450#define RT5640_IF1_ADC_SEL_SFT 12
451#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12) 451#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
452#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12) 452#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
453#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12) 453#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
454#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12) 454#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
455#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10) 455#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
456#define RT5640_IF2_DAC_SEL_SFT 10 456#define RT5640_IF2_DAC_SEL_SFT 10
457#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10) 457#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
458#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10) 458#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
459#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10) 459#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
460#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10) 460#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
461#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8) 461#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
462#define RT5640_IF2_ADC_SEL_SFT 8 462#define RT5640_IF2_ADC_SEL_SFT 8
463#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8) 463#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
464#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8) 464#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
465#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8) 465#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
466#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8) 466#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
467#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6) 467#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
468#define RT5640_IF3_DAC_SEL_SFT 6 468#define RT5640_IF3_DAC_SEL_SFT 6
469#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6) 469#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
470#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6) 470#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
471#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6) 471#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
472#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6) 472#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
473#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4) 473#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
474#define RT5640_IF3_ADC_SEL_SFT 4 474#define RT5640_IF3_ADC_SEL_SFT 4
475#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4) 475#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
476#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4) 476#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
477#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4) 477#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
478#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4) 478#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
479 479
480/* REC Left Mixer Control 1 (0x3b) */ 480/* REC Left Mixer Control 1 (0x3b) */
481#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13) 481#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index a8b3e3f701f9..1bae17ee8817 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1955,11 +1955,16 @@ err_adsp2_codec_probe:
1955static int wm5102_codec_remove(struct snd_soc_codec *codec) 1955static int wm5102_codec_remove(struct snd_soc_codec *codec)
1956{ 1956{
1957 struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); 1957 struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
1958 struct arizona *arizona = priv->core.arizona;
1958 1959
1959 wm_adsp2_codec_remove(&priv->core.adsp[0], codec); 1960 wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
1960 1961
1961 priv->core.arizona->dapm = NULL; 1962 priv->core.arizona->dapm = NULL;
1962 1963
1964 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
1965
1966 arizona_free_spk(codec);
1967
1963 return 0; 1968 return 0;
1964} 1969}
1965 1970
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 83ba70fe16e6..2728ac545ffe 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2298,6 +2298,8 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
2298 2298
2299 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); 2299 arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
2300 2300
2301 arizona_free_spk(codec);
2302
2301 return 0; 2303 return 0;
2302} 2304}
2303 2305
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 88223608a33f..720a14e0687d 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2471,7 +2471,7 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
2471 break; 2471 break;
2472 default: 2472 default:
2473 dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n"); 2473 dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
2474 dspclk = wm8962->sysclk; 2474 dspclk = wm8962->sysclk_rate;
2475 } 2475 }
2476 2476
2477 dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk); 2477 dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 52d766efe14f..6b0785b5a5c5 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1072,6 +1072,8 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
1072 1072
1073 priv->core.arizona->dapm = NULL; 1073 priv->core.arizona->dapm = NULL;
1074 1074
1075 arizona_free_spk(codec);
1076
1075 return 0; 1077 return 0;
1076} 1078}
1077 1079
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 012396074a8a..449f66636205 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1324,6 +1324,8 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
1324 1324
1325 priv->core.arizona->dapm = NULL; 1325 priv->core.arizona->dapm = NULL;
1326 1326
1327 arizona_free_spk(codec);
1328
1327 return 0; 1329 return 0;
1328} 1330}
1329 1331
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index b3e6c2300457..1120f4f4d011 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -163,7 +163,6 @@ config SND_SOC_INTEL_SKYLAKE
163 tristate 163 tristate
164 select SND_HDA_EXT_CORE 164 select SND_HDA_EXT_CORE
165 select SND_SOC_TOPOLOGY 165 select SND_SOC_TOPOLOGY
166 select SND_HDA_I915
167 select SND_SOC_INTEL_SST 166 select SND_SOC_INTEL_SST
168 167
169config SND_SOC_INTEL_SKL_RT286_MACH 168config SND_SOC_INTEL_SKL_RT286_MACH
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index ac60f1301e21..91565229d074 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -1345,7 +1345,7 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
1345 return 0; 1345 return 0;
1346 1346
1347 /* wait for pause to complete before we reset the stream */ 1347 /* wait for pause to complete before we reset the stream */
1348 while (stream->running && tries--) 1348 while (stream->running && --tries)
1349 msleep(1); 1349 msleep(1);
1350 if (!tries) { 1350 if (!tries) {
1351 dev_err(hsw->dev, "error: reset stream %d still running\n", 1351 dev_err(hsw->dev, "error: reset stream %d still running\n",
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index a5267e8a96e0..2962ef22fc84 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -336,6 +336,11 @@ void skl_dsp_free(struct sst_dsp *dsp)
336 skl_ipc_int_disable(dsp); 336 skl_ipc_int_disable(dsp);
337 337
338 free_irq(dsp->irq, dsp); 338 free_irq(dsp->irq, dsp);
339 dsp->cl_dev.ops.cl_cleanup_controller(dsp);
340 skl_cldma_int_disable(dsp);
341 skl_ipc_op_int_disable(dsp);
342 skl_ipc_int_disable(dsp);
343
339 skl_dsp_disable_core(dsp); 344 skl_dsp_disable_core(dsp);
340} 345}
341EXPORT_SYMBOL_GPL(skl_dsp_free); 346EXPORT_SYMBOL_GPL(skl_dsp_free);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 545b4e77b8aa..cdb78b7e5a14 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -239,6 +239,7 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
239{ 239{
240 int multiplier = 1; 240 int multiplier = 1;
241 struct skl_module_fmt *in_fmt, *out_fmt; 241 struct skl_module_fmt *in_fmt, *out_fmt;
242 int in_rate, out_rate;
242 243
243 244
244 /* Since fixups is applied to pin 0 only, ibs, obs needs 245 /* Since fixups is applied to pin 0 only, ibs, obs needs
@@ -249,15 +250,24 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
249 250
250 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) 251 if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
251 multiplier = 5; 252 multiplier = 5;
252 mcfg->ibs = (in_fmt->s_freq / 1000) * 253
253 (mcfg->in_fmt->channels) * 254 if (in_fmt->s_freq % 1000)
254 (mcfg->in_fmt->bit_depth >> 3) * 255 in_rate = (in_fmt->s_freq / 1000) + 1;
255 multiplier; 256 else
256 257 in_rate = (in_fmt->s_freq / 1000);
257 mcfg->obs = (mcfg->out_fmt->s_freq / 1000) * 258
258 (mcfg->out_fmt->channels) * 259 mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
259 (mcfg->out_fmt->bit_depth >> 3) * 260 (mcfg->in_fmt->bit_depth >> 3) *
260 multiplier; 261 multiplier;
262
263 if (mcfg->out_fmt->s_freq % 1000)
264 out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
265 else
266 out_rate = (mcfg->out_fmt->s_freq / 1000);
267
268 mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
269 (mcfg->out_fmt->bit_depth >> 3) *
270 multiplier;
261} 271}
262 272
263static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, 273static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
@@ -485,11 +495,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
485 if (!skl_is_pipe_mcps_avail(skl, mconfig)) 495 if (!skl_is_pipe_mcps_avail(skl, mconfig))
486 return -ENOMEM; 496 return -ENOMEM;
487 497
498 skl_tplg_alloc_pipe_mcps(skl, mconfig);
499
488 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { 500 if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
489 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, 501 ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
490 mconfig->id.module_id, mconfig->guid); 502 mconfig->id.module_id, mconfig->guid);
491 if (ret < 0) 503 if (ret < 0)
492 return ret; 504 return ret;
505
506 mconfig->m_state = SKL_MODULE_LOADED;
493 } 507 }
494 508
495 /* update blob if blob is null for be with default value */ 509 /* update blob if blob is null for be with default value */
@@ -509,7 +523,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
509 ret = skl_tplg_set_module_params(w, ctx); 523 ret = skl_tplg_set_module_params(w, ctx);
510 if (ret < 0) 524 if (ret < 0)
511 return ret; 525 return ret;
512 skl_tplg_alloc_pipe_mcps(skl, mconfig);
513 } 526 }
514 527
515 return 0; 528 return 0;
@@ -524,7 +537,8 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
524 list_for_each_entry(w_module, &pipe->w_list, node) { 537 list_for_each_entry(w_module, &pipe->w_list, node) {
525 mconfig = w_module->w->priv; 538 mconfig = w_module->w->priv;
526 539
527 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod) 540 if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
541 mconfig->m_state > SKL_MODULE_UNINIT)
528 return ctx->dsp->fw_ops.unload_mod(ctx->dsp, 542 return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
529 mconfig->id.module_id); 543 mconfig->id.module_id);
530 } 544 }
@@ -558,6 +572,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
558 if (!skl_is_pipe_mem_avail(skl, mconfig)) 572 if (!skl_is_pipe_mem_avail(skl, mconfig))
559 return -ENOMEM; 573 return -ENOMEM;
560 574
575 skl_tplg_alloc_pipe_mem(skl, mconfig);
576 skl_tplg_alloc_pipe_mcps(skl, mconfig);
577
561 /* 578 /*
562 * Create a list of modules for pipe. 579 * Create a list of modules for pipe.
563 * This list contains modules from source to sink 580 * This list contains modules from source to sink
@@ -601,9 +618,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
601 src_module = dst_module; 618 src_module = dst_module;
602 } 619 }
603 620
604 skl_tplg_alloc_pipe_mem(skl, mconfig);
605 skl_tplg_alloc_pipe_mcps(skl, mconfig);
606
607 return 0; 621 return 0;
608} 622}
609 623
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index de3c401284d9..d2d923002d5c 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -274,10 +274,10 @@ struct skl_pipe {
274 274
275enum skl_module_state { 275enum skl_module_state {
276 SKL_MODULE_UNINIT = 0, 276 SKL_MODULE_UNINIT = 0,
277 SKL_MODULE_INIT_DONE = 1, 277 SKL_MODULE_LOADED = 1,
278 SKL_MODULE_LOADED = 2, 278 SKL_MODULE_INIT_DONE = 2,
279 SKL_MODULE_UNLOADED = 3, 279 SKL_MODULE_BIND_DONE = 3,
280 SKL_MODULE_BIND_DONE = 4 280 SKL_MODULE_UNLOADED = 4,
281}; 281};
282 282
283struct skl_module_cfg { 283struct skl_module_cfg {
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index ab5e25aaeee3..3982f5536f2d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -222,6 +222,7 @@ static int skl_suspend(struct device *dev)
222 struct hdac_ext_bus *ebus = pci_get_drvdata(pci); 222 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
223 struct skl *skl = ebus_to_skl(ebus); 223 struct skl *skl = ebus_to_skl(ebus);
224 struct hdac_bus *bus = ebus_to_hbus(ebus); 224 struct hdac_bus *bus = ebus_to_hbus(ebus);
225 int ret = 0;
225 226
226 /* 227 /*
227 * Do not suspend if streams which are marked ignore suspend are 228 * Do not suspend if streams which are marked ignore suspend are
@@ -232,10 +233,20 @@ static int skl_suspend(struct device *dev)
232 enable_irq_wake(bus->irq); 233 enable_irq_wake(bus->irq);
233 pci_save_state(pci); 234 pci_save_state(pci);
234 pci_disable_device(pci); 235 pci_disable_device(pci);
235 return 0;
236 } else { 236 } else {
237 return _skl_suspend(ebus); 237 ret = _skl_suspend(ebus);
238 if (ret < 0)
239 return ret;
240 }
241
242 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
243 ret = snd_hdac_display_power(bus, false);
244 if (ret < 0)
245 dev_err(bus->dev,
246 "Cannot turn OFF display power on i915\n");
238 } 247 }
248
249 return ret;
239} 250}
240 251
241static int skl_resume(struct device *dev) 252static int skl_resume(struct device *dev)
@@ -316,17 +327,20 @@ static int skl_free(struct hdac_ext_bus *ebus)
316 327
317 if (bus->irq >= 0) 328 if (bus->irq >= 0)
318 free_irq(bus->irq, (void *)bus); 329 free_irq(bus->irq, (void *)bus);
319 if (bus->remap_addr)
320 iounmap(bus->remap_addr);
321
322 snd_hdac_bus_free_stream_pages(bus); 330 snd_hdac_bus_free_stream_pages(bus);
323 snd_hdac_stream_free_all(ebus); 331 snd_hdac_stream_free_all(ebus);
324 snd_hdac_link_free_all(ebus); 332 snd_hdac_link_free_all(ebus);
333
334 if (bus->remap_addr)
335 iounmap(bus->remap_addr);
336
325 pci_release_regions(skl->pci); 337 pci_release_regions(skl->pci);
326 pci_disable_device(skl->pci); 338 pci_disable_device(skl->pci);
327 339
328 snd_hdac_ext_bus_exit(ebus); 340 snd_hdac_ext_bus_exit(ebus);
329 341
342 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
343 snd_hdac_i915_exit(&ebus->bus);
330 return 0; 344 return 0;
331} 345}
332 346
@@ -719,12 +733,12 @@ static void skl_remove(struct pci_dev *pci)
719 if (skl->tplg) 733 if (skl->tplg)
720 release_firmware(skl->tplg); 734 release_firmware(skl->tplg);
721 735
722 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
723 snd_hdac_i915_exit(&ebus->bus);
724
725 if (pci_dev_run_wake(pci)) 736 if (pci_dev_run_wake(pci))
726 pm_runtime_get_noresume(&pci->dev); 737 pm_runtime_get_noresume(&pci->dev);
727 pci_dev_put(pci); 738
739 /* codec removal, invoke bus_device_remove */
740 snd_hdac_ext_bus_device_remove(ebus);
741
728 skl_platform_unregister(&pci->dev); 742 skl_platform_unregister(&pci->dev);
729 skl_free_dsp(skl); 743 skl_free_dsp(skl);
730 skl_machine_device_unregister(skl); 744 skl_machine_device_unregister(skl);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 801ae1a81dfd..c4464858bf01 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
2188 int count = 0; 2188 int count = 0;
2189 char *state = "not set"; 2189 char *state = "not set";
2190 2190
2191 /* card won't be set for the dummy component, as a spot fix
2192 * we're checking for that case specifically here but in future
2193 * we will ensure that the dummy component looks like others.
2194 */
2195 if (!cmpnt->card)
2196 return 0;
2197
2191 list_for_each_entry(w, &cmpnt->card->widgets, list) { 2198 list_for_each_entry(w, &cmpnt->card->widgets, list) {
2192 if (w->dapm != dapm) 2199 if (w->dapm != dapm)
2193 continue; 2200 continue;
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/stack-validation.txt
index 5a95896105bc..55a60d331f47 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/stack-validation.txt
@@ -299,18 +299,38 @@ they mean, and suggestions for how to fix them.
299Errors in .c files 299Errors in .c files
300------------------ 300------------------
301 301
302If you're getting an objtool error in a compiled .c file, chances are 3021. c_file.o: warning: objtool: funcA() falls through to next function funcB()
303the file uses an asm() statement which has a "call" instruction. An
304asm() statement with a call instruction must declare the use of the
305stack pointer in its output operand. For example, on x86_64:
306 303
307 register void *__sp asm("rsp"); 304 This means that funcA() doesn't end with a return instruction or an
308 asm volatile("call func" : "+r" (__sp)); 305 unconditional jump, and that objtool has determined that the function
306 can fall through into the next function. There could be different
307 reasons for this:
309 308
310Otherwise the stack frame may not get created before the call. 309 1) funcA()'s last instruction is a call to a "noreturn" function like
310 panic(). In this case the noreturn function needs to be added to
311 objtool's hard-coded global_noreturns array. Feel free to bug the
312 objtool maintainer, or you can submit a patch.
311 313
312Another possible cause for errors in C code is if the Makefile removes 314 2) funcA() uses the unreachable() annotation in a section of code
313-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options. 315 that is actually reachable.
316
317 3) If funcA() calls an inline function, the object code for funcA()
318 might be corrupt due to a gcc bug. For more details, see:
319 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
320
3212. If you're getting any other objtool error in a compiled .c file, it
322 may be because the file uses an asm() statement which has a "call"
323 instruction. An asm() statement with a call instruction must declare
324 the use of the stack pointer in its output operand. For example, on
325 x86_64:
326
327 register void *__sp asm("rsp");
328 asm volatile("call func" : "+r" (__sp));
329
330 Otherwise the stack frame may not get created before the call.
331
3323. Another possible cause for errors in C code is if the Makefile removes
333 -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
314 334
315Also see the above section for .S file errors for more information what 335Also see the above section for .S file errors for more information what
316the individual error messages mean. 336the individual error messages mean.
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 7515cb2e879a..e8a1e69eb92c 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -54,6 +54,7 @@ struct instruction {
54 struct symbol *call_dest; 54 struct symbol *call_dest;
55 struct instruction *jump_dest; 55 struct instruction *jump_dest;
56 struct list_head alts; 56 struct list_head alts;
57 struct symbol *func;
57}; 58};
58 59
59struct alternative { 60struct alternative {
@@ -66,6 +67,7 @@ struct objtool_file {
66 struct list_head insn_list; 67 struct list_head insn_list;
67 DECLARE_HASHTABLE(insn_hash, 16); 68 DECLARE_HASHTABLE(insn_hash, 16);
68 struct section *rodata, *whitelist; 69 struct section *rodata, *whitelist;
70 bool ignore_unreachables, c_file;
69}; 71};
70 72
71const char *objname; 73const char *objname;
@@ -228,7 +230,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
228 } 230 }
229 } 231 }
230 232
231 if (insn->type == INSN_JUMP_DYNAMIC) 233 if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
232 /* sibling call */ 234 /* sibling call */
233 return 0; 235 return 0;
234 } 236 }
@@ -248,6 +250,7 @@ static int dead_end_function(struct objtool_file *file, struct symbol *func)
248static int decode_instructions(struct objtool_file *file) 250static int decode_instructions(struct objtool_file *file)
249{ 251{
250 struct section *sec; 252 struct section *sec;
253 struct symbol *func;
251 unsigned long offset; 254 unsigned long offset;
252 struct instruction *insn; 255 struct instruction *insn;
253 int ret; 256 int ret;
@@ -281,6 +284,21 @@ static int decode_instructions(struct objtool_file *file)
281 hash_add(file->insn_hash, &insn->hash, insn->offset); 284 hash_add(file->insn_hash, &insn->hash, insn->offset);
282 list_add_tail(&insn->list, &file->insn_list); 285 list_add_tail(&insn->list, &file->insn_list);
283 } 286 }
287
288 list_for_each_entry(func, &sec->symbol_list, list) {
289 if (func->type != STT_FUNC)
290 continue;
291
292 if (!find_insn(file, sec, func->offset)) {
293 WARN("%s(): can't find starting instruction",
294 func->name);
295 return -1;
296 }
297
298 func_for_each_insn(file, func, insn)
299 if (!insn->func)
300 insn->func = func;
301 }
284 } 302 }
285 303
286 return 0; 304 return 0;
@@ -664,13 +682,40 @@ static int add_func_switch_tables(struct objtool_file *file,
664 text_rela->addend); 682 text_rela->addend);
665 683
666 /* 684 /*
667 * TODO: Document where this is needed, or get rid of it.
668 *
669 * rare case: jmpq *[addr](%rip) 685 * rare case: jmpq *[addr](%rip)
686 *
687 * This check is for a rare gcc quirk, currently only seen in
688 * three driver functions in the kernel, only with certain
689 * obscure non-distro configs.
690 *
691 * As part of an optimization, gcc makes a copy of an existing
692 * switch jump table, modifies it, and then hard-codes the jump
693 * (albeit with an indirect jump) to use a single entry in the
694 * table. The rest of the jump table and some of its jump
695 * targets remain as dead code.
696 *
697 * In such a case we can just crudely ignore all unreachable
698 * instruction warnings for the entire object file. Ideally we
699 * would just ignore them for the function, but that would
700 * require redesigning the code quite a bit. And honestly
701 * that's just not worth doing: unreachable instruction
702 * warnings are of questionable value anyway, and this is such
703 * a rare issue.
704 *
705 * kbuild reports:
706 * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com
707 * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com
708 * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com
709 *
710 * gcc bug:
711 * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604
670 */ 712 */
671 if (!rodata_rela) 713 if (!rodata_rela) {
672 rodata_rela = find_rela_by_dest(file->rodata, 714 rodata_rela = find_rela_by_dest(file->rodata,
673 text_rela->addend + 4); 715 text_rela->addend + 4);
716 if (rodata_rela)
717 file->ignore_unreachables = true;
718 }
674 719
675 if (!rodata_rela) 720 if (!rodata_rela)
676 continue; 721 continue;
@@ -732,9 +777,6 @@ static int decode_sections(struct objtool_file *file)
732{ 777{
733 int ret; 778 int ret;
734 779
735 file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard");
736 file->rodata = find_section_by_name(file->elf, ".rodata");
737
738 ret = decode_instructions(file); 780 ret = decode_instructions(file);
739 if (ret) 781 if (ret)
740 return ret; 782 return ret;
@@ -799,6 +841,7 @@ static int validate_branch(struct objtool_file *file,
799 struct alternative *alt; 841 struct alternative *alt;
800 struct instruction *insn; 842 struct instruction *insn;
801 struct section *sec; 843 struct section *sec;
844 struct symbol *func = NULL;
802 unsigned char state; 845 unsigned char state;
803 int ret; 846 int ret;
804 847
@@ -813,6 +856,16 @@ static int validate_branch(struct objtool_file *file,
813 } 856 }
814 857
815 while (1) { 858 while (1) {
859 if (file->c_file && insn->func) {
860 if (func && func != insn->func) {
861 WARN("%s() falls through to next function %s()",
862 func->name, insn->func->name);
863 return 1;
864 }
865
866 func = insn->func;
867 }
868
816 if (insn->visited) { 869 if (insn->visited) {
817 if (frame_state(insn->state) != frame_state(state)) { 870 if (frame_state(insn->state) != frame_state(state)) {
818 WARN_FUNC("frame pointer state mismatch", 871 WARN_FUNC("frame pointer state mismatch",
@@ -823,13 +876,6 @@ static int validate_branch(struct objtool_file *file,
823 return 0; 876 return 0;
824 } 877 }
825 878
826 /*
827 * Catch a rare case where a noreturn function falls through to
828 * the next function.
829 */
830 if (is_fentry_call(insn) && (state & STATE_FENTRY))
831 return 0;
832
833 insn->visited = true; 879 insn->visited = true;
834 insn->state = state; 880 insn->state = state;
835 881
@@ -1035,12 +1081,8 @@ static int validate_functions(struct objtool_file *file)
1035 continue; 1081 continue;
1036 1082
1037 insn = find_insn(file, sec, func->offset); 1083 insn = find_insn(file, sec, func->offset);
1038 if (!insn) { 1084 if (!insn)
1039 WARN("%s(): can't find starting instruction",
1040 func->name);
1041 warnings++;
1042 continue; 1085 continue;
1043 }
1044 1086
1045 ret = validate_branch(file, insn, 0); 1087 ret = validate_branch(file, insn, 0);
1046 warnings += ret; 1088 warnings += ret;
@@ -1056,13 +1098,14 @@ static int validate_functions(struct objtool_file *file)
1056 if (insn->visited) 1098 if (insn->visited)
1057 continue; 1099 continue;
1058 1100
1059 if (!ignore_unreachable_insn(func, insn) &&
1060 !warnings) {
1061 WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
1062 warnings++;
1063 }
1064
1065 insn->visited = true; 1101 insn->visited = true;
1102
1103 if (file->ignore_unreachables || warnings ||
1104 ignore_unreachable_insn(func, insn))
1105 continue;
1106
1107 WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
1108 warnings++;
1066 } 1109 }
1067 } 1110 }
1068 } 1111 }
@@ -1133,6 +1176,10 @@ int cmd_check(int argc, const char **argv)
1133 1176
1134 INIT_LIST_HEAD(&file.insn_list); 1177 INIT_LIST_HEAD(&file.insn_list);
1135 hash_init(file.insn_hash); 1178 hash_init(file.insn_hash);
1179 file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard");
1180 file.rodata = find_section_by_name(file.elf, ".rodata");
1181 file.ignore_unreachables = false;
1182 file.c_file = find_section_by_name(file.elf, ".comment");
1136 1183
1137 ret = decode_sections(&file); 1184 ret = decode_sections(&file);
1138 if (ret < 0) 1185 if (ret < 0)
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 407f11b97c8d..617578440989 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1130,7 +1130,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1130 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n", 1130 pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
1131 ret); 1131 ret);
1132 1132
1133 if (pt->synth_opts.callchain) 1133 if (pt->synth_opts.last_branch)
1134 intel_pt_reset_last_branch_rb(ptq); 1134 intel_pt_reset_last_branch_rb(ptq);
1135 1135
1136 return ret; 1136 return ret;
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 69bb3fc38fb2..0840684deb7d 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -3,3 +3,4 @@ psock_fanout
3psock_tpacket 3psock_tpacket
4reuseport_bpf 4reuseport_bpf
5reuseport_bpf_cpu 5reuseport_bpf_cpu
6reuseport_dualstack
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index c658792d47b4..0e5340742620 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
4 4
5CFLAGS += -I../../../../usr/include/ 5CFLAGS += -I../../../../usr/include/
6 6
7NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu 7NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
8 8
9all: $(NET_PROGS) 9all: $(NET_PROGS)
10%: %.c 10%: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644
index 000000000000..90958aaaafb9
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_dualstack.c
@@ -0,0 +1,208 @@
1/*
2 * It is possible to use SO_REUSEPORT to open multiple sockets bound to
3 * equivalent local addresses using AF_INET and AF_INET6 at the same time. If
4 * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
5 * receive a given incoming packet. However, when it is not set, incoming v4
6 * packets should prefer the AF_INET socket(s). This behavior was defined with
7 * the original SO_REUSEPORT implementation, but broke with
8 * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection")
9 * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
10 * AF_INET preference for v4 packets.
11 */
12
13#define _GNU_SOURCE
14
15#include <arpa/inet.h>
16#include <errno.h>
17#include <error.h>
18#include <linux/in.h>
19#include <linux/unistd.h>
20#include <stdio.h>
21#include <stdlib.h>
22#include <string.h>
23#include <sys/epoll.h>
24#include <sys/types.h>
25#include <sys/socket.h>
26#include <unistd.h>
27
28static const int PORT = 8888;
29
30static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
31{
32 struct sockaddr_storage addr;
33 struct sockaddr_in *addr4;
34 struct sockaddr_in6 *addr6;
35 int opt, i;
36
37 switch (family) {
38 case AF_INET:
39 addr4 = (struct sockaddr_in *)&addr;
40 addr4->sin_family = AF_INET;
41 addr4->sin_addr.s_addr = htonl(INADDR_ANY);
42 addr4->sin_port = htons(PORT);
43 break;
44 case AF_INET6:
45 addr6 = (struct sockaddr_in6 *)&addr;
46 addr6->sin6_family = AF_INET6;
47 addr6->sin6_addr = in6addr_any;
48 addr6->sin6_port = htons(PORT);
49 break;
50 default:
51 error(1, 0, "Unsupported family %d", family);
52 }
53
54 for (i = 0; i < count; ++i) {
55 rcv_fds[i] = socket(family, proto, 0);
56 if (rcv_fds[i] < 0)
57 error(1, errno, "failed to create receive socket");
58
59 opt = 1;
60 if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
61 sizeof(opt)))
62 error(1, errno, "failed to set SO_REUSEPORT");
63
64 if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
65 error(1, errno, "failed to bind receive socket");
66
67 if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
68 error(1, errno, "failed to listen on receive port");
69 }
70}
71
72static void send_from_v4(int proto)
73{
74 struct sockaddr_in saddr, daddr;
75 int fd;
76
77 saddr.sin_family = AF_INET;
78 saddr.sin_addr.s_addr = htonl(INADDR_ANY);
79 saddr.sin_port = 0;
80
81 daddr.sin_family = AF_INET;
82 daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
83 daddr.sin_port = htons(PORT);
84
85 fd = socket(AF_INET, proto, 0);
86 if (fd < 0)
87 error(1, errno, "failed to create send socket");
88
89 if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
90 error(1, errno, "failed to bind send socket");
91
92 if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
93 error(1, errno, "failed to connect send socket");
94
95 if (send(fd, "a", 1, 0) < 0)
96 error(1, errno, "failed to send message");
97
98 close(fd);
99}
100
/*
 * Block until one of the registered receive sockets becomes readable,
 * consume a single message from it (accepting and closing the connection
 * first for SOCK_STREAM), and return the fd of the listening/receive
 * socket that fired.  Exits via error() on any failure.
 */
static int receive_once(int epfd, int proto)
{
	struct epoll_event ev;
	char data[8];
	int nev, conn, ret;

	nev = epoll_wait(epfd, &ev, 1, -1);
	if (nev < 0)
		error(1, errno, "epoll_wait failed");

	if (proto != SOCK_STREAM) {
		ret = recv(ev.data.fd, data, sizeof(data), 0);
	} else {
		conn = accept(ev.data.fd, NULL, NULL);
		if (conn < 0)
			error(1, errno, "failed to accept");
		ret = recv(conn, data, sizeof(data), 0);
		close(conn);
	}

	if (ret < 0)
		error(1, errno, "failed to recv");

	return ev.data.fd;
}
126
127static void test(int *rcv_fds, int count, int proto)
128{
129 struct epoll_event ev;
130 int epfd, i, test_fd;
131 uint16_t test_family;
132 socklen_t len;
133
134 epfd = epoll_create(1);
135 if (epfd < 0)
136 error(1, errno, "failed to create epoll");
137
138 ev.events = EPOLLIN;
139 for (i = 0; i < count; ++i) {
140 ev.data.fd = rcv_fds[i];
141 if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
142 error(1, errno, "failed to register sock epoll");
143 }
144
145 send_from_v4(proto);
146
147 test_fd = receive_once(epfd, proto);
148 if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
149 error(1, errno, "failed to read socket domain");
150 if (test_family != AF_INET)
151 error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
152 test_family);
153
154 close(epfd);
155}
156
/*
 * Run one scenario: announce it on stderr, build 'per_family' receive
 * sockets for each of the two families (in the given order, which is the
 * point of the test), run the v4-preference check, and close everything.
 */
static void run_case(const char *name, int first_family, int second_family,
		     int proto, int per_family)
{
	int fds[32], i;
	int total = 2 * per_family;

	fprintf(stderr, "---- %s ----\n", name);
	build_rcv_fd(first_family, proto, fds, per_family);
	build_rcv_fd(second_family, proto, &fds[per_family], per_family);
	test(fds, total, proto);
	for (i = 0; i < total; ++i)
		close(fds[i]);
}

/*
 * Exercise v4-packet delivery preference across every ordering of socket
 * creation (v4-first and v6-first), both socket types, and both the small
 * and large (>10 sockets per group) UDP lookup paths.
 */
int main(void)
{
	run_case("UDP IPv4 created before IPv6",
		 AF_INET, AF_INET6, SOCK_DGRAM, 5);
	run_case("UDP IPv6 created before IPv4",
		 AF_INET6, AF_INET, SOCK_DGRAM, 5);

	/* NOTE: UDP socket lookups traverse a different code path when there
	 * are > 10 sockets in a group.
	 */
	run_case("UDP IPv4 created before IPv6 (large)",
		 AF_INET, AF_INET6, SOCK_DGRAM, 16);
	run_case("UDP IPv6 created before IPv4 (large)",
		 AF_INET6, AF_INET, SOCK_DGRAM, 16);

	run_case("TCP IPv4 created before IPv6",
		 AF_INET, AF_INET6, SOCK_STREAM, 5);
	run_case("TCP IPv6 created before IPv4",
		 AF_INET6, AF_INET, SOCK_STREAM, 5);

	fprintf(stderr, "SUCCESS\n");
	return 0;
}