-rw-r--r--  Documentation/devicetree/bindings/arm/gic-v3.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/arm/idle-states.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/iio/accel/bma180.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/regulator/pbias-regulator.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-mt65xx.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/thermal/thermal.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r--  Documentation/networking/vrf.txt | 96
-rw-r--r--  Documentation/sysctl/net.txt | 16
-rw-r--r--  Documentation/thermal/power_allocator.txt | 2
-rw-r--r--  MAINTAINERS | 38
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/pci.c | 7
-rw-r--r--  arch/arm/boot/dts/am335x-phycore-som.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15.dts | 46
-rw-r--r--  arch/arm/boot/dts/dm8148-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/dm8148-t410.dts | 6
-rw-r--r--  arch/arm/boot/dts/dm814x.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/omap2430.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/omap3-beagle.dts | 2
-rw-r--r--  arch/arm/boot/dts/omap3-igep.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020-common.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 25
-rw-r--r--  arch/arm/boot/dts/omap4.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/omap5-uevm.dts | 4
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/rk3288-veyron.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/stih407.dtsi | 82
-rw-r--r--  arch/arm/boot/dts/stih410.dtsi | 82
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 5
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 1
-rw-r--r--  arch/arm/include/asm/unistd.h | 2
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/arm/kernel/calls.S | 2
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 6
-rw-r--r--  arch/arm/mach-omap2/board-generic.c | 7
-rw-r--r--  arch/arm/mach-omap2/id.c | 8
-rw-r--r--  arch/arm/mach-omap2/io.c | 1
-rw-r--r--  arch/arm/mach-omap2/omap_device.c | 3
-rw-r--r--  arch/arm/mach-omap2/pm.h | 3
-rw-r--r--  arch/arm/mach-omap2/soc.h | 2
-rw-r--r--  arch/arm/mach-omap2/timer.c | 8
-rw-r--r--  arch/arm/mach-omap2/vc.c | 2
-rw-r--r--  arch/arm/mach-pxa/balloon3.c | 2
-rw-r--r--  arch/arm/mach-pxa/include/mach/addr-map.h | 7
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c | 21
-rw-r--r--  arch/arm/mm/alignment.c | 30
-rw-r--r--  arch/arm/plat-pxa/ssp.c | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 1
-rw-r--r--  arch/frv/mb93090-mb00/pci-vdk.c | 2
-rw-r--r--  arch/ia64/pci/pci.c | 5
-rw-r--r--  arch/microblaze/pci/pci-common.c | 9
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 1
-rw-r--r--  arch/mips/pci/pci.c | 6
-rw-r--r--  arch/mn10300/unit-asb2305/pci.c | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 1
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 8
-rw-r--r--  arch/powerpc/kvm/book3s.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 5
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 1
-rw-r--r--  arch/s390/include/asm/unistd.h | 20
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h | 21
-rw-r--r--  arch/s390/kernel/compat_signal.c | 27
-rw-r--r--  arch/s390/kernel/compat_wrapper.c | 70
-rw-r--r--  arch/s390/kernel/entry.S | 2
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 12
-rw-r--r--  arch/s390/kernel/swsusp.S | 38
-rw-r--r--  arch/s390/kernel/syscalls.S | 121
-rw-r--r--  arch/s390/kernel/vtime.c | 12
-rw-r--r--  arch/x86/entry/entry_64.S | 16
-rw-r--r--  arch/x86/include/asm/efi.h | 10
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 1
-rw-r--r--  arch/x86/include/asm/msr-index.h | 1
-rw-r--r--  arch/x86/kernel/paravirt.c | 16
-rw-r--r--  arch/x86/kvm/mmu.c | 25
-rw-r--r--  arch/x86/kvm/svm.c | 4
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/pci/common.c | 1
-rw-r--r--  arch/xtensa/kernel/pci.c | 4
-rw-r--r--  drivers/atm/he.c | 7
-rw-r--r--  drivers/atm/solos-pci.c | 12
-rw-r--r--  drivers/base/cacheinfo.c | 10
-rw-r--r--  drivers/char/hw_random/xgene-rng.c | 7
-rw-r--r--  drivers/crypto/marvell/cesa.h | 27
-rw-r--r--  drivers/crypto/marvell/cipher.c | 7
-rw-r--r--  drivers/crypto/marvell/hash.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 3
-rw-r--r--  drivers/extcon/extcon.c | 2
-rw-r--r--  drivers/firmware/Kconfig | 8
-rw-r--r--  drivers/firmware/Makefile | 3
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 4
-rw-r--r--  drivers/firmware/qcom_scm-64.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smc.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 3
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 41
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 155
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 41
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 7
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 31
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 36
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 29
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 11
-rw-r--r--  drivers/hv/channel_mgmt.c | 17
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 293
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 21
-rw-r--r--  drivers/irqchip/irq-atmel-aic5.c | 24
-rw-r--r--  drivers/md/dm-crypt.c | 17
-rw-r--r--  drivers/md/dm-thin.c | 4
-rw-r--r--  drivers/misc/cxl/sysfs.c | 2
-rw-r--r--  drivers/misc/mei/debugfs.c | 3
-rw-r--r--  drivers/net/arcnet/arcnet.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 24
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 29
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 5
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c | 1
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 18
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 111
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 11
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 17
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 74
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 47
-rw-r--r--  drivers/net/ethernet/via/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
-rw-r--r--  drivers/net/fjes/fjes_hw.c | 8
-rw-r--r--  drivers/net/geneve.c | 32
-rw-r--r--  drivers/net/irda/ali-ircc.c | 6
-rw-r--r--  drivers/net/macvtap.c | 4
-rw-r--r--  drivers/net/phy/fixed_phy.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 9
-rw-r--r--  drivers/net/phy/mdio-bcm-unimac.c | 1
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 1
-rw-r--r--  drivers/net/phy/mdio-mux.c | 19
-rw-r--r--  drivers/net/phy/mdio_bus.c | 31
-rw-r--r--  drivers/net/phy/phy_device.c | 62
-rw-r--r--  drivers/net/phy/vitesse.c | 14
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 4
-rw-r--r--  drivers/net/usb/Kconfig | 11
-rw-r--r--  drivers/net/usb/Makefile | 2
-rw-r--r--  drivers/net/usb/ch9200.c | 432
-rw-r--r--  drivers/net/vrf.c | 3
-rw-r--r--  drivers/net/vxlan.c | 15
-rw-r--r--  drivers/of/of_mdio.c | 27
-rw-r--r--  drivers/of/of_pci_irq.c | 22
-rw-r--r--  drivers/parisc/dino.c | 3
-rw-r--r--  drivers/parisc/lba_pci.c | 1
-rw-r--r--  drivers/pci/access.c | 27
-rw-r--r--  drivers/pci/bus.c | 2
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c | 1
-rw-r--r--  drivers/pci/probe.c | 23
-rw-r--r--  drivers/pci/quirks.c | 20
-rw-r--r--  drivers/regulator/anatop-regulator.c | 1
-rw-r--r--  drivers/regulator/core.c | 21
-rw-r--r--  drivers/regulator/gpio-regulator.c | 1
-rw-r--r--  drivers/regulator/pbias-regulator.c | 56
-rw-r--r--  drivers/regulator/tps65218-regulator.c | 2
-rw-r--r--  drivers/regulator/vexpress.c | 1
-rw-r--r--  drivers/sh/pm_runtime.c | 19
-rw-r--r--  drivers/spi/spi-atmel.c | 2
-rw-r--r--  drivers/spi/spi-bcm2835.c | 6
-rw-r--r--  drivers/spi/spi-meson-spifc.c | 1
-rw-r--r--  drivers/spi/spi-mt65xx.c | 53
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 4
-rw-r--r--  drivers/spi/spi-xtensa-xtfpga.c | 4
-rw-r--r--  drivers/spi/spi.c | 3
-rw-r--r--  drivers/spi/spidev.c | 3
-rw-r--r--  drivers/staging/android/TODO | 20
-rw-r--r--  drivers/staging/android/ion/ion.c | 6
-rw-r--r--  drivers/staging/fbtft/fb_uc1611.c | 2
-rw-r--r--  drivers/staging/fbtft/fb_watterott.c | 4
-rw-r--r--  drivers/staging/fbtft/fbtft-core.c | 10
-rw-r--r--  drivers/staging/fbtft/flexfb.c | 11
-rw-r--r--  drivers/staging/lustre/README.txt | 16
-rw-r--r--  drivers/staging/most/Kconfig | 1
-rw-r--r--  drivers/staging/most/hdm-dim2/Kconfig | 1
-rw-r--r--  drivers/staging/most/hdm-usb/Kconfig | 2
-rw-r--r--  drivers/staging/most/mostcore/Kconfig | 1
-rw-r--r--  drivers/staging/unisys/visorbus/Makefile | 1
-rw-r--r--  drivers/staging/unisys/visorbus/visorbus_main.c | 13
-rw-r--r--  drivers/staging/unisys/visornic/visornic_main.c | 18
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 5
-rw-r--r--  drivers/target/target_core_device.c | 45
-rw-r--r--  drivers/target/target_core_hba.c | 2
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  drivers/target/target_core_pr.c | 91
-rw-r--r--  drivers/target/target_core_tpg.c | 5
-rw-r--r--  drivers/thermal/Kconfig | 17
-rw-r--r--  drivers/thermal/cpu_cooling.c | 52
-rw-r--r--  drivers/thermal/db8500_cpufreq_cooling.c | 1
-rw-r--r--  drivers/thermal/power_allocator.c | 243
-rw-r--r--  drivers/thermal/thermal_core.c | 28
-rw-r--r--  drivers/thermal/ti-soc-thermal/Kconfig | 8
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_usb2.c | 25
-rw-r--r--  drivers/usb/chipidea/udc.c | 84
-rw-r--r--  drivers/usb/core/config.c | 5
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/epautoconf.c | 1
-rw-r--r--  drivers/usb/gadget/udc/amd5536udc.c | 43
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 11
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_core.c | 3
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 46
-rw-r--r--  drivers/usb/gadget/udc/gr_udc.c | 3
-rw-r--r--  drivers/usb/gadget/udc/mv_u3d_core.c | 3
-rw-r--r--  drivers/usb/gadget/udc/mv_udc_core.c | 3
-rw-r--r--  drivers/usb/host/xhci-mem.c | 17
-rw-r--r--  drivers/usb/host/xhci-pci.c | 90
-rw-r--r--  drivers/usb/host/xhci-ring.c | 13
-rw-r--r--  drivers/usb/host/xhci.c | 24
-rw-r--r--  drivers/usb/musb/musb_core.c | 7
-rw-r--r--  drivers/usb/musb/musb_cppi41.c | 3
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 7
-rw-r--r--  drivers/usb/musb/ux500.c | 2
-rw-r--r--  drivers/usb/phy/Kconfig | 2
-rw-r--r--  drivers/usb/phy/phy-generic.c | 3
-rw-r--r--  drivers/usb/phy/phy-isp1301.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 24
-rw-r--r--  drivers/usb/serial/whiteheat.c | 31
-rw-r--r--  fs/btrfs/btrfs_inode.h | 2
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 7
-rw-r--r--  fs/btrfs/extent_io.c | 65
-rw-r--r--  fs/btrfs/inode.c | 45
-rw-r--r--  fs/btrfs/super.c | 2
-rw-r--r--  fs/btrfs/transaction.c | 32
-rw-r--r--  fs/btrfs/transaction.h | 5
-rw-r--r--  fs/cifs/cifsencrypt.c | 53
-rw-r--r--  fs/cifs/smb2ops.c | 8
-rw-r--r--  fs/cifs/smb2pdu.c | 84
-rw-r--r--  fs/nfs/delegation.c | 8
-rw-r--r--  fs/nfs/delegation.h | 2
-rw-r--r--  fs/nfs/direct.c | 7
-rw-r--r--  fs/nfs/filelayout/filelayout.c | 31
-rw-r--r--  fs/nfs/nfs42proc.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 127
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/pagelist.c | 2
-rw-r--r--  fs/nfs/pnfs.c | 35
-rw-r--r--  fs/nfs/pnfs.h | 7
-rw-r--r--  fs/nfs/read.c | 3
-rw-r--r--  fs/nfs/write.c | 3
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 9
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 8
-rw-r--r--  fs/userfaultfd.c | 8
-rw-r--r--  include/linux/backing-dev.h | 11
-rw-r--r--  include/linux/cgroup-defs.h | 27
-rw-r--r--  include/linux/init_task.h | 8
-rw-r--r--  include/linux/netdevice.h | 1
-rw-r--r--  include/linux/phy.h | 6
-rw-r--r--  include/linux/sched.h | 12
-rw-r--r--  include/linux/security.h | 2
-rw-r--r--  include/linux/skbuff.h | 9
-rw-r--r--  include/linux/spi/spi.h | 2
-rw-r--r--  include/linux/sunrpc/xprtsock.h | 3
-rw-r--r--  include/linux/thermal.h | 8
-rw-r--r--  include/linux/wait.h | 5
-rw-r--r--  include/net/flow.h | 1
-rw-r--r--  include/net/inet_timewait_sock.h | 14
-rw-r--r--  include/net/ip6_fib.h | 3
-rw-r--r--  include/net/ip6_tunnel.h | 17
-rw-r--r--  include/net/ip_fib.h | 30
-rw-r--r--  include/net/ip_tunnels.h | 2
-rw-r--r--  include/net/route.h | 2
-rw-r--r--  include/rdma/ib_verbs.h | 3
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--  include/uapi/asm-generic/unistd.h | 8
-rw-r--r--  include/uapi/linux/lwtunnel.h | 4
-rw-r--r--  kernel/cgroup.c | 110
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/sched/wait.c | 7
-rw-r--r--  lib/iommu-common.c | 6
-rw-r--r--  lib/rhashtable.c | 5
-rw-r--r--  mm/migrate.c | 2
-rw-r--r--  mm/mmap.c | 3
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  net/atm/clip.c | 3
-rw-r--r--  net/bluetooth/smp.c | 12
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/fib_rules.c | 14
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/net-sysfs.c | 9
-rw-r--r--  net/core/netpoll.c | 10
-rw-r--r--  net/core/rtnetlink.c | 26
-rw-r--r--  net/core/sock.c | 12
-rw-r--r--  net/dccp/ackvec.c | 12
-rw-r--r--  net/dccp/ccid.c | 3
-rw-r--r--  net/dccp/minisocks.c | 4
-rw-r--r--  net/dsa/dsa.c | 41
-rw-r--r--  net/dsa/tag_trailer.c | 2
-rw-r--r--  net/ipv4/arp.c | 39
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/icmp.c | 4
-rw-r--r--  net/ipv4/inet_connection_sock.c | 8
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 16
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 54
-rw-r--r--  net/ipv4/route.c | 6
-rw-r--r--  net/ipv4/tcp_cubic.c | 10
-rw-r--r--  net/ipv4/tcp_minisocks.c | 13
-rw-r--r--  net/ipv4/tcp_output.c | 1
-rw-r--r--  net/ipv4/udp.c | 3
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 7
-rw-r--r--  net/ipv6/ip6_fib.c | 26
-rw-r--r--  net/ipv6/ip6_gre.c | 93
-rw-r--r--  net/ipv6/ip6_output.c | 14
-rw-r--r--  net/ipv6/ip6_tunnel.c | 147
-rw-r--r--  net/ipv6/route.c | 16
-rw-r--r--  net/mac80211/cfg.c | 13
-rw-r--r--  net/netfilter/nf_log.c | 9
-rw-r--r--  net/netfilter/nft_compat.c | 24
-rw-r--r--  net/netlink/af_netlink.c | 63
-rw-r--r--  net/netlink/af_netlink.h | 10
-rw-r--r--  net/openvswitch/Kconfig | 3
-rw-r--r--  net/openvswitch/conntrack.c | 8
-rw-r--r--  net/openvswitch/datapath.c | 4
-rw-r--r--  net/openvswitch/flow_netlink.c | 82
-rw-r--r--  net/openvswitch/flow_table.c | 23
-rw-r--r--  net/openvswitch/flow_table.h | 2
-rw-r--r--  net/packet/af_packet.c | 32
-rw-r--r--  net/sched/cls_fw.c | 30
-rw-r--r--  net/sctp/protocol.c | 64
-rw-r--r--  net/sunrpc/sched.c | 14
-rw-r--r--  net/sunrpc/xprt.c | 6
-rw-r--r--  net/sunrpc/xprtsock.c | 15
-rw-r--r--  net/tipc/msg.c | 1
-rw-r--r--  sound/arm/Kconfig | 15
-rw-r--r--  sound/pci/hda/hda_tegra.c | 30
-rw-r--r--  sound/pci/hda/patch_realtek.c | 31
-rw-r--r--  sound/soc/au1x/psc-i2s.c | 1
-rw-r--r--  sound/soc/codecs/rt5645.c | 22
-rw-r--r--  sound/soc/codecs/wm0010.c | 23
-rw-r--r--  sound/soc/codecs/wm8960.c | 26
-rw-r--r--  sound/soc/codecs/wm8962.c | 3
-rw-r--r--  sound/soc/davinci/davinci-mcasp.c | 14
-rw-r--r--  sound/soc/fsl/fsl-asoc-card.c | 3
-rw-r--r--  sound/soc/fsl/fsl_ssi.c | 5
-rw-r--r--  sound/soc/intel/haswell/sst-haswell-ipc.c | 20
-rw-r--r--  sound/soc/mediatek/mtk-afe-pcm.c | 17
-rw-r--r--  sound/soc/pxa/Kconfig | 2
-rw-r--r--  sound/soc/pxa/pxa2xx-ac97.c | 4
-rw-r--r--  sound/soc/soc-dapm.c | 2
-rw-r--r--  sound/soc/soc-utils.c | 9
-rw-r--r--  sound/soc/spear/Kconfig | 2
-rw-r--r--  sound/soc/sti/uniperif_player.c | 14
-rw-r--r--  sound/soc/sti/uniperif_reader.c | 6
-rw-r--r--  tools/testing/selftests/membarrier/Makefile | 7
-rw-r--r--  tools/testing/selftests/membarrier/membarrier_test.c | 5
-rw-r--r--  tools/testing/selftests/vm/Makefile | 9
-rw-r--r--  tools/testing/selftests/vm/userfaultfd.c | 52
-rw-r--r--  virt/kvm/kvm_main.c | 4
438 files changed, 4987 insertions(+), 2582 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/gic-v3.txt b/Documentation/devicetree/bindings/arm/gic-v3.txt
index ddfade40ac59..7803e77d85cb 100644
--- a/Documentation/devicetree/bindings/arm/gic-v3.txt
+++ b/Documentation/devicetree/bindings/arm/gic-v3.txt
@@ -57,6 +57,8 @@ used to route Message Signalled Interrupts (MSI) to the CPUs.
 These nodes must have the following properties:
 - compatible : Should at least contain "arm,gic-v3-its".
 - msi-controller : Boolean property. Identifies the node as an MSI controller
+- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device
+  which will generate the MSI.
 - reg: Specifies the base physical address and size of the ITS
   registers.
 
@@ -83,6 +85,7 @@ Examples:
 	gic-its@2c200000 {
 		compatible = "arm,gic-v3-its";
 		msi-controller;
+		#msi-cells = <1>;
 		reg = <0x0 0x2c200000 0 0x200000>;
 	};
 };
@@ -107,12 +110,14 @@ Examples:
 	gic-its@2c200000 {
 		compatible = "arm,gic-v3-its";
 		msi-controller;
+		#msi-cells = <1>;
 		reg = <0x0 0x2c200000 0 0x200000>;
 	};
 
 	gic-its@2c400000 {
 		compatible = "arm,gic-v3-its";
 		msi-controller;
+		#msi-cells = <1>;
 		reg = <0x0 0x2c400000 0 0x200000>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index a8274eabae2e..b8e41c148a3c 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -497,7 +497,7 @@ cpus {
 	};
 
 	idle-states {
-		entry-method = "arm,psci";
+		entry-method = "psci";
 
 		CPU_RETENTION_0_0: cpu-retention-0-0 {
 			compatible = "arm,idle-state";
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 5788d5cf1252..82d40e2505f6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -16,7 +16,9 @@ properties, each containing a 'gpio-list':
 GPIO properties should be named "[<name>-]gpios", with <name> being the purpose
 of this GPIO for the device. While a non-existent <name> is considered valid
 for compatibility reasons (resolving to the "gpios" property), it is not allowed
-for new bindings.
+for new bindings. Also, GPIO properties named "[<name>-]gpio" are valid and old
+bindings use it, but are only supported for compatibility reasons and should not
+be used for newer bindings since it has been deprecated.
 
 GPIO properties can contain one or more GPIO phandles, but only in exceptional
 cases should they contain more than one. If your device uses several GPIOs with
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
index c5933573e0f6..4a3679d54457 100644
--- a/Documentation/devicetree/bindings/iio/accel/bma180.txt
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -1,10 +1,11 @@
-* Bosch BMA180 triaxial acceleration sensor
+* Bosch BMA180 / BMA250 triaxial acceleration sensor
 
 http://omapworld.com/BMA180_111_1002839.pdf
+http://ae-bst.resource.bosch.com/media/products/dokumente/bma250/bst-bma250-ds002-05.pdf
 
 Required properties:
 
- - compatible : should be "bosch,bma180"
+ - compatible : should be "bosch,bma180" or "bosch,bma250"
 - reg : the I2C address of the sensor
 
 Optional properties:
@@ -13,6 +14,9 @@ Optional properties:
 
 - interrupts : interrupt mapping for GPIO IRQ, it should by configured with
 		flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING
+		For the bma250 the first interrupt listed must be the one
+		connected to the INT1 pin, the second (optional) interrupt
+		listed must be the one connected to the INT2 pin.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index d8ef5bf50f11..7fab84b33531 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -7,7 +7,8 @@ OHCI and EHCI controllers.
 
 Required properties:
 - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
-	      "renesas,pci-r8a7791" for the R8A7791 SoC.
+	      "renesas,pci-r8a7791" for the R8A7791 SoC;
+	      "renesas,pci-r8a7794" for the R8A7794 SoC.
 - reg: A list of physical regions to access the device: the first is
 	the operational registers for the OHCI/EHCI controllers and the
 	second is for the bridge configuration and control registers.
diff --git a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
index 32aa26f1e434..acbcb452a69a 100644
--- a/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pbias-regulator.txt
@@ -2,7 +2,12 @@ PBIAS internal regulator for SD card dual voltage i/o pads on OMAP SoCs.
 
 Required properties:
 - compatible:
-  - "ti,pbias-omap" for OMAP2, OMAP3, OMAP4, OMAP5, DRA7.
+  - should be "ti,pbias-dra7" for DRA7
+  - should be "ti,pbias-omap2" for OMAP2
+  - should be "ti,pbias-omap3" for OMAP3
+  - should be "ti,pbias-omap4" for OMAP4
+  - should be "ti,pbias-omap5" for OMAP5
+  - "ti,pbias-omap" is deprecated
 - reg: pbias register offset from syscon base and size of pbias register.
 - syscon : phandle of the system control module
 - regulator-name : should be
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index dcefc438272f..6160ffbcb3d3 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -15,17 +15,18 @@ Required properties:
 - interrupts: Should contain spi interrupt
 
 - clocks: phandles to input clocks.
-  The first should be <&topckgen CLK_TOP_SPI_SEL>.
-  The second should be one of the following.
+  The first should be one of the following. It's PLL.
   - <&clk26m>: specify parent clock 26MHZ.
   - <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ.
     It's the default one.
   - <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ.
   - <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ.
   - <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ.
+  The second should be <&topckgen CLK_TOP_SPI_SEL>. It's clock mux.
+  The third is <&pericfg CLK_PERI_SPI0>. It's clock gate.
 
-- clock-names: shall be "spi-clk" for the controller clock, and
-  "parent-clk" for the parent clock.
+- clock-names: shall be "parent-clk" for the parent clock, "sel-clk" for the
+  muxes clock, and "spi-clk" for the clock gate.
 
 Optional properties:
 - mediatek,pad-select: specify which pins group(ck/mi/mo/cs) spi
@@ -44,8 +45,11 @@ spi: spi@1100a000 {
 	#size-cells = <0>;
 	reg = <0 0x1100a000 0 0x1000>;
 	interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
-	clocks = <&topckgen CLK_TOP_SPI_SEL>, <&topckgen CLK_TOP_SYSPLL3_D2>;
-	clock-names = "spi-clk", "parent-clk";
+	clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
+		 <&topckgen CLK_TOP_SPI_SEL>,
+		 <&pericfg CLK_PERI_SPI0>;
+	clock-names = "parent-clk", "sel-clk", "spi-clk";
+
 	mediatek,pad-select = <0>;
 	status = "disabled";
 };
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index 8a49362dea6e..41b817f7b670 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -55,19 +55,11 @@ of heat dissipation). For example a fan's cooling states correspond to
 the different fan speeds possible. Cooling states are referred to by
 single unsigned integers, where larger numbers mean greater heat
 dissipation. The precise set of cooling states associated with a device
-(as referred to be the cooling-min-state and cooling-max-state
+(as referred to by the cooling-min-level and cooling-max-level
 properties) should be defined in a particular device's binding.
 For more examples of cooling devices, refer to the example sections below.
 
 Required properties:
-- cooling-min-state:	An integer indicating the smallest
-  Type: unsigned	cooling state accepted. Typically 0.
-  Size: one cell
-
-- cooling-max-state:	An integer indicating the largest
-  Type: unsigned	cooling state accepted.
-  Size: one cell
-
 - #cooling-cells:	Used to provide cooling device specific information
   Type: unsigned	while referring to it. Must be at least 2, in order
   Size: one cell	to specify minimum and maximum cooling state used
@@ -77,6 +69,15 @@ Required properties:
 			See Cooling device maps section below for more details
 			on how consumers refer to cooling devices.
 
+Optional properties:
+- cooling-min-level:	An integer indicating the smallest
+  Type: unsigned	cooling state accepted. Typically 0.
+  Size: one cell
+
+- cooling-max-level:	An integer indicating the largest
+  Type: unsigned	cooling state accepted.
+  Size: one cell
+
 * Trip points
 
 The trip node is a node to describe a point in the temperature domain
@@ -225,8 +226,8 @@ cpus {
 			396000  950000
 			198000  850000
 		>;
-		cooling-min-state = <0>;
-		cooling-max-state = <3>;
+		cooling-min-level = <0>;
+		cooling-max-level = <3>;
 		#cooling-cells = <2>; /* min followed by max */
 	};
 	...
@@ -240,8 +241,8 @@ cpus {
 	 */
 	fan0: fan@0x48 {
 		...
-		cooling-min-state = <0>;
-		cooling-max-state = <9>;
+		cooling-min-level = <0>;
+		cooling-max-level = <9>;
 		#cooling-cells = <2>; /* min followed by max */
 	};
 };
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index d71ef07bca5d..a057b75ba4b5 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -6,6 +6,7 @@ Required properties:
 	"lsi,zevio-usb"
 	"qcom,ci-hdrc"
 	"chipidea,usb2"
+	"xlnx,zynq-usb-2.20a"
 - reg: base address and length of the registers
 - interrupts: interrupt for the USB controller
 
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ac5f0c34ae00..82d2ac97af74 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -203,6 +203,7 @@ sitronix	Sitronix Technology Corporation
 skyworks	Skyworks Solutions, Inc.
 smsc	Standard Microsystems Corporation
 snps	Synopsys, Inc.
+socionext	Socionext Inc.
 solidrun	SolidRun
 solomon	Solomon Systech Limited
 sony	Sony Corporation
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
new file mode 100644
index 000000000000..031ef4a63485
--- /dev/null
+++ b/Documentation/networking/vrf.txt
@@ -0,0 +1,96 @@
+Virtual Routing and Forwarding (VRF)
+====================================
+The VRF device combined with ip rules provides the ability to create virtual
+routing and forwarding domains (aka VRFs, VRF-lite to be specific) in the
+Linux network stack. One use case is the multi-tenancy problem where each
+tenant has their own unique routing tables and in the very least need
+different default gateways.
+
+Processes can be "VRF aware" by binding a socket to the VRF device. Packets
+through the socket then use the routing table associated with the VRF
+device. An important feature of the VRF device implementation is that it
+impacts only Layer 3 and above so L2 tools (e.g., LLDP) are not affected
+(ie., they do not need to be run in each VRF). The design also allows
+the use of higher priority ip rules (Policy Based Routing, PBR) to take
+precedence over the VRF device rules directing specific traffic as desired.
+
+In addition, VRF devices allow VRFs to be nested within namespaces. For
+example network namespaces provide separation of network interfaces at L1
+(Layer 1 separation), VLANs on the interfaces within a namespace provide
+L2 separation and then VRF devices provide L3 separation.
+
+Design
+------
+A VRF device is created with an associated route table. Network interfaces
+are then enslaved to a VRF device:
+
+   +-----------------------------+
+   |           vrf-blue          | ===> route table 10
+   +-----------------------------+
+      |            |                |
+   +------+    +------+     +-------------+
+   | eth1 |    | eth2 | ... |    bond1    |
+   +------+    +------+     +-------------+
+                                |       |
+                            +------+ +------+
+                            | eth8 | | eth9 |
+                            +------+ +------+
+
+Packets received on an enslaved device and are switched to the VRF device
+using an rx_handler which gives the impression that packets flow through
+the VRF device. Similarly on egress routing rules are used to send packets
+to the VRF device driver before getting sent out the actual interface. This
+allows tcpdump on a VRF device to capture all packets into and out of the
+VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+using the VRF device to specify rules that apply to the VRF domain as a whole.
+
+[1] Packets in the forwarded state do not flow through the device, so those
+    packets are not seen by tcpdump. Will revisit this limitation in a
+    future release.
+
+[2] Iptables on ingress is limited to NF_INET_PRE_ROUTING only with skb->dev
+    set to real ingress device and egress is limited to NF_INET_POST_ROUTING.
+    Will revisit this limitation in a future release.
+
+
+Setup
+-----
+1. VRF device is created with an association to a FIB table.
+   e.g, ip link add vrf-blue type vrf table 10
+        ip link set dev vrf-blue up
+
+2. Rules are added that send lookups to the associated FIB table when the
+   iif or oif is the VRF device. e.g.,
+       ip ru add oif vrf-blue table 10
+       ip ru add iif vrf-blue table 10
+
+   Set the default route for the table (and hence default route for the VRF).
+   e.g, ip route add table 10 prohibit default
+
+3. Enslave L3 interfaces to a VRF device.
+   e.g, ip link set dev eth1 master vrf-blue
+
+   Local and connected routes for enslaved devices are automatically moved to
+   the table associated with VRF device. Any additional routes depending on
+   the enslaved device will need to be reinserted following the enslavement.
+
+4. Additional VRF routes are added to associated table.
+   e.g., ip route add table 10 ...
+
+
+Applications
+------------
+Applications that are to work within a VRF need to bind their socket to the
+VRF device:
+
+    setsockopt(sd, SOL_SOCKET, SO_BINDTODEVICE, dev, strlen(dev)+1);
+
+or to specify the output device using cmsg and IP_PKTINFO.
+
+
+Limitations
+-----------
+VRF device currently only works for IPv4. Support for IPv6 is under development.
+
+Index of original ingress interface is not available via cmsg. Will address
+soon.
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 6294b5186ae5..809ab6efcc74 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -54,13 +54,15 @@ default_qdisc
 --------------
 
 The default queuing discipline to use for network devices. This allows
-overriding the default queue discipline of pfifo_fast with an
-alternative. Since the default queuing discipline is created with the
-no additional parameters so is best suited to queuing disciplines that
-work well without configuration like stochastic fair queue (sfq),
-CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines
-like Hierarchical Token Bucket or Deficit Round Robin which require setting
-up classes and bandwidths.
+overriding the default of pfifo_fast with an alternative. Since the default
+queuing discipline is created without additional parameters so is best suited
+to queuing disciplines that work well without configuration like stochastic
+fair queue (sfq), CoDel (codel) or fair queue CoDel (fq_codel). Don't use
+queuing disciplines like Hierarchical Token Bucket or Deficit Round Robin
+which require setting up classes and bandwidths. Note that physical multiqueue
+interfaces still use mq as root qdisc, which in turn uses this default for its
+leaves. Virtual devices (like e.g. lo or veth) ignore this setting and instead
+default to noqueue.
 Default: pfifo_fast
 
 busy_read
diff --git a/Documentation/thermal/power_allocator.txt b/Documentation/thermal/power_allocator.txt
index c3797b529991..a1ce2235f121 100644
--- a/Documentation/thermal/power_allocator.txt
+++ b/Documentation/thermal/power_allocator.txt
@@ -4,7 +4,7 @@ Power allocator governor tunables
 Trip points
 -----------
 
-The governor requires the following two passive trip points:
+The governor works optimally with the following two passive trip points:
 
 1. "switch on" trip point: temperature above which the governor
    control loop starts operating. This is the first passive trip
diff --git a/MAINTAINERS b/MAINTAINERS
index 274f85405584..9f6685f6c5a9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -615,9 +615,8 @@ F:	Documentation/hwmon/fam15h_power
 F:	drivers/hwmon/fam15h_power.c
 
 AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
-M:	Thomas Dahlmann <dahlmann.thomas@arcor.de>
 L:	linux-geode@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
+S:	Orphan
 F:	drivers/usb/gadget/udc/amd5536udc.*
 
 AMD GEODE PROCESSOR/CHIPSET SUPPORT
@@ -808,6 +807,13 @@ S:	Maintained
 F:	drivers/video/fbdev/arcfb.c
 F:	drivers/video/fbdev/core/fb_defio.c
 
+ARCNET NETWORK LAYER
+M:	Michael Grzeschik <m.grzeschik@pengutronix.de>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/arcnet/
+F:	include/uapi/linux/if_arcnet.h
+
 ARM MFM AND FLOPPY DRIVERS
 M:	Ian Molton <spyro@f2s.com>
 S:	Maintained
@@ -3394,7 +3400,6 @@ F:	drivers/staging/dgnc/
 
 DIGI EPCA PCI PRODUCTS
 M:	Lidza Louina <lidza.louina@gmail.com>
-M:	Mark Hounschell <markh@compro.net>
 M:	Daeseok Youn <daeseok.youn@gmail.com>
 L:	driverdev-devel@linuxdriverproject.org
 S:	Maintained
@@ -8500,7 +8505,6 @@ F:	Documentation/networking/LICENSE.qla3xxx
 F:	drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:	Shahed Shaikh <shahed.shaikh@qlogic.com>
 M:	Dept-GELinuxNICDev@qlogic.com
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -9904,8 +9908,8 @@ F:	drivers/staging/media/lirc/
 STAGING - LUSTRE PARALLEL FILESYSTEM
 M:	Oleg Drokin <oleg.drokin@intel.com>
 M:	Andreas Dilger <andreas.dilger@intel.com>
-L:	HPDD-discuss@lists.01.org (moderated for non-subscribers)
-W:	http://lustre.opensfs.org/
+L:	lustre-devel@lists.lustre.org (moderated for non-subscribers)
+W:	http://wiki.lustre.org/
 S:	Maintained
 F:	drivers/staging/lustre
 
@@ -10338,6 +10342,16 @@ F:	include/uapi/linux/thermal.h
 F:	include/linux/cpu_cooling.h
 F:	Documentation/devicetree/bindings/thermal/
 
+THERMAL/CPU_COOLING
+M:	Amit Daniel Kachhap <amit.kachhap@gmail.com>
+M:	Viresh Kumar <viresh.kumar@linaro.org>
+M:	Javi Merino <javi.merino@arm.com>
+L:	linux-pm@vger.kernel.org
+S:	Supported
+F:	Documentation/thermal/cpu-cooling-api.txt
+F:	drivers/thermal/cpu_cooling.c
+F:	include/linux/cpu_cooling.h
+
 THINGM BLINK(1) USB RGB LED DRIVER
 M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 S:	Maintained
@@ -11187,7 +11201,7 @@ F:	drivers/vlynq/vlynq.c
 F:	include/linux/vlynq.h
 
 VME SUBSYSTEM
-M:	Martyn Welch <martyn.welch@ge.com>
+M:	Martyn Welch <martyn@welchs.me.uk>
 M:	Manohar Vanga <manohar.vanga@gmail.com>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	devel@driverdev.osuosl.org
@@ -11239,7 +11253,6 @@ VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:	Liam Girdwood <lgirdwood@gmail.com>
 M:	Mark Brown <broonie@kernel.org>
 L:	linux-kernel@vger.kernel.org
-W:	http://opensource.wolfsonmicro.com/node/15
 W:	http://www.slimlogic.co.uk/?p=48
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
 S:	Supported
@@ -11253,6 +11266,7 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/vrf.c
 F:	include/net/vrf.h
+F:	Documentation/networking/vrf.txt
 
 VT1211 HARDWARE MONITOR DRIVER
 M:	Juerg Haefliger <juergh@gmail.com>
@@ -11368,17 +11382,15 @@ WM97XX TOUCHSCREEN DRIVERS
 M:	Mark Brown <broonie@kernel.org>
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 L:	linux-input@vger.kernel.org
-T:	git git://opensource.wolfsonmicro.com/linux-2.6-touch
-W:	http://opensource.wolfsonmicro.com/node/7
+W:	https://github.com/CirrusLogic/linux-drivers/wiki
 S:	Supported
 F:	drivers/input/touchscreen/*wm97*
 F:	include/linux/wm97xx.h
 
 WOLFSON MICROELECTRONICS DRIVERS
 L:	patches@opensource.wolfsonmicro.com
-T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
-T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
-W:	http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
+T:	git https://github.com/CirrusLogic/linux-drivers.git
+W:	https://github.com/CirrusLogic/linux-drivers/wiki
 S:	Supported
 F:	Documentation/hwmon/wm83??
 F:	arch/arm/mach-s3c64xx/mach-crag6410*
diff --git a/Makefile b/Makefile
index 84f4b31e3c6e..1d341eba143d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index cded02c890aa..5f387ee5b5c5 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -242,7 +242,12 @@ pci_restore_srm_config(void)
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dev;
+	struct pci_dev *dev = bus->self;
+
+	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+		pci_read_bridge_bases(bus);
+	}
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		pdev_save_srm_config(dev);
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 4d28fc3aac69..5dd084f3c81c 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -252,10 +252,10 @@
 		};
 
 		vdd1_reg: regulator@2 {
-			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+			/* VDD_MPU voltage limits 0.95V - 1.325V with +/-4% tolerance */
 			regulator-name = "vdd_mpu";
 			regulator-min-microvolt = <912500>;
-			regulator-max-microvolt = <1312500>;
+			regulator-max-microvolt = <1378000>;
 			regulator-boot-on;
 			regulator-always-on;
 		};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 3a05b94f59ed..568adf5efde0 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -98,13 +98,6 @@
 		pinctrl-0 = <&extcon_usb1_pins>;
 	};
 
-	extcon_usb2: extcon_usb2 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&gpio7 24 GPIO_ACTIVE_HIGH>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&extcon_usb2_pins>;
-	};
-
 	hdmi0: connector {
 		compatible = "hdmi-connector";
 		label = "hdmi";
@@ -326,12 +319,6 @@
 		>;
 	};
 
-	extcon_usb2_pins: extcon_usb2_pins {
-		pinctrl-single,pins = <
-			0x3e8 (PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_ctsn.gpio7_24 */
-		>;
-	};
-
 	tpd12s015_pins: pinmux_tpd12s015_pins {
 		pinctrl-single,pins = <
 			0x3b0 (PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */
@@ -432,7 +419,7 @@
 			};
 
 			ldo3_reg: ldo3 {
-				/* VDDA_1V8_PHY */
+				/* VDDA_1V8_PHYA */
 				regulator-name = "ldo3";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -440,6 +427,15 @@
 				regulator-boot-on;
 			};
 
+			ldo4_reg: ldo4 {
+				/* VDDA_1V8_PHYB */
+				regulator-name = "ldo4";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
 			ldo9_reg: ldo9 {
 				/* VDD_RTC */
 				regulator-name = "ldo9";
@@ -495,6 +491,14 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
+
+		extcon_usb2: tps659038_usb {
+			compatible = "ti,palmas-usb-vid";
+			ti,enable-vbus-detection;
+			ti,enable-id-detection;
+			id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>;
+		};
+
 	};
 
 	tmp102: tmp102@48 {
@@ -517,7 +521,8 @@
 	mcp_rtc: rtc@6f {
 		compatible = "microchip,mcp7941x";
 		reg = <0x6f>;
-		interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */
+		interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+				      <&dra7_pmx_core 0x424>;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&mcp79410_pins_default>;
@@ -579,7 +584,6 @@
 	pinctrl-0 = <&mmc1_pins_default>;
 
 	vmmc-supply = <&ldo1_reg>;
-	vmmc_aux-supply = <&vdd_3v3>;
 	bus-width = <4>;
 	cd-gpios = <&gpio6 27 0>; /* gpio 219 */
 };
@@ -623,6 +627,14 @@
 };
 
 &usb2 {
+	/*
+	 * Stand alone usage is peripheral only.
+	 * However, with some resistor modifications
+	 * this port can be used via expansion connectors
+	 * as "host" or "dual-role". If so, provide
+	 * the necessary dr_mode override in the expansion
+	 * board's DT.
+	 */
 	dr_mode = "peripheral";
 };
 
@@ -681,7 +693,7 @@
 
 &hdmi {
 	status = "ok";
-	vdda-supply = <&ldo3_reg>;
+	vdda-supply = <&ldo4_reg>;
 
 	pinctrl-names = "default";
 	pinctrl-0 = <&hdmi_pins>;
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index 92bacd3c8fab..109fd4711647 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -19,10 +19,10 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 8c4bbc7573df..79838dd8dee7 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -8,7 +8,7 @@
 #include "dm814x.dtsi"
 
 / {
-	model = "DM8148 EVM";
+	model = "HP t410 Smart Zero Client";
 	compatible = "hp,t410", "ti,dm8148";
 
 	memory {
@@ -19,10 +19,10 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 972c9c9e885b..7988b42e5764 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -181,9 +181,9 @@
 			ti,hwmods = "timer3";
 		};
 
-		control: control@160000 {
+		control: control@140000 {
 			compatible = "ti,dm814-scm", "simple-bus";
-			reg = <0x160000 0x16d000>;
+			reg = <0x140000 0x16d000>;
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0 0x160000 0x16d000>;
@@ -321,9 +321,9 @@
 				mac-address = [ 00 00 00 00 00 00 ];
 			};
 
-			phy_sel: cpsw-phy-sel@0x48160650 {
+			phy_sel: cpsw-phy-sel@48140650 {
 				compatible = "ti,am3352-cpsw-phy-sel";
-				reg= <0x48160650 0x4>;
+				reg= <0x48140650 0x4>;
 				reg-names = "gmii-sel";
 			};
 		};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5d65db9ebc2b..e289c706d27d 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -120,9 +120,10 @@
 				reg = <0x0 0x1400>;
 				#address-cells = <1>;
 				#size-cells = <1>;
+				ranges = <0 0x0 0x1400>;
 
 				pbias_regulator: pbias_regulator {
-					compatible = "ti,pbias-omap";
+					compatible = "ti,pbias-dra7", "ti,pbias-omap";
 					reg = <0xe00 0x4>;
 					syscon = <&scm_conf>;
 					pbias_mmc_reg: pbias_mmc_omap5 {
@@ -1417,7 +1418,7 @@
 			ti,irqs-safe-map = <0>;
 		};
 
-		mac: ethernet@4a100000 {
+		mac: ethernet@48484000 {
 			compatible = "ti,dra7-cpsw","ti,cpsw";
 			ti,hwmods = "gmac";
 			clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 2390f387c271..798dda072b2a 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -56,6 +56,7 @@
 			reg = <0x270 0x240>;
 			#address-cells = <1>;
 			#size-cells = <1>;
+			ranges = <0 0x270 0x240>;
 
 			scm_clocks: clocks {
 				#address-cells = <1>;
@@ -63,7 +64,7 @@
 			};
 
 			pbias_regulator: pbias_regulator {
-				compatible = "ti,pbias-omap";
+				compatible = "ti,pbias-omap2", "ti,pbias-omap";
 				reg = <0x230 0x4>;
 				syscon = <&scm_conf>;
 				pbias_mmc_reg: pbias_mmc_omap2430 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a5474113cd50..67659a0ed13e 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@
 
 	tfp410_pins: pinmux_tfp410_pins {
 		pinctrl-single,pins = <
-			0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+			0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index d5e5cd449b16..2230e1c03320 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -78,12 +78,6 @@
78 >; 78 >;
79 }; 79 };
80 80
81 smsc9221_pins: pinmux_smsc9221_pins {
82 pinctrl-single,pins = <
83 0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */
84 >;
85 };
86
87 i2c1_pins: pinmux_i2c1_pins { 81 i2c1_pins: pinmux_i2c1_pins {
88 pinctrl-single,pins = < 82 pinctrl-single,pins = <
89 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ 83 0x18a (PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index e458c2185e3c..5ad688c57a00 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -156,6 +156,12 @@
156 OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ 156 OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
157 >; 157 >;
158 }; 158 };
159
160 smsc9221_pins: pinmux_smsc9221_pins {
161 pinctrl-single,pins = <
162 OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */
163 >;
164 };
159}; 165};
160 166
161&omap3_pmx_core2 { 167&omap3_pmx_core2 {
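
The two pinmux hunks above move the SMSC9221 pad from the shared omap3-igep.dtsi (raw pinctrl-single offset) into the IGEPv2-only omap3-igep0020-common.dtsi, switching to the physical-address macro notation. A minimal sketch of how the two notations line up, assuming the dt-bindings macros of this kernel generation (include/dt-bindings/pinctrl/omap.h):

        #define OMAP_IOPAD_OFFSET(pa, offset)   (((pa) & 0xffff) - (offset))
        #define OMAP3_CORE1_IOPAD(pa, val)      OMAP_IOPAD_OFFSET((pa), 0x2030) (val)

        /* (0x21d2 & 0xffff) - 0x2030 = 0x1a2: the same mcspi1_cs2.gpio_176
         * pad as the raw "0x1a2" entry removed from omap3-igep.dtsi, so the
         * pin moves files without changing which pad is muxed. */
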
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 69a40cfc1f29..8a2b25332b8c 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -113,10 +113,22 @@
113 }; 113 };
114 114
115 scm_conf: scm_conf@270 { 115 scm_conf: scm_conf@270 {
116 compatible = "syscon"; 116 compatible = "syscon", "simple-bus";
117 reg = <0x270 0x330>; 117 reg = <0x270 0x330>;
118 #address-cells = <1>; 118 #address-cells = <1>;
119 #size-cells = <1>; 119 #size-cells = <1>;
120 ranges = <0 0x270 0x330>;
121
122 pbias_regulator: pbias_regulator {
123 compatible = "ti,pbias-omap3", "ti,pbias-omap";
124 reg = <0x2b0 0x4>;
125 syscon = <&scm_conf>;
126 pbias_mmc_reg: pbias_mmc_omap2430 {
127 regulator-name = "pbias_mmc_omap2430";
128 regulator-min-microvolt = <1800000>;
129 regulator-max-microvolt = <3000000>;
130 };
131 };
120 132
121 scm_clocks: clocks { 133 scm_clocks: clocks {
122 #address-cells = <1>; 134 #address-cells = <1>;
@@ -202,17 +214,6 @@
202 dma-requests = <96>; 214 dma-requests = <96>;
203 }; 215 };
204 216
205 pbias_regulator: pbias_regulator {
206 compatible = "ti,pbias-omap";
207 reg = <0x2b0 0x4>;
208 syscon = <&scm_conf>;
209 pbias_mmc_reg: pbias_mmc_omap2430 {
210 regulator-name = "pbias_mmc_omap2430";
211 regulator-min-microvolt = <1800000>;
212 regulator-max-microvolt = <3000000>;
213 };
214 };
215
216 gpio1: gpio@48310000 { 217 gpio1: gpio@48310000 {
217 compatible = "ti,omap3-gpio"; 218 compatible = "ti,omap3-gpio";
218 reg = <0x48310000 0x200>; 219 reg = <0x48310000 0x200>;
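
With scm_conf above now claiming "simple-bus" and carrying an explicit ranges property, its children (such as pbias_regulator) are created as child devices and their reg values are translated into the parent SCM region. A hypothetical helper showing the 1:1 translation implied by ranges = <0 0x270 0x330> (the name is illustrative, not kernel API):

        /* child offset -> parent offset under a 1:1 ranges entry */
        static unsigned long scm_conf_translate(unsigned long child_addr)
        {
                const unsigned long child_base  = 0x0;   /* ranges cell 0 */
                const unsigned long parent_base = 0x270; /* ranges cell 1 */

                return parent_base + (child_addr - child_base);
        }

        /* pbias_regulator reg = <0x2b0 0x4> maps to 0x270 + 0x2b0 = 0x520 */
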
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index abc4473e6f8a..5a206c100ce2 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -196,9 +196,10 @@
196 reg = <0x5a0 0x170>; 196 reg = <0x5a0 0x170>;
197 #address-cells = <1>; 197 #address-cells = <1>;
198 #size-cells = <1>; 198 #size-cells = <1>;
199 ranges = <0 0x5a0 0x170>;
199 200
200 pbias_regulator: pbias_regulator { 201 pbias_regulator: pbias_regulator {
201 compatible = "ti,pbias-omap"; 202 compatible = "ti,pbias-omap4", "ti,pbias-omap";
202 reg = <0x60 0x4>; 203 reg = <0x60 0x4>;
203 syscon = <&omap4_padconf_global>; 204 syscon = <&omap4_padconf_global>;
204 pbias_mmc_reg: pbias_mmc_omap4 { 205 pbias_mmc_reg: pbias_mmc_omap4 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 3cc8f357d5b8..3cb030f9d2c4 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@
174 174
175 i2c5_pins: pinmux_i2c5_pins { 175 i2c5_pins: pinmux_i2c5_pins {
176 pinctrl-single,pins = < 176 pinctrl-single,pins = <
177 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */ 177 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
178 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */ 178 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
179 >; 179 >;
180 }; 180 };
181 181
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 4205a8ac9ddb..4c04389dab32 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -185,9 +185,10 @@
185 reg = <0x5a0 0xec>; 185 reg = <0x5a0 0xec>;
186 #address-cells = <1>; 186 #address-cells = <1>;
187 #size-cells = <1>; 187 #size-cells = <1>;
188 ranges = <0 0x5a0 0xec>;
188 189
189 pbias_regulator: pbias_regulator { 190 pbias_regulator: pbias_regulator {
190 compatible = "ti,pbias-omap"; 191 compatible = "ti,pbias-omap5", "ti,pbias-omap";
191 reg = <0x60 0x4>; 192 reg = <0x60 0x4>;
192 syscon = <&omap5_padconf_global>; 193 syscon = <&omap5_padconf_global>;
193 pbias_mmc_reg: pbias_mmc_omap5 { 194 pbias_mmc_reg: pbias_mmc_omap5 {
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 2fa7a0dc83f7..275c78ccc0f3 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -158,6 +158,7 @@
158}; 158};
159 159
160&hdmi { 160&hdmi {
161 ddc-i2c-bus = <&i2c5>;
161 status = "okay"; 162 status = "okay";
162}; 163};
163 164
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index 3efa3b2ebe90..6b914e4bb099 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -103,48 +103,46 @@
103 <&clk_s_d0_quadfs 0>, 103 <&clk_s_d0_quadfs 0>,
104 <&clk_s_d2_quadfs 0>, 104 <&clk_s_d2_quadfs 0>,
105 <&clk_s_d2_quadfs 0>; 105 <&clk_s_d2_quadfs 0>;
106 ranges; 106 };
107 107
108 sti-hdmi@8d04000 { 108 sti-hdmi@8d04000 {
109 compatible = "st,stih407-hdmi"; 109 compatible = "st,stih407-hdmi";
110 reg = <0x8d04000 0x1000>; 110 reg = <0x8d04000 0x1000>;
111 reg-names = "hdmi-reg"; 111 reg-names = "hdmi-reg";
112 interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; 112 interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
113 interrupt-names = "irq"; 113 interrupt-names = "irq";
114 clock-names = "pix", 114 clock-names = "pix",
115 "tmds", 115 "tmds",
116 "phy", 116 "phy",
117 "audio", 117 "audio",
118 "main_parent", 118 "main_parent",
119 "aux_parent"; 119 "aux_parent";
120 120
121 clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, 121 clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
122 <&clk_s_d2_flexgen CLK_TMDS_HDMI>, 122 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
123 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, 123 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
124 <&clk_s_d0_flexgen CLK_PCM_0>, 124 <&clk_s_d0_flexgen CLK_PCM_0>,
125 <&clk_s_d2_quadfs 0>, 125 <&clk_s_d2_quadfs 0>,
126 <&clk_s_d2_quadfs 1>; 126 <&clk_s_d2_quadfs 1>;
127 127
128 hdmi,hpd-gpio = <&pio5 3>; 128 hdmi,hpd-gpio = <&pio5 3>;
129 reset-names = "hdmi"; 129 reset-names = "hdmi";
130 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; 130 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
131 ddc = <&hdmiddc>; 131 ddc = <&hdmiddc>;
132 132 };
133 }; 133
134 134 sti-hda@8d02000 {
135 sti-hda@8d02000 { 135 compatible = "st,stih407-hda";
136 compatible = "st,stih407-hda"; 136 reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
137 reg = <0x8d02000 0x400>, <0x92b0120 0x4>; 137 reg-names = "hda-reg", "video-dacs-ctrl";
138 reg-names = "hda-reg", "video-dacs-ctrl"; 138 clock-names = "pix",
139 clock-names = "pix", 139 "hddac",
140 "hddac", 140 "main_parent",
141 "main_parent", 141 "aux_parent";
142 "aux_parent"; 142 clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
143 clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, 143 <&clk_s_d2_flexgen CLK_HDDAC>,
144 <&clk_s_d2_flexgen CLK_HDDAC>, 144 <&clk_s_d2_quadfs 0>,
145 <&clk_s_d2_quadfs 0>, 145 <&clk_s_d2_quadfs 1>;
146 <&clk_s_d2_quadfs 1>;
147 };
148 }; 146 };
149 }; 147 };
150 }; 148 };
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 6f40bc99c22f..8c6e61a27234 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -178,48 +178,46 @@
178 <&clk_s_d0_quadfs 0>, 178 <&clk_s_d0_quadfs 0>,
179 <&clk_s_d2_quadfs 0>, 179 <&clk_s_d2_quadfs 0>,
180 <&clk_s_d2_quadfs 0>; 180 <&clk_s_d2_quadfs 0>;
181 ranges; 181 };
182 182
183 sti-hdmi@8d04000 { 183 sti-hdmi@8d04000 {
184 compatible = "st,stih407-hdmi"; 184 compatible = "st,stih407-hdmi";
185 reg = <0x8d04000 0x1000>; 185 reg = <0x8d04000 0x1000>;
186 reg-names = "hdmi-reg"; 186 reg-names = "hdmi-reg";
187 interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>; 187 interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
188 interrupt-names = "irq"; 188 interrupt-names = "irq";
189 clock-names = "pix", 189 clock-names = "pix",
190 "tmds", 190 "tmds",
191 "phy", 191 "phy",
192 "audio", 192 "audio",
193 "main_parent", 193 "main_parent",
194 "aux_parent"; 194 "aux_parent";
195 195
196 clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, 196 clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
197 <&clk_s_d2_flexgen CLK_TMDS_HDMI>, 197 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
198 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, 198 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
199 <&clk_s_d0_flexgen CLK_PCM_0>, 199 <&clk_s_d0_flexgen CLK_PCM_0>,
200 <&clk_s_d2_quadfs 0>, 200 <&clk_s_d2_quadfs 0>,
201 <&clk_s_d2_quadfs 1>; 201 <&clk_s_d2_quadfs 1>;
202 202
203 hdmi,hpd-gpio = <&pio5 3>; 203 hdmi,hpd-gpio = <&pio5 3>;
204 reset-names = "hdmi"; 204 reset-names = "hdmi";
205 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; 205 resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
206 ddc = <&hdmiddc>; 206 ddc = <&hdmiddc>;
207 207 };
208 }; 208
209 209 sti-hda@8d02000 {
210 sti-hda@8d02000 { 210 compatible = "st,stih407-hda";
211 compatible = "st,stih407-hda"; 211 reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
212 reg = <0x8d02000 0x400>, <0x92b0120 0x4>; 212 reg-names = "hda-reg", "video-dacs-ctrl";
213 reg-names = "hda-reg", "video-dacs-ctrl"; 213 clock-names = "pix",
214 clock-names = "pix", 214 "hddac",
215 "hddac", 215 "main_parent",
216 "main_parent", 216 "aux_parent";
217 "aux_parent"; 217 clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
218 clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, 218 <&clk_s_d2_flexgen CLK_HDDAC>,
219 <&clk_s_d2_flexgen CLK_HDDAC>, 219 <&clk_s_d2_quadfs 0>,
220 <&clk_s_d2_quadfs 0>, 220 <&clk_s_d2_quadfs 1>;
221 <&clk_s_d2_quadfs 1>;
222 };
223 }; 221 };
224 }; 222 };
225 223
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 50c84e1876fc..3f15a5cae167 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -240,7 +240,8 @@ CONFIG_SSI_PROTOCOL=m
240CONFIG_PINCTRL_SINGLE=y 240CONFIG_PINCTRL_SINGLE=y
241CONFIG_DEBUG_GPIO=y 241CONFIG_DEBUG_GPIO=y
242CONFIG_GPIO_SYSFS=y 242CONFIG_GPIO_SYSFS=y
243CONFIG_GPIO_PCF857X=m 243CONFIG_GPIO_PCA953X=m
244CONFIG_GPIO_PCF857X=y
244CONFIG_GPIO_TWL4030=y 245CONFIG_GPIO_TWL4030=y
245CONFIG_GPIO_PALMAS=y 246CONFIG_GPIO_PALMAS=y
246CONFIG_W1=m 247CONFIG_W1=m
@@ -350,6 +351,8 @@ CONFIG_USB_MUSB_HDRC=m
350CONFIG_USB_MUSB_OMAP2PLUS=m 351CONFIG_USB_MUSB_OMAP2PLUS=m
351CONFIG_USB_MUSB_AM35X=m 352CONFIG_USB_MUSB_AM35X=m
352CONFIG_USB_MUSB_DSPS=m 353CONFIG_USB_MUSB_DSPS=m
354CONFIG_USB_INVENTRA_DMA=y
355CONFIG_USB_TI_CPPI41_DMA=y
353CONFIG_USB_DWC3=m 356CONFIG_USB_DWC3=m
354CONFIG_USB_TEST=m 357CONFIG_USB_TEST=m
355CONFIG_AM335X_PHY_USB=y 358CONFIG_AM335X_PHY_USB=y
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3df1e975f72a..c4072d9f32c7 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
33#define KVM_PRIVATE_MEM_SLOTS 4 33#define KVM_PRIVATE_MEM_SLOTS 4
34#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 34#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
35#define KVM_HAVE_ONE_REG 35#define KVM_HAVE_ONE_REG
36#define KVM_HALT_POLL_NS_DEFAULT 500000
36 37
37#define KVM_VCPU_MAX_FEATURES 2 38#define KVM_VCPU_MAX_FEATURES 2
38 39
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 32640c431a08..7cba573c2cc9 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
19 * This may need to be greater than __NR_last_syscall+1 in order to 19 * This may need to be greater than __NR_last_syscall+1 in order to
20 * account for the padding in the syscall table 20 * account for the padding in the syscall table
21 */ 21 */
22#define __NR_syscalls (388) 22#define __NR_syscalls (392)
23 23
24/* 24/*
25 * *NOTE*: This is a ghost syscall private to the kernel. Only the 25 * *NOTE*: This is a ghost syscall private to the kernel. Only the
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 0c3f5a0dafd3..7a2a32a1d5a8 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -414,6 +414,8 @@
414#define __NR_memfd_create (__NR_SYSCALL_BASE+385) 414#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
415#define __NR_bpf (__NR_SYSCALL_BASE+386) 415#define __NR_bpf (__NR_SYSCALL_BASE+386)
416#define __NR_execveat (__NR_SYSCALL_BASE+387) 416#define __NR_execveat (__NR_SYSCALL_BASE+387)
417#define __NR_userfaultfd (__NR_SYSCALL_BASE+388)
418#define __NR_membarrier (__NR_SYSCALL_BASE+389)
417 419
418/* 420/*
419 * The following SWIs are ARM private. 421 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 05745eb838c5..fde6c88d560c 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -397,6 +397,8 @@
397/* 385 */ CALL(sys_memfd_create) 397/* 385 */ CALL(sys_memfd_create)
398 CALL(sys_bpf) 398 CALL(sys_bpf)
399 CALL(sys_execveat) 399 CALL(sys_execveat)
400 CALL(sys_userfaultfd)
401 CALL(sys_membarrier)
400#ifndef syscalls_counted 402#ifndef syscalls_counted
401.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 403.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
402#define syscalls_counted 404#define syscalls_counted
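
The unistd.h and calls.S hunks above wire userfaultfd(2) and membarrier(2) into the ARM syscall table (with __NR_syscalls growing from 388 to 392 to keep the table padded to a multiple of four). A small userspace probe for the new membarrier entry point; this assumes headers new enough to provide __NR_membarrier and <linux/membarrier.h>:

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/membarrier.h>

        int main(void)
        {
                /* CMD_QUERY returns a bitmask of supported commands */
                long mask = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

                if (mask < 0) {
                        perror("membarrier");
                        return 1;
                }
                printf("supported membarrier commands: 0x%lx\n", mask);
                return 0;
        }
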
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 07d2e100caab..b3a0dff67e3f 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -44,10 +44,11 @@ config SOC_OMAP5
44 select ARM_CPU_SUSPEND if PM 44 select ARM_CPU_SUSPEND if PM
45 select ARM_GIC 45 select ARM_GIC
46 select HAVE_ARM_SCU if SMP 46 select HAVE_ARM_SCU if SMP
47 select HAVE_ARM_TWD if SMP
48 select HAVE_ARM_ARCH_TIMER 47 select HAVE_ARM_ARCH_TIMER
49 select ARM_ERRATA_798181 if SMP 48 select ARM_ERRATA_798181 if SMP
49 select OMAP_INTERCONNECT
50 select OMAP_INTERCONNECT_BARRIER 50 select OMAP_INTERCONNECT_BARRIER
51 select PM_OPP if PM
51 52
52config SOC_AM33XX 53config SOC_AM33XX
53 bool "TI AM33XX" 54 bool "TI AM33XX"
@@ -70,10 +71,13 @@ config SOC_DRA7XX
70 select ARCH_OMAP2PLUS 71 select ARCH_OMAP2PLUS
71 select ARM_CPU_SUSPEND if PM 72 select ARM_CPU_SUSPEND if PM
72 select ARM_GIC 73 select ARM_GIC
74 select HAVE_ARM_SCU if SMP
73 select HAVE_ARM_ARCH_TIMER 75 select HAVE_ARM_ARCH_TIMER
74 select IRQ_CROSSBAR 76 select IRQ_CROSSBAR
75 select ARM_ERRATA_798181 if SMP 77 select ARM_ERRATA_798181 if SMP
78 select OMAP_INTERCONNECT
76 select OMAP_INTERCONNECT_BARRIER 79 select OMAP_INTERCONNECT_BARRIER
80 select PM_OPP if PM
77 81
78config ARCH_OMAP2PLUS 82config ARCH_OMAP2PLUS
79 bool 83 bool
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 24c9afc9e8a7..6133eaac685d 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -20,13 +20,6 @@
20 20
21#include "common.h" 21#include "common.h"
22 22
23#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
24#define intc_of_init NULL
25#endif
26#ifndef CONFIG_ARCH_OMAP4
27#define gic_of_init NULL
28#endif
29
30static const struct of_device_id omap_dt_match_table[] __initconst = { 23static const struct of_device_id omap_dt_match_table[] __initconst = {
31 { .compatible = "simple-bus", }, 24 { .compatible = "simple-bus", },
32 { .compatible = "ti,omap-infra", }, 25 { .compatible = "ti,omap-infra", },
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index e3f713ffb06b..54a5ba54d2ff 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -653,8 +653,12 @@ void __init dra7xxx_check_revision(void)
653 omap_revision = DRA752_REV_ES1_0; 653 omap_revision = DRA752_REV_ES1_0;
654 break; 654 break;
655 case 1: 655 case 1:
656 default:
657 omap_revision = DRA752_REV_ES1_1; 656 omap_revision = DRA752_REV_ES1_1;
657 break;
658 case 2:
659 default:
660 omap_revision = DRA752_REV_ES2_0;
661 break;
658 } 662 }
659 break; 663 break;
660 664
@@ -674,7 +678,7 @@ void __init dra7xxx_check_revision(void)
674 /* Unknown default to latest silicon rev as default*/ 678 /* Unknown default to latest silicon rev as default*/
675 pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n", 679 pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n",
676 __func__, idcode, hawkeye, rev); 680 __func__, idcode, hawkeye, rev);
677 omap_revision = DRA752_REV_ES1_1; 681 omap_revision = DRA752_REV_ES2_0;
678 } 682 }
679 683
680 sprintf(soc_name, "DRA%03x", omap_rev() >> 16); 684 sprintf(soc_name, "DRA%03x", omap_rev() >> 16);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 980c9372e6fd..3eaeaca5da05 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -676,6 +676,7 @@ void __init am43xx_init_early(void)
676void __init am43xx_init_late(void) 676void __init am43xx_init_late(void)
677{ 677{
678 omap_common_late_init(); 678 omap_common_late_init();
679 omap2_clk_enable_autoidle_all();
679} 680}
680#endif 681#endif
681 682
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 4cb8fd9f741f..72ebc4c16bae 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -901,7 +901,8 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
901 if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE) 901 if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
902 return 0; 902 return 0;
903 903
904 if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { 904 if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER &&
905 od->_driver_status != BUS_NOTIFY_BIND_DRIVER) {
905 if (od->_state == OMAP_DEVICE_STATE_ENABLED) { 906 if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
906 dev_warn(dev, "%s: enabled but no driver. Idling\n", 907 dev_warn(dev, "%s: enabled but no driver. Idling\n",
907 __func__); 908 __func__);
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 425bfcd67db6..b668719b9b25 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -103,7 +103,8 @@ static inline void enable_omap3630_toggle_l2_on_restore(void) { }
103#define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0) 103#define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD (1 << 0)
104#define PM_OMAP4_CPU_OSWR_DISABLE (1 << 1) 104#define PM_OMAP4_CPU_OSWR_DISABLE (1 << 1)
105 105
106#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4) 106#if defined(CONFIG_PM) && (defined(CONFIG_ARCH_OMAP4) ||\
107 defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX))
107extern u16 pm44xx_errata; 108extern u16 pm44xx_errata;
108#define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id)) 109#define IS_PM44XX_ERRATUM(id) (pm44xx_errata & (id))
109#else 110#else
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index f97654d11ea5..2d1d3845253c 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -469,6 +469,8 @@ IS_OMAP_TYPE(3430, 0x3430)
469#define DRA7XX_CLASS 0x07000000 469#define DRA7XX_CLASS 0x07000000
470#define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8)) 470#define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
471#define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8)) 471#define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
472#define DRA752_REV_ES2_0 (DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
473#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
472#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8)) 474#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
473 475
474void omap2xxx_check_revision(void); 476void omap2xxx_check_revision(void);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index e4d8701f99f9..a55655127ef2 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -297,12 +297,8 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
297 if (IS_ERR(src)) 297 if (IS_ERR(src))
298 return PTR_ERR(src); 298 return PTR_ERR(src);
299 299
300 r = clk_set_parent(timer->fclk, src); 300 WARN(clk_set_parent(timer->fclk, src) < 0,
301 if (r < 0) { 301 "Cannot set timer parent clock, no PLL clock driver?");
302 pr_warn("%s: %s cannot set source\n", __func__, oh->name);
303 clk_put(src);
304 return r;
305 }
306 302
307 clk_put(src); 303 clk_put(src);
308 304
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index e5a35f6b83a7..d44d311704ba 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -300,7 +300,7 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
300 300
301 val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET); 301 val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET);
302 if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) || 302 if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) ||
303 (val & OMAP3430_PRM_POLCTRL_CLKREQ_POL)) { 303 (val & OMAP3430_PRM_POLCTRL_OFFMODE_POL)) {
304 val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL; 304 val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL;
305 val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL; 305 val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL;
306 pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n", 306 pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n",
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index a3ebb517cca1..a727282bfa99 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -502,7 +502,7 @@ static void balloon3_irq_handler(struct irq_desc *desc)
502 balloon3_irq_enabled; 502 balloon3_irq_enabled;
503 do { 503 do {
504 struct irq_data *d = irq_desc_get_irq_data(desc); 504 struct irq_data *d = irq_desc_get_irq_data(desc);
505 struct irq_chip *chip = irq_data_get_chip(d); 505 struct irq_chip *chip = irq_desc_get_chip(desc);
506 unsigned int irq; 506 unsigned int irq;
507 507
508 /* clear useless edge notification */ 508 /* clear useless edge notification */
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
index d28fe291233a..07b93fd24474 100644
--- a/arch/arm/mach-pxa/include/mach/addr-map.h
+++ b/arch/arm/mach-pxa/include/mach/addr-map.h
@@ -44,6 +44,13 @@
44 */ 44 */
45 45
46/* 46/*
47 * DFI Bus for NAND, PXA3xx only
48 */
49#define NAND_PHYS 0x43100000
50#define NAND_VIRT IOMEM(0xf6300000)
51#define NAND_SIZE 0x00100000
52
53/*
47 * Internal Memory Controller (PXA27x and later) 54 * Internal Memory Controller (PXA27x and later)
48 */ 55 */
49#define IMEMC_PHYS 0x58000000 56#define IMEMC_PHYS 0x58000000
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index ce0f8d6242e2..06005d3f2ba3 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -47,6 +47,13 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));
47#define ISRAM_START 0x5c000000 47#define ISRAM_START 0x5c000000
48#define ISRAM_SIZE SZ_256K 48#define ISRAM_SIZE SZ_256K
49 49
50/*
51 * NAND NFC: DFI bus arbitration subset
52 */
53#define NDCR (*(volatile u32 __iomem*)(NAND_VIRT + 0))
54#define NDCR_ND_ARB_EN (1 << 12)
55#define NDCR_ND_ARB_CNTL (1 << 19)
56
50static void __iomem *sram; 57static void __iomem *sram;
51static unsigned long wakeup_src; 58static unsigned long wakeup_src;
52 59
@@ -362,7 +369,12 @@ static struct map_desc pxa3xx_io_desc[] __initdata = {
362 .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE), 369 .pfn = __phys_to_pfn(PXA3XX_SMEMC_BASE),
363 .length = SMEMC_SIZE, 370 .length = SMEMC_SIZE,
364 .type = MT_DEVICE 371 .type = MT_DEVICE
365 } 372 }, {
373 .virtual = (unsigned long)NAND_VIRT,
374 .pfn = __phys_to_pfn(NAND_PHYS),
375 .length = NAND_SIZE,
376 .type = MT_DEVICE
377 },
366}; 378};
367 379
368void __init pxa3xx_map_io(void) 380void __init pxa3xx_map_io(void)
@@ -419,6 +431,13 @@ static int __init pxa3xx_init(void)
419 */ 431 */
420 ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S); 432 ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
421 433
434 /*
435 * Disable DFI bus arbitration, to prevent a system bus lock if
436 * somebody disables the NAND clock (unused clock) while this
437 * bit remains set.
438 */
439 NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;
440
422 if ((ret = pxa_init_dma(IRQ_DMA, 32))) 441 if ((ret = pxa_init_dma(IRQ_DMA, 32)))
423 return ret; 442 return ret;
424 443
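
The NDCR write above is a plain MMIO read-modify-write through the new fixed NAND_VIRT mapping: clear NDCR_ND_ARB_EN, set NDCR_ND_ARB_CNTL. The same operation written against an ioremap()ed pointer, purely as an illustrative alternative (the function name is made up):

        #include <linux/io.h>

        static void pxa3xx_disable_dfi_arbitration(void __iomem *ndcr)
        {
                u32 val = readl(ndcr);

                val &= ~NDCR_ND_ARB_EN;         /* NFC stops arbitrating DFI */
                val |= NDCR_ND_ARB_CNTL;        /* as set by the patch above */
                writel(val, ndcr);
        }
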
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9769f1eefe3b..00b7f7de28a1 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -365,15 +365,21 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r
365 user: 365 user:
366 if (LDST_L_BIT(instr)) { 366 if (LDST_L_BIT(instr)) {
367 unsigned long val; 367 unsigned long val;
368 unsigned int __ua_flags = uaccess_save_and_enable();
369
368 get16t_unaligned_check(val, addr); 370 get16t_unaligned_check(val, addr);
371 uaccess_restore(__ua_flags);
369 372
370 /* signed half-word? */ 373 /* signed half-word? */
371 if (instr & 0x40) 374 if (instr & 0x40)
372 val = (signed long)((signed short) val); 375 val = (signed long)((signed short) val);
373 376
374 regs->uregs[rd] = val; 377 regs->uregs[rd] = val;
375 } else 378 } else {
379 unsigned int __ua_flags = uaccess_save_and_enable();
376 put16t_unaligned_check(regs->uregs[rd], addr); 380 put16t_unaligned_check(regs->uregs[rd], addr);
381 uaccess_restore(__ua_flags);
382 }
377 383
378 return TYPE_LDST; 384 return TYPE_LDST;
379 385
@@ -420,14 +426,21 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
420 426
421 user: 427 user:
422 if (load) { 428 if (load) {
423 unsigned long val; 429 unsigned long val, val2;
430 unsigned int __ua_flags = uaccess_save_and_enable();
431
424 get32t_unaligned_check(val, addr); 432 get32t_unaligned_check(val, addr);
433 get32t_unaligned_check(val2, addr + 4);
434
435 uaccess_restore(__ua_flags);
436
425 regs->uregs[rd] = val; 437 regs->uregs[rd] = val;
426 get32t_unaligned_check(val, addr + 4); 438 regs->uregs[rd2] = val2;
427 regs->uregs[rd2] = val;
428 } else { 439 } else {
440 unsigned int __ua_flags = uaccess_save_and_enable();
429 put32t_unaligned_check(regs->uregs[rd], addr); 441 put32t_unaligned_check(regs->uregs[rd], addr);
430 put32t_unaligned_check(regs->uregs[rd2], addr + 4); 442 put32t_unaligned_check(regs->uregs[rd2], addr + 4);
443 uaccess_restore(__ua_flags);
431 } 444 }
432 445
433 return TYPE_LDST; 446 return TYPE_LDST;
@@ -458,10 +471,15 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg
458 trans: 471 trans:
459 if (LDST_L_BIT(instr)) { 472 if (LDST_L_BIT(instr)) {
460 unsigned int val; 473 unsigned int val;
474 unsigned int __ua_flags = uaccess_save_and_enable();
461 get32t_unaligned_check(val, addr); 475 get32t_unaligned_check(val, addr);
476 uaccess_restore(__ua_flags);
462 regs->uregs[rd] = val; 477 regs->uregs[rd] = val;
463 } else 478 } else {
479 unsigned int __ua_flags = uaccess_save_and_enable();
464 put32t_unaligned_check(regs->uregs[rd], addr); 480 put32t_unaligned_check(regs->uregs[rd], addr);
481 uaccess_restore(__ua_flags);
482 }
465 return TYPE_LDST; 483 return TYPE_LDST;
466 484
467 fault: 485 fault:
@@ -531,6 +549,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
531#endif 549#endif
532 550
533 if (user_mode(regs)) { 551 if (user_mode(regs)) {
552 unsigned int __ua_flags = uaccess_save_and_enable();
534 for (regbits = REGMASK_BITS(instr), rd = 0; regbits; 553 for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
535 regbits >>= 1, rd += 1) 554 regbits >>= 1, rd += 1)
536 if (regbits & 1) { 555 if (regbits & 1) {
@@ -542,6 +561,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
542 put32t_unaligned_check(regs->uregs[rd], eaddr); 561 put32t_unaligned_check(regs->uregs[rd], eaddr);
543 eaddr += 4; 562 eaddr += 4;
544 } 563 }
564 uaccess_restore(__ua_flags);
545 } else { 565 } else {
546 for (regbits = REGMASK_BITS(instr), rd = 0; regbits; 566 for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
547 regbits >>= 1, rd += 1) 567 regbits >>= 1, rd += 1)
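
Every alignment.c hunk above applies the same discipline: open the user access window with uaccess_save_and_enable(), do the access, and call uaccess_restore() on the way out, so the window never stays open longer than the access itself. Isolated as a minimal sketch (the wrapper name is made up; get16t_unaligned_check() branches to a local fault label on a failed user access, as in alignment.c):

        static unsigned long read_user_halfword(unsigned long addr)
        {
                unsigned long val = 0;
                unsigned int __ua_flags = uaccess_save_and_enable();

                get16t_unaligned_check(val, addr);      /* may goto fault */
                uaccess_restore(__ua_flags);
                return val;

        fault:
                uaccess_restore(__ua_flags);    /* never leave it enabled */
                return 0;
        }
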
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ad9529cc4203..daa1a65f2eb7 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -107,7 +107,6 @@ static const struct of_device_id pxa_ssp_of_ids[] = {
107 { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP }, 107 { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
108 { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP }, 108 { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
109 { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP }, 109 { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
110 { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP },
111 { }, 110 { },
112}; 111};
113MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids); 112MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index d18ee4259ee5..06a15644be38 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -81,7 +81,7 @@
81 }; 81 };
82 82
83 idle-states { 83 idle-states {
84 entry-method = "arm,psci"; 84 entry-method = "psci";
85 85
86 CPU_SLEEP_0: cpu-sleep-0 { 86 CPU_SLEEP_0: cpu-sleep-0 {
87 compatible = "arm,idle-state"; 87 compatible = "arm,idle-state";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index a712bea3bf2c..cc093a482aa4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -106,7 +106,7 @@
106 }; 106 };
107 107
108 idle-states { 108 idle-states {
109 entry-method = "arm,psci"; 109 entry-method = "psci";
110 110
111 cpu_sleep: cpu-sleep-0 { 111 cpu_sleep: cpu-sleep-0 {
112 compatible = "arm,idle-state"; 112 compatible = "arm,idle-state";
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4562459456a6..ed039688c221 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
33#define KVM_USER_MEM_SLOTS 32 33#define KVM_USER_MEM_SLOTS 32
34#define KVM_PRIVATE_MEM_SLOTS 4 34#define KVM_PRIVATE_MEM_SLOTS 4
35#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 35#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
36#define KVM_HALT_POLL_NS_DEFAULT 500000
36 37
37#include <kvm/arm_vgic.h> 38#include <kvm/arm_vgic.h>
38#include <kvm/arm_arch_timer.h> 39#include <kvm/arm_arch_timer.h>
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index f9c86c475bbd..f211839e2cae 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -294,6 +294,8 @@ void pcibios_fixup_bus(struct pci_bus *bus)
294 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); 294 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
295#endif 295#endif
296 296
297 pci_read_bridge_bases(bus);
298
297 if (bus->number == 0) { 299 if (bus->number == 0) {
298 struct pci_dev *dev; 300 struct pci_dev *dev;
299 list_for_each_entry(dev, &bus->devices, bus_list) { 301 list_for_each_entry(dev, &bus->devices, bus_list) {
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index d89b6013c941..7cc3be9fa7c6 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -533,9 +533,10 @@ void pcibios_fixup_bus(struct pci_bus *b)
533{ 533{
534 struct pci_dev *dev; 534 struct pci_dev *dev;
535 535
536 if (b->self) 536 if (b->self) {
537 pci_read_bridge_bases(b);
537 pcibios_fixup_bridge_resources(b->self); 538 pcibios_fixup_bridge_resources(b->self);
538 539 }
539 list_for_each_entry(dev, &b->devices, bus_list) 540 list_for_each_entry(dev, &b->devices, bus_list)
540 pcibios_fixup_device_resources(dev); 541 pcibios_fixup_device_resources(dev);
541 platform_pci_fixup_bus(b); 542 platform_pci_fixup_bus(b);
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 6b8b75266801..ae838ed5fcf2 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -863,7 +863,14 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
863 863
864void pcibios_fixup_bus(struct pci_bus *bus) 864void pcibios_fixup_bus(struct pci_bus *bus)
865{ 865{
866 /* Fixup the bus */ 866 /* When called from the generic PCI probe, read PCI<->PCI bridge
867 * bases. This is -not- called when generating the PCI tree from
868 * the OF device-tree.
869 */
870 if (bus->self != NULL)
871 pci_read_bridge_bases(bus);
872
873 /* Now fixup the bus bus */
867 pcibios_setup_bus_self(bus); 874 pcibios_setup_bus_self(bus);
868 875
869 /* Now fixup devices on that bus */ 876 /* Now fixup devices on that bus */
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 3a54dbca9f7e..5a1a882e0a75 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -61,6 +61,7 @@
61#define KVM_PRIVATE_MEM_SLOTS 0 61#define KVM_PRIVATE_MEM_SLOTS 0
62 62
63#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 63#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
64#define KVM_HALT_POLL_NS_DEFAULT 500000
64 65
65 66
66 67
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index c6996cf67a5c..b8a0bf5766f2 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -311,6 +311,12 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
311 311
312void pcibios_fixup_bus(struct pci_bus *bus) 312void pcibios_fixup_bus(struct pci_bus *bus)
313{ 313{
314 struct pci_dev *dev = bus->self;
315
316 if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
317 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
318 pci_read_bridge_bases(bus);
319 }
314} 320}
315 321
316EXPORT_SYMBOL(PCIBIOS_MIN_IO); 322EXPORT_SYMBOL(PCIBIOS_MIN_IO);
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index deaa893efba5..3dfe2d31c67b 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -324,6 +324,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
324 struct pci_dev *dev; 324 struct pci_dev *dev;
325 325
326 if (bus->self) { 326 if (bus->self) {
327 pci_read_bridge_bases(bus);
327 pcibios_fixup_bridge_resources(bus->self); 328 pcibios_fixup_bridge_resources(bus->self);
328 } 329 }
329 330
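
The frv, ia64, microblaze, mips and mn10300 hunks above (and the powerpc one further below) all add the same call: pci_read_bridge_bases() moves out of the PCI core and into each arch's pcibios_fixup_bus(). The shared shape, reduced to a sketch rather than any one arch's exact version:

        void pcibios_fixup_bus(struct pci_bus *bus)
        {
                /* Only P2P bridges have windows to read; root buses have no
                 * bus->self (pci_read_bridge_bases() also ignores root buses
                 * itself, which is why powerpc calls it unconditionally). */
                if (bus->self)
                        pci_read_bridge_bases(bus);

                /* arch-specific resource fixups follow */
        }
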
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 195886a583ba..827a38d7a9db 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
44#ifdef CONFIG_KVM_MMIO 44#ifdef CONFIG_KVM_MMIO
45#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 45#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
46#endif 46#endif
47#define KVM_HALT_POLL_NS_DEFAULT 500000
47 48
48/* These values are internal and can be increased later */ 49/* These values are internal and can be increased later */
49#define KVM_NR_IRQCHIPS 1 50#define KVM_NR_IRQCHIPS 1
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 4d65499ee1c1..126d0c4f9b7d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -369,3 +369,4 @@ SYSCALL_SPU(bpf)
369COMPAT_SYS(execveat) 369COMPAT_SYS(execveat)
370PPC64ONLY(switch_endian) 370PPC64ONLY(switch_endian)
371SYSCALL_SPU(userfaultfd) 371SYSCALL_SPU(userfaultfd)
372SYSCALL_SPU(membarrier)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4a055b6c2a64..13411be86041 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 365 15#define __NR_syscalls 366
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 6ad58d4c879b..6337738018aa 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -387,5 +387,6 @@
387#define __NR_execveat 362 387#define __NR_execveat 362
388#define __NR_switch_endian 363 388#define __NR_switch_endian 363
389#define __NR_userfaultfd 364 389#define __NR_userfaultfd 364
390#define __NR_membarrier 365
390 391
391#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 392#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index a1d0632d97c6..7587b2ae5f77 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1032,7 +1032,13 @@ void pcibios_set_master(struct pci_dev *dev)
1032 1032
1033void pcibios_fixup_bus(struct pci_bus *bus) 1033void pcibios_fixup_bus(struct pci_bus *bus)
1034{ 1034{
1035 /* Fixup the bus */ 1035 /* When called from the generic PCI probe, read PCI<->PCI bridge
1036 * bases. This is -not- called when generating the PCI tree from
1037 * the OF device-tree.
1038 */
1039 pci_read_bridge_bases(bus);
1040
1041 /* Now fixup the bus bus */
1036 pcibios_setup_bus_self(bus); 1042 pcibios_setup_bus_self(bus);
1037 1043
1038 /* Now fixup devices on that bus */ 1044 /* Now fixup devices on that bus */
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index cf009167d208..099c79d8c160 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -829,12 +829,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
829 unsigned long size = kvmppc_get_gpr(vcpu, 4); 829 unsigned long size = kvmppc_get_gpr(vcpu, 4);
830 unsigned long addr = kvmppc_get_gpr(vcpu, 5); 830 unsigned long addr = kvmppc_get_gpr(vcpu, 5);
831 u64 buf; 831 u64 buf;
832 int srcu_idx;
832 int ret; 833 int ret;
833 834
834 if (!is_power_of_2(size) || (size > sizeof(buf))) 835 if (!is_power_of_2(size) || (size > sizeof(buf)))
835 return H_TOO_HARD; 836 return H_TOO_HARD;
836 837
838 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
837 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf); 839 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
840 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
838 if (ret != 0) 841 if (ret != 0)
839 return H_TOO_HARD; 842 return H_TOO_HARD;
840 843
@@ -869,6 +872,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
869 unsigned long addr = kvmppc_get_gpr(vcpu, 5); 872 unsigned long addr = kvmppc_get_gpr(vcpu, 5);
870 unsigned long val = kvmppc_get_gpr(vcpu, 6); 873 unsigned long val = kvmppc_get_gpr(vcpu, 6);
871 u64 buf; 874 u64 buf;
875 int srcu_idx;
872 int ret; 876 int ret;
873 877
874 switch (size) { 878 switch (size) {
@@ -892,7 +896,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
892 return H_TOO_HARD; 896 return H_TOO_HARD;
893 } 897 }
894 898
899 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
895 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf); 900 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
901 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
896 if (ret != 0) 902 if (ret != 0)
897 return H_TOO_HARD; 903 return H_TOO_HARD;
898 904
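
Both hunks above wrap the kvm_io_bus_read()/kvm_io_bus_write() calls in an SRCU read-side critical section, since the KVM I/O bus tables hang off kvm->srcu. The added locking shape, condensed into a stand-alone helper (the function name is made up):

        static int lci_io_read(struct kvm_vcpu *vcpu, unsigned long addr,
                               unsigned long size, u64 *buf)
        {
                int srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                int ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr,
                                          size, buf);

                srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
                return ret;
        }
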
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9754e6815e52..228049786888 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2692,9 +2692,13 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2692 2692
2693 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && 2693 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2694 (vc->vcore_state == VCORE_RUNNING || 2694 (vc->vcore_state == VCORE_RUNNING ||
2695 vc->vcore_state == VCORE_EXITING)) 2695 vc->vcore_state == VCORE_EXITING ||
2696 vc->vcore_state == VCORE_PIGGYBACK))
2696 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); 2697 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
2697 2698
2699 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
2700 kvmppc_vcore_end_preempt(vc);
2701
2698 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { 2702 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2699 kvmppc_remove_runnable(vc, vcpu); 2703 kvmppc_remove_runnable(vc, vcpu);
2700 vcpu->stat.signal_exits++; 2704 vcpu->stat.signal_exits++;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 2273dcacef39..b98889e9851d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1257,6 +1257,7 @@ mc_cont:
1257 bl kvmhv_accumulate_time 1257 bl kvmhv_accumulate_time
1258#endif 1258#endif
1259 1259
1260 mr r3, r12
1260 /* Increment exit count, poke other threads to exit */ 1261 /* Increment exit count, poke other threads to exit */
1261 bl kvmhv_commence_exit 1262 bl kvmhv_commence_exit
1262 nop 1263 nop
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1b0184a0f7f2..92805d604173 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,7 +1,6 @@
1# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
2CONFIG_NO_HZ=y 2CONFIG_NO_HZ=y
3CONFIG_HIGH_RES_TIMERS=y 3CONFIG_HIGH_RES_TIMERS=y
4CONFIG_RCU_FAST_NO_HZ=y
5CONFIG_BLK_DEV_INITRD=y 4CONFIG_BLK_DEV_INITRD=y
6CONFIG_CC_OPTIMIZE_FOR_SIZE=y 5CONFIG_CC_OPTIMIZE_FOR_SIZE=y
7# CONFIG_COMPAT_BRK is not set 6# CONFIG_COMPAT_BRK is not set
@@ -54,10 +53,6 @@ CONFIG_RAW_DRIVER=y
54# CONFIG_MONWRITER is not set 53# CONFIG_MONWRITER is not set
55# CONFIG_S390_VMUR is not set 54# CONFIG_S390_VMUR is not set
56# CONFIG_HID is not set 55# CONFIG_HID is not set
57CONFIG_MEMSTICK=y
58CONFIG_MEMSTICK_DEBUG=y
59CONFIG_MEMSTICK_UNSAFE_RESUME=y
60CONFIG_MSPRO_BLOCK=y
61# CONFIG_IOMMU_SUPPORT is not set 56# CONFIG_IOMMU_SUPPORT is not set
62CONFIG_EXT2_FS=y 57CONFIG_EXT2_FS=y
63CONFIG_EXT3_FS=y 58CONFIG_EXT3_FS=y
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 6ce4a0b7e8da..8ced426091e1 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -35,6 +35,7 @@
35 */ 35 */
36#define KVM_NR_IRQCHIPS 1 36#define KVM_NR_IRQCHIPS 1
37#define KVM_IRQCHIP_NUM_PINS 4096 37#define KVM_IRQCHIP_NUM_PINS 4096
38#define KVM_HALT_POLL_NS_DEFAULT 0
38 39
39#define SIGP_CTRL_C 0x80 40#define SIGP_CTRL_C 0x80
40#define SIGP_CTRL_SCN_MASK 0x3f 41#define SIGP_CTRL_SCN_MASK 0x3f
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 525cef73b085..02613bad8bbb 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -8,28 +8,8 @@
8 8
9#include <uapi/asm/unistd.h> 9#include <uapi/asm/unistd.h>
10 10
11
12#define __IGNORE_time 11#define __IGNORE_time
13 12
14/* Ignore system calls that are also reachable via sys_socketcall */
15#define __IGNORE_recvmmsg
16#define __IGNORE_sendmmsg
17#define __IGNORE_socket
18#define __IGNORE_socketpair
19#define __IGNORE_bind
20#define __IGNORE_connect
21#define __IGNORE_listen
22#define __IGNORE_accept4
23#define __IGNORE_getsockopt
24#define __IGNORE_setsockopt
25#define __IGNORE_getsockname
26#define __IGNORE_getpeername
27#define __IGNORE_sendto
28#define __IGNORE_sendmsg
29#define __IGNORE_recvfrom
30#define __IGNORE_recvmsg
31#define __IGNORE_shutdown
32
33#define __ARCH_WANT_OLD_READDIR 13#define __ARCH_WANT_OLD_READDIR
34#define __ARCH_WANT_SYS_ALARM 14#define __ARCH_WANT_SYS_ALARM
35#define __ARCH_WANT_SYS_GETHOSTNAME 15#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 59d2bb4e2d0c..a848adba1504 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -290,7 +290,26 @@
290#define __NR_s390_pci_mmio_write 352 290#define __NR_s390_pci_mmio_write 352
291#define __NR_s390_pci_mmio_read 353 291#define __NR_s390_pci_mmio_read 353
292#define __NR_execveat 354 292#define __NR_execveat 354
293#define NR_syscalls 355 293#define __NR_userfaultfd 355
294#define __NR_membarrier 356
295#define __NR_recvmmsg 357
296#define __NR_sendmmsg 358
297#define __NR_socket 359
298#define __NR_socketpair 360
299#define __NR_bind 361
300#define __NR_connect 362
301#define __NR_listen 363
302#define __NR_accept4 364
303#define __NR_getsockopt 365
304#define __NR_setsockopt 366
305#define __NR_getsockname 367
306#define __NR_getpeername 368
307#define __NR_sendto 369
308#define __NR_sendmsg 370
309#define __NR_recvfrom 371
310#define __NR_recvmsg 372
311#define __NR_shutdown 373
312#define NR_syscalls 374
294 313
295/* 314/*
296 * There are some system calls that are not present on 64 bit, some 315 * There are some system calls that are not present on 64 bit, some
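
The uapi hunk above gives the socket-family calls their own syscall numbers on s390 (357 through 373), which is why the matching __IGNORE_* block disappears from asm/unistd.h earlier in this diff: a libc built against these headers can issue them directly instead of multiplexing through sys_socketcall(). A userspace sketch, assuming libc headers regenerated from this uapi file:

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <sys/syscall.h>

        int main(void)
        {
                long fd = syscall(__NR_socket, AF_INET, SOCK_DGRAM, 0);

                if (fd < 0) {
                        perror("socket");
                        return 1;
                }
                close((int)fd);
                return 0;
        }
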
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index eb4664238613..e0f9d270b30f 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@ typedef struct
48 struct ucontext32 uc; 48 struct ucontext32 uc;
49} rt_sigframe32; 49} rt_sigframe32;
50 50
51static inline void sigset_to_sigset32(unsigned long *set64,
52 compat_sigset_word *set32)
53{
54 set32[0] = (compat_sigset_word) set64[0];
55 set32[1] = (compat_sigset_word)(set64[0] >> 32);
56}
57
58static inline void sigset32_to_sigset(compat_sigset_word *set32,
59 unsigned long *set64)
60{
61 set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
62}
63
51int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) 64int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
52{ 65{
53 int err; 66 int err;
@@ -281,10 +294,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
281{ 294{
282 struct pt_regs *regs = task_pt_regs(current); 295 struct pt_regs *regs = task_pt_regs(current);
283 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15]; 296 sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
297 compat_sigset_t cset;
284 sigset_t set; 298 sigset_t set;
285 299
286 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) 300 if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
287 goto badframe; 301 goto badframe;
302 sigset32_to_sigset(cset.sig, set.sig);
288 set_current_blocked(&set); 303 set_current_blocked(&set);
289 save_fpu_regs(); 304 save_fpu_regs();
290 if (restore_sigregs32(regs, &frame->sregs)) 305 if (restore_sigregs32(regs, &frame->sregs))
@@ -302,10 +317,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
302{ 317{
303 struct pt_regs *regs = task_pt_regs(current); 318 struct pt_regs *regs = task_pt_regs(current);
304 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15]; 319 rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
320 compat_sigset_t cset;
305 sigset_t set; 321 sigset_t set;
306 322
307 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 323 if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
308 goto badframe; 324 goto badframe;
325 sigset32_to_sigset(cset.sig, set.sig);
309 set_current_blocked(&set); 326 set_current_blocked(&set);
310 if (compat_restore_altstack(&frame->uc.uc_stack)) 327 if (compat_restore_altstack(&frame->uc.uc_stack))
311 goto badframe; 328 goto badframe;
@@ -377,7 +394,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
377 return -EFAULT; 394 return -EFAULT;
378 395
379 /* Create struct sigcontext32 on the signal stack */ 396 /* Create struct sigcontext32 on the signal stack */
380 memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32); 397 sigset_to_sigset32(set->sig, sc.oldmask);
381 sc.sregs = (__u32)(unsigned long __force) &frame->sregs; 398 sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
382 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc))) 399 if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
383 return -EFAULT; 400 return -EFAULT;
@@ -438,6 +455,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
438static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, 455static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
439 struct pt_regs *regs) 456 struct pt_regs *regs)
440{ 457{
458 compat_sigset_t cset;
441 rt_sigframe32 __user *frame; 459 rt_sigframe32 __user *frame;
442 unsigned long restorer; 460 unsigned long restorer;
443 size_t frame_size; 461 size_t frame_size;
@@ -485,11 +503,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
485 store_sigregs(); 503 store_sigregs();
486 504
487 /* Create ucontext on the signal stack. */ 505 /* Create ucontext on the signal stack. */
506 sigset_to_sigset32(set->sig, cset.sig);
488 if (__put_user(uc_flags, &frame->uc.uc_flags) || 507 if (__put_user(uc_flags, &frame->uc.uc_flags) ||
489 __put_user(0, &frame->uc.uc_link) || 508 __put_user(0, &frame->uc.uc_link) ||
490 __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) || 509 __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
491 save_sigregs32(regs, &frame->uc.uc_mcontext) || 510 save_sigregs32(regs, &frame->uc.uc_mcontext) ||
492 __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) || 511 __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
493 save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) 512 save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
494 return -EFAULT; 513 return -EFAULT;
495 514
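
The compat_signal.c hunks above stop memcpy()ing the 64-bit sigmask in and out of the 32-bit compat frame and convert explicitly instead: on big-endian s390, a byte-wise copy would land the high word of the mask in set32[0], swapping the two halves. A worked sketch of the split direction added above (mask value arbitrary):

        static void split_sigmask(unsigned long set64, u32 set32[2])
        {
                set32[0] = (u32) set64;         /* low half,  e.g. 0x9abcdef0 */
                set32[1] = (u32)(set64 >> 32);  /* high half, e.g. 0x12345678 */
        }

        /* for set64 = 0x123456789abcdef0UL; the join direction reverses it:
         * set64 = set32[0] | ((unsigned long)set32[1] << 32). */
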
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index f8498dde67b1..09f194052df3 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -52,15 +52,13 @@
52 * the regular system call wrappers. 52 * the regular system call wrappers.
53 */ 53 */
54#define COMPAT_SYSCALL_WRAPx(x, name, ...) \ 54#define COMPAT_SYSCALL_WRAPx(x, name, ...) \
55 asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ 55asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
56 asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ 56asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\
57 asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ 57asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \
58 { \ 58{ \
59 return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ 59 return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \
60 } 60}
61 61
62COMPAT_SYSCALL_WRAP1(exit, int, error_code);
63COMPAT_SYSCALL_WRAP1(close, unsigned int, fd);
64COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode); 62COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode);
65COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname); 63COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname);
66COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname); 64COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname);
@@ -68,23 +66,16 @@ COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename);
68COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev); 66COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev);
69COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode); 67COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode);
70COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name); 68COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name);
71COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds);
72COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode); 69COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode);
73COMPAT_SYSCALL_WRAP1(nice, int, increment);
74COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig);
75COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname); 70COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname);
76COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode); 71COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode);
77COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname); 72COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname);
78COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes);
79COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes); 73COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes);
80COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk); 74COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk);
81COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler); 75COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler);
82COMPAT_SYSCALL_WRAP1(acct, const char __user *, name); 76COMPAT_SYSCALL_WRAP1(acct, const char __user *, name);
83COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags); 77COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags);
84COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid);
85COMPAT_SYSCALL_WRAP1(umask, int, mask);
86COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename); 78COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename);
87COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd);
 COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask);
 COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len);
 COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new);
@@ -93,37 +84,23 @@ COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library);
 COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags);
 COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg);
 COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len);
-COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode);
-COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who);
-COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval);
 COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len);
 COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile);
-COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd);
 COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len);
 COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name);
 COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot);
 COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs);
 COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags);
 COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr);
-COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid);
-COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd);
 COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data);
 COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2);
-COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality);
 COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence);
-COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd);
 COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags);
-COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid);
-COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd);
 COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len);
 COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len);
-COMPAT_SYSCALL_WRAP1(mlockall, int, flags);
 COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param);
 COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param);
 COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param);
-COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid);
-COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy);
-COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy);
 COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr);
 COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout);
 COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5);
@@ -131,20 +108,11 @@ COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size);
 COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr);
 COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data);
 COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid);
-COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid);
 COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist);
 COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist);
-COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid);
 COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid);
-COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid);
 COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid);
 COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group);
-COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid);
-COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid);
-COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid);
-COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid);
 COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old);
 COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec);
 COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior);
@@ -161,23 +129,16 @@ COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size);
 COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name);
 COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name);
 COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name);
-COMPAT_SYSCALL_WRAP1(exit_group, int, error_code);
 COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr);
-COMPAT_SYSCALL_WRAP1(epoll_create, int, size);
 COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event);
 COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout);
-COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id);
-COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id);
 COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx);
 COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result);
 COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name);
 COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id);
 COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id);
 COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags);
-COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio);
-COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who);
 COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask);
-COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd);
 COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode);
 COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev);
 COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag);
@@ -192,23 +153,11 @@ COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags);
 COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
 COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags);
 COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache);
-COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count);
-COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags);
-COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags);
-COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags);
 COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags);
-COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags);
-COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags);
-COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig);
-COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig);
 COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
 COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls);
-COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags);
 COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
 COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
-COMPAT_SYSCALL_WRAP1(syncfs, int, fd);
-COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype);
-COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum);
 COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2);
 COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags);
 COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
@@ -220,3 +169,10 @@ COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, fla
 COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
 COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
 COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
+COMPAT_SYSCALL_WRAP4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec);
+COMPAT_SYSCALL_WRAP3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen);
+COMPAT_SYSCALL_WRAP3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen);
+COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags);
+COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
+COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
+COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
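
The wrappers deleted above all take pointer-free integer arguments, which the s390 compat entry path can widen itself, so the dedicated wrapper adds nothing. A stand-alone sketch of what such a wrapper effectively does (handler names and the setpriority example are illustrative, not the kernel's real macro expansion):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical native handler: takes already-widened 64-bit arguments. */
static long native_setpriority(long which, long who, long niceval)
{
	return niceval;	/* echo the widened value back for the demo */
}

/* A COMPAT_SYSCALL_WRAPn-style wrapper boils down to sign-extending each
 * 32-bit user value before calling the native handler.  If the syscall
 * entry code already guarantees this extension for plain integer
 * arguments, the wrapper is redundant and can be dropped. */
static long compat_setpriority(int32_t which, int32_t who, int32_t niceval)
{
	return native_setpriority(which, who, niceval);	/* implicit widening */
}

int main(void)
{
	/* a negative nice value must survive the 32->64 bit transition */
	printf("%ld\n", compat_setpriority(0, 0, -5));	/* prints -5 */
	return 0;
}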
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 247b7aae4c6d..09b039d7983d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1191,6 +1191,7 @@ cleanup_critical:
 	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
 	jhe	1f
 	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
 0:	# Store floating-point controls
 	stfpc	__THREAD_FPU_fpc(%r2)
 1:	# Load register save area and check if VX is active
@@ -1252,6 +1253,7 @@ cleanup_critical:
 	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
 	jhe	6f
 	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
 	lfpc	__THREAD_FPU_fpc(%r4)
 	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
 	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
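
The two added aghi instructions rebase the pointer loaded from __LC_CURRENT (a task_struct) onto its embedded thread member before the __THREAD_FPU_* offsets are applied. A small C analogy of the bug and the fix (the structure layout here is hypothetical, chosen only to mirror the offset arithmetic):

#include <stdio.h>
#include <stddef.h>

/* Illustrative layout only: the FPU control word lives inside thread,
 * not at the start of the task structure. */
struct thread_struct { unsigned int fpu_fpc; };
struct task_struct  { long state; struct thread_struct thread; };

int main(void)
{
	struct task_struct task = { .thread = { .fpu_fpc = 0x42 } };
	char *cur = (char *)&task;			/* what __LC_CURRENT holds */

	/* Without the rebase, thread-relative offsets would be applied to
	 * the task pointer and read the wrong field. */
	cur += offsetof(struct task_struct, thread);	/* the missing "aghi ...,__TASK_thread" */
	printf("fpc = %#x\n", ((struct thread_struct *)cur)->fpu_fpc);
	return 0;
}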
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 56fdad479115..a9563409c36e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -157,10 +157,14 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
 
 	cpuhw = &get_cpu_var(cpu_hw_events);
 
-	/* check authorization for cpu counter sets */
+	/* Check authorization for cpu counter sets.
+	 * If the particular CPU counter set is not authorized,
+	 * return with -ENOENT in order to fall back to other
+	 * PMUs that might suffice the event request.
+	 */
 	ctrs_state = cpumf_state_ctl[hwc->config_base];
 	if (!(ctrs_state & cpuhw->info.auth_ctl))
-		err = -EPERM;
+		err = -ENOENT;
 
 	put_cpu_var(cpu_hw_events);
 	return err;
@@ -536,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
 	 */
 	if (!(cpuhw->flags & PERF_EVENT_TXN))
 		if (validate_ctr_auth(&event->hw))
-			return -EPERM;
+			return -ENOENT;
 
 	ctr_set_enable(&cpuhw->state, event->hw.config_base);
 	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
@@ -611,7 +615,7 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
 	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
 	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
 	if ((state & cpuhw->info.auth_ctl) != state)
-		return -EPERM;
+		return -ENOENT;
 
 	cpuhw->flags &= ~PERF_EVENT_TXN;
 	perf_pmu_enable(pmu);
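
Switching from -EPERM to -ENOENT matters because the perf core treats -ENOENT from a PMU's event_init as "not my event, try the next PMU", while any other error aborts the open. A stand-alone sketch of that fallback contract (simplified; not the kernel's actual perf_init_event()):

#include <errno.h>
#include <stdio.h>

struct pmu_sketch {
	int (*event_init)(unsigned int config);
};

/* -ENOENT keeps the search going; any other error is fatal. */
static int init_event(struct pmu_sketch *pmus, int n, unsigned int config)
{
	for (int i = 0; i < n; i++) {
		int err = pmus[i].event_init(config);
		if (!err)
			return 0;	/* this PMU accepted the event */
		if (err != -ENOENT)
			return err;	/* e.g. the old -EPERM: abort */
	}
	return -ENOENT;			/* nobody claimed it */
}

static int reject_softly(unsigned int config) { return -ENOENT; }
static int accept(unsigned int config) { return 0; }

int main(void)
{
	struct pmu_sketch pmus[] = { { reject_softly }, { accept } };
	printf("%d\n", init_event(pmus, 2, 7));	/* 0: the second PMU took it */
	return 0;
}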
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index ca6294645dd3..2d6b6e81f812 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -30,6 +30,9 @@ ENTRY(swsusp_arch_suspend)
 	aghi	%r15,-STACK_FRAME_OVERHEAD
 	stg	%r1,__SF_BACKCHAIN(%r15)
 
+	/* Store FPU registers */
+	brasl	%r14,save_fpu_regs
+
 	/* Deactivate DAT */
 	stnsm	__SF_EMPTY(%r15),0xfb
 
@@ -47,23 +50,6 @@ ENTRY(swsusp_arch_suspend)
 
 	/* Store registers */
 	mvc	0x318(4,%r1),__SF_EMPTY(%r15)	/* move prefix to lowcore */
-	stfpc	0x31c(%r1)			/* store fpu control */
-	std	0,0x200(%r1)			/* store f0 */
-	std	1,0x208(%r1)			/* store f1 */
-	std	2,0x210(%r1)			/* store f2 */
-	std	3,0x218(%r1)			/* store f3 */
-	std	4,0x220(%r1)			/* store f4 */
-	std	5,0x228(%r1)			/* store f5 */
-	std	6,0x230(%r1)			/* store f6 */
-	std	7,0x238(%r1)			/* store f7 */
-	std	8,0x240(%r1)			/* store f8 */
-	std	9,0x248(%r1)			/* store f9 */
-	std	10,0x250(%r1)			/* store f10 */
-	std	11,0x258(%r1)			/* store f11 */
-	std	12,0x260(%r1)			/* store f12 */
-	std	13,0x268(%r1)			/* store f13 */
-	std	14,0x270(%r1)			/* store f14 */
-	std	15,0x278(%r1)			/* store f15 */
 	stam	%a0,%a15,0x340(%r1)		/* store access registers */
 	stctg	%c0,%c15,0x380(%r1)		/* store control registers */
 	stmg	%r0,%r15,0x280(%r1)		/* store general registers */
@@ -249,24 +235,6 @@ restore_registers:
 	lctlg	%c0,%c15,0x380(%r13)		/* load control registers */
 	lam	%a0,%a15,0x340(%r13)		/* load access registers */
 
-	lfpc	0x31c(%r13)			/* load fpu control */
-	ld	0,0x200(%r13)			/* load f0 */
-	ld	1,0x208(%r13)			/* load f1 */
-	ld	2,0x210(%r13)			/* load f2 */
-	ld	3,0x218(%r13)			/* load f3 */
-	ld	4,0x220(%r13)			/* load f4 */
-	ld	5,0x228(%r13)			/* load f5 */
-	ld	6,0x230(%r13)			/* load f6 */
-	ld	7,0x238(%r13)			/* load f7 */
-	ld	8,0x240(%r13)			/* load f8 */
-	ld	9,0x248(%r13)			/* load f9 */
-	ld	10,0x250(%r13)			/* load f10 */
-	ld	11,0x258(%r13)			/* load f11 */
-	ld	12,0x260(%r13)			/* load f12 */
-	ld	13,0x268(%r13)			/* load f13 */
-	ld	14,0x270(%r13)			/* load f14 */
-	ld	15,0x278(%r13)			/* load f15 */
-
 	/* Load old stack */
 	lg	%r15,0x2f8(%r13)
 
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index f3f4a137aef6..8c56929c8d82 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -9,12 +9,12 @@
 #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
 
 NI_SYSCALL /* 0 */
-SYSCALL(sys_exit,compat_sys_exit)
+SYSCALL(sys_exit,sys_exit)
 SYSCALL(sys_fork,sys_fork)
 SYSCALL(sys_read,compat_sys_s390_read)
 SYSCALL(sys_write,compat_sys_s390_write)
 SYSCALL(sys_open,compat_sys_open) /* 5 */
-SYSCALL(sys_close,compat_sys_close)
+SYSCALL(sys_close,sys_close)
 SYSCALL(sys_restart_syscall,sys_restart_syscall)
 SYSCALL(sys_creat,compat_sys_creat)
 SYSCALL(sys_link,compat_sys_link)
@@ -35,21 +35,21 @@ SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
 SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
 SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
 SYSCALL(sys_ptrace,compat_sys_ptrace)
-SYSCALL(sys_alarm,compat_sys_alarm)
+SYSCALL(sys_alarm,sys_alarm)
 NI_SYSCALL /* old fstat syscall */
 SYSCALL(sys_pause,sys_pause)
 SYSCALL(sys_utime,compat_sys_utime) /* 30 */
 NI_SYSCALL /* old stty syscall */
 NI_SYSCALL /* old gtty syscall */
 SYSCALL(sys_access,compat_sys_access)
-SYSCALL(sys_nice,compat_sys_nice)
+SYSCALL(sys_nice,sys_nice)
 NI_SYSCALL /* 35 old ftime syscall */
 SYSCALL(sys_sync,sys_sync)
-SYSCALL(sys_kill,compat_sys_kill)
+SYSCALL(sys_kill,sys_kill)
 SYSCALL(sys_rename,compat_sys_rename)
 SYSCALL(sys_mkdir,compat_sys_mkdir)
 SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */
-SYSCALL(sys_dup,compat_sys_dup)
+SYSCALL(sys_dup,sys_dup)
 SYSCALL(sys_pipe,compat_sys_pipe)
 SYSCALL(sys_times,compat_sys_times)
 NI_SYSCALL /* old prof syscall */
@@ -65,13 +65,13 @@ NI_SYSCALL /* old lock syscall */
 SYSCALL(sys_ioctl,compat_sys_ioctl)
 SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */
 NI_SYSCALL /* intel mpx syscall */
-SYSCALL(sys_setpgid,compat_sys_setpgid)
+SYSCALL(sys_setpgid,sys_setpgid)
 NI_SYSCALL /* old ulimit syscall */
 NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_umask,compat_sys_umask) /* 60 */
+SYSCALL(sys_umask,sys_umask) /* 60 */
 SYSCALL(sys_chroot,compat_sys_chroot)
 SYSCALL(sys_ustat,compat_sys_ustat)
-SYSCALL(sys_dup2,compat_sys_dup2)
+SYSCALL(sys_dup2,sys_dup2)
 SYSCALL(sys_getppid,sys_getppid)
 SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */
 SYSCALL(sys_setsid,sys_setsid)
@@ -102,10 +102,10 @@ SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
 SYSCALL(sys_munmap,compat_sys_munmap)
 SYSCALL(sys_truncate,compat_sys_truncate)
 SYSCALL(sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_fchmod,sys_fchmod)
 SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,compat_sys_getpriority)
-SYSCALL(sys_setpriority,compat_sys_setpriority)
+SYSCALL(sys_getpriority,sys_getpriority)
+SYSCALL(sys_setpriority,sys_setpriority)
 NI_SYSCALL /* old profil syscall */
 SYSCALL(sys_statfs,compat_sys_statfs)
 SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */
@@ -126,7 +126,7 @@ SYSCALL(sys_wait4,compat_sys_wait4)
 SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */
 SYSCALL(sys_sysinfo,compat_sys_sysinfo)
 SYSCALL(sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,compat_sys_fsync)
+SYSCALL(sys_fsync,sys_fsync)
 SYSCALL(sys_sigreturn,compat_sys_sigreturn)
 SYSCALL(sys_clone,compat_sys_clone) /* 120 */
 SYSCALL(sys_setdomainname,compat_sys_setdomainname)
@@ -140,35 +140,35 @@ SYSCALL(sys_init_module,compat_sys_init_module)
 SYSCALL(sys_delete_module,compat_sys_delete_module)
 NI_SYSCALL /* 130: old get_kernel_syms */
 SYSCALL(sys_quotactl,compat_sys_quotactl)
-SYSCALL(sys_getpgid,compat_sys_getpgid)
-SYSCALL(sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_getpgid,sys_getpgid)
+SYSCALL(sys_fchdir,sys_fchdir)
 SYSCALL(sys_bdflush,compat_sys_bdflush)
 SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */
-SYSCALL(sys_s390_personality,compat_sys_s390_personality)
+SYSCALL(sys_s390_personality,sys_s390_personality)
 NI_SYSCALL /* for afs_syscall */
 SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
 SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
 SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */
 SYSCALL(sys_getdents,compat_sys_getdents)
 SYSCALL(sys_select,compat_sys_select)
-SYSCALL(sys_flock,compat_sys_flock)
+SYSCALL(sys_flock,sys_flock)
 SYSCALL(sys_msync,compat_sys_msync)
 SYSCALL(sys_readv,compat_sys_readv) /* 145 */
 SYSCALL(sys_writev,compat_sys_writev)
-SYSCALL(sys_getsid,compat_sys_getsid)
-SYSCALL(sys_fdatasync,compat_sys_fdatasync)
+SYSCALL(sys_getsid,sys_getsid)
+SYSCALL(sys_fdatasync,sys_fdatasync)
 SYSCALL(sys_sysctl,compat_sys_sysctl)
 SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */
 SYSCALL(sys_munlock,compat_sys_munlock)
-SYSCALL(sys_mlockall,compat_sys_mlockall)
+SYSCALL(sys_mlockall,sys_mlockall)
 SYSCALL(sys_munlockall,sys_munlockall)
 SYSCALL(sys_sched_setparam,compat_sys_sched_setparam)
 SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
 SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler)
-SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler)
+SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler)
 SYSCALL(sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
-SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
+SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min) /* 160 */
 SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
 SYSCALL(sys_nanosleep,compat_sys_nanosleep)
 SYSCALL(sys_mremap,compat_sys_mremap)
@@ -211,20 +211,20 @@ SYSCALL(sys_getuid,sys_getuid)
 SYSCALL(sys_getgid,sys_getgid) /* 200 */
 SYSCALL(sys_geteuid,sys_geteuid)
 SYSCALL(sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,compat_sys_setreuid)
-SYSCALL(sys_setregid,compat_sys_setregid)
+SYSCALL(sys_setreuid,sys_setreuid)
+SYSCALL(sys_setregid,sys_setregid)
 SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */
 SYSCALL(sys_setgroups,compat_sys_setgroups)
-SYSCALL(sys_fchown,compat_sys_fchown)
-SYSCALL(sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_fchown,sys_fchown)
+SYSCALL(sys_setresuid,sys_setresuid)
 SYSCALL(sys_getresuid,compat_sys_getresuid)
-SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */
+SYSCALL(sys_setresgid,sys_setresgid) /* 210 */
 SYSCALL(sys_getresgid,compat_sys_getresgid)
 SYSCALL(sys_chown,compat_sys_chown)
-SYSCALL(sys_setuid,compat_sys_setuid)
-SYSCALL(sys_setgid,compat_sys_setgid)
-SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */
-SYSCALL(sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_setuid,sys_setuid)
+SYSCALL(sys_setgid,sys_setgid)
+SYSCALL(sys_setfsuid,sys_setfsuid) /* 215 */
+SYSCALL(sys_setfsgid,sys_setfsgid)
 SYSCALL(sys_pivot_root,compat_sys_pivot_root)
 SYSCALL(sys_mincore,compat_sys_mincore)
 SYSCALL(sys_madvise,compat_sys_madvise)
@@ -245,19 +245,19 @@ SYSCALL(sys_removexattr,compat_sys_removexattr)
 SYSCALL(sys_lremovexattr,compat_sys_lremovexattr)
 SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
 SYSCALL(sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,compat_sys_tkill)
+SYSCALL(sys_tkill,sys_tkill)
 SYSCALL(sys_futex,compat_sys_futex)
 SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity)
 SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
-SYSCALL(sys_tgkill,compat_sys_tgkill)
+SYSCALL(sys_tgkill,sys_tgkill)
 NI_SYSCALL /* reserved for TUX */
 SYSCALL(sys_io_setup,compat_sys_io_setup)
 SYSCALL(sys_io_destroy,compat_sys_io_destroy)
 SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */
 SYSCALL(sys_io_submit,compat_sys_io_submit)
 SYSCALL(sys_io_cancel,compat_sys_io_cancel)
-SYSCALL(sys_exit_group,compat_sys_exit_group)
-SYSCALL(sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_exit_group,sys_exit_group)
+SYSCALL(sys_epoll_create,sys_epoll_create)
 SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
 SYSCALL(sys_epoll_wait,compat_sys_epoll_wait)
 SYSCALL(sys_set_tid_address,compat_sys_set_tid_address)
@@ -265,8 +265,8 @@ SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64)
 SYSCALL(sys_timer_create,compat_sys_timer_create)
 SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */
 SYSCALL(sys_timer_gettime,compat_sys_timer_gettime)
-SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun)
-SYSCALL(sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,sys_timer_delete)
 SYSCALL(sys_clock_settime,compat_sys_clock_settime)
 SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
 SYSCALL(sys_clock_getres,compat_sys_clock_getres)
@@ -290,11 +290,11 @@ SYSCALL(sys_add_key,compat_sys_add_key)
 SYSCALL(sys_request_key,compat_sys_request_key)
 SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */
 SYSCALL(sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,compat_sys_ioprio_set)
-SYSCALL(sys_ioprio_get,compat_sys_ioprio_get)
+SYSCALL(sys_ioprio_set,sys_ioprio_set)
+SYSCALL(sys_ioprio_get,sys_ioprio_get)
 SYSCALL(sys_inotify_init,sys_inotify_init)
 SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
-SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
+SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch)
 SYSCALL(sys_migrate_pages,compat_sys_migrate_pages)
 SYSCALL(sys_openat,compat_sys_openat)
 SYSCALL(sys_mkdirat,compat_sys_mkdirat)
@@ -326,31 +326,31 @@ SYSCALL(sys_fallocate,compat_sys_s390_fallocate)
 SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */
 SYSCALL(sys_signalfd,compat_sys_signalfd)
 NI_SYSCALL /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,compat_sys_eventfd)
-SYSCALL(sys_timerfd_create,compat_sys_timerfd_create)
+SYSCALL(sys_eventfd,sys_eventfd)
+SYSCALL(sys_timerfd_create,sys_timerfd_create)
 SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
 SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime)
 SYSCALL(sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,compat_sys_eventfd2)
-SYSCALL(sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_eventfd2,sys_eventfd2)
+SYSCALL(sys_inotify_init1,sys_inotify_init1)
 SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */
-SYSCALL(sys_dup3,compat_sys_dup3)
-SYSCALL(sys_epoll_create1,compat_sys_epoll_create1)
+SYSCALL(sys_dup3,sys_dup3)
+SYSCALL(sys_epoll_create1,sys_epoll_create1)
 SYSCALL(sys_preadv,compat_sys_preadv)
 SYSCALL(sys_pwritev,compat_sys_pwritev)
 SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
 SYSCALL(sys_perf_event_open,compat_sys_perf_event_open)
-SYSCALL(sys_fanotify_init,compat_sys_fanotify_init)
+SYSCALL(sys_fanotify_init,sys_fanotify_init)
 SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark)
 SYSCALL(sys_prlimit64,compat_sys_prlimit64)
 SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
 SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at)
 SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime)
-SYSCALL(sys_syncfs,compat_sys_syncfs)
-SYSCALL(sys_setns,compat_sys_setns)
+SYSCALL(sys_syncfs,sys_syncfs)
+SYSCALL(sys_setns,sys_setns)
 SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
 SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev)
-SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_s390_runtime_instr,sys_s390_runtime_instr)
 SYSCALL(sys_kcmp,compat_sys_kcmp)
 SYSCALL(sys_finit_module,compat_sys_finit_module)
 SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
@@ -363,3 +363,22 @@ SYSCALL(sys_bpf,compat_sys_bpf)
 SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
 SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
 SYSCALL(sys_execveat,compat_sys_execveat)
+SYSCALL(sys_userfaultfd,sys_userfaultfd) /* 355 */
+SYSCALL(sys_membarrier,sys_membarrier)
+SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
+SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
+SYSCALL(sys_socket,sys_socket)
+SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
+SYSCALL(sys_bind,sys_bind)
+SYSCALL(sys_connect,sys_connect)
+SYSCALL(sys_listen,sys_listen)
+SYSCALL(sys_accept4,sys_accept4)
+SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
+SYSCALL(sys_setsockopt,compat_sys_setsockopt)
+SYSCALL(sys_getsockname,compat_sys_getsockname)
+SYSCALL(sys_getpeername,compat_sys_getpeername)
+SYSCALL(sys_sendto,compat_sys_sendto)
+SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
+SYSCALL(sys_recvfrom,compat_sys_recvfrom)
+SYSCALL(sys_recvmsg,compat_sys_recvmsg)
+SYSCALL(sys_shutdown,sys_shutdown)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b9ce650e9e99..c8653435c70d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -89,17 +89,21 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 	if (smp_cpu_mtid &&
 	    time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
 		u64 cycles_new[32], *cycles_old;
-		u64 delta, mult, div;
+		u64 delta, fac, mult, div;
 
 		cycles_old = this_cpu_ptr(mt_cycles);
 		if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
+			fac = 1;
 			mult = div = 0;
 			for (i = 0; i <= smp_cpu_mtid; i++) {
 				delta = cycles_new[i] - cycles_old[i];
-				mult += delta;
-				div += (i + 1) * delta;
+				div += delta;
+				mult *= i + 1;
+				mult += delta * fac;
+				fac *= i + 1;
 			}
-			if (mult > 0) {
+			div *= fac;
+			if (div > 0) {
 				/* Update scaling factor */
 				__this_cpu_write(mt_scaling_mult, mult);
 				__this_cpu_write(mt_scaling_div, div);
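
The reworked loop computes the scaling factor mult/div = (sum of delta_i/(i+1)) / (sum of delta_i) purely in integers: the running factorial fac folds the common denominator into mult term by term, so no per-iteration division is needed, and div is multiplied by the final denominator once at the end. A stand-alone check of the arithmetic with made-up per-set cycle deltas:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t delta[] = { 300, 200, 100 };	/* hypothetical deltas */
	uint64_t fac = 1, mult = 0, div = 0;

	for (int i = 0; i < 3; i++) {
		div += delta[i];
		mult *= i + 1;		/* rescale earlier terms to the new denominator */
		mult += delta[i] * fac;	/* delta[i] * i!, i.e. delta[i]/(i+1) over (i+1)! */
		fac *= i + 1;		/* fac is now (i+1)! */
	}
	div *= fac;

	/* mult/div == (300/1 + 200/2 + 100/3) / 600 ~= 0.7222 */
	printf("%llu/%llu = %.4f\n", (unsigned long long)mult,
	       (unsigned long long)div, (double)mult / (double)div);
	return 0;
}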
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d3033183ed70..055a01de7c8d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1128,7 +1128,18 @@ END(error_exit)
 
 /* Runs on exception stack */
 ENTRY(nmi)
+	/*
+	 * Fix up the exception frame if we're on Xen.
+	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+	 * one value to the stack on native, so it may clobber the rdx
+	 * scratch slot, but it won't clobber any of the important
+	 * slots past it.
+	 *
+	 * Xen is a different story, because the Xen frame itself overlaps
+	 * the "NMI executing" variable.
+	 */
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
 	 * the iretq it performs will take us out of NMI context.
@@ -1179,9 +1190,12 @@ ENTRY(nmi)
 	 * we don't want to enable interrupts, because then we'll end
 	 * up in an awkward situation in which IRQs are on but NMIs
 	 * are off.
+	 *
+	 * We also must not push anything to the stack before switching
+	 * stacks lest we corrupt the "NMI executing" variable.
 	 */
 
-	SWAPGS
+	SWAPGS_UNSAFE_STACK
 	cld
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 155162ea0e00..ab5f1d447ef9 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -86,6 +86,16 @@ extern u64 asmlinkage efi_call(void *fp, ...);
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 					u32 type, u64 attribute);
 
+/*
+ * CONFIG_KASAN may redefine memset to __memset. __memset function is present
+ * only in kernel binary. Since the EFI stub linked into a separate binary it
+ * doesn't have __memset(). So we should use standard memset from
+ * arch/x86/boot/compressed/string.c. The same applies to memcpy and memmove.
+ */
+#undef memcpy
+#undef memset
+#undef memmove
+
 #endif /* CONFIG_X86_32 */
 
 extern struct efi_scratch efi_scratch;
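
The #undef block is needed because an instrumented build can rewrite the string functions at the preprocessor level, while a separately linked stub only provides the plain symbols. A minimal stand-alone illustration of the mechanism (the macro below is a stand-in for what a KASAN-enabled header can do, not the kernel's actual definition):

#include <string.h>
#include <stdio.h>

/* Stand-in for an instrumented header redirecting memset: */
#define memset(s, c, n) __memset((s), (c), (n))

/* In a binary that never defines __memset, any use of memset() would now
 * fail to link.  Undoing the macro makes calls resolve to the plain
 * symbol that actually exists in this binary: */
#undef memset

int main(void)
{
	char buf[8];
	memset(buf, 0, sizeof(buf));	/* the plain memset again */
	printf("%d\n", buf[0]);		/* prints 0 */
	return 0;
}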
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 349f80a82b82..2beee0382088 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -40,6 +40,7 @@
 
 #define KVM_PIO_PAGE_OFFSET 1
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+#define KVM_HALT_POLL_NS_DEFAULT 500000
 
 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index c1c0a1c14344..b98b471a3b7e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -331,6 +331,7 @@
 /* C1E active bits in int pending message */
 #define K8_INTP_C1E_ACTIVE_MASK		0x18000000
 #define MSR_K8_TSEG_ADDR		0xc0010112
+#define MSR_K8_TSEG_MASK		0xc0010113
 #define K8_MTRRFIXRANGE_DRAM_ENABLE	0x00040000 /* MtrrFixDramEn bit    */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY	0x00080000 /* MtrrFixDramModEn bit */
 #define K8_MTRR_RDMEM_WRMEM_MASK	0x18181818 /* Mask: RdMem|WrMem    */
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f68e48f5f6c2..c2130aef3f9d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
 #include <asm/timer.h>
 #include <asm/special_insns.h>
 
-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+     ".global _paravirt_nop\n"
+     "_paravirt_nop:\n\t"
+     "ret\n\t"
+     ".size _paravirt_nop, . - _paravirt_nop\n\t"
+     ".type _paravirt_nop, @function\n\t"
+     ".popsection");
 
 /* identity function, which can be inlined */
 u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 69088a1ba509..ff606f507913 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3322,7 +3322,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 			break;
 
 		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
-						    leaf);
+						    iterator.level);
 	}
 
 	walk_shadow_page_lockless_end(vcpu);
@@ -3614,7 +3614,7 @@ static void
 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			struct rsvd_bits_validate *rsvd_check,
 			int maxphyaddr, int level, bool nx, bool gbpages,
-			bool pse)
+			bool pse, bool amd)
 {
 	u64 exb_bit_rsvd = 0;
 	u64 gbpages_bit_rsvd = 0;
@@ -3631,7 +3631,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	 * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
 	 * leaf entries) on AMD CPUs only.
 	 */
-	if (guest_cpuid_is_amd(vcpu))
+	if (amd)
 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
 
 	switch (level) {
@@ -3699,7 +3699,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
 				cpuid_maxphyaddr(vcpu), context->root_level,
 				context->nx, guest_cpuid_has_gbpages(vcpu),
-				is_pse(vcpu));
+				is_pse(vcpu), guest_cpuid_is_amd(vcpu));
 }
 
 static void
@@ -3749,13 +3749,24 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 void
 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
 {
+	/*
+	 * Passing "true" to the last argument is okay; it adds a check
+	 * on bit 8 of the SPTEs which KVM doesn't use anyway.
+	 */
 	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 				boot_cpu_data.x86_phys_bits,
 				context->shadow_root_level, context->nx,
-				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu),
+				true);
 }
 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
 
+static inline bool boot_cpu_is_amd(void)
+{
+	WARN_ON_ONCE(!tdp_enabled);
+	return shadow_x_mask == 0;
+}
+
 /*
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
@@ -3764,11 +3775,11 @@ static void
 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 				struct kvm_mmu *context)
 {
-	if (guest_cpuid_is_amd(vcpu))
+	if (boot_cpu_is_amd())
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true);
+					cpu_has_gbpages, true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fdb8cb63a6c0..94b7d15db3fc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -202,6 +202,7 @@ module_param(npt, int, S_IRUGO);
 static int nested = true;
 module_param(nested, int, S_IRUGO);
 
+static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
@@ -1263,7 +1264,8 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
 	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
 	 * It also updates the guest-visible cr0 value.
 	 */
-	(void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+	kvm_mmu_reset_context(&svm->vcpu);
 
 	save->cr4 = X86_CR4_PAE;
 	/* rdx = ?? */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6bbb0dfb99d0..991466bf8dee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2190,6 +2190,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_LASTINTFROMIP:
 	case MSR_IA32_LASTINTTOIP:
 	case MSR_K8_SYSCFG:
+	case MSR_K8_TSEG_ADDR:
+	case MSR_K8_TSEG_MASK:
 	case MSR_K7_HWCR:
 	case MSR_VM_HSAVE_PA:
 	case MSR_K8_INT_PENDING_MSG:
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 09d3afc0a181..dc78a4a9a466 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -166,6 +166,7 @@ void pcibios_fixup_bus(struct pci_bus *b)
 {
 	struct pci_dev *dev;
 
+	pci_read_bridge_bases(b);
 	list_for_each_entry(dev, &b->devices, bus_list)
 		pcibios_fixup_device_resources(dev);
 }
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index d27b4dcf221f..b848cc3dc913 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -210,6 +210,10 @@ subsys_initcall(pcibios_init);
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
+	if (bus->parent) {
+		/* This is a subordinate bridge */
+		pci_read_bridge_bases(bus);
+	}
 }
 
 void pcibios_set_master(struct pci_dev *dev)
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index a8da3a50e374..0f5cb37636bc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev)
 
 	kfree(he_dev->rbpl_virt);
 	kfree(he_dev->rbpl_table);
-
-	if (he_dev->rbpl_pool)
-		dma_pool_destroy(he_dev->rbpl_pool);
+	dma_pool_destroy(he_dev->rbpl_pool);
 
 	if (he_dev->rbrq_base)
 		dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
@@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev)
 	dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
 			  he_dev->tpdrq_base, he_dev->tpdrq_phys);
 
-	if (he_dev->tpd_pool)
-		dma_pool_destroy(he_dev->tpd_pool);
+	dma_pool_destroy(he_dev->tpd_pool);
 
 	if (he_dev->pci_dev) {
 		pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
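
Both hunks rely on dma_pool_destroy() being a no-op when passed NULL, mirroring kfree(), so the conditional guards were pure noise. The same idiom in stand-alone C, with free() standing in for dma_pool_destroy():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *pool = NULL;

	/* free(NULL) is defined to do nothing, so a guard like
	 *     if (pool) free(pool);
	 * adds nothing but noise: */
	free(pool);
	printf("ok\n");
	return 0;
}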
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 74e18b0a6d89..3d7fb6516f74 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -805,7 +805,12 @@ static void solos_bh(unsigned long card_arg)
 				continue;
 			}
 
-			skb = alloc_skb(size + 1, GFP_ATOMIC);
+			/* Use netdev_alloc_skb() because it adds NET_SKB_PAD of
+			 * headroom, and ensures we can route packets back out an
+			 * Ethernet interface (for example) without having to
+			 * reallocate. Adding NET_IP_ALIGN also ensures that both
+			 * PPPoATM and PPPoEoBR2684 packets end up aligned. */
+			skb = netdev_alloc_skb_ip_align(NULL, size + 1);
 			if (!skb) {
 				if (net_ratelimit())
 					dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
@@ -869,7 +874,10 @@ static void solos_bh(unsigned long card_arg)
 		/* Allocate RX skbs for any ports which need them */
 		if (card->using_dma && card->atmdev[port] &&
 		    !card->rx_skb[port]) {
-			struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
+			/* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN
+			 * here; the FPGA can only DMA to addresses which are
+			 * aligned to 4 bytes. */
+			struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE);
 			if (skb) {
 				SKB_CB(skb)->dma_addr =
 					dma_map_single(&card->dev->dev, skb->data,
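
Reserving headroom at allocation time is what lets later layers prepend headers without copying the payload. A stand-alone model of the idea (PAD and ALIGN are illustrative stand-ins for NET_SKB_PAD and NET_IP_ALIGN, not the kernel's values):

#include <stdio.h>
#include <stdlib.h>

#define PAD   32	/* stand-in for NET_SKB_PAD */
#define ALIGN  2	/* stand-in for NET_IP_ALIGN */

int main(void)
{
	size_t size = 1500;
	unsigned char *buf = malloc(PAD + ALIGN + size);
	unsigned char *data = buf + PAD + ALIGN;	/* payload starts here */

	/* Prepending a 14-byte Ethernet header is now a pointer move,
	 * not a reallocation: */
	unsigned char *eth = data - 14;
	printf("headroom remaining: %td bytes\n", eth - buf);

	free(buf);
	return 0;
}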
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 764280a91776..e9fd32e91668 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
148 148
149 if (sibling == cpu) /* skip itself */ 149 if (sibling == cpu) /* skip itself */
150 continue; 150 continue;
151
151 sib_cpu_ci = get_cpu_cacheinfo(sibling); 152 sib_cpu_ci = get_cpu_cacheinfo(sibling);
153 if (!sib_cpu_ci->info_list)
154 continue;
155
152 sib_leaf = sib_cpu_ci->info_list + index; 156 sib_leaf = sib_cpu_ci->info_list + index;
153 cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); 157 cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
154 cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); 158 cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
159 163
160static void free_cache_attributes(unsigned int cpu) 164static void free_cache_attributes(unsigned int cpu)
161{ 165{
166 if (!per_cpu_cacheinfo(cpu))
167 return;
168
162 cache_shared_cpu_map_remove(cpu); 169 cache_shared_cpu_map_remove(cpu);
163 170
164 kfree(per_cpu_cacheinfo(cpu)); 171 kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
514 break; 521 break;
515 case CPU_DEAD: 522 case CPU_DEAD:
516 cache_remove_dev(cpu); 523 cache_remove_dev(cpu);
517 if (per_cpu_cacheinfo(cpu)) 524 free_cache_attributes(cpu);
518 free_cache_attributes(cpu);
519 break; 525 break;
520 } 526 }
521 return notifier_from_errno(rc); 527 return notifier_from_errno(rc);
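
Moving the NULL test inside free_cache_attributes() makes the function idempotent, so the CPU_DEAD notifier can call it unconditionally. The general shape of that pattern, sketched with hypothetical names:

	struct state { int dummy; };

	static void free_state(struct state **sp)
	{
		if (!*sp)
			return;	/* never allocated, or already freed */
		kfree(*sp);
		*sp = NULL;	/* idempotent on repeat calls */
	}
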
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index c37cf754a985..3c77645405e5 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -344,11 +344,12 @@ static int xgene_rng_probe(struct platform_device *pdev)
344 if (IS_ERR(ctx->csr_base)) 344 if (IS_ERR(ctx->csr_base))
345 return PTR_ERR(ctx->csr_base); 345 return PTR_ERR(ctx->csr_base);
346 346
347 ctx->irq = platform_get_irq(pdev, 0); 347 rc = platform_get_irq(pdev, 0);
348 if (ctx->irq < 0) { 348 if (rc < 0) {
349 dev_err(&pdev->dev, "No IRQ resource\n"); 349 dev_err(&pdev->dev, "No IRQ resource\n");
350 return ctx->irq; 350 return rc;
351 } 351 }
352 ctx->irq = rc;
352 353
353 dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d", 354 dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
354 ctx->csr_base, ctx->irq); 355 ctx->csr_base, ctx->irq);
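
The xgene-rng change follows the canonical platform_get_irq() pattern: capture the return value in a signed local first, so the error test still works if the destination field is (or later becomes) unsigned. A self-contained sketch of the idiom (the wrapper name is hypothetical):

	#include <linux/platform_device.h>

	static int get_irq_sketch(struct platform_device *pdev, unsigned int *irq_out)
	{
		int rc = platform_get_irq(pdev, 0);	/* signed on purpose */

		if (rc < 0)
			return rc;	/* negative errno survives intact */

		*irq_out = rc;	/* only now store into unsigned storage */
		return 0;
	}
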
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b60698b30d30..bc2a55bc35e4 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
687 687
688int mv_cesa_queue_req(struct crypto_async_request *req); 688int mv_cesa_queue_req(struct crypto_async_request *req);
689 689
690/*
691 * Helper function that indicates whether a crypto request needs to be
692 * cleaned up or not after being enqueued using mv_cesa_queue_req().
693 */
694static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
695 int ret)
696{
697 /*
698 * The queue still had some space, the request was queued
699 * normally, so there's no need to clean it up.
700 */
701 if (ret == -EINPROGRESS)
702 return false;
703
704 /*
705 * The queue had no space left, but since the request is
706 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
707 * the backlog and will be processed later. There's no need to
708 * clean it up.
709 */
710 if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
711 return false;
712
713 /* Request wasn't queued, we need to clean it up */
714 return true;
715}
716
690/* TDMA functions */ 717/* TDMA functions */
691 718
692static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter, 719static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
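
The helper centralizes the -EINPROGRESS/-EBUSY-with-backlog distinction that each call site previously open-coded. Typical usage, mirroring the cipher.c and hash.c hunks that follow (a sketch of the pattern, not an additional call site):

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);
	return ret;
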
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0745cf3b9c0e..3df2f4e7adb2 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
189{ 189{
190 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req); 190 struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
191 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq); 191 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
192
193 creq->req.base.engine = engine; 192 creq->req.base.engine = engine;
194 193
195 if (creq->req.base.type == CESA_DMA_REQ) 194 if (creq->req.base.type == CESA_DMA_REQ)
@@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
431 return ret; 430 return ret;
432 431
433 ret = mv_cesa_queue_req(&req->base); 432 ret = mv_cesa_queue_req(&req->base);
434 if (ret && ret != -EINPROGRESS) 433 if (mv_cesa_req_needs_cleanup(&req->base, ret))
435 mv_cesa_ablkcipher_cleanup(req); 434 mv_cesa_ablkcipher_cleanup(req);
436 435
437 return ret; 436 return ret;
@@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
551 return ret; 550 return ret;
552 551
553 ret = mv_cesa_queue_req(&req->base); 552 ret = mv_cesa_queue_req(&req->base);
554 if (ret && ret != -EINPROGRESS) 553 if (mv_cesa_req_needs_cleanup(&req->base, ret))
555 mv_cesa_ablkcipher_cleanup(req); 554 mv_cesa_ablkcipher_cleanup(req);
556 555
557 return ret; 556 return ret;
@@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
693 return ret; 692 return ret;
694 693
695 ret = mv_cesa_queue_req(&req->base); 694 ret = mv_cesa_queue_req(&req->base);
696 if (ret && ret != -EINPROGRESS) 695 if (mv_cesa_req_needs_cleanup(&req->base, ret))
697 mv_cesa_ablkcipher_cleanup(req); 696 mv_cesa_ablkcipher_cleanup(req);
698 697
699 return ret; 698 return ret;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index ae9272eb9c1a..e8d0d7128137 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
739 return 0; 739 return 0;
740 740
741 ret = mv_cesa_queue_req(&req->base); 741 ret = mv_cesa_queue_req(&req->base);
742 if (ret && ret != -EINPROGRESS) { 742 if (mv_cesa_req_needs_cleanup(&req->base, ret))
743 mv_cesa_ahash_cleanup(req); 743 mv_cesa_ahash_cleanup(req);
744 return ret;
745 }
746 744
747 return ret; 745 return ret;
748} 746}
@@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req)
766 return 0; 764 return 0;
767 765
768 ret = mv_cesa_queue_req(&req->base); 766 ret = mv_cesa_queue_req(&req->base);
769 if (ret && ret != -EINPROGRESS) 767 if (mv_cesa_req_needs_cleanup(&req->base, ret))
770 mv_cesa_ahash_cleanup(req); 768 mv_cesa_ahash_cleanup(req);
771 769
772 return ret; 770 return ret;
@@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
791 return 0; 789 return 0;
792 790
793 ret = mv_cesa_queue_req(&req->base); 791 ret = mv_cesa_queue_req(&req->base);
794 if (ret && ret != -EINPROGRESS) 792 if (mv_cesa_req_needs_cleanup(&req->base, ret))
795 mv_cesa_ahash_cleanup(req); 793 mv_cesa_ahash_cleanup(req);
796 794
797 return ret; 795 return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index a57b4194de28..0a5ca0ba5d64 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -88,6 +88,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
88 struct pci_dev *parent = pdev->bus->self; 88 struct pci_dev *parent = pdev->bus->self;
89 uint16_t bridge_ctl = 0; 89 uint16_t bridge_ctl = 0;
90 90
91 if (accel_dev->is_vf)
92 return;
93
91 dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n", 94 dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
92 accel_dev->accel_id); 95 accel_dev->accel_id);
93 96
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a07addde297b..8dd0af1d50bc 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -159,7 +159,7 @@ static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
159static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached) 159static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
160{ 160{
161 if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) { 161 if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
162 *attached = new ? true : false; 162 *attached = ((new >> idx) & 0x1) ? true : false;
163 return true; 163 return true;
164 } 164 }
165 165
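
A worked example of the bug fixed above, with hypothetical values prev = 0x2, new = 0x1, idx = 1: bit 1 went from 1 to 0, so the cable detached, yet the old code reported attached because new as a whole was non-zero. Testing the idx bit of new gives the right answer:

	u32 prev = 0x2, new = 0x1;
	int idx = 1;
	bool changed  = ((prev >> idx) & 0x1) != ((new >> idx) & 0x1);	/* true */
	bool attached = (new >> idx) & 0x1;	/* false: correctly detached */
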
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index d8de6a8dd4de..665efca59487 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -139,6 +139,14 @@ config QCOM_SCM
139 bool 139 bool
140 depends on ARM || ARM64 140 depends on ARM || ARM64
141 141
142config QCOM_SCM_32
143 def_bool y
144 depends on QCOM_SCM && ARM
145
146config QCOM_SCM_64
147 def_bool y
148 depends on QCOM_SCM && ARM64
149
142source "drivers/firmware/broadcom/Kconfig" 150source "drivers/firmware/broadcom/Kconfig"
143source "drivers/firmware/google/Kconfig" 151source "drivers/firmware/google/Kconfig"
144source "drivers/firmware/efi/Kconfig" 152source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 000830fc6707..2ee83474a3c1 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o 13obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o 14obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
15obj-$(CONFIG_QCOM_SCM) += qcom_scm.o 15obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
16obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o 16obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
17obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
17CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) 18CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
18 19
19obj-y += broadcom/ 20obj-y += broadcom/
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index e334a01cf92f..6b6548fda089 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,10 +5,6 @@
5/* error code which can't be mistaken for valid address */ 5/* error code which can't be mistaken for valid address */
6#define EFI_ERROR (~0UL) 6#define EFI_ERROR (~0UL)
7 7
8#undef memcpy
9#undef memset
10#undef memmove
11
12void efi_char16_printk(efi_system_table_t *, efi_char16_t *); 8void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
13 9
14efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, 10efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
new file mode 100644
index 000000000000..bb6555f6d63b
--- /dev/null
+++ b/drivers/firmware/qcom_scm-64.c
@@ -0,0 +1,63 @@
1/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/io.h>
14#include <linux/errno.h>
15#include <linux/qcom_scm.h>
16
17/**
18 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
19 * @entry: Entry point function for the cpus
20 * @cpus: The cpumask of cpus that will use the entry point
21 *
22 * Set the cold boot address of the cpus. Any cpu outside the supported
23 * range would be removed from the cpu present mask.
24 */
25int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
26{
27 return -ENOTSUPP;
28}
29
30/**
31 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
32 * @entry: Entry point function for the cpus
33 * @cpus: The cpumask of cpus that will use the entry point
34 *
35 * Set the Linux entry point for the SCM to transfer control to when coming
36 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
37 */
38int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
39{
40 return -ENOTSUPP;
41}
42
43/**
44 * qcom_scm_cpu_power_down() - Power down the cpu
45 * @flags - Flags to flush cache
46 *
47 * This is the end point for powering down the cpu. If there was a pending
48 * interrupt, control returns from this function; otherwise, the cpu jumps to
49 * the warm boot entry point set for this cpu upon reset.
50 */
51void __qcom_scm_cpu_power_down(u32 flags)
52{
53}
54
55int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
56{
57 return -ENOTSUPP;
58}
59
60int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
61{
62 return -ENOTSUPP;
63}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a14206..6647fb26ef25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
82extern int amdgpu_enable_scheduler; 82extern int amdgpu_enable_scheduler;
83extern int amdgpu_sched_jobs; 83extern int amdgpu_sched_jobs;
84extern int amdgpu_sched_hw_submission; 84extern int amdgpu_sched_hw_submission;
85extern int amdgpu_enable_semaphores;
85 86
86#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 87#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
87#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 88#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
432void amdgpu_fence_driver_fini(struct amdgpu_device *adev); 433void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
433void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); 434void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
434 435
435void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); 436int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
436int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, 437int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
437 struct amdgpu_irq_src *irq_src, 438 struct amdgpu_irq_src *irq_src,
438 unsigned irq_type); 439 unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
890 struct amdgpu_device *adev; 891 struct amdgpu_device *adev;
891 const struct amdgpu_ring_funcs *funcs; 892 const struct amdgpu_ring_funcs *funcs;
892 struct amdgpu_fence_driver fence_drv; 893 struct amdgpu_fence_driver fence_drv;
893 struct amd_gpu_scheduler *scheduler; 894 struct amd_gpu_scheduler sched;
894 895
895 spinlock_t fence_lock; 896 spinlock_t fence_lock;
896 struct mutex *ring_lock; 897 struct mutex *ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
1201 struct amdgpu_irq_src priv_inst_irq; 1202 struct amdgpu_irq_src priv_inst_irq;
1202 /* gfx status */ 1203 /* gfx status */
1203 uint32_t gfx_current_status; 1204 uint32_t gfx_current_status;
1204 /* sync signal for const engine */
1205 unsigned ce_sync_offs;
1206 /* ce ram size */ 1205 /* ce ram size */
1207 unsigned ce_ram_size; 1206 unsigned ce_ram_size;
1208}; 1207};
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
1274 uint32_t num_ibs; 1273 uint32_t num_ibs;
1275 struct mutex job_lock; 1274 struct mutex job_lock;
1276 struct amdgpu_user_fence uf; 1275 struct amdgpu_user_fence uf;
1277 int (*free_job)(struct amdgpu_job *sched_job); 1276 int (*free_job)(struct amdgpu_job *job);
1278}; 1277};
1278#define to_amdgpu_job(sched_job) \
1279 container_of((sched_job), struct amdgpu_job, base)
1279 1280
1280static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx) 1281static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
1281{ 1282{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed2192eba..84d68d658f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
183 return -ENOMEM; 183 return -ENOMEM;
184 184
185 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, 185 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
186 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo); 186 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
187 if (r) { 187 if (r) {
188 dev_err(rdev->dev, 188 dev_err(rdev->dev,
189 "failed to allocate BO for amdkfd (%d)\n", r); 189 "failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee640ce..cd639c362df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
79 int time; 79 int time;
80 80
81 n = AMDGPU_BENCHMARK_ITERATIONS; 81 n = AMDGPU_BENCHMARK_ITERATIONS;
82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); 82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
83 NULL, &sobj);
83 if (r) { 84 if (r) {
84 goto out_cleanup; 85 goto out_cleanup;
85 } 86 }
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
91 if (r) { 92 if (r) {
92 goto out_cleanup; 93 goto out_cleanup;
93 } 94 }
94 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); 95 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
96 NULL, &dobj);
95 if (r) { 97 if (r) {
96 goto out_cleanup; 98 goto out_cleanup;
97 } 99 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f9f86d..1c3fc99c5465 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
86 86
87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); 87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, 88 ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); 89 AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
90 if (ret) 90 if (ret)
91 return ret; 91 return ret;
92 ret = amdgpu_bo_reserve(bo, false); 92 ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
197 197
198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, 198 ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
199 true, domain, flags, 199 true, domain, flags,
200 NULL, &placement, &obj); 200 NULL, &placement, NULL,
201 &obj);
201 if (ret) { 202 if (ret) {
202 DRM_ERROR("(%d) bo create failed\n", ret); 203 DRM_ERROR("(%d) bo create failed\n", ret);
203 return ret; 204 return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355aeb62fd..749420f1ea6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,41 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
154{ 154{
155 union drm_amdgpu_cs *cs = data; 155 union drm_amdgpu_cs *cs = data;
156 uint64_t *chunk_array_user; 156 uint64_t *chunk_array_user;
157 uint64_t *chunk_array = NULL; 157 uint64_t *chunk_array;
158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 158 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
159 unsigned size, i; 159 unsigned size, i;
160 int r = 0; 160 int ret;
161 161
162 if (!cs->in.num_chunks) 162 if (cs->in.num_chunks == 0)
163 goto out; 163 return 0;
164
165 chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
166 if (!chunk_array)
167 return -ENOMEM;
164 168
165 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id); 169 p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
166 if (!p->ctx) { 170 if (!p->ctx) {
167 r = -EINVAL; 171 ret = -EINVAL;
168 goto out; 172 goto free_chunk;
169 } 173 }
174
170 p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle); 175 p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
171 176
172 /* get chunks */ 177 /* get chunks */
173 INIT_LIST_HEAD(&p->validated); 178 INIT_LIST_HEAD(&p->validated);
174 chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
175 if (chunk_array == NULL) {
176 r = -ENOMEM;
177 goto out;
178 }
179
180 chunk_array_user = (uint64_t __user *)(cs->in.chunks); 179 chunk_array_user = (uint64_t __user *)(cs->in.chunks);
181 if (copy_from_user(chunk_array, chunk_array_user, 180 if (copy_from_user(chunk_array, chunk_array_user,
182 sizeof(uint64_t)*cs->in.num_chunks)) { 181 sizeof(uint64_t)*cs->in.num_chunks)) {
183 r = -EFAULT; 182 ret = -EFAULT;
184 goto out; 183 goto put_bo_list;
185 } 184 }
186 185
187 p->nchunks = cs->in.num_chunks; 186 p->nchunks = cs->in.num_chunks;
188 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), 187 p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
189 GFP_KERNEL); 188 GFP_KERNEL);
190 if (p->chunks == NULL) { 189 if (!p->chunks) {
191 r = -ENOMEM; 190 ret = -ENOMEM;
192 goto out; 191 goto put_bo_list;
193 } 192 }
194 193
195 for (i = 0; i < p->nchunks; i++) { 194 for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +199,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
200 chunk_ptr = (void __user *)chunk_array[i]; 199 chunk_ptr = (void __user *)chunk_array[i];
201 if (copy_from_user(&user_chunk, chunk_ptr, 200 if (copy_from_user(&user_chunk, chunk_ptr,
202 sizeof(struct drm_amdgpu_cs_chunk))) { 201 sizeof(struct drm_amdgpu_cs_chunk))) {
203 r = -EFAULT; 202 ret = -EFAULT;
204 goto out; 203 i--;
204 goto free_partial_kdata;
205 } 205 }
206 p->chunks[i].chunk_id = user_chunk.chunk_id; 206 p->chunks[i].chunk_id = user_chunk.chunk_id;
207 p->chunks[i].length_dw = user_chunk.length_dw; 207 p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
212 212
213 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); 213 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
214 if (p->chunks[i].kdata == NULL) { 214 if (p->chunks[i].kdata == NULL) {
215 r = -ENOMEM; 215 ret = -ENOMEM;
216 goto out; 216 i--;
217 goto free_partial_kdata;
217 } 218 }
218 size *= sizeof(uint32_t); 219 size *= sizeof(uint32_t);
219 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 220 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
220 r = -EFAULT; 221 ret = -EFAULT;
221 goto out; 222 goto free_partial_kdata;
222 } 223 }
223 224
224 switch (p->chunks[i].chunk_id) { 225 switch (p->chunks[i].chunk_id) {
@@ -238,15 +239,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
238 gobj = drm_gem_object_lookup(p->adev->ddev, 239 gobj = drm_gem_object_lookup(p->adev->ddev,
239 p->filp, handle); 240 p->filp, handle);
240 if (gobj == NULL) { 241 if (gobj == NULL) {
241 r = -EINVAL; 242 ret = -EINVAL;
242 goto out; 243 goto free_partial_kdata;
243 } 244 }
244 245
245 p->uf.bo = gem_to_amdgpu_bo(gobj); 246 p->uf.bo = gem_to_amdgpu_bo(gobj);
246 p->uf.offset = fence_data->offset; 247 p->uf.offset = fence_data->offset;
247 } else { 248 } else {
248 r = -EINVAL; 249 ret = -EINVAL;
249 goto out; 250 goto free_partial_kdata;
250 } 251 }
251 break; 252 break;
252 253
@@ -254,19 +255,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
254 break; 255 break;
255 256
256 default: 257 default:
257 r = -EINVAL; 258 ret = -EINVAL;
258 goto out; 259 goto free_partial_kdata;
259 } 260 }
260 } 261 }
261 262
262 263
263 p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL); 264 p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
264 if (!p->ibs) 265 if (!p->ibs) {
265 r = -ENOMEM; 266 ret = -ENOMEM;
267 goto free_all_kdata;
268 }
266 269
267out:
268 kfree(chunk_array); 270 kfree(chunk_array);
269 return r; 271 return 0;
272
273free_all_kdata:
274 i = p->nchunks - 1;
275free_partial_kdata:
276 for (; i >= 0; i--)
277 drm_free_large(p->chunks[i].kdata);
278 kfree(p->chunks);
279put_bo_list:
280 if (p->bo_list)
281 amdgpu_bo_list_put(p->bo_list);
282 amdgpu_ctx_put(p->ctx);
283free_chunk:
284 kfree(chunk_array);
285
286 return ret;
270} 287}
271 288
272/* Returns how many bytes TTM can move per IB. 289/* Returns how many bytes TTM can move per IB.
@@ -321,25 +338,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
321 return max(bytes_moved_threshold, 1024*1024ull); 338 return max(bytes_moved_threshold, 1024*1024ull);
322} 339}
323 340
324int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p) 341int amdgpu_cs_list_validate(struct amdgpu_device *adev,
342 struct amdgpu_vm *vm,
343 struct list_head *validated)
325{ 344{
326 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
327 struct amdgpu_vm *vm = &fpriv->vm;
328 struct amdgpu_device *adev = p->adev;
329 struct amdgpu_bo_list_entry *lobj; 345 struct amdgpu_bo_list_entry *lobj;
330 struct list_head duplicates;
331 struct amdgpu_bo *bo; 346 struct amdgpu_bo *bo;
332 u64 bytes_moved = 0, initial_bytes_moved; 347 u64 bytes_moved = 0, initial_bytes_moved;
333 u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev); 348 u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
334 int r; 349 int r;
335 350
336 INIT_LIST_HEAD(&duplicates); 351 list_for_each_entry(lobj, validated, tv.head) {
337 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
338 if (unlikely(r != 0)) {
339 return r;
340 }
341
342 list_for_each_entry(lobj, &p->validated, tv.head) {
343 bo = lobj->robj; 352 bo = lobj->robj;
344 if (!bo->pin_count) { 353 if (!bo->pin_count) {
345 u32 domain = lobj->prefered_domains; 354 u32 domain = lobj->prefered_domains;
@@ -373,7 +382,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
373 domain = lobj->allowed_domains; 382 domain = lobj->allowed_domains;
374 goto retry; 383 goto retry;
375 } 384 }
376 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
377 return r; 385 return r;
378 } 386 }
379 } 387 }
@@ -386,6 +394,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
386{ 394{
387 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 395 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
388 struct amdgpu_cs_buckets buckets; 396 struct amdgpu_cs_buckets buckets;
397 struct list_head duplicates;
389 bool need_mmap_lock = false; 398 bool need_mmap_lock = false;
390 int i, r; 399 int i, r;
391 400
@@ -405,8 +414,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
405 if (need_mmap_lock) 414 if (need_mmap_lock)
406 down_read(&current->mm->mmap_sem); 415 down_read(&current->mm->mmap_sem);
407 416
408 r = amdgpu_cs_list_validate(p); 417 INIT_LIST_HEAD(&duplicates);
418 r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
419 if (unlikely(r != 0))
420 goto error_reserve;
421
422 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
423 if (r)
424 goto error_validate;
425
426 r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
427
428error_validate:
429 if (r)
430 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
409 431
432error_reserve:
410 if (need_mmap_lock) 433 if (need_mmap_lock)
411 up_read(&current->mm->mmap_sem); 434 up_read(&current->mm->mmap_sem);
412 435
@@ -772,15 +795,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
772 return 0; 795 return 0;
773} 796}
774 797
775static int amdgpu_cs_free_job(struct amdgpu_job *sched_job) 798static int amdgpu_cs_free_job(struct amdgpu_job *job)
776{ 799{
777 int i; 800 int i;
778 if (sched_job->ibs) 801 if (job->ibs)
779 for (i = 0; i < sched_job->num_ibs; i++) 802 for (i = 0; i < job->num_ibs; i++)
780 amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 803 amdgpu_ib_free(job->adev, &job->ibs[i]);
781 kfree(sched_job->ibs); 804 kfree(job->ibs);
782 if (sched_job->uf.bo) 805 if (job->uf.bo)
783 drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base); 806 drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
784 return 0; 807 return 0;
785} 808}
786 809
@@ -804,7 +827,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
804 r = amdgpu_cs_parser_init(parser, data); 827 r = amdgpu_cs_parser_init(parser, data);
805 if (r) { 828 if (r) {
806 DRM_ERROR("Failed to initialize parser !\n"); 829 DRM_ERROR("Failed to initialize parser !\n");
807 amdgpu_cs_parser_fini(parser, r, false); 830 kfree(parser);
808 up_read(&adev->exclusive_lock); 831 up_read(&adev->exclusive_lock);
809 r = amdgpu_cs_handle_lockup(adev, r); 832 r = amdgpu_cs_handle_lockup(adev, r);
810 return r; 833 return r;
@@ -842,7 +865,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
842 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 865 job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
843 if (!job) 866 if (!job)
844 return -ENOMEM; 867 return -ENOMEM;
845 job->base.sched = ring->scheduler; 868 job->base.sched = &ring->sched;
846 job->base.s_entity = &parser->ctx->rings[ring->idx].entity; 869 job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
847 job->adev = parser->adev; 870 job->adev = parser->adev;
848 job->ibs = parser->ibs; 871 job->ibs = parser->ibs;
@@ -857,7 +880,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
857 880
858 job->free_job = amdgpu_cs_free_job; 881 job->free_job = amdgpu_cs_free_job;
859 mutex_lock(&job->job_lock); 882 mutex_lock(&job->job_lock);
860 r = amd_sched_entity_push_job((struct amd_sched_job *)job); 883 r = amd_sched_entity_push_job(&job->base);
861 if (r) { 884 if (r) {
862 mutex_unlock(&job->job_lock); 885 mutex_unlock(&job->job_lock);
863 amdgpu_cs_free_job(job); 886 amdgpu_cs_free_job(job);
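
amdgpu_cs_parser_init() now unwinds through a ladder of labels, each freeing only what was acquired before the failure point; on a mid-loop failure, i is decremented first so the current, unpopulated slot is skipped. The pattern reduced to a hypothetical sketch (i is signed here so the backward loop terminates):

	#include <linux/slab.h>

	static int parser_init_sketch(void **kdata, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			kdata[i] = kmalloc(64, GFP_KERNEL);
			if (!kdata[i]) {
				ret = -ENOMEM;
				i--;		/* slot i holds nothing yet */
				goto free_partial;
			}
		}
		return 0;

	free_partial:
		for (; i >= 0; i--)
			kfree(kdata[i]);
		return ret;
	}
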
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4eb5a6f..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
43 for (i = 0; i < adev->num_rings; i++) { 43 for (i = 0; i < adev->num_rings; i++) {
44 struct amd_sched_rq *rq; 44 struct amd_sched_rq *rq;
45 if (kernel) 45 if (kernel)
46 rq = &adev->rings[i]->scheduler->kernel_rq; 46 rq = &adev->rings[i]->sched.kernel_rq;
47 else 47 else
48 rq = &adev->rings[i]->scheduler->sched_rq; 48 rq = &adev->rings[i]->sched.sched_rq;
49 r = amd_sched_entity_init(adev->rings[i]->scheduler, 49 r = amd_sched_entity_init(&adev->rings[i]->sched,
50 &ctx->rings[i].entity, 50 &ctx->rings[i].entity,
51 rq, amdgpu_sched_jobs); 51 rq, amdgpu_sched_jobs);
52 if (r) 52 if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
55 55
56 if (i < adev->num_rings) { 56 if (i < adev->num_rings) {
57 for (j = 0; j < i; j++) 57 for (j = 0; j < i; j++)
58 amd_sched_entity_fini(adev->rings[j]->scheduler, 58 amd_sched_entity_fini(&adev->rings[j]->sched,
59 &ctx->rings[j].entity); 59 &ctx->rings[j].entity);
60 kfree(ctx); 60 kfree(ctx);
61 return r; 61 return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
75 75
76 if (amdgpu_enable_scheduler) { 76 if (amdgpu_enable_scheduler) {
77 for (i = 0; i < adev->num_rings; i++) 77 for (i = 0; i < adev->num_rings; i++)
78 amd_sched_entity_fini(adev->rings[i]->scheduler, 78 amd_sched_entity_fini(&adev->rings[i]->sched,
79 &ctx->rings[i].entity); 79 &ctx->rings[i].entity);
80 } 80 }
81} 81}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae945794..6068d8207d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
246 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, 246 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
247 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 247 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
248 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 248 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
249 NULL, &adev->vram_scratch.robj); 249 NULL, NULL, &adev->vram_scratch.robj);
250 if (r) { 250 if (r) {
251 return r; 251 return r;
252 } 252 }
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
449 449
450 if (adev->wb.wb_obj == NULL) { 450 if (adev->wb.wb_obj == NULL) {
451 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, 451 r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
452 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj); 452 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
453 &adev->wb.wb_obj);
453 if (r) { 454 if (r) {
454 dev_warn(adev->dev, "(%d) create WB bo failed\n", r); 455 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
455 return r; 456 return r;
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1650 drm_kms_helper_poll_disable(dev); 1651 drm_kms_helper_poll_disable(dev);
1651 1652
1652 /* turn off display hw */ 1653 /* turn off display hw */
1654 drm_modeset_lock_all(dev);
1653 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1655 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1654 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1656 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1655 } 1657 }
1658 drm_modeset_unlock_all(dev);
1656 1659
1657 /* unpin the front buffers */ 1660 /* unpin the front buffers */
1658 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1661 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1747 if (fbcon) { 1750 if (fbcon) {
1748 drm_helper_resume_force_mode(dev); 1751 drm_helper_resume_force_mode(dev);
1749 /* turn on display hw */ 1752 /* turn on display hw */
1753 drm_modeset_lock_all(dev);
1750 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1754 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1751 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1755 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1752 } 1756 }
1757 drm_modeset_unlock_all(dev);
1753 } 1758 }
1754 1759
1755 drm_kms_helper_poll_enable(dev); 1760 drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd1622c..adb48353f2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
79int amdgpu_enable_scheduler = 0; 79int amdgpu_enable_scheduler = 0;
80int amdgpu_sched_jobs = 16; 80int amdgpu_sched_jobs = 16;
81int amdgpu_sched_hw_submission = 2; 81int amdgpu_sched_hw_submission = 2;
82int amdgpu_enable_semaphores = 1;
82 83
83MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 84MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
84module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 85module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
152MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)"); 153MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
153module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444); 154module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
154 155
156MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
157module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
158
155static struct pci_device_id pciidlist[] = { 159static struct pci_device_id pciidlist[] = {
156#ifdef CONFIG_DRM_AMDGPU_CIK 160#ifdef CONFIG_DRM_AMDGPU_CIK
157 /* Kaveri */ 161 /* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6d07ea..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
609 * Init the fence driver for the requested ring (all asics). 609 * Init the fence driver for the requested ring (all asics).
610 * Helper function for amdgpu_fence_driver_init(). 610 * Helper function for amdgpu_fence_driver_init().
611 */ 611 */
612void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) 612int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
613{ 613{
614 int i; 614 int i, r;
615 615
616 ring->fence_drv.cpu_addr = NULL; 616 ring->fence_drv.cpu_addr = NULL;
617 ring->fence_drv.gpu_addr = 0; 617 ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
625 amdgpu_fence_check_lockup); 625 amdgpu_fence_check_lockup);
626 ring->fence_drv.ring = ring; 626 ring->fence_drv.ring = ring;
627 627
628 init_waitqueue_head(&ring->fence_drv.fence_queue);
629
628 if (amdgpu_enable_scheduler) { 630 if (amdgpu_enable_scheduler) {
629 ring->scheduler = amd_sched_create(&amdgpu_sched_ops, 631 r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
630 ring->idx, 632 amdgpu_sched_hw_submission, ring->name);
631 amdgpu_sched_hw_submission, 633 if (r) {
632 (void *)ring->adev); 634 DRM_ERROR("Failed to create scheduler on ring %s.\n",
633 if (!ring->scheduler) 635 ring->name);
634 DRM_ERROR("Failed to create scheduler on ring %d.\n", 636 return r;
635 ring->idx); 637 }
636 } 638 }
639
640 return 0;
637} 641}
638 642
639/** 643/**
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
681 wake_up_all(&ring->fence_drv.fence_queue); 685 wake_up_all(&ring->fence_drv.fence_queue);
682 amdgpu_irq_put(adev, ring->fence_drv.irq_src, 686 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
683 ring->fence_drv.irq_type); 687 ring->fence_drv.irq_type);
684 if (ring->scheduler) 688 amd_sched_fini(&ring->sched);
685 amd_sched_destroy(ring->scheduler);
686 ring->fence_drv.initialized = false; 689 ring->fence_drv.initialized = false;
687 } 690 }
688 mutex_unlock(&adev->ring_lock); 691 mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a486c5c2..7312d729d300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
127 r = amdgpu_bo_create(adev, adev->gart.table_size, 127 r = amdgpu_bo_create(adev, adev->gart.table_size,
128 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 128 PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
129 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 129 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
130 NULL, &adev->gart.robj); 130 NULL, NULL, &adev->gart.robj);
131 if (r) { 131 if (r) {
132 return r; 132 return r;
133 } 133 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab374bf..7297ca3a0ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
69 } 69 }
70 } 70 }
71retry: 71retry:
72 r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj); 72 r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
73 flags, NULL, NULL, &robj);
73 if (r) { 74 if (r) {
74 if (r != -ERESTARTSYS) { 75 if (r != -ERESTARTSYS) {
75 if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { 76 if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
426 &args->data.data_size_bytes, 427 &args->data.data_size_bytes,
427 &args->data.flags); 428 &args->data.flags);
428 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) { 429 } else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
430 if (args->data.data_size_bytes > sizeof(args->data.data)) {
431 r = -EINVAL;
432 goto unreserve;
433 }
429 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info); 434 r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
430 if (!r) 435 if (!r)
431 r = amdgpu_bo_set_metadata(robj, args->data.data, 436 r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
433 args->data.flags); 438 args->data.flags);
434 } 439 }
435 440
441unreserve:
436 amdgpu_bo_unreserve(robj); 442 amdgpu_bo_unreserve(robj);
437out: 443out:
438 drm_gem_object_unreference_unlocked(gobj); 444 drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
454 struct ttm_validate_buffer tv, *entry; 460 struct ttm_validate_buffer tv, *entry;
455 struct amdgpu_bo_list_entry *vm_bos; 461 struct amdgpu_bo_list_entry *vm_bos;
456 struct ww_acquire_ctx ticket; 462 struct ww_acquire_ctx ticket;
457 struct list_head list; 463 struct list_head list, duplicates;
458 unsigned domain; 464 unsigned domain;
459 int r; 465 int r;
460 466
461 INIT_LIST_HEAD(&list); 467 INIT_LIST_HEAD(&list);
468 INIT_LIST_HEAD(&duplicates);
462 469
463 tv.bo = &bo_va->bo->tbo; 470 tv.bo = &bo_va->bo->tbo;
464 tv.shared = true; 471 tv.shared = true;
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
468 if (!vm_bos) 475 if (!vm_bos)
469 return; 476 return;
470 477
471 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); 478 /* Provide duplicates to avoid -EALREADY */
479 r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
472 if (r) 480 if (r)
473 goto error_free; 481 goto error_free;
474 482
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
651 int r; 659 int r;
652 660
653 args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8); 661 args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
654 args->size = args->pitch * args->height; 662 args->size = (u64)args->pitch * args->height;
655 args->size = ALIGN(args->size, PAGE_SIZE); 663 args->size = ALIGN(args->size, PAGE_SIZE);
656 664
657 r = amdgpu_gem_object_create(adev, args->size, 0, 665 r = amdgpu_gem_object_create(adev, args->size, 0,
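
The dumb-buffer size change is a 32-bit overflow fix: pitch and height are both u32, so their product used to be computed in 32 bits before landing in the 64-bit size. A worked example with hypothetical dimensions, 32768x32768 at 32 bpp: pitch = 131072 bytes and 131072 * 32768 = 2^32, which wraps to 0 in u32 arithmetic. Widening one operand first keeps the full value:

	u32 pitch = 131072, height = 32768;
	u64 wrong = pitch * height;		/* multiply done in 32 bits: 0 */
	u64 right = (u64)pitch * height;	/* 4294967296 */
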
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803acedc..534fc04e80fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
43 r = amdgpu_bo_create(adev, adev->irq.ih.ring_size, 43 r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
44 PAGE_SIZE, true, 44 PAGE_SIZE, true,
45 AMDGPU_GEM_DOMAIN_GTT, 0, 45 AMDGPU_GEM_DOMAIN_GTT, 0,
46 NULL, &adev->irq.ih.ring_obj); 46 NULL, NULL, &adev->irq.ih.ring_obj);
47 if (r) { 47 if (r) {
48 DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r); 48 DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
49 return r; 49 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..7c42ff670080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
140 */ 140 */
141int amdgpu_irq_postinstall(struct drm_device *dev) 141int amdgpu_irq_postinstall(struct drm_device *dev)
142{ 142{
143 dev->max_vblank_count = 0x001fffff; 143 dev->max_vblank_count = 0x00ffffff;
144 return 0; 144 return 0;
145} 145}
146 146
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..8c735f544b66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
390 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; 390 min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
391 } 391 }
392 case AMDGPU_INFO_READ_MMR_REG: { 392 case AMDGPU_INFO_READ_MMR_REG: {
393 unsigned n, alloc_size = info->read_mmr_reg.count * 4; 393 unsigned n, alloc_size;
394 uint32_t *regs; 394 uint32_t *regs;
395 unsigned se_num = (info->read_mmr_reg.instance >> 395 unsigned se_num = (info->read_mmr_reg.instance >>
396 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) & 396 AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
406 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) 406 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
407 sh_num = 0xffffffff; 407 sh_num = 0xffffffff;
408 408
409 regs = kmalloc(alloc_size, GFP_KERNEL); 409 regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
410 if (!regs) 410 if (!regs)
411 return -ENOMEM; 411 return -ENOMEM;
412 alloc_size = info->read_mmr_reg.count * sizeof(*regs);
412 413
413 for (i = 0; i < info->read_mmr_reg.count; i++) 414 for (i = 0; i < info->read_mmr_reg.count; i++)
414 if (amdgpu_asic_read_register(adev, se_num, sh_num, 415 if (amdgpu_asic_read_register(adev, se_num, sh_num,
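
The amdgpu_info_ioctl hunk swaps an open-coded count * 4 for kmalloc_array(), which checks the multiplication for overflow and returns NULL rather than allocating a short buffer that the copy loop would then overrun. The idiom in isolation (count stands in for info->read_mmr_reg.count):

	uint32_t *regs = kmalloc_array(count, sizeof(*regs), GFP_KERNEL);
	if (!regs)
		return -ENOMEM;
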
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d55b96f..1a7708f365f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
215 bool kernel, u32 domain, u64 flags, 215 bool kernel, u32 domain, u64 flags,
216 struct sg_table *sg, 216 struct sg_table *sg,
217 struct ttm_placement *placement, 217 struct ttm_placement *placement,
218 struct reservation_object *resv,
218 struct amdgpu_bo **bo_ptr) 219 struct amdgpu_bo **bo_ptr)
219{ 220{
220 struct amdgpu_bo *bo; 221 struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
261 /* Kernel allocations are uninterruptible */ 262 /* Kernel allocations are uninterruptible */
262 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, 263 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
263 &bo->placement, page_align, !kernel, NULL, 264 &bo->placement, page_align, !kernel, NULL,
264 acc_size, sg, NULL, &amdgpu_ttm_bo_destroy); 265 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
265 if (unlikely(r != 0)) { 266 if (unlikely(r != 0)) {
266 return r; 267 return r;
267 } 268 }
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
275int amdgpu_bo_create(struct amdgpu_device *adev, 276int amdgpu_bo_create(struct amdgpu_device *adev,
276 unsigned long size, int byte_align, 277 unsigned long size, int byte_align,
277 bool kernel, u32 domain, u64 flags, 278 bool kernel, u32 domain, u64 flags,
278 struct sg_table *sg, struct amdgpu_bo **bo_ptr) 279 struct sg_table *sg,
280 struct reservation_object *resv,
281 struct amdgpu_bo **bo_ptr)
279{ 282{
280 struct ttm_placement placement = {0}; 283 struct ttm_placement placement = {0};
281 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; 284 struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
286 amdgpu_ttm_placement_init(adev, &placement, 289 amdgpu_ttm_placement_init(adev, &placement,
287 placements, domain, flags); 290 placements, domain, flags);
288 291
289 return amdgpu_bo_create_restricted(adev, size, byte_align, 292 return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
290 kernel, domain, flags, 293 domain, flags, sg, &placement,
291 sg, 294 resv, bo_ptr);
292 &placement,
293 bo_ptr);
294} 295}
295 296
296int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) 297int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
535 if (metadata == NULL) 536 if (metadata == NULL)
536 return -EINVAL; 537 return -EINVAL;
537 538
538 buffer = kzalloc(metadata_size, GFP_KERNEL); 539 buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
539 if (buffer == NULL) 540 if (buffer == NULL)
540 return -ENOMEM; 541 return -ENOMEM;
541 542
542 memcpy(buffer, metadata, metadata_size);
543
544 kfree(bo->metadata); 543 kfree(bo->metadata);
545 bo->metadata_flags = flags; 544 bo->metadata_flags = flags;
546 bo->metadata = buffer; 545 bo->metadata = buffer;
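
kmemdup() collapses the kzalloc()+memcpy() pair into a single call, and also drops the pointless zeroing of memory that is immediately overwritten. The before/after, sketched with the names from the hunk above:

	/* before */
	buffer = kzalloc(metadata_size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	memcpy(buffer, metadata, metadata_size);

	/* after */
	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
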
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dcec561..3c2ff4567798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
129 unsigned long size, int byte_align, 129 unsigned long size, int byte_align,
130 bool kernel, u32 domain, u64 flags, 130 bool kernel, u32 domain, u64 flags,
131 struct sg_table *sg, 131 struct sg_table *sg,
132 struct reservation_object *resv,
132 struct amdgpu_bo **bo_ptr); 133 struct amdgpu_bo **bo_ptr);
133int amdgpu_bo_create_restricted(struct amdgpu_device *adev, 134int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
134 unsigned long size, int byte_align, 135 unsigned long size, int byte_align,
135 bool kernel, u32 domain, u64 flags, 136 bool kernel, u32 domain, u64 flags,
136 struct sg_table *sg, 137 struct sg_table *sg,
137 struct ttm_placement *placement, 138 struct ttm_placement *placement,
139 struct reservation_object *resv,
138 struct amdgpu_bo **bo_ptr); 140 struct amdgpu_bo **bo_ptr);
139int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); 141int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
140void amdgpu_bo_kunmap(struct amdgpu_bo *bo); 142void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe32d6a..59f735a933a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
61 struct dma_buf_attachment *attach, 61 struct dma_buf_attachment *attach,
62 struct sg_table *sg) 62 struct sg_table *sg)
63{ 63{
64 struct reservation_object *resv = attach->dmabuf->resv;
64 struct amdgpu_device *adev = dev->dev_private; 65 struct amdgpu_device *adev = dev->dev_private;
65 struct amdgpu_bo *bo; 66 struct amdgpu_bo *bo;
66 int ret; 67 int ret;
67 68
69 ww_mutex_lock(&resv->lock, NULL);
68 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, 70 ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
69 AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); 71 AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
72 ww_mutex_unlock(&resv->lock);
70 if (ret) 73 if (ret)
71 return ERR_PTR(ret); 74 return ERR_PTR(ret);
72 75
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec91484c24..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
357 ring->adev = adev; 357 ring->adev = adev;
358 ring->idx = adev->num_rings++; 358 ring->idx = adev->num_rings++;
359 adev->rings[ring->idx] = ring; 359 adev->rings[ring->idx] = ring;
360 amdgpu_fence_driver_init_ring(ring); 360 r = amdgpu_fence_driver_init_ring(ring);
361 if (r)
362 return r;
361 } 363 }
362 364
363 init_waitqueue_head(&ring->fence_drv.fence_queue);
364
365 r = amdgpu_wb_get(adev, &ring->rptr_offs); 365 r = amdgpu_wb_get(adev, &ring->rptr_offs);
366 if (r) { 366 if (r) {
367 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); 367 dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
407 if (ring->ring_obj == NULL) { 407 if (ring->ring_obj == NULL) {
408 r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true, 408 r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
409 AMDGPU_GEM_DOMAIN_GTT, 0, 409 AMDGPU_GEM_DOMAIN_GTT, 0,
410 NULL, &ring->ring_obj); 410 NULL, NULL, &ring->ring_obj);
411 if (r) { 411 if (r) {
412 dev_err(adev->dev, "(%d) ring create failed\n", r); 412 dev_err(adev->dev, "(%d) ring create failed\n", r);
413 return r; 413 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad270362c..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
64 INIT_LIST_HEAD(&sa_manager->flist[i]); 64 INIT_LIST_HEAD(&sa_manager->flist[i]);
65 } 65 }
66 66
67 r = amdgpu_bo_create(adev, size, align, true, 67 r = amdgpu_bo_create(adev, size, align, true, domain,
68 domain, 0, NULL, &sa_manager->bo); 68 0, NULL, NULL, &sa_manager->bo);
69 if (r) { 69 if (r) {
70 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); 70 dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
71 return r; 71 return r;
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
145 struct amd_sched_fence *s_fence; 145 struct amd_sched_fence *s_fence;
146 146
147 s_fence = to_amd_sched_fence(f); 147 s_fence = to_amd_sched_fence(f);
148 if (s_fence) 148 if (s_fence) {
149 return s_fence->scheduler->ring_id; 149 struct amdgpu_ring *ring;
150
151 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
152 return ring->idx;
153 }
154
150 a_fence = to_amdgpu_fence(f); 155 a_fence = to_amdgpu_fence(f);
151 if (a_fence) 156 if (a_fence)
152 return a_fence->ring->idx; 157 return a_fence->ring->idx;
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
412} 417}
413 418
414#if defined(CONFIG_DEBUG_FS) 419#if defined(CONFIG_DEBUG_FS)
420
421static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
422{
423 struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
424 struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
425
426 if (a_fence)
427 seq_printf(m, " protected by 0x%016llx on ring %d",
428 a_fence->seq, a_fence->ring->idx);
429
430 if (s_fence) {
431 struct amdgpu_ring *ring;
432
433
434 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
435 seq_printf(m, " protected by 0x%016x on ring %d",
436 s_fence->base.seqno, ring->idx);
437 }
438}
439
415void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, 440void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
416 struct seq_file *m) 441 struct seq_file *m)
417{ 442{
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
428 } 453 }
429 seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", 454 seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
430 soffset, eoffset, eoffset - soffset); 455 soffset, eoffset, eoffset - soffset);
431 if (i->fence) { 456 if (i->fence)
432 struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence); 457 amdgpu_sa_bo_dump_fence(i->fence, m);
433 struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
434 if (a_fence)
435 seq_printf(m, " protected by 0x%016llx on ring %d",
436 a_fence->seq, a_fence->ring->idx);
437 if (s_fence)
438 seq_printf(m, " protected by 0x%016x on ring %d",
439 s_fence->base.seqno,
440 s_fence->scheduler->ring_id);
441
442 }
443 seq_printf(m, "\n"); 458 seq_printf(m, "\n");
444 } 459 }
445 spin_unlock(&sa_manager->wq.lock); 460 spin_unlock(&sa_manager->wq.lock);
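
With the scheduler now embedded in struct amdgpu_ring rather than pointed to, a scheduler pointer is mapped back to its ring with container_of(), replacing the removed ring_id back-reference. The idiom in isolation, as used in the sa.c hunks above and the sync.c hunk below:

	struct amdgpu_ring *ring =
		container_of(s_fence->sched, struct amdgpu_ring, sched);
	unsigned int ring_id = ring->idx;	/* was s_fence->scheduler->ring_id */
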
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd2971e..2e946b2cad88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,63 +27,48 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
-	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+	struct amdgpu_job *job = to_amdgpu_job(sched_job);
+	return amdgpu_sync_get_fence(&job->ibs->sync);
 }
 
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job;
-	struct amdgpu_fence *fence;
+	struct amdgpu_fence *fence = NULL;
+	struct amdgpu_job *job;
 	int r;
 
-	if (!job) {
+	if (!sched_job) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_job *)job;
-	mutex_lock(&sched_job->job_lock);
-	r = amdgpu_ib_schedule(sched_job->adev,
-			       sched_job->num_ibs,
-			       sched_job->ibs,
-			       sched_job->base.owner);
-	if (r)
+	job = to_amdgpu_job(sched_job);
+	mutex_lock(&job->job_lock);
+	r = amdgpu_ib_schedule(job->adev,
+			       job->num_ibs,
+			       job->ibs,
+			       job->base.owner);
+	if (r) {
+		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
-	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
-
-	if (sched_job->free_job)
-		sched_job->free_job(sched_job);
+	}
 
-	mutex_unlock(&sched_job->job_lock);
-	return &fence->base;
+	fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
 
 err:
-	DRM_ERROR("Run job error\n");
-	mutex_unlock(&sched_job->job_lock);
-	job->sched->ops->process_job(job);
-	return NULL;
-}
-
-static void amdgpu_sched_process_job(struct amd_sched_job *job)
-{
-	struct amdgpu_job *sched_job;
-
-	if (!job) {
-		DRM_ERROR("job is null\n");
-		return;
-	}
-	sched_job = (struct amdgpu_job *)job;
-	/* after processing job, free memory */
-	fence_put(&sched_job->base.s_fence->base);
-	kfree(sched_job);
+	if (job->free_job)
+		job->free_job(job);
+
+	mutex_unlock(&job->job_lock);
+	fence_put(&job->base.s_fence->base);
+	kfree(job);
+	return fence ? &fence->base : NULL;
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_sched_dependency,
 	.run_job = amdgpu_sched_run_job,
-	.process_job = amdgpu_sched_process_job
 };
 
 int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 		kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 	if (!job)
 		return -ENOMEM;
-	job->base.sched = ring->scheduler;
+	job->base.sched = &ring->sched;
 	job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
 	job->adev = adev;
 	job->ibs = ibs;
@@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 	mutex_init(&job->job_lock);
 	job->free_job = free_job;
 	mutex_lock(&job->job_lock);
-	r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+	r = amd_sched_entity_push_job(&job->base);
 	if (r) {
 		mutex_unlock(&job->job_lock);
 		kfree(job);
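The raw (struct amdgpu_job *) casts are gone in favor of a to_amdgpu_job() conversion. Its definition is not part of this diff; a plausible sketch, assuming struct amdgpu_job embeds its struct amd_sched_job as the member named base (which the job->base.sched assignment above implies):

/* Hypothetical helper; the in-tree version may be a macro instead. */
static inline struct amdgpu_job *to_amdgpu_job(struct amd_sched_job *sched_job)
{
	return container_of(sched_job, struct amdgpu_job, base);
}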
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 068aeaff7183..4921de15b451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 
 	if (a_fence)
 		return a_fence->ring->adev == adev;
-	if (s_fence)
-		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		return ring->adev == adev;
+	}
+
 	return false;
 }
 
@@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
 		fence_put(e->fence);
 		kfree(e);
 	}
+
+	if (amdgpu_enable_semaphores)
+		return 0;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_fence *fence = sync->sync_to[i];
+		if (!fence)
+			continue;
+
+		r = fence_wait(&fence->base, false);
+		if (r)
+			return r;
+	}
+
 	return 0;
 }
 
@@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		return -EINVAL;
 	}
 
-	if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
+	if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
+	    (count >= AMDGPU_NUM_SYNCS)) {
 		/* not enough room, wait manually */
 		r = fence_wait(&fence->base, false);
 		if (r)
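amdgpu_enable_semaphores is a driver-wide knob declared elsewhere in the series; when it is clear, amdgpu_sync_wait() and amdgpu_sync_rings() fall back to plain CPU-side fence waits. A sketch of how such a knob is typically exposed (names and default here are assumptions, not part of this diff):

#include <linux/module.h>

int amdgpu_enable_semaphores = 1;	/* assumed default */
module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
MODULE_PARM_DESC(enable_semaphores,
		 "Use hardware semaphores for inter-ring sync (0 = fence waits)");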
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f80b1a43be8a..4865615e9c06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		goto out_cleanup;
 	}
 
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
-			     NULL, &vram_obj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
+			     AMDGPU_GEM_DOMAIN_VRAM, 0,
+			     NULL, NULL, &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
@@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		struct fence *fence = NULL;
 
 		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+				     NULL, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_lclean;
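Every amdgpu_bo_create() caller in this patch gains one more NULL argument. Judging from the amdgpu_vm.c hunk further down, which passes a reservation object in that slot, the updated prototype is presumably along these lines (parameter names are a guess):

int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,	/* new parameter */
		     struct amdgpu_bo **bo_ptr);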
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b5abd5cde413..364cbe975332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->stollen_vga_memory);
+			     NULL, NULL, &adev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 482e66797ae6..5cc95f1a7dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 	const struct common_firmware_header *header = NULL;
 
 	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
 	if (err) {
 		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
 		err = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2cf6c6b06e3b..d0312364d950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->uvd.vcpu_bo);
+			     NULL, NULL, &adev->uvd.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
 		return r;
@@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 		return -EINVAL;
 	}
 
-	if (msg_type == 1) {
+	switch (msg_type) {
+	case 0:
+		/* it's a create msg, calc image size (width * height) */
+		amdgpu_bo_kunmap(bo);
+
+		/* try to alloc a new handle */
+		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				DRM_ERROR("Handle 0x%x already in use!\n", handle);
+				return -EINVAL;
+			}
+
+			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+				adev->uvd.filp[i] = ctx->parser->filp;
+				return 0;
+			}
+		}
+
+		DRM_ERROR("No more free UVD handles!\n");
+		return -EINVAL;
+
+	case 1:
 		/* it's a decode msg, calc buffer sizes */
 		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
 		amdgpu_bo_kunmap(bo);
 		if (r)
 			return r;
 
-	} else if (msg_type == 2) {
+		/* validate the handle */
+		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+			if (atomic_read(&adev->uvd.handles[i]) == handle) {
+				if (adev->uvd.filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD handle collision detected!\n");
+					return -EINVAL;
+				}
+				return 0;
+			}
+		}
+
+		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+		return -ENOENT;
+
+	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
 			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
-	} else {
-		/* it's a create msg */
-		amdgpu_bo_kunmap(bo);
-
-		if (msg_type != 0) {
-			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-			return -EINVAL;
-		}
-
-		/* it's a create msg, no special handling needed */
-	}
-
-	/* create or decode, validate the handle */
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-		if (atomic_read(&adev->uvd.handles[i]) == handle)
-			return 0;
-	}
 
-	/* handle not found try to alloc a new one */
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
-			adev->uvd.filp[i] = ctx->parser->filp;
-			return 0;
-		}
-	}
+	default:
+		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+		return -EINVAL;
 	}
-
-	DRM_ERROR("No more free UVD handles!\n");
+	BUG();
 	return -EINVAL;
 }
 
@@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 
 static int amdgpu_uvd_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
 
@@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &bo);
+			     NULL, NULL, &bo);
 	if (r)
 		return r;
 
@@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &bo);
+			     NULL, NULL, &bo);
 	if (r)
 		return r;
 
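The rewritten create path in amdgpu_uvd_cs_msg() above claims a handle slot with a single atomic compare-and-swap, so two processes racing on create messages cannot end up owning the same slot. The idiom in isolation, as a simplified sketch:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MAX_HANDLES 10	/* stands in for AMDGPU_MAX_UVD_HANDLES */

static atomic_t handles[MAX_HANDLES];

static int claim_handle(u32 handle)
{
	int i;

	for (i = 0; i < MAX_HANDLES; ++i) {
		if (atomic_read(&handles[i]) == handle)
			return -EINVAL;		/* handle already in use */

		/* cmpxchg returns the old value: 0 means we won the slot */
		if (!atomic_cmpxchg(&handles[i], 0, handle))
			return i;
	}

	return -ENOSPC;				/* no free slot */
}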
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3cab96c42aa8..74f2038ac747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->vce.vcpu_bo);
+			     NULL, NULL, &adev->vce.vcpu_bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
 		return r;
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 }
 
 static int amdgpu_vce_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f68b7cdc370a..1e14531353e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	}
 }
 
-int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *job)
 {
 	int i;
-	for (i = 0; i < sched_job->num_ibs; i++)
-		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
+	for (i = 0; i < job->num_ibs; i++)
+		amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
 	return 0;
 }
 
@@ -686,31 +686,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vm_fence_pts - fence page tables after an update
- *
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @fence: fence to use
- *
- * Fence the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
-				uint64_t start, uint64_t end,
-				struct fence *fence)
-{
-	unsigned i;
-
-	start >>= amdgpu_vm_block_size;
-	end >>= amdgpu_vm_block_size;
-
-	for (i = start; i <= end; ++i)
-		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
-}
-
-/**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
@@ -813,8 +788,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, f);
+	amdgpu_bo_fence(vm->page_directory, f, true);
 	if (fence) {
 		fence_put(*fence);
 		*fence = fence_get(f);
@@ -855,7 +829,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	int r;
 
 	if (mem) {
-		addr = mem->start << PAGE_SHIFT;
+		addr = (u64)mem->start << PAGE_SHIFT;
 		if (mem->mem_type != TTM_PL_TT)
 			addr += adev->vm_manager.vram_base_offset;
 	} else {
@@ -1089,6 +1063,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+		struct reservation_object *resv = vm->page_directory->tbo.resv;
 		struct amdgpu_bo *pt;
 
 		if (vm->page_tables[pt_idx].bo)
@@ -1097,11 +1072,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		/* drop mutex to allocate and clear page table */
 		mutex_unlock(&vm->mutex);
 
+		ww_mutex_lock(&resv->lock, NULL);
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
-				     NULL, &pt);
+				     NULL, resv, &pt);
+		ww_mutex_unlock(&resv->lock);
 		if (r)
 			goto error_free;
 
@@ -1303,7 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
-			     NULL, &vm->page_directory);
+			     NULL, NULL, &vm->page_directory);
 	if (r)
 		return r;
 
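With every page table created against the page directory's reservation object, one fence on the directory now protects the whole hierarchy, which is why amdgpu_vm_fence_pts() and its per-table loop could be deleted. The replacement in a nutshell (signature as used in this patch):

static void example_fence_vm(struct amdgpu_vm *vm, struct fence *f)
{
	/*
	 * The page tables share vm->page_directory->tbo.resv, so a
	 * single shared fence on the directory covers all of them.
	 */
	amdgpu_bo_fence(vm->page_directory, f, true /* shared */);
}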
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index a72ffc7d6c26..e33180d3314a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev)
 	 * 3. map kernel virtual address
 	 */
 	ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
+			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+			       toc_buf);
 
 	if (ret) {
 		dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
@@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev)
 	}
 
 	ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
-			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
+			       true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+			       smu_buf);
 
 	if (ret) {
 		dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 322edea65857..bda1249eb871 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, toc_buf);
+			       NULL, NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, smu_buf);
+			       NULL, NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4bd1e5cf65ca..e992bf2ff66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev,
 			     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
 			     PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+			     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 			     &adev->gfx.mec.hpd_eop_obj);
 	if (r) {
 		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
 		r = amdgpu_bo_create(adev,
 				     sizeof(struct bonaire_mqd),
 				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 				     &ring->mqd_obj);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
@@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-	u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
-	/* instruct DE to set a magic number */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(5)));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
-
-	/* let CE wait till condition satisfied */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
-	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
-				 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
-				 WAIT_REG_MEM_ENGINE(2)));   /* ce */
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
-	amdgpu_ring_write(ring, 0xffffffff);
-	amdgpu_ring_write(ring, 4); /* poll interval */
-
-	/* instruct CE to reset wb of ce_sync to zero */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
-				 WRITE_DATA_DST_SEL(5) |
-				 WR_CONFIRM));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 0);
-}
-
 /*
  * vm
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				      unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	if (usepfp) {
+		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 		amdgpu_ring_write(ring, 0x0);
 
 		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
-		gfx_v7_0_ce_sync_me(ring);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
 	}
 }
 
3709 3684
@@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->gfx.rlc.save_restore_obj);
+			     NULL, NULL,
+			     &adev->gfx.rlc.save_restore_obj);
 	if (r) {
 		dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
 		return r;
@@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->gfx.rlc.clear_state_obj);
+			     NULL, NULL,
+			     &adev->gfx.rlc.clear_state_obj);
 	if (r) {
 		dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 		gfx_v7_0_rlc_fini(adev);
@@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->gfx.rlc.cp_table_obj);
+			     NULL, NULL,
+			     &adev->gfx.rlc.cp_table_obj);
 	if (r) {
 		dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
 		gfx_v7_0_rlc_fini(adev);
@@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
-	if (r) {
-		DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
-		return r;
-	}
-
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
 		ring->ring_obj = NULL;
@@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle)
 	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
 			     PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_GDS, 0,
-			     NULL, &adev->gds.gds_gfx_bo);
+			     NULL, NULL, &adev->gds.gds_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
 			     PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_GWS, 0,
-			     NULL, &adev->gds.gws_gfx_bo);
+			     NULL, NULL, &adev->gds.gws_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
 			     PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_OA, 0,
-			     NULL, &adev->gds.oa_gfx_bo);
+			     NULL, NULL, &adev->gds.oa_gfx_bo);
 	if (r)
 		return r;
 
@@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle)
 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-	amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
 	gfx_v7_0_cp_compute_fini(adev);
 	gfx_v7_0_rlc_fini(adev);
 	gfx_v7_0_mec_fini(adev);
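The deleted gfx_v7_0_ce_sync_me() synchronized the constant engine with the micro engine through a writeback slot: the DE wrote a magic value, the CE polled for it and then cleared it. The replacement is a pair of SWITCH_BUFFER packets emitted inline; pulled into a helper it would look like the sketch below (that exactly two switches are needed is my reading of the pattern, presumably one per half of the CE's double-buffered state):

static void gfx_ce_me_sync(struct amdgpu_ring *ring)
{
	/* sync CE with ME so the CE cannot fetch the CEIB early */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}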
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 53f07439a512..cb4f68f53f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev,
 			     adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
 			     PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+			     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
 			     &adev->gfx.mec.hpd_eop_obj);
 	if (r) {
 		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle)
 		return r;
 	}
 
-	r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
-	if (r) {
-		DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
-		return r;
-	}
-
 	/* set up the gfx ring */
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle)
 	/* reserve GDS, GWS and OA resource for gfx */
 	r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
 			     PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_GDS, 0,
+			     AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
 			     NULL, &adev->gds.gds_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
 			     PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_GWS, 0,
+			     AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
 			     NULL, &adev->gds.gws_gfx_bo);
 	if (r)
 		return r;
 
 	r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
 			     PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_OA, 0,
+			     AMDGPU_GEM_DOMAIN_OA, 0, NULL,
 			     NULL, &adev->gds.oa_gfx_bo);
 	if (r)
@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle)
 	for (i = 0; i < adev->gfx.num_compute_rings; i++)
 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
 
-	amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
 	gfx_v8_0_mec_fini(adev);
 
 	return 0;
@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
 				     sizeof(struct vi_mqd),
 				     PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
-				     &ring->mqd_obj);
+				     NULL, &ring->mqd_obj);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
 			return r;
@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 	amdgpu_ring_write(ring, lower_32_bits(seq));
 	amdgpu_ring_write(ring, upper_32_bits(seq));
+
 }
 
 /**
@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
 	return true;
 }
 
-static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring)
+static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					unsigned vm_id, uint64_t pd_addr)
 {
-	struct amdgpu_device *adev = ring->adev;
-	u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
-	/* instruct DE to set a magic number */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(5)));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
+	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+	uint64_t addr = ring->fence_drv.gpu_addr;
 
-	/* let CE wait till condition satisfied */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
-	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
-				 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
-				 WAIT_REG_MEM_ENGINE(2)));   /* ce */
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 1);
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+				 WAIT_REG_MEM_FUNCTION(3))); /* equal */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
 	amdgpu_ring_write(ring, 0xffffffff);
 	amdgpu_ring_write(ring, 4); /* poll interval */
 
-	/* instruct CE to reset wb of ce_sync to zero */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
-				 WRITE_DATA_DST_SEL(5) |
-				 WR_CONFIRM));
-	amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
-	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
-	amdgpu_ring_write(ring, 0);
-}
-
-static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-					unsigned vm_id, uint64_t pd_addr)
-{
-	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	if (usepfp) {
+		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+	}
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-				 WRITE_DATA_DST_SEL(0)));
+				 WRITE_DATA_DST_SEL(0)) |
+				 WR_CONFIRM);
 	if (vm_id < 8) {
 		amdgpu_ring_write(ring,
 				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		amdgpu_ring_write(ring, 0x0);
-
-		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
-		gfx_v8_0_ce_sync_me(ring);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+		amdgpu_ring_write(ring, 0);
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c900aa942ade..966d4b2ed9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev)
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, toc_buf);
+			       NULL, NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 1f5ac941a610..5421309c1862 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, toc_buf);
+			       NULL, NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, smu_buf);
+			       NULL, NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5fac5da694f0..ed50dd725788 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = uvd_v4_2_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v4_2_hw_fini(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2d5c59c318af..9ad8b9906c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = uvd_v5_0_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v5_0_hw_fini(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9f553fce531..7e9934fa4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* Skip this for APU for now */
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_uvd_suspend(adev);
+		if (r)
+			return r;
+	}
 	r = uvd_v6_0_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
-	if (r)
-		return r;
-
 	return r;
 }
 
@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
+	/* Skip this for APU for now */
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_uvd_resume(adev);
+		if (r)
+			return r;
+	}
 	r = uvd_v6_0_hw_init(adev);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e75ad1b..b55ceb14fdcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1400,7 +1400,8 @@ static int vi_common_early_init(void *handle)
 	case CHIP_CARRIZO:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
-		adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
+		/* Disable UVD pg */
+		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		if (amdgpu_smc_load_fw && smc_enabled)
 			adev->firmware.smu_load = true;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 000000000000..144f50acc971
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_sched
+#define TRACE_INCLUDE_FILE gpu_sched_trace
+
+TRACE_EVENT(amd_sched_job,
+	    TP_PROTO(struct amd_sched_job *sched_job),
+	    TP_ARGS(sched_job),
+	    TP_STRUCT__entry(
+			     __field(struct amd_sched_entity *, entity)
+			     __field(const char *, name)
+			     __field(u32, job_count)
+			     __field(int, hw_job_count)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->entity = sched_job->s_entity;
+			   __entry->name = sched_job->sched->name;
+			   __entry->job_count = kfifo_len(
+				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
+			   __entry->hw_job_count = atomic_read(
+				   &sched_job->sched->hw_rq_count);
+			   ),
+	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->name, __entry->job_count,
+		      __entry->hw_job_count)
+);
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
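Once compiled in, the event costs nothing unless enabled through tracefs (events/gpu_sched/amd_sched_job under the tracing mount). Emitting it from driver code is a single call to the generated helper, whose name follows the TRACE_EVENT name, as the gpu_scheduler.c hunk below does:

/* Somewhere on the job submission path: */
trace_amd_sched_job(sched_job);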
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9259f1b6664c..3697eeeecf82 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -27,6 +27,9 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
+#define CREATE_TRACE_POINTS
+#include "gpu_sched_trace.h"
+
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
@@ -65,29 +68,29 @@ static struct amd_sched_job *
 amd_sched_rq_select_job(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			job = amd_sched_entity_pop_job(entity);
-			if (job) {
+			sched_job = amd_sched_entity_pop_job(entity);
+			if (sched_job) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return job;
+				return sched_job;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		job = amd_sched_entity_pop_job(entity);
-		if (job) {
+		sched_job = amd_sched_entity_pop_job(entity);
+		if (sched_job) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return job;
+			return sched_job;
 		}
 
 		if (entity == rq->current_entity)
@@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_rq *rq,
 			  uint32_t jobs)
 {
+	int r;
+
 	if (!(sched && entity && rq))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct amd_sched_entity));
-	entity->belongto_rq = rq;
-	entity->scheduler = sched;
-	entity->fence_context = fence_context_alloc(1);
-	if(kfifo_alloc(&entity->job_queue,
-		       jobs * sizeof(void *),
-		       GFP_KERNEL))
-		return -EINVAL;
+	INIT_LIST_HEAD(&entity->list);
+	entity->rq = rq;
+	entity->sched = sched;
 
 	spin_lock_init(&entity->queue_lock);
+	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
+	if (r)
+		return r;
+
 	atomic_set(&entity->fence_seq, 0);
+	entity->fence_context = fence_context_alloc(1);
 
 	/* Add the entity to the run queue */
 	amd_sched_rq_add_entity(rq, entity);
+
 	return 0;
 }
@@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
 					    struct amd_sched_entity *entity)
 {
-	return entity->scheduler == sched &&
-		entity->belongto_rq != NULL;
+	return entity->sched == sched &&
+		entity->rq != NULL;
 }
 
 /**
@@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
-	struct amd_sched_rq *rq = entity->belongto_rq;
+	struct amd_sched_rq *rq = entity->rq;
 
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;
@@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
 	fence_put(f);
-	amd_sched_wakeup(entity->scheduler);
+	amd_sched_wakeup(entity->sched);
 }
 
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
-	struct amd_gpu_scheduler *sched = entity->scheduler;
-	struct amd_sched_job *job;
+	struct amd_gpu_scheduler *sched = entity->sched;
+	struct amd_sched_job *sched_job;
 
 	if (ACCESS_ONCE(entity->dependency))
 		return NULL;
 
-	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(job))) {
+	while ((entity->dependency = sched->ops->dependency(sched_job))) {
 
 		if (fence_add_callback(entity->dependency, &entity->cb,
 				       amd_sched_entity_wakeup))
@@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 		return NULL;
 	}
 
-	return job;
+	return sched_job;
 }
 
 /**
  * Helper to submit a job to the job queue
  *
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
  *
  * Returns true if we could submit the job.
  */
-static bool amd_sched_entity_in(struct amd_sched_job *job)
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
-	struct amd_sched_entity *entity = job->s_entity;
+	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
 	spin_lock(&entity->queue_lock);
-	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+	added = kfifo_in(&entity->job_queue, &sched_job,
+			 sizeof(sched_job)) == sizeof(sched_job);
 
-	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
 		first = true;
 
 	spin_unlock(&entity->queue_lock);
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(job->sched);
+		amd_sched_wakeup(sched_job->sched);
 
 	return added;
 }
@@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
 /**
  * Submit a job to the job queue
  *
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
  *
  * Returns 0 for success, negative error code otherwise.
  */
@@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 	fence_get(&fence->base);
 	sched_job->s_fence = fence;
 
-	wait_event(entity->scheduler->job_scheduled,
+	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
-
+	trace_amd_sched_job(sched_job);
 	return 0;
 }
 
@@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 static struct amd_sched_job *
 amd_sched_select_job(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (job == NULL)
-		job = amd_sched_rq_select_job(&sched->sched_rq);
+	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (sched_job == NULL)
+		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
 
-	return job;
+	return sched_job;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 {
-	struct amd_sched_job *sched_job =
-		container_of(cb, struct amd_sched_job, cb);
-	struct amd_gpu_scheduler *sched;
+	struct amd_sched_fence *s_fence =
+		container_of(cb, struct amd_sched_fence, cb);
+	struct amd_gpu_scheduler *sched = s_fence->sched;
 
-	sched = sched_job->sched;
-	amd_sched_fence_signal(sched_job->s_fence);
 	atomic_dec(&sched->hw_rq_count);
-	fence_put(&sched_job->s_fence->base);
-	sched->ops->process_job(sched_job);
+	amd_sched_fence_signal(s_fence);
+	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
@@ -338,87 +344,82 @@ static int amd_sched_main(void *param)
338 344
339 while (!kthread_should_stop()) { 345 while (!kthread_should_stop()) {
340 struct amd_sched_entity *entity; 346 struct amd_sched_entity *entity;
341 struct amd_sched_job *job; 347 struct amd_sched_fence *s_fence;
348 struct amd_sched_job *sched_job;
342 struct fence *fence; 349 struct fence *fence;
343 350
344 wait_event_interruptible(sched->wake_up_worker, 351 wait_event_interruptible(sched->wake_up_worker,
345 kthread_should_stop() || 352 kthread_should_stop() ||
346 (job = amd_sched_select_job(sched))); 353 (sched_job = amd_sched_select_job(sched)));
347 354
348 if (!job) 355 if (!sched_job)
349 continue; 356 continue;
350 357
351 entity = job->s_entity; 358 entity = sched_job->s_entity;
359 s_fence = sched_job->s_fence;
352 atomic_inc(&sched->hw_rq_count); 360 atomic_inc(&sched->hw_rq_count);
353 fence = sched->ops->run_job(job); 361 fence = sched->ops->run_job(sched_job);
354 if (fence) { 362 if (fence) {
355 r = fence_add_callback(fence, &job->cb, 363 r = fence_add_callback(fence, &s_fence->cb,
356 amd_sched_process_job); 364 amd_sched_process_job);
357 if (r == -ENOENT) 365 if (r == -ENOENT)
358 amd_sched_process_job(fence, &job->cb); 366 amd_sched_process_job(fence, &s_fence->cb);
359 else if (r) 367 else if (r)
360 DRM_ERROR("fence add callback failed (%d)\n", r); 368 DRM_ERROR("fence add callback failed (%d)\n", r);
361 fence_put(fence); 369 fence_put(fence);
370 } else {
371 DRM_ERROR("Failed to run job!\n");
372 amd_sched_process_job(NULL, &s_fence->cb);
362 } 373 }
363 374
364 count = kfifo_out(&entity->job_queue, &job, sizeof(job)); 375 count = kfifo_out(&entity->job_queue, &sched_job,
365 WARN_ON(count != sizeof(job)); 376 sizeof(sched_job));
377 WARN_ON(count != sizeof(sched_job));
366 wake_up(&sched->job_scheduled); 378 wake_up(&sched->job_scheduled);
367 } 379 }
368 return 0; 380 return 0;
369} 381}
370 382
371/** 383/**
372 * Create a gpu scheduler 384 * Init a gpu scheduler instance
373 * 385 *
386 * @sched The pointer to the scheduler
374 * @ops The backend operations for this scheduler. 387 * @ops The backend operations for this scheduler.
375 * @ring The the ring id for the scheduler.
376 * @hw_submissions Number of hw submissions to do. 388 * @hw_submissions Number of hw submissions to do.
389 * @name Name used for debugging
377 * 390 *
378 * Return the pointer to scheduler for success, otherwise return NULL 391 * Return 0 on success, otherwise error code.
379*/ 392*/
380struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops, 393int amd_sched_init(struct amd_gpu_scheduler *sched,
381 unsigned ring, unsigned hw_submission, 394 struct amd_sched_backend_ops *ops,
382 void *priv) 395 unsigned hw_submission, const char *name)
383{ 396{
384 struct amd_gpu_scheduler *sched;
385
386 sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
387 if (!sched)
388 return NULL;
389
390 sched->ops = ops; 397 sched->ops = ops;
391 sched->ring_id = ring;
392 sched->hw_submission_limit = hw_submission; 398 sched->hw_submission_limit = hw_submission;
393 sched->priv = priv; 399 sched->name = name;
394 snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
395 amd_sched_rq_init(&sched->sched_rq); 400 amd_sched_rq_init(&sched->sched_rq);
396 amd_sched_rq_init(&sched->kernel_rq); 401 amd_sched_rq_init(&sched->kernel_rq);
397 402
398 init_waitqueue_head(&sched->wake_up_worker); 403 init_waitqueue_head(&sched->wake_up_worker);
399 init_waitqueue_head(&sched->job_scheduled); 404 init_waitqueue_head(&sched->job_scheduled);
400 atomic_set(&sched->hw_rq_count, 0); 405 atomic_set(&sched->hw_rq_count, 0);
406
401 /* Each scheduler will run on a separate kernel thread */ 407 /* Each scheduler will run on a separate kernel thread */
402 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 408 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
403 if (IS_ERR(sched->thread)) { 409 if (IS_ERR(sched->thread)) {
404 DRM_ERROR("Failed to create scheduler for id %d.\n", ring); 410 DRM_ERROR("Failed to create scheduler for %s.\n", name);
405 kfree(sched); 411 return PTR_ERR(sched->thread);
406 return NULL;
407 } 412 }
408 413
409 return sched; 414 return 0;
410} 415}
411 416
412/** 417/**
413 * Destroy a gpu scheduler 418 * Destroy a gpu scheduler
414 * 419 *
415 * @sched The pointer to the scheduler 420 * @sched The pointer to the scheduler
416 *
417 * return 0 if it succeeds, -1 if it fails.
418 */ 421 */
419int amd_sched_destroy(struct amd_gpu_scheduler *sched) 422void amd_sched_fini(struct amd_gpu_scheduler *sched)
420{ 423{
421 kthread_stop(sched->thread); 424 kthread_stop(sched->thread);
422 kfree(sched);
423 return 0;
424} 425}
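
The two hunks above convert the scheduler API from allocate-and-return (amd_sched_create()/amd_sched_destroy()) to an init/fini pair that works on a caller-embedded object, so there is no separate allocation to fail or to kfree(). A minimal sketch of a consumer of the new API; the wrapper struct and names (my_ring, my_ops, my_ring_init) are illustrative only, not part of this patch:

    static struct amd_sched_backend_ops my_ops;	/* .dependency/.run_job filled elsewhere */

    struct my_ring {
    	struct amd_gpu_scheduler sched;		/* embedded, no separate kzalloc() */
    };

    static int my_ring_init(struct my_ring *ring, const char *name)
    {
    	/* amd_sched_init() fills the embedded object; returns 0 or -errno */
    	return amd_sched_init(&ring->sched, &my_ops, 16, name);
    }

    static void my_ring_fini(struct my_ring *ring)
    {
    	amd_sched_fini(&ring->sched);		/* stops the kthread; nothing to free */
    }
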
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 2af0e4d4d817..80b64dc22214 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,13 +38,15 @@ struct amd_sched_rq;
38*/ 38*/
39struct amd_sched_entity { 39struct amd_sched_entity {
40 struct list_head list; 40 struct list_head list;
41 struct amd_sched_rq *belongto_rq; 41 struct amd_sched_rq *rq;
42 atomic_t fence_seq; 42 struct amd_gpu_scheduler *sched;
43 /* the job_queue maintains the jobs submitted by clients */ 43
44 struct kfifo job_queue;
45 spinlock_t queue_lock; 44 spinlock_t queue_lock;
46 struct amd_gpu_scheduler *scheduler; 45 struct kfifo job_queue;
46
47 atomic_t fence_seq;
47 uint64_t fence_context; 48 uint64_t fence_context;
49
48 struct fence *dependency; 50 struct fence *dependency;
49 struct fence_cb cb; 51 struct fence_cb cb;
50}; 52};
@@ -62,13 +64,13 @@ struct amd_sched_rq {
62 64
63struct amd_sched_fence { 65struct amd_sched_fence {
64 struct fence base; 66 struct fence base;
65 struct amd_gpu_scheduler *scheduler; 67 struct fence_cb cb;
68 struct amd_gpu_scheduler *sched;
66 spinlock_t lock; 69 spinlock_t lock;
67 void *owner; 70 void *owner;
68}; 71};
69 72
70struct amd_sched_job { 73struct amd_sched_job {
71 struct fence_cb cb;
72 struct amd_gpu_scheduler *sched; 74 struct amd_gpu_scheduler *sched;
73 struct amd_sched_entity *s_entity; 75 struct amd_sched_entity *s_entity;
74 struct amd_sched_fence *s_fence; 76 struct amd_sched_fence *s_fence;
@@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
91 * these functions should be implemented on the driver side 93 * these functions should be implemented on the driver side
92*/ 94*/
93struct amd_sched_backend_ops { 95struct amd_sched_backend_ops {
94 struct fence *(*dependency)(struct amd_sched_job *job); 96 struct fence *(*dependency)(struct amd_sched_job *sched_job);
95 struct fence *(*run_job)(struct amd_sched_job *job); 97 struct fence *(*run_job)(struct amd_sched_job *sched_job);
96 void (*process_job)(struct amd_sched_job *job);
97}; 98};
98 99
99/** 100/**
100 * One scheduler is implemented for each hardware ring 101 * One scheduler is implemented for each hardware ring
101*/ 102*/
102struct amd_gpu_scheduler { 103struct amd_gpu_scheduler {
103 struct task_struct *thread; 104 struct amd_sched_backend_ops *ops;
105 uint32_t hw_submission_limit;
106 const char *name;
104 struct amd_sched_rq sched_rq; 107 struct amd_sched_rq sched_rq;
105 struct amd_sched_rq kernel_rq; 108 struct amd_sched_rq kernel_rq;
106 atomic_t hw_rq_count;
107 struct amd_sched_backend_ops *ops;
108 uint32_t ring_id;
109 wait_queue_head_t wake_up_worker; 109 wait_queue_head_t wake_up_worker;
110 wait_queue_head_t job_scheduled; 110 wait_queue_head_t job_scheduled;
111 uint32_t hw_submission_limit; 111 atomic_t hw_rq_count;
112 char name[20]; 112 struct task_struct *thread;
113 void *priv;
114}; 113};
115 114
116struct amd_gpu_scheduler * 115int amd_sched_init(struct amd_gpu_scheduler *sched,
117amd_sched_create(struct amd_sched_backend_ops *ops, 116 struct amd_sched_backend_ops *ops,
118 uint32_t ring, uint32_t hw_submission, void *priv); 117 uint32_t hw_submission, const char *name);
119int amd_sched_destroy(struct amd_gpu_scheduler *sched); 118void amd_sched_fini(struct amd_gpu_scheduler *sched);
120 119
121int amd_sched_entity_init(struct amd_gpu_scheduler *sched, 120int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
122 struct amd_sched_entity *entity, 121 struct amd_sched_entity *entity,
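
The struct changes above move the fence_cb out of amd_sched_job and into amd_sched_fence, which is what makes the earlier gpu_scheduler.c hunk safe: the job may be freed as soon as run_job() returns, while the scheduler fence is guaranteed to stay alive until it signals, so the callback context must live in the fence. A sketch of the resulting callback shape (example_cb is an illustrative name):

    static void example_cb(struct fence *f, struct fence_cb *cb)
    {
    	/* cb is embedded in the long-lived fence, never in the job */
    	struct amd_sched_fence *s_fence =
    		container_of(cb, struct amd_sched_fence, cb);

    	amd_sched_fence_signal(s_fence);
    	fence_put(&s_fence->base);	/* drop the scheduler's reference */
    }
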
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index e62c37920e11..d802638094f4 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
36 if (fence == NULL) 36 if (fence == NULL)
37 return NULL; 37 return NULL;
38 fence->owner = owner; 38 fence->owner = owner;
39 fence->scheduler = s_entity->scheduler; 39 fence->sched = s_entity->sched;
40 spin_lock_init(&fence->lock); 40 spin_lock_init(&fence->lock);
41 41
42 seq = atomic_inc_return(&s_entity->fence_seq); 42 seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
63static const char *amd_sched_fence_get_timeline_name(struct fence *f) 63static const char *amd_sched_fence_get_timeline_name(struct fence *f)
64{ 64{
65 struct amd_sched_fence *fence = to_amd_sched_fence(f); 65 struct amd_sched_fence *fence = to_amd_sched_fence(f);
66 return (const char *)fence->scheduler->name; 66 return (const char *)fence->sched->name;
67} 67}
68 68
69static bool amd_sched_fence_enable_signaling(struct fence *f) 69static bool amd_sched_fence_enable_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9a860ca1e9d7..d93e7378c077 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);
520 520
521/** Ioctl table */ 521/** Ioctl table */
522static const struct drm_ioctl_desc drm_ioctls[] = { 522static const struct drm_ioctl_desc drm_ioctls[] = {
523 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), 523 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
524 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
524 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 525 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
525 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 526 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
526 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 527 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 82be6b86a168..d1e300dcd544 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
58 struct drm_plane_state *old_state) 58 struct drm_plane_state *old_state)
59{ 59{
60 struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; 60 struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
61 unsigned int index, value, ret; 61 unsigned int value;
62 int index, ret;
62 63
63 index = fsl_dcu_drm_plane_index(plane); 64 index = fsl_dcu_drm_plane_index(plane);
64 if (index < 0) 65 if (index < 0)
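
The fsl-dcu change above fixes a signedness bug: fsl_dcu_drm_plane_index() can return a negative errno, but storing it in an unsigned int makes the index < 0 test unconditionally false. A stand-alone illustration of the bug class (assumes <linux/errno.h>):

    static int signedness_demo(void)
    {
    	unsigned int bad = -EINVAL;	/* wraps to a huge positive value */
    	int good = -EINVAL;

    	if (bad < 0)			/* always false: dead error check */
    		return 1;
    	if (good < 0)			/* fires as intended */
    		return 1;
    	return 0;
    }
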
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5a244ab9395b..39d73dbc1c47 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
640 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 640 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
641 641
642 /* 642 /*
643 * On HSW, the DSL reg (0x70000) appears to return 0 if we
644 * read it just before the start of vblank. So try it again
645 * so we don't accidentally end up spanning a vblank frame
646 * increment, causing the pipe_update_end() code to squawk at us.
647 *
648 * The nature of this problem means we can't simply check the ISR
649 * bit and return the vblank start value; nor can we use the scanline
650 * debug register in the transcoder as it appears to have the same
651 * problem. We may need to extend this to include other platforms,
652 * but so far testing only shows the problem on HSW.
653 */
654 if (IS_HASWELL(dev) && !position) {
655 int i, temp;
656
657 for (i = 0; i < 100; i++) {
658 udelay(1);
659 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
660 DSL_LINEMASK_GEN3;
661 if (temp != position) {
662 position = temp;
663 break;
664 }
665 }
666 }
667
668 /*
643 * See update_scanline_offset() for the details on the 669 * See update_scanline_offset() for the details on the
644 * scanline_offset adjustment. 670 * scanline_offset adjustment.
645 */ 671 */
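
The workaround above is a bounded retry: on HSW the DSL register can transiently read zero just before vblank start, so the value is re-read for up to 100us and the first differing reading is accepted. The same pattern in isolation (read_once and max_us are hypothetical; assumes <linux/delay.h> for udelay()):

    static u32 reread_until_changed(u32 first, u32 (*read_once)(void), int max_us)
    {
    	int i;

    	for (i = 0; i < max_us; i++) {
    		u32 v;

    		udelay(1);
    		v = read_once();
    		if (v != first)
    			return v;	/* register moved on; trust it */
    	}
    	return first;			/* reading was genuinely stable */
    }
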
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 89c1a8ce1f98..2a5c76faf9f8 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
430 430
431/** 431/**
432 * intel_audio_codec_disable - Disable the audio codec for HD audio 432 * intel_audio_codec_disable - Disable the audio codec for HD audio
433 * @encoder: encoder on which to disable audio 433 * @intel_encoder: encoder on which to disable audio
434 * 434 *
435 * The disable sequences must be performed before disabling the transcoder or 435 * The disable sequences must be performed before disabling the transcoder or
436 * port. 436 * port.
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b3e437b3bb54..c19e669ffe50 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id)
42 const struct bdb_header *bdb = _bdb; 42 const struct bdb_header *bdb = _bdb;
43 const u8 *base = _bdb; 43 const u8 *base = _bdb;
44 int index = 0; 44 int index = 0;
45 u16 total, current_size; 45 u32 total, current_size;
46 u8 current_id; 46 u8 current_id;
47 47
48 /* skip to first section */ 48 /* skip to first section */
@@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id)
57 current_size = *((const u16 *)(base + index)); 57 current_size = *((const u16 *)(base + index));
58 index += 2; 58 index += 2;
59 59
60 /* The MIPI Sequence Block v3+ has a separate size field. */
61 if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
62 current_size = *((const u32 *)(base + index + 1));
63
60 if (index + current_size > total) 64 if (index + current_size > total)
61 return NULL; 65 return NULL;
62 66
@@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
799 return; 803 return;
800 } 804 }
801 805
806 /* Fail gracefully for forward-incompatible sequence blocks. */
807 if (sequence->version >= 3) {
808 DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
809 return;
810 }
811
802 DRM_DEBUG_DRIVER("Found MIPI sequence block\n"); 812 DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
803 813
804 block_size = get_blocksize(sequence); 814 block_size = get_blocksize(sequence);
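
The two intel_bios.c hunks work together: total and current_size widen from u16 to u32 because a MIPI Sequence Block v3+ can exceed 64KiB, and for such blocks the real size is a 32-bit field following the version byte rather than the generic 16-bit block size. The size selection, restated with comments (offsets exactly as in the hunk):

    current_size = *((const u16 *)(base + index));	/* generic 16-bit size */
    index += 2;					/* index now points at the payload */

    /* payload byte 0 is the version; bytes 1..4 carry the v3+ 32-bit size */
    if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
    	current_size = *((const u32 *)(base + index + 1));
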
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8cc9264f7809..cf418be7d30a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15087,9 +15087,12 @@ static void readout_plane_state(struct intel_crtc *crtc,
15087 15087
15088 plane_state = to_intel_plane_state(p->base.state); 15088 plane_state = to_intel_plane_state(p->base.state);
15089 15089
15090 if (p->base.type == DRM_PLANE_TYPE_PRIMARY) 15090 if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
15091 plane_state->visible = primary_get_hw_state(crtc); 15091 plane_state->visible = primary_get_hw_state(crtc);
15092 else { 15092 if (plane_state->visible)
15093 crtc->base.state->plane_mask |=
15094 1 << drm_plane_index(&p->base);
15095 } else {
15093 if (active) 15096 if (active)
15094 p->disable_plane(&p->base, &crtc->base); 15097 p->disable_plane(&p->base, &crtc->base);
15095 15098
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 87de15ea1f93..b35b5b2db4ec 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
186 186
187 sysram = vmalloc(size); 187 sysram = vmalloc(size);
188 if (!sysram) 188 if (!sysram)
189 return -ENOMEM; 189 goto err_sysram;
190 190
191 info = drm_fb_helper_alloc_fbi(helper); 191 info = drm_fb_helper_alloc_fbi(helper);
192 if (IS_ERR(info)) 192 if (IS_ERR(info)) {
193 return PTR_ERR(info); 193 ret = PTR_ERR(info);
194 goto err_alloc_fbi;
195 }
194 196
195 info->par = mfbdev; 197 info->par = mfbdev;
196 198
197 ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj); 199 ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
198 if (ret) 200 if (ret)
199 return ret; 201 goto err_framebuffer_init;
200 202
201 mfbdev->sysram = sysram; 203 mfbdev->sysram = sysram;
202 mfbdev->size = size; 204 mfbdev->size = size;
@@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
225 227
226 DRM_DEBUG_KMS("allocated %dx%d\n", 228 DRM_DEBUG_KMS("allocated %dx%d\n",
227 fb->width, fb->height); 229 fb->width, fb->height);
230
228 return 0; 231 return 0;
232
233err_framebuffer_init:
234 drm_fb_helper_release_fbi(helper);
235err_alloc_fbi:
236 vfree(sysram);
237err_sysram:
238 drm_gem_object_unreference_unlocked(gobj);
239
240 return ret;
229} 241}
230 242
231static int mga_fbdev_destroy(struct drm_device *dev, 243static int mga_fbdev_destroy(struct drm_device *dev,
@@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev)
276 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper, 288 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
277 mdev->num_crtc, MGAG200FB_CONN_LIMIT); 289 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
278 if (ret) 290 if (ret)
279 return ret; 291 goto err_fb_helper;
280 292
281 ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper); 293 ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
282 if (ret) 294 if (ret)
283 goto fini; 295 goto err_fb_setup;
284 296
285 /* disable all the possible outputs/crtcs before entering KMS mode */ 297 /* disable all the possible outputs/crtcs before entering KMS mode */
286 drm_helper_disable_unused_functions(mdev->dev); 298 drm_helper_disable_unused_functions(mdev->dev);
287 299
288 ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel); 300 ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
289 if (ret) 301 if (ret)
290 goto fini; 302 goto err_fb_setup;
291 303
292 return 0; 304 return 0;
293 305
294fini: 306err_fb_setup:
295 drm_fb_helper_fini(&mfbdev->helper); 307 drm_fb_helper_fini(&mfbdev->helper);
308err_fb_helper:
309 mdev->mfbdev = NULL;
310
296 return ret; 311 return ret;
297} 312}
298 313
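
The mgag200_fb.c hunks above replace leaky early returns with the kernel's usual unwind idiom: one label per acquired resource, released in reverse order of acquisition, so every failure path frees exactly what was set up before it. In outline (all names hypothetical):

    static int example_create(struct thing **out)
    {
    	void *buf;
    	struct thing *obj;
    	int ret;

    	buf = vmalloc(4096);
    	if (!buf)
    		return -ENOMEM;		/* nothing acquired yet */

    	obj = make_thing();
    	if (IS_ERR(obj)) {
    		ret = PTR_ERR(obj);
    		goto err_buf;		/* undo only what succeeded */
    	}

    	ret = setup_thing(obj, buf);
    	if (ret)
    		goto err_obj;

    	*out = obj;
    	return 0;

    err_obj:
    	destroy_thing(obj);
    err_buf:
    	vfree(buf);
    	return ret;
    }
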
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index de06388069e7..b1a0f5656175 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
220 } 220 }
221 r = mgag200_mm_init(mdev); 221 r = mgag200_mm_init(mdev);
222 if (r) 222 if (r)
223 goto out; 223 goto err_mm;
224 224
225 drm_mode_config_init(dev); 225 drm_mode_config_init(dev);
226 dev->mode_config.funcs = (void *)&mga_mode_funcs; 226 dev->mode_config.funcs = (void *)&mga_mode_funcs;
@@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
233 r = mgag200_modeset_init(mdev); 233 r = mgag200_modeset_init(mdev);
234 if (r) { 234 if (r) {
235 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r); 235 dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
236 goto out; 236 goto err_modeset;
237 } 237 }
238 238
239 /* Make small buffers to store a hardware cursor (double buffered icon updates) */ 239 /* Make small buffers to store a hardware cursor (double buffered icon updates) */
@@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
241 &mdev->cursor.pixels_1); 241 &mdev->cursor.pixels_1);
242 mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0, 242 mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
243 &mdev->cursor.pixels_2); 243 &mdev->cursor.pixels_2);
244 if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) 244 if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
245 goto cursor_nospace; 245 mdev->cursor.pixels_1 = NULL;
246 mdev->cursor.pixels_current = mdev->cursor.pixels_1; 246 mdev->cursor.pixels_2 = NULL;
247 mdev->cursor.pixels_prev = mdev->cursor.pixels_2; 247 dev_warn(&dev->pdev->dev,
248 goto cursor_done; 248 "Could not allocate space for cursors. Not doing hardware cursors.\n");
249 cursor_nospace: 249 } else {
250 mdev->cursor.pixels_1 = NULL; 250 mdev->cursor.pixels_current = mdev->cursor.pixels_1;
251 mdev->cursor.pixels_2 = NULL; 251 mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
252 dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n"); 252 }
253 cursor_done: 253
254 254 return 0;
255out: 255
256 if (r) 256err_modeset:
257 mgag200_driver_unload(dev); 257 drm_mode_config_cleanup(dev);
258 mgag200_mm_fini(mdev);
259err_mm:
260 dev->dev_private = NULL;
261
258 return r; 262 return r;
259} 263}
260 264
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7c6225c84ba6..dd845f82cc24 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
886 drm_connector_to_qxl_output(connector); 886 drm_connector_to_qxl_output(connector);
887 struct drm_device *ddev = connector->dev; 887 struct drm_device *ddev = connector->dev;
888 struct qxl_device *qdev = ddev->dev_private; 888 struct qxl_device *qdev = ddev->dev_private;
889 int connected; 889 bool connected = false;
890 890
891 /* The first monitor is always connected */ 891 /* The first monitor is always connected */
892 connected = (output->index == 0) || 892 if (!qdev->client_monitors_config) {
893 (qdev->client_monitors_config && 893 if (output->index == 0)
894 qdev->client_monitors_config->count > output->index && 894 connected = true;
895 qxl_head_enabled(&qdev->client_monitors_config->heads[output->index])); 895 } else
896 connected = qdev->client_monitors_config->count > output->index &&
897 qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
896 898
897 DRM_DEBUG("#%d connected: %d\n", output->index, connected); 899 DRM_DEBUG("#%d connected: %d\n", output->index, connected);
898 if (!connected) 900 if (!connected)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d8319dae8358..f3f562f6d848 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1573 1573
1574 drm_kms_helper_poll_disable(dev); 1574 drm_kms_helper_poll_disable(dev);
1575 1575
1576 drm_modeset_lock_all(dev);
1576 /* turn off display hw */ 1577 /* turn off display hw */
1577 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1578 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1578 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1579 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1579 } 1580 }
1581 drm_modeset_unlock_all(dev);
1580 1582
1581 /* unpin the front buffers and cursors */ 1583 /* unpin the front buffers and cursors */
1582 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1584 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1734 if (fbcon) { 1736 if (fbcon) {
1735 drm_helper_resume_force_mode(dev); 1737 drm_helper_resume_force_mode(dev);
1736 /* turn on display hw */ 1738 /* turn on display hw */
1739 drm_modeset_lock_all(dev);
1737 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1740 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1738 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); 1741 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1739 } 1742 }
1743 drm_modeset_unlock_all(dev);
1740 } 1744 }
1741 1745
1742 drm_kms_helper_poll_enable(dev); 1746 drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 787cd8fd897f..e9115d3f67b0 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,6 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, 2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2930 { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
2930 { 0, 0, 0, 0 }, 2931 { 0, 0, 0, 0 },
2931}; 2932};
2932 2933
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d9b7de25613..745e996d2dbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
882 if (ret) 882 if (ret)
883 return ret; 883 return ret;
884 man = &bdev->man[mem_type]; 884 man = &bdev->man[mem_type];
885 if (!man->has_type || !man->use_type)
886 continue;
885 887
886 type_ok = ttm_bo_mt_compatible(man, mem_type, place, 888 type_ok = ttm_bo_mt_compatible(man, mem_type, place,
887 &cur_flags); 889 &cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
889 if (!type_ok) 891 if (!type_ok)
890 continue; 892 continue;
891 893
894 type_found = true;
892 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 895 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
893 cur_flags); 896 cur_flags);
894 /* 897 /*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
901 if (mem_type == TTM_PL_SYSTEM) 904 if (mem_type == TTM_PL_SYSTEM)
902 break; 905 break;
903 906
904 if (man->has_type && man->use_type) { 907 ret = (*man->func->get_node)(man, bo, place, mem);
905 type_found = true; 908 if (unlikely(ret))
906 ret = (*man->func->get_node)(man, bo, place, mem); 909 return ret;
907 if (unlikely(ret)) 910
908 return ret;
909 }
910 if (mem->mm_node) 911 if (mem->mm_node)
911 break; 912 break;
912 } 913 }
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
917 return 0; 918 return 0;
918 } 919 }
919 920
920 if (!type_found)
921 return -EINVAL;
922
923 for (i = 0; i < placement->num_busy_placement; ++i) { 921 for (i = 0; i < placement->num_busy_placement; ++i) {
924 const struct ttm_place *place = &placement->busy_placement[i]; 922 const struct ttm_place *place = &placement->busy_placement[i];
925 923
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
927 if (ret) 925 if (ret)
928 return ret; 926 return ret;
929 man = &bdev->man[mem_type]; 927 man = &bdev->man[mem_type];
930 if (!man->has_type) 928 if (!man->has_type || !man->use_type)
931 continue; 929 continue;
932 if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) 930 if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
933 continue; 931 continue;
934 932
933 type_found = true;
935 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 934 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
936 cur_flags); 935 cur_flags);
937 /* 936 /*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
957 if (ret == -ERESTARTSYS) 956 if (ret == -ERESTARTSYS)
958 has_erestartsys = true; 957 has_erestartsys = true;
959 } 958 }
960 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM; 959
961 return ret; 960 if (!type_found) {
961 printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
962 return -EINVAL;
963 }
964
965 return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
962} 966}
963EXPORT_SYMBOL(ttm_bo_mem_space); 967EXPORT_SYMBOL(ttm_bo_mem_space);
964 968
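
The ttm_bo_mem_space() rework above separates two failure modes the old code conflated: type_found is now set as soon as any compatible, enabled manager is seen (not only after get_node() ran), so "no usable placement at all" becomes a loud -EINVAL while "usable but currently full" keeps returning -ENOMEM or -ERESTARTSYS. The new decision tail, in outline:

    if (!type_found)			/* no manager was compatible and enabled */
    	return -EINVAL;

    return has_erestartsys ? -ERESTARTSYS : -ENOMEM;	/* usable but full */
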
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 67720f70fe29..b49445df8a7e 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
1config DRM_VMWGFX 1config DRM_VMWGFX
2 tristate "DRM driver for VMware Virtual GPU" 2 tristate "DRM driver for VMware Virtual GPU"
3 depends on DRM && PCI 3 depends on DRM && PCI && X86
4 select FB_DEFERRED_IO 4 select FB_DEFERRED_IO
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ce659a125f2b..092ea81eeff7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
311 struct vmw_private *dev_priv = res->dev_priv; 311 struct vmw_private *dev_priv = res->dev_priv;
312 struct ttm_buffer_object *bo = val_buf->bo; 312 struct ttm_buffer_object *bo = val_buf->bo;
313 struct vmw_fence_obj *fence; 313 struct vmw_fence_obj *fence;
314 int ret;
315 314
316 if (list_empty(&res->mob_head)) 315 if (list_empty(&res->mob_head))
317 return 0; 316 return 0;
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
328 if (likely(fence != NULL)) 327 if (likely(fence != NULL))
329 vmw_fence_obj_unreference(&fence); 328 vmw_fence_obj_unreference(&fence);
330 329
331 return ret; 330 return 0;
332} 331}
333 332
334/** 333/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20bd9908..2c7a25c71af2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
752 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 752 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
753 dev_priv->active_master = &dev_priv->fbdev_master; 753 dev_priv->active_master = &dev_priv->fbdev_master;
754 754
755 755 dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
756 dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, 756 dev_priv->mmio_size);
757 dev_priv->mmio_size);
758
759 dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
760 dev_priv->mmio_size);
761 757
762 if (unlikely(dev_priv->mmio_virt == NULL)) { 758 if (unlikely(dev_priv->mmio_virt == NULL)) {
763 ret = -ENOMEM; 759 ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
913out_err4: 909out_err4:
914 iounmap(dev_priv->mmio_virt); 910 iounmap(dev_priv->mmio_virt);
915out_err3: 911out_err3:
916 arch_phys_wc_del(dev_priv->mmio_mtrr);
917 vmw_ttm_global_release(dev_priv); 912 vmw_ttm_global_release(dev_priv);
918out_err0: 913out_err0:
919 for (i = vmw_res_context; i < vmw_res_max; ++i) 914 for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
964 959
965 ttm_object_device_release(&dev_priv->tdev); 960 ttm_object_device_release(&dev_priv->tdev);
966 iounmap(dev_priv->mmio_virt); 961 iounmap(dev_priv->mmio_virt);
967 arch_phys_wc_del(dev_priv->mmio_mtrr);
968 if (dev_priv->ctx.staged_bindings) 962 if (dev_priv->ctx.staged_bindings)
969 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 963 vmw_binding_state_free(dev_priv->ctx.staged_bindings);
970 vmw_ttm_global_release(dev_priv); 964 vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6d02de6dc36c..f19fd39b43e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@ struct vmw_private {
376 uint32_t initial_width; 376 uint32_t initial_width;
377 uint32_t initial_height; 377 uint32_t initial_height;
378 u32 __iomem *mmio_virt; 378 u32 __iomem *mmio_virt;
379 int mmio_mtrr;
380 uint32_t capabilities; 379 uint32_t capabilities;
381 uint32_t max_gmr_ids; 380 uint32_t max_gmr_ids;
382 uint32_t max_gmr_pages; 381 uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
631 uint32_t size, 630 uint32_t size,
632 bool shareable, 631 bool shareable,
633 uint32_t *handle, 632 uint32_t *handle,
634 struct vmw_dma_buffer **p_dma_buf); 633 struct vmw_dma_buffer **p_dma_buf,
634 struct ttm_base_object **p_base);
635extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, 635extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
636 struct vmw_dma_buffer *dma_buf, 636 struct vmw_dma_buffer *dma_buf,
637 uint32_t *handle); 637 uint32_t *handle);
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
645 uint32_t cur_validate_node); 645 uint32_t cur_validate_node);
646extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo); 646extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
647extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 647extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
648 uint32_t id, struct vmw_dma_buffer **out); 648 uint32_t id, struct vmw_dma_buffer **out,
649 struct ttm_base_object **base);
649extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, 650extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
650 struct drm_file *file_priv); 651 struct drm_file *file_priv);
651extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, 652extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index b56565457c96..5da5de0cb522 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1236 struct vmw_relocation *reloc; 1236 struct vmw_relocation *reloc;
1237 int ret; 1237 int ret;
1238 1238
1239 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 1239 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1240 NULL);
1240 if (unlikely(ret != 0)) { 1241 if (unlikely(ret != 0)) {
1241 DRM_ERROR("Could not find or use MOB buffer.\n"); 1242 DRM_ERROR("Could not find or use MOB buffer.\n");
1242 ret = -EINVAL; 1243 ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1296 struct vmw_relocation *reloc; 1297 struct vmw_relocation *reloc;
1297 int ret; 1298 int ret;
1298 1299
1299 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); 1300 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1301 NULL);
1300 if (unlikely(ret != 0)) { 1302 if (unlikely(ret != 0)) {
1301 DRM_ERROR("Could not find or use GMR region.\n"); 1303 DRM_ERROR("Could not find or use GMR region.\n");
1302 ret = -EINVAL; 1304 ret = -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61fb7f3de311..15a6c01cd016 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1685 struct drm_crtc *crtc; 1685 struct drm_crtc *crtc;
1686 u32 num_units = 0; 1686 u32 num_units = 0;
1687 u32 i, k; 1687 u32 i, k;
1688 int ret;
1689 1688
1690 dirty->dev_priv = dev_priv; 1689 dirty->dev_priv = dev_priv;
1691 1690
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
1711 if (!dirty->cmd) { 1710 if (!dirty->cmd) {
1712 DRM_ERROR("Couldn't reserve fifo space " 1711 DRM_ERROR("Couldn't reserve fifo space "
1713 "for dirty blits.\n"); 1712 "for dirty blits.\n");
1714 return ret; 1713 return -ENOMEM;
1715 } 1714 }
1716 memset(dirty->cmd, 0, dirty->fifo_reserve_size); 1715 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
1717 } 1716 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 76069f093ccf..222c9c2123a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
484 goto out_unlock; 484 goto out_unlock;
485 } 485 }
486 486
487 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); 487 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
488 if (ret) 488 if (ret)
489 goto out_unlock; 489 goto out_unlock;
490 490
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c1912f852b42..e57667ca7557 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
354 } 354 }
355 355
356 *out_surf = NULL; 356 *out_surf = NULL;
357 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); 357 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
358 return ret; 358 return ret;
359} 359}
360 360
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
481 uint32_t size, 481 uint32_t size,
482 bool shareable, 482 bool shareable,
483 uint32_t *handle, 483 uint32_t *handle,
484 struct vmw_dma_buffer **p_dma_buf) 484 struct vmw_dma_buffer **p_dma_buf,
485 struct ttm_base_object **p_base)
485{ 486{
486 struct vmw_user_dma_buffer *user_bo; 487 struct vmw_user_dma_buffer *user_bo;
487 struct ttm_buffer_object *tmp; 488 struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
515 } 516 }
516 517
517 *p_dma_buf = &user_bo->dma; 518 *p_dma_buf = &user_bo->dma;
519 if (p_base) {
520 *p_base = &user_bo->prime.base;
521 kref_get(&(*p_base)->refcount);
522 }
518 *handle = user_bo->prime.base.hash.key; 523 *handle = user_bo->prime.base.hash.key;
519 524
520out_no_base_object: 525out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
631 struct vmw_dma_buffer *dma_buf; 636 struct vmw_dma_buffer *dma_buf;
632 struct vmw_user_dma_buffer *user_bo; 637 struct vmw_user_dma_buffer *user_bo;
633 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 638 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
639 struct ttm_base_object *buffer_base;
634 int ret; 640 int ret;
635 641
636 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 642 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
643 649
644 switch (arg->op) { 650 switch (arg->op) {
645 case drm_vmw_synccpu_grab: 651 case drm_vmw_synccpu_grab:
646 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); 652 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
653 &buffer_base);
647 if (unlikely(ret != 0)) 654 if (unlikely(ret != 0))
648 return ret; 655 return ret;
649 656
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
651 dma); 658 dma);
652 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); 659 ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
653 vmw_dmabuf_unreference(&dma_buf); 660 vmw_dmabuf_unreference(&dma_buf);
661 ttm_base_object_unref(&buffer_base);
654 if (unlikely(ret != 0 && ret != -ERESTARTSYS && 662 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
655 ret != -EBUSY)) { 663 ret != -EBUSY)) {
656 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", 664 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
692 return ret; 700 return ret;
693 701
694 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 702 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
695 req->size, false, &handle, &dma_buf); 703 req->size, false, &handle, &dma_buf,
704 NULL);
696 if (unlikely(ret != 0)) 705 if (unlikely(ret != 0))
697 goto out_no_dmabuf; 706 goto out_no_dmabuf;
698 707
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
721} 730}
722 731
723int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 732int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
724 uint32_t handle, struct vmw_dma_buffer **out) 733 uint32_t handle, struct vmw_dma_buffer **out,
734 struct ttm_base_object **p_base)
725{ 735{
726 struct vmw_user_dma_buffer *vmw_user_bo; 736 struct vmw_user_dma_buffer *vmw_user_bo;
727 struct ttm_base_object *base; 737 struct ttm_base_object *base;
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
743 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, 753 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
744 prime.base); 754 prime.base);
745 (void)ttm_bo_reference(&vmw_user_bo->dma.base); 755 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
746 ttm_base_object_unref(&base); 756 if (p_base)
757 *p_base = base;
758 else
759 ttm_base_object_unref(&base);
747 *out = &vmw_user_bo->dma; 760 *out = &vmw_user_bo->dma;
748 761
749 return 0; 762 return 0;
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
1004 1017
1005 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 1018 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1006 args->size, false, &args->handle, 1019 args->size, false, &args->handle,
1007 &dma_buf); 1020 &dma_buf, NULL);
1008 if (unlikely(ret != 0)) 1021 if (unlikely(ret != 0))
1009 goto out_no_dmabuf; 1022 goto out_no_dmabuf;
1010 1023
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
1032 struct vmw_dma_buffer *out_buf; 1045 struct vmw_dma_buffer *out_buf;
1033 int ret; 1046 int ret;
1034 1047
1035 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); 1048 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
1036 if (ret != 0) 1049 if (ret != 0)
1037 return -EINVAL; 1050 return -EINVAL;
1038 1051
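
The vmwgfx changes above thread an optional struct ttm_base_object ** out-parameter through the dmabuf alloc/lookup helpers: when the caller passes one, the function hands over the base-object reference instead of dropping it, so the caller keeps the object pinned until it balances the reference itself. Callee and caller sides, in outline:

    /* callee: transfer the reference only if the caller asked for it */
    if (p_base)
    	*p_base = base;			/* caller now owns this reference */
    else
    	ttm_base_object_unref(&base);

    /* caller: */
    ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &buffer_base);
    /* ... use dma_buf while buffer_base keeps it pinned ... */
    ttm_base_object_unref(&buffer_base);	/* balance the handed-over ref */
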
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bba1ee395478..fd47547b0234 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
855 855
856 if (buffer_handle != SVGA3D_INVALID_ID) { 856 if (buffer_handle != SVGA3D_INVALID_ID) {
857 ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, 857 ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
858 &buffer); 858 &buffer, NULL);
859 if (unlikely(ret != 0)) { 859 if (unlikely(ret != 0)) {
860 DRM_ERROR("Could not find buffer for shader " 860 DRM_ERROR("Could not find buffer for shader "
861 "creation.\n"); 861 "creation.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3361769842f4..64b50409fa07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -46,6 +46,7 @@ struct vmw_user_surface {
46 struct vmw_surface srf; 46 struct vmw_surface srf;
47 uint32_t size; 47 uint32_t size;
48 struct drm_master *master; 48 struct drm_master *master;
49 struct ttm_base_object *backup_base;
49}; 50};
50 51
51/** 52/**
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
656 struct vmw_resource *res = &user_srf->srf.res; 657 struct vmw_resource *res = &user_srf->srf.res;
657 658
658 *p_base = NULL; 659 *p_base = NULL;
660 ttm_base_object_unref(&user_srf->backup_base);
659 vmw_resource_unreference(&res); 661 vmw_resource_unreference(&res);
660} 662}
661 663
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
851 res->backup_size, 853 res->backup_size,
852 true, 854 true,
853 &backup_handle, 855 &backup_handle,
854 &res->backup); 856 &res->backup,
857 &user_srf->backup_base);
855 if (unlikely(ret != 0)) { 858 if (unlikely(ret != 0)) {
856 vmw_resource_unreference(&res); 859 vmw_resource_unreference(&res);
857 goto out_unlock; 860 goto out_unlock;
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1321 1324
1322 if (req->buffer_handle != SVGA3D_INVALID_ID) { 1325 if (req->buffer_handle != SVGA3D_INVALID_ID) {
1323 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1326 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
1324 &res->backup); 1327 &res->backup,
1328 &user_srf->backup_base);
1325 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < 1329 if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
1326 res->backup_size) { 1330 res->backup_size) {
1327 DRM_ERROR("Surface backup buffer is too small.\n"); 1331 DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
1335 req->drm_surface_flags & 1339 req->drm_surface_flags &
1336 drm_vmw_surface_flag_shareable, 1340 drm_vmw_surface_flag_shareable,
1337 &backup_handle, 1341 &backup_handle,
1338 &res->backup); 1342 &res->backup,
1343 &user_srf->backup_base);
1339 1344
1340 if (unlikely(ret != 0)) { 1345 if (unlikely(ret != 0)) {
1341 vmw_resource_unreference(&res); 1346 vmw_resource_unreference(&res);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f9aead4ecfc..652afd11a9ef 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -204,6 +204,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
204 spin_lock_irqsave(&vmbus_connection.channel_lock, flags); 204 spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
205 list_del(&channel->listentry); 205 list_del(&channel->listentry);
206 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags); 206 spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
207
208 primary_channel = channel;
207 } else { 209 } else {
208 primary_channel = channel->primary_channel; 210 primary_channel = channel->primary_channel;
209 spin_lock_irqsave(&primary_channel->lock, flags); 211 spin_lock_irqsave(&primary_channel->lock, flags);
@@ -211,6 +213,14 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
211 primary_channel->num_sc--; 213 primary_channel->num_sc--;
212 spin_unlock_irqrestore(&primary_channel->lock, flags); 214 spin_unlock_irqrestore(&primary_channel->lock, flags);
213 } 215 }
216
217 /*
218 * We need to free the bit for init_vp_index() to work in the case
219 * of sub-channel, when we reload drivers like hv_netvsc.
220 */
221 cpumask_clear_cpu(channel->target_cpu,
222 &primary_channel->alloced_cpus_in_node);
223
214 free_channel(channel); 224 free_channel(channel);
215} 225}
216 226
@@ -458,6 +468,13 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
458 continue; 468 continue;
459 } 469 }
460 470
471 /*
472 * NOTE: in the sub-channel case, we clear the sub-channel
473 * related bit(s) in primary->alloced_cpus_in_node in
474 * hv_process_channel_removal(), so when we reload drivers
475 * like hv_netvsc in an SMP guest, we are able to re-allocate
476 * the bit from primary->alloced_cpus_in_node.
477 */
461 if (!cpumask_test_cpu(cur_cpu, 478 if (!cpumask_test_cpu(cur_cpu,
462 &primary->alloced_cpus_in_node)) { 479 &primary->alloced_cpus_in_node)) {
463 cpumask_set_cpu(cur_cpu, 480 cpumask_set_cpu(cur_cpu,
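
The two channel_mgmt.c hunks pair up: init_vp_index() claims a CPU by setting a bit in primary->alloced_cpus_in_node, and hv_process_channel_removal() now clears that bit on teardown, so reloading a driver such as hv_netvsc can bind its sub-channels to the same CPUs again. The invariant, in outline:

    /* bind: claim the CPU for this (sub-)channel */
    cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
    channel->target_cpu = cur_cpu;

    /* teardown: release it so a reloaded driver can claim it again */
    cpumask_clear_cpu(channel->target_cpu,
    		  &primary->alloced_cpus_in_node);
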
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index efecdf0216d8..38be8dc2932e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -442,6 +442,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
442 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; 442 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
443 } 443 }
444 444
445 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
446
445 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 447 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
446 0xffffff; 448 0xffffff;
447 props->vendor_part_id = dev->dev->persist->pdev->device; 449 props->vendor_part_id = dev->dev->persist->pdev->device;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 403bd29443b8..aa59037d7504 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -238,8 +238,6 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
238 rx_sg->lkey = device->pd->local_dma_lkey; 238 rx_sg->lkey = device->pd->local_dma_lkey;
239 } 239 }
240 240
241 isert_conn->rx_desc_head = 0;
242
243 return 0; 241 return 0;
244 242
245dma_map_fail: 243dma_map_fail:
@@ -634,7 +632,7 @@ static void
634isert_init_conn(struct isert_conn *isert_conn) 632isert_init_conn(struct isert_conn *isert_conn)
635{ 633{
636 isert_conn->state = ISER_CONN_INIT; 634 isert_conn->state = ISER_CONN_INIT;
637 INIT_LIST_HEAD(&isert_conn->accept_node); 635 INIT_LIST_HEAD(&isert_conn->node);
638 init_completion(&isert_conn->login_comp); 636 init_completion(&isert_conn->login_comp);
639 init_completion(&isert_conn->login_req_comp); 637 init_completion(&isert_conn->login_req_comp);
640 init_completion(&isert_conn->wait); 638 init_completion(&isert_conn->wait);
@@ -762,28 +760,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
762 ret = isert_rdma_post_recvl(isert_conn); 760 ret = isert_rdma_post_recvl(isert_conn);
763 if (ret) 761 if (ret)
764 goto out_conn_dev; 762 goto out_conn_dev;
765 /*
766 * Obtain the second reference now before isert_rdma_accept() to
767 * ensure that any initiator generated REJECT CM event that occurs
768 * asynchronously won't drop the last reference until the error path
769 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
770 * isert_free_conn() -> isert_put_conn() -> kref_put().
771 */
772 if (!kref_get_unless_zero(&isert_conn->kref)) {
773 isert_warn("conn %p connect_release is running\n", isert_conn);
774 goto out_conn_dev;
775 }
776 763
777 ret = isert_rdma_accept(isert_conn); 764 ret = isert_rdma_accept(isert_conn);
778 if (ret) 765 if (ret)
779 goto out_conn_dev; 766 goto out_conn_dev;
780 767
781 mutex_lock(&isert_np->np_accept_mutex); 768 mutex_lock(&isert_np->mutex);
782 list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list); 769 list_add_tail(&isert_conn->node, &isert_np->accepted);
783 mutex_unlock(&isert_np->np_accept_mutex); 770 mutex_unlock(&isert_np->mutex);
784 771
785 isert_info("np %p: Allow accept_np to continue\n", np);
786 up(&isert_np->np_sem);
787 return 0; 772 return 0;
788 773
789out_conn_dev: 774out_conn_dev:
@@ -831,13 +816,21 @@ static void
831isert_connected_handler(struct rdma_cm_id *cma_id) 816isert_connected_handler(struct rdma_cm_id *cma_id)
832{ 817{
833 struct isert_conn *isert_conn = cma_id->qp->qp_context; 818 struct isert_conn *isert_conn = cma_id->qp->qp_context;
819 struct isert_np *isert_np = cma_id->context;
834 820
835 isert_info("conn %p\n", isert_conn); 821 isert_info("conn %p\n", isert_conn);
836 822
837 mutex_lock(&isert_conn->mutex); 823 mutex_lock(&isert_conn->mutex);
838 if (isert_conn->state != ISER_CONN_FULL_FEATURE) 824 isert_conn->state = ISER_CONN_UP;
839 isert_conn->state = ISER_CONN_UP; 825 kref_get(&isert_conn->kref);
840 mutex_unlock(&isert_conn->mutex); 826 mutex_unlock(&isert_conn->mutex);
827
828 mutex_lock(&isert_np->mutex);
829 list_move_tail(&isert_conn->node, &isert_np->pending);
830 mutex_unlock(&isert_np->mutex);
831
832 isert_info("np %p: Allow accept_np to continue\n", isert_np);
833 up(&isert_np->sem);
841} 834}
842 835
843static void 836static void
@@ -903,14 +896,14 @@ isert_np_cma_handler(struct isert_np *isert_np,
903 896
904 switch (event) { 897 switch (event) {
905 case RDMA_CM_EVENT_DEVICE_REMOVAL: 898 case RDMA_CM_EVENT_DEVICE_REMOVAL:
906 isert_np->np_cm_id = NULL; 899 isert_np->cm_id = NULL;
907 break; 900 break;
908 case RDMA_CM_EVENT_ADDR_CHANGE: 901 case RDMA_CM_EVENT_ADDR_CHANGE:
909 isert_np->np_cm_id = isert_setup_id(isert_np); 902 isert_np->cm_id = isert_setup_id(isert_np);
910 if (IS_ERR(isert_np->np_cm_id)) { 903 if (IS_ERR(isert_np->cm_id)) {
911 isert_err("isert np %p setup id failed: %ld\n", 904 isert_err("isert np %p setup id failed: %ld\n",
912 isert_np, PTR_ERR(isert_np->np_cm_id)); 905 isert_np, PTR_ERR(isert_np->cm_id));
913 isert_np->np_cm_id = NULL; 906 isert_np->cm_id = NULL;
914 } 907 }
915 break; 908 break;
916 default: 909 default:
@@ -929,7 +922,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
929 struct isert_conn *isert_conn; 922 struct isert_conn *isert_conn;
930 bool terminating = false; 923 bool terminating = false;
931 924
932 if (isert_np->np_cm_id == cma_id) 925 if (isert_np->cm_id == cma_id)
933 return isert_np_cma_handler(cma_id->context, event); 926 return isert_np_cma_handler(cma_id->context, event);
934 927
935 isert_conn = cma_id->qp->qp_context; 928 isert_conn = cma_id->qp->qp_context;
@@ -945,13 +938,13 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
945 if (terminating) 938 if (terminating)
946 goto out; 939 goto out;
947 940
948 mutex_lock(&isert_np->np_accept_mutex); 941 mutex_lock(&isert_np->mutex);
949 if (!list_empty(&isert_conn->accept_node)) { 942 if (!list_empty(&isert_conn->node)) {
950 list_del_init(&isert_conn->accept_node); 943 list_del_init(&isert_conn->node);
951 isert_put_conn(isert_conn); 944 isert_put_conn(isert_conn);
952 queue_work(isert_release_wq, &isert_conn->release_work); 945 queue_work(isert_release_wq, &isert_conn->release_work);
953 } 946 }
954 mutex_unlock(&isert_np->np_accept_mutex); 947 mutex_unlock(&isert_np->mutex);
955 948
956out: 949out:
957 return 0; 950 return 0;
@@ -962,6 +955,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
962{ 955{
963 struct isert_conn *isert_conn = cma_id->qp->qp_context; 956 struct isert_conn *isert_conn = cma_id->qp->qp_context;
964 957
958 list_del_init(&isert_conn->node);
965 isert_conn->cm_id = NULL; 959 isert_conn->cm_id = NULL;
966 isert_put_conn(isert_conn); 960 isert_put_conn(isert_conn);
967 961
@@ -1006,35 +1000,51 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
1006} 1000}
1007 1001
1008static int 1002static int
1009isert_post_recv(struct isert_conn *isert_conn, u32 count) 1003isert_post_recvm(struct isert_conn *isert_conn, u32 count)
1010{ 1004{
1011 struct ib_recv_wr *rx_wr, *rx_wr_failed; 1005 struct ib_recv_wr *rx_wr, *rx_wr_failed;
1012 int i, ret; 1006 int i, ret;
1013 unsigned int rx_head = isert_conn->rx_desc_head;
1014 struct iser_rx_desc *rx_desc; 1007 struct iser_rx_desc *rx_desc;
1015 1008
1016 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { 1009 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
1017 rx_desc = &isert_conn->rx_descs[rx_head]; 1010 rx_desc = &isert_conn->rx_descs[i];
1018 rx_wr->wr_id = (uintptr_t)rx_desc; 1011 rx_wr->wr_id = (uintptr_t)rx_desc;
1019 rx_wr->sg_list = &rx_desc->rx_sg; 1012 rx_wr->sg_list = &rx_desc->rx_sg;
1020 rx_wr->num_sge = 1; 1013 rx_wr->num_sge = 1;
1021 rx_wr->next = rx_wr + 1; 1014 rx_wr->next = rx_wr + 1;
1022 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
1023 } 1015 }
1024
1025 rx_wr--; 1016 rx_wr--;
1026 rx_wr->next = NULL; /* mark end of work requests list */ 1017 rx_wr->next = NULL; /* mark end of work requests list */
1027 1018
1028 isert_conn->post_recv_buf_count += count; 1019 isert_conn->post_recv_buf_count += count;
1029 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, 1020 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
1030 &rx_wr_failed); 1021 &rx_wr_failed);
1031 if (ret) { 1022 if (ret) {
1032 isert_err("ib_post_recv() failed with ret: %d\n", ret); 1023 isert_err("ib_post_recv() failed with ret: %d\n", ret);
1033 isert_conn->post_recv_buf_count -= count; 1024 isert_conn->post_recv_buf_count -= count;
1034 } else {
1035 isert_dbg("Posted %d RX buffers\n", count);
1036 isert_conn->rx_desc_head = rx_head;
1037 } 1025 }
1026
1027 return ret;
1028}
1029
1030static int
1031isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
1032{
1033 struct ib_recv_wr *rx_wr_failed, rx_wr;
1034 int ret;
1035
1036 rx_wr.wr_id = (uintptr_t)rx_desc;
1037 rx_wr.sg_list = &rx_desc->rx_sg;
1038 rx_wr.num_sge = 1;
1039 rx_wr.next = NULL;
1040
1041 isert_conn->post_recv_buf_count++;
1042 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
1043 if (ret) {
1044 isert_err("ib_post_recv() failed with ret: %d\n", ret);
1045 isert_conn->post_recv_buf_count--;
1046 }
1047
1038 return ret; 1048 return ret;
1039} 1049}
1040 1050
@@ -1205,7 +1215,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1205 if (ret) 1215 if (ret)
1206 return ret; 1216 return ret;
1207 1217
1208 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX); 1218 ret = isert_post_recvm(isert_conn,
1219 ISERT_QP_MAX_RECV_DTOS);
1209 if (ret) 1220 if (ret)
1210 return ret; 1221 return ret;
1211 1222
@@ -1278,7 +1289,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
1278} 1289}
1279 1290
1280static struct iscsi_cmd 1291static struct iscsi_cmd
1281*isert_allocate_cmd(struct iscsi_conn *conn) 1292*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
1282{ 1293{
1283 struct isert_conn *isert_conn = conn->context; 1294 struct isert_conn *isert_conn = conn->context;
1284 struct isert_cmd *isert_cmd; 1295 struct isert_cmd *isert_cmd;
@@ -1292,6 +1303,7 @@ static struct iscsi_cmd
1292 isert_cmd = iscsit_priv_cmd(cmd); 1303 isert_cmd = iscsit_priv_cmd(cmd);
1293 isert_cmd->conn = isert_conn; 1304 isert_cmd->conn = isert_conn;
1294 isert_cmd->iscsi_cmd = cmd; 1305 isert_cmd->iscsi_cmd = cmd;
1306 isert_cmd->rx_desc = rx_desc;
1295 1307
1296 return cmd; 1308 return cmd;
1297} 1309}
@@ -1303,9 +1315,9 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1303{ 1315{
1304 struct iscsi_conn *conn = isert_conn->conn; 1316 struct iscsi_conn *conn = isert_conn->conn;
1305 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf; 1317 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1306 struct scatterlist *sg;
1307 int imm_data, imm_data_len, unsol_data, sg_nents, rc; 1318 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1308 bool dump_payload = false; 1319 bool dump_payload = false;
1320 unsigned int data_len;
1309 1321
1310 rc = iscsit_setup_scsi_cmd(conn, cmd, buf); 1322 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1311 if (rc < 0) 1323 if (rc < 0)
@@ -1314,7 +1326,10 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1314 imm_data = cmd->immediate_data; 1326 imm_data = cmd->immediate_data;
1315 imm_data_len = cmd->first_burst_len; 1327 imm_data_len = cmd->first_burst_len;
1316 unsol_data = cmd->unsolicited_data; 1328 unsol_data = cmd->unsolicited_data;
1329 data_len = cmd->se_cmd.data_length;
1317 1330
1331 if (imm_data && imm_data_len == data_len)
1332 cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1318 rc = iscsit_process_scsi_cmd(conn, cmd, hdr); 1333 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1319 if (rc < 0) { 1334 if (rc < 0) {
1320 return 0; 1335 return 0;
@@ -1326,13 +1341,20 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1326 if (!imm_data) 1341 if (!imm_data)
1327 return 0; 1342 return 0;
1328 1343
1329 sg = &cmd->se_cmd.t_data_sg[0]; 1344 if (imm_data_len != data_len) {
1330 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1345 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1331 1346 sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
1332 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", 1347 &rx_desc->data[0], imm_data_len);
1333 sg, sg_nents, &rx_desc->data[0], imm_data_len); 1348 isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
1334 1349 sg_nents, imm_data_len);
1335 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); 1350 } else {
1351 sg_init_table(&isert_cmd->sg, 1);
1352 cmd->se_cmd.t_data_sg = &isert_cmd->sg;
1353 cmd->se_cmd.t_data_nents = 1;
1354 sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
1355 isert_dbg("Transfer Immediate imm_data_len: %d\n",
1356 imm_data_len);
1357 }
1336 1358
1337 cmd->write_data_done += imm_data_len; 1359 cmd->write_data_done += imm_data_len;
1338 1360
@@ -1407,6 +1429,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1407 if (rc < 0) 1429 if (rc < 0)
1408 return rc; 1430 return rc;
1409 1431
1432 /*
1433 * multiple data-outs on the same command can arrive -
 1434 * so post the buffer beforehand
1435 */
1436 rc = isert_post_recv(isert_conn, rx_desc);
1437 if (rc) {
1438 isert_err("ib_post_recv failed with %d\n", rc);
1439 return rc;
1440 }
1410 return 0; 1441 return 0;
1411} 1442}
1412 1443
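
The comment above is the heart of the change: several data-out PDUs may target one command, and each consumes a posted receive, so the buffer is reposted before the PDU is processed to keep the queue from draining. A toy model of that fixed-depth accounting (assumed depth of 4, not the driver's real constant):

#include <assert.h>
#include <stdio.h>

#define QP_DEPTH 4	/* assumed toy depth */

static int posted = QP_DEPTH;

static int consume(int repost)
{
	posted--;		/* HCA used one buffer          */
	if (repost)
		posted++;	/* handler reposted it first    */
	assert(posted >= 0 && posted <= QP_DEPTH);
	return posted;
}

int main(void)
{
	int i, left = QP_DEPTH;

	for (i = 0; i < 16; i++)	/* burst of data-outs */
		left = consume(1);	/* repost-first keeps it full */
	printf("posted after burst: %d\n", left);
	return 0;
}
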
@@ -1479,7 +1510,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1479 1510
1480 switch (opcode) { 1511 switch (opcode) {
1481 case ISCSI_OP_SCSI_CMD: 1512 case ISCSI_OP_SCSI_CMD:
1482 cmd = isert_allocate_cmd(conn); 1513 cmd = isert_allocate_cmd(conn, rx_desc);
1483 if (!cmd) 1514 if (!cmd)
1484 break; 1515 break;
1485 1516
@@ -1493,7 +1524,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1493 rx_desc, (unsigned char *)hdr); 1524 rx_desc, (unsigned char *)hdr);
1494 break; 1525 break;
1495 case ISCSI_OP_NOOP_OUT: 1526 case ISCSI_OP_NOOP_OUT:
1496 cmd = isert_allocate_cmd(conn); 1527 cmd = isert_allocate_cmd(conn, rx_desc);
1497 if (!cmd) 1528 if (!cmd)
1498 break; 1529 break;
1499 1530
@@ -1506,7 +1537,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1506 (unsigned char *)hdr); 1537 (unsigned char *)hdr);
1507 break; 1538 break;
1508 case ISCSI_OP_SCSI_TMFUNC: 1539 case ISCSI_OP_SCSI_TMFUNC:
1509 cmd = isert_allocate_cmd(conn); 1540 cmd = isert_allocate_cmd(conn, rx_desc);
1510 if (!cmd) 1541 if (!cmd)
1511 break; 1542 break;
1512 1543
@@ -1514,22 +1545,20 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1514 (unsigned char *)hdr); 1545 (unsigned char *)hdr);
1515 break; 1546 break;
1516 case ISCSI_OP_LOGOUT: 1547 case ISCSI_OP_LOGOUT:
1517 cmd = isert_allocate_cmd(conn); 1548 cmd = isert_allocate_cmd(conn, rx_desc);
1518 if (!cmd) 1549 if (!cmd)
1519 break; 1550 break;
1520 1551
1521 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1552 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1522 break; 1553 break;
1523 case ISCSI_OP_TEXT: 1554 case ISCSI_OP_TEXT:
1524 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) { 1555 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
1525 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); 1556 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1526 if (!cmd) 1557 else
1527 break; 1558 cmd = isert_allocate_cmd(conn, rx_desc);
1528 } else { 1559
1529 cmd = isert_allocate_cmd(conn); 1560 if (!cmd)
1530 if (!cmd) 1561 break;
1531 break;
1532 }
1533 1562
1534 isert_cmd = iscsit_priv_cmd(cmd); 1563 isert_cmd = iscsit_priv_cmd(cmd);
1535 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, 1564 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1589,7 +1618,7 @@ isert_rcv_completion(struct iser_rx_desc *desc,
1589 struct ib_device *ib_dev = isert_conn->cm_id->device; 1618 struct ib_device *ib_dev = isert_conn->cm_id->device;
1590 struct iscsi_hdr *hdr; 1619 struct iscsi_hdr *hdr;
1591 u64 rx_dma; 1620 u64 rx_dma;
1592 int rx_buflen, outstanding; 1621 int rx_buflen;
1593 1622
1594 if ((char *)desc == isert_conn->login_req_buf) { 1623 if ((char *)desc == isert_conn->login_req_buf) {
1595 rx_dma = isert_conn->login_req_dma; 1624 rx_dma = isert_conn->login_req_dma;
@@ -1629,22 +1658,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
1629 DMA_FROM_DEVICE); 1658 DMA_FROM_DEVICE);
1630 1659
1631 isert_conn->post_recv_buf_count--; 1660 isert_conn->post_recv_buf_count--;
1632 isert_dbg("Decremented post_recv_buf_count: %d\n",
1633 isert_conn->post_recv_buf_count);
1634
1635 if ((char *)desc == isert_conn->login_req_buf)
1636 return;
1637
1638 outstanding = isert_conn->post_recv_buf_count;
1639 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1640 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1641 ISERT_MIN_POSTED_RX);
1642 err = isert_post_recv(isert_conn, count);
1643 if (err) {
1644 isert_err("isert_post_recv() count: %d failed, %d\n",
1645 count, err);
1646 }
1647 }
1648} 1661}
1649 1662
1650static int 1663static int
@@ -2156,6 +2169,12 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2156 struct ib_send_wr *wr_failed; 2169 struct ib_send_wr *wr_failed;
2157 int ret; 2170 int ret;
2158 2171
2172 ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2173 if (ret) {
2174 isert_err("ib_post_recv failed with %d\n", ret);
2175 return ret;
2176 }
2177
2159 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, 2178 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
2160 &wr_failed); 2179 &wr_failed);
2161 if (ret) { 2180 if (ret) {
@@ -2950,6 +2969,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2950 &isert_cmd->tx_desc.send_wr); 2969 &isert_cmd->tx_desc.send_wr);
2951 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; 2970 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2952 wr->send_wr_num += 1; 2971 wr->send_wr_num += 1;
2972
2973 rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
2974 if (rc) {
2975 isert_err("ib_post_recv failed with %d\n", rc);
2976 return rc;
2977 }
2953 } 2978 }
2954 2979
2955 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); 2980 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
@@ -2999,9 +3024,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2999static int 3024static int
3000isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 3025isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3001{ 3026{
3002 int ret; 3027 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
3028 int ret = 0;
3003 3029
3004 switch (state) { 3030 switch (state) {
3031 case ISTATE_REMOVE:
3032 spin_lock_bh(&conn->cmd_lock);
3033 list_del_init(&cmd->i_conn_node);
3034 spin_unlock_bh(&conn->cmd_lock);
3035 isert_put_cmd(isert_cmd, true);
3036 break;
3005 case ISTATE_SEND_NOPIN_WANT_RESPONSE: 3037 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
3006 ret = isert_put_nopin(cmd, conn, false); 3038 ret = isert_put_nopin(cmd, conn, false);
3007 break; 3039 break;
@@ -3106,10 +3138,10 @@ isert_setup_np(struct iscsi_np *np,
3106 isert_err("Unable to allocate struct isert_np\n"); 3138 isert_err("Unable to allocate struct isert_np\n");
3107 return -ENOMEM; 3139 return -ENOMEM;
3108 } 3140 }
3109 sema_init(&isert_np->np_sem, 0); 3141 sema_init(&isert_np->sem, 0);
3110 mutex_init(&isert_np->np_accept_mutex); 3142 mutex_init(&isert_np->mutex);
3111 INIT_LIST_HEAD(&isert_np->np_accept_list); 3143 INIT_LIST_HEAD(&isert_np->accepted);
3112 init_completion(&isert_np->np_login_comp); 3144 INIT_LIST_HEAD(&isert_np->pending);
3113 isert_np->np = np; 3145 isert_np->np = np;
3114 3146
3115 /* 3147 /*
@@ -3125,7 +3157,7 @@ isert_setup_np(struct iscsi_np *np,
3125 goto out; 3157 goto out;
3126 } 3158 }
3127 3159
3128 isert_np->np_cm_id = isert_lid; 3160 isert_np->cm_id = isert_lid;
3129 np->np_context = isert_np; 3161 np->np_context = isert_np;
3130 3162
3131 return 0; 3163 return 0;
@@ -3214,7 +3246,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3214 int ret; 3246 int ret;
3215 3247
3216accept_wait: 3248accept_wait:
3217 ret = down_interruptible(&isert_np->np_sem); 3249 ret = down_interruptible(&isert_np->sem);
3218 if (ret) 3250 if (ret)
3219 return -ENODEV; 3251 return -ENODEV;
3220 3252
@@ -3231,15 +3263,15 @@ accept_wait:
3231 } 3263 }
3232 spin_unlock_bh(&np->np_thread_lock); 3264 spin_unlock_bh(&np->np_thread_lock);
3233 3265
3234 mutex_lock(&isert_np->np_accept_mutex); 3266 mutex_lock(&isert_np->mutex);
3235 if (list_empty(&isert_np->np_accept_list)) { 3267 if (list_empty(&isert_np->pending)) {
3236 mutex_unlock(&isert_np->np_accept_mutex); 3268 mutex_unlock(&isert_np->mutex);
3237 goto accept_wait; 3269 goto accept_wait;
3238 } 3270 }
3239 isert_conn = list_first_entry(&isert_np->np_accept_list, 3271 isert_conn = list_first_entry(&isert_np->pending,
3240 struct isert_conn, accept_node); 3272 struct isert_conn, node);
3241 list_del_init(&isert_conn->accept_node); 3273 list_del_init(&isert_conn->node);
3242 mutex_unlock(&isert_np->np_accept_mutex); 3274 mutex_unlock(&isert_np->mutex);
3243 3275
3244 conn->context = isert_conn; 3276 conn->context = isert_conn;
3245 isert_conn->conn = conn; 3277 isert_conn->conn = conn;
@@ -3257,28 +3289,39 @@ isert_free_np(struct iscsi_np *np)
3257 struct isert_np *isert_np = np->np_context; 3289 struct isert_np *isert_np = np->np_context;
3258 struct isert_conn *isert_conn, *n; 3290 struct isert_conn *isert_conn, *n;
3259 3291
3260 if (isert_np->np_cm_id) 3292 if (isert_np->cm_id)
3261 rdma_destroy_id(isert_np->np_cm_id); 3293 rdma_destroy_id(isert_np->cm_id);
3262 3294
3263 /* 3295 /*
 3264 * FIXME: At this point we don't have a good way to ensure 3296
 3265 * that we don't have hanging connections that 3297
 3266 * completed RDMA establishment but didn't start iscsi login 3298
 3267 * process. So work around this by cleaning up whatever piled 3299
3268 * up in np_accept_list. 3300 * up in accepted and pending lists.
3269 */ 3301 */
3270 mutex_lock(&isert_np->np_accept_mutex); 3302 mutex_lock(&isert_np->mutex);
3271 if (!list_empty(&isert_np->np_accept_list)) { 3303 if (!list_empty(&isert_np->pending)) {
3272 isert_info("Still have isert connections, cleaning up...\n"); 3304 isert_info("Still have isert pending connections\n");
3305 list_for_each_entry_safe(isert_conn, n,
3306 &isert_np->pending,
3307 node) {
3308 isert_info("cleaning isert_conn %p state (%d)\n",
3309 isert_conn, isert_conn->state);
3310 isert_connect_release(isert_conn);
3311 }
3312 }
3313
3314 if (!list_empty(&isert_np->accepted)) {
3315 isert_info("Still have isert accepted connections\n");
3273 list_for_each_entry_safe(isert_conn, n, 3316 list_for_each_entry_safe(isert_conn, n,
3274 &isert_np->np_accept_list, 3317 &isert_np->accepted,
3275 accept_node) { 3318 node) {
3276 isert_info("cleaning isert_conn %p state (%d)\n", 3319 isert_info("cleaning isert_conn %p state (%d)\n",
3277 isert_conn, isert_conn->state); 3320 isert_conn, isert_conn->state);
3278 isert_connect_release(isert_conn); 3321 isert_connect_release(isert_conn);
3279 } 3322 }
3280 } 3323 }
3281 mutex_unlock(&isert_np->np_accept_mutex); 3324 mutex_unlock(&isert_np->mutex);
3282 3325
3283 np->np_context = NULL; 3326 np->np_context = NULL;
3284 kfree(isert_np); 3327 kfree(isert_np);
@@ -3345,6 +3388,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
3345 wait_for_completion(&isert_conn->wait_comp_err); 3388 wait_for_completion(&isert_conn->wait_comp_err);
3346} 3389}
3347 3390
3391/**
3392 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 3393 * unsolicited dataout
3394 * @conn: iscsi connection
3395 *
3396 * We might still have commands that are waiting for unsolicited
3397 * dataouts messages. We must put the extra reference on those
3398 * before blocking on the target_wait_for_session_cmds
3399 */
3400static void
3401isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
3402{
3403 struct iscsi_cmd *cmd, *tmp;
3404 static LIST_HEAD(drop_cmd_list);
3405
3406 spin_lock_bh(&conn->cmd_lock);
3407 list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
3408 if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
3409 (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
3410 (cmd->write_data_done < cmd->se_cmd.data_length))
3411 list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
3412 }
3413 spin_unlock_bh(&conn->cmd_lock);
3414
3415 list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
3416 list_del_init(&cmd->i_conn_node);
3417 if (cmd->i_state != ISTATE_REMOVE) {
3418 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
3419
3420 isert_info("conn %p dropping cmd %p\n", conn, cmd);
3421 isert_put_cmd(isert_cmd, true);
3422 }
3423 }
3424}
3425
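
isert_put_unsol_pending_cmds() follows a standard teardown shape: select entries under the connection lock, move them to a private list, then release them after dropping the lock. A self-contained sketch with a plain singly linked list instead of the kernel's list_head (hypothetical field names):

#include <stdio.h>

struct cmd {
	int write_done;		/* bytes of write data received */
	int data_len;		/* bytes the command expects    */
	struct cmd *next;
};

/* "Under the lock": unlink every command still waiting for write data
 * and push it onto a private drop list; the caller releases the drop
 * list after dropping the lock.
 */
static struct cmd *splice_pending(struct cmd **list)
{
	struct cmd **pp = list, *drop = NULL;

	while (*pp) {
		struct cmd *c = *pp;

		if (c->write_done < c->data_len) {
			*pp = c->next;		/* unlink from conn list */
			c->next = drop;		/* push onto drop list   */
			drop = c;
		} else {
			pp = &c->next;
		}
	}
	return drop;
}

int main(void)
{
	struct cmd done = { 512, 512, NULL };
	struct cmd waiting = { 0, 512, &done };
	struct cmd *list = &waiting, *drop;

	for (drop = splice_pending(&list); drop; drop = drop->next)
		printf("dropping cmd: %d/%d written\n",
		       drop->write_done, drop->data_len);
	return 0;
}
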
3348static void isert_wait_conn(struct iscsi_conn *conn) 3426static void isert_wait_conn(struct iscsi_conn *conn)
3349{ 3427{
3350 struct isert_conn *isert_conn = conn->context; 3428 struct isert_conn *isert_conn = conn->context;
@@ -3363,8 +3441,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3363 isert_conn_terminate(isert_conn); 3441 isert_conn_terminate(isert_conn);
3364 mutex_unlock(&isert_conn->mutex); 3442 mutex_unlock(&isert_conn->mutex);
3365 3443
3366 isert_wait4cmds(conn);
3367 isert_wait4flush(isert_conn); 3444 isert_wait4flush(isert_conn);
3445 isert_put_unsol_pending_cmds(conn);
3446 isert_wait4cmds(conn);
3368 isert_wait4logout(isert_conn); 3447 isert_wait4logout(isert_conn);
3369 3448
3370 queue_work(isert_release_wq, &isert_conn->release_work); 3449 queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 6a04ba3c0f72..c5b99bcecbcf 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -113,7 +113,6 @@ enum {
113}; 113};
114 114
115struct isert_rdma_wr { 115struct isert_rdma_wr {
116 struct list_head wr_list;
117 struct isert_cmd *isert_cmd; 116 struct isert_cmd *isert_cmd;
118 enum iser_ib_op_code iser_ib_op; 117 enum iser_ib_op_code iser_ib_op;
119 struct ib_sge *ib_sge; 118 struct ib_sge *ib_sge;
@@ -134,14 +133,13 @@ struct isert_cmd {
134 uint64_t write_va; 133 uint64_t write_va;
135 u64 pdu_buf_dma; 134 u64 pdu_buf_dma;
136 u32 pdu_buf_len; 135 u32 pdu_buf_len;
137 u32 read_va_off;
138 u32 write_va_off;
139 u32 rdma_wr_num;
140 struct isert_conn *conn; 136 struct isert_conn *conn;
141 struct iscsi_cmd *iscsi_cmd; 137 struct iscsi_cmd *iscsi_cmd;
142 struct iser_tx_desc tx_desc; 138 struct iser_tx_desc tx_desc;
139 struct iser_rx_desc *rx_desc;
143 struct isert_rdma_wr rdma_wr; 140 struct isert_rdma_wr rdma_wr;
144 struct work_struct comp_work; 141 struct work_struct comp_work;
142 struct scatterlist sg;
145}; 143};
146 144
147struct isert_device; 145struct isert_device;
@@ -159,11 +157,10 @@ struct isert_conn {
159 u64 login_req_dma; 157 u64 login_req_dma;
160 int login_req_len; 158 int login_req_len;
161 u64 login_rsp_dma; 159 u64 login_rsp_dma;
162 unsigned int rx_desc_head;
163 struct iser_rx_desc *rx_descs; 160 struct iser_rx_desc *rx_descs;
164 struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX]; 161 struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS];
165 struct iscsi_conn *conn; 162 struct iscsi_conn *conn;
166 struct list_head accept_node; 163 struct list_head node;
167 struct completion login_comp; 164 struct completion login_comp;
168 struct completion login_req_comp; 165 struct completion login_req_comp;
169 struct iser_tx_desc login_tx_desc; 166 struct iser_tx_desc login_tx_desc;
@@ -222,9 +219,9 @@ struct isert_device {
222 219
223struct isert_np { 220struct isert_np {
224 struct iscsi_np *np; 221 struct iscsi_np *np;
225 struct semaphore np_sem; 222 struct semaphore sem;
226 struct rdma_cm_id *np_cm_id; 223 struct rdma_cm_id *cm_id;
227 struct mutex np_accept_mutex; 224 struct mutex mutex;
228 struct list_head np_accept_list; 225 struct list_head accepted;
229 struct completion np_login_comp; 226 struct list_head pending;
230}; 227};
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 9da9942ac83c..f6d680485bee 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d)
88{ 88{
89 struct irq_domain *domain = d->domain; 89 struct irq_domain *domain = d->domain;
90 struct irq_domain_chip_generic *dgc = domain->gc; 90 struct irq_domain_chip_generic *dgc = domain->gc;
91 struct irq_chip_generic *gc = dgc->gc[0]; 91 struct irq_chip_generic *bgc = dgc->gc[0];
92 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
92 93
93 /* Disable interrupt on AIC5 */ 94 /*
94 irq_gc_lock(gc); 95 * Disable interrupt on AIC5. We always take the lock of the
96 * first irq chip as all chips share the same registers.
97 */
98 irq_gc_lock(bgc);
95 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 99 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
96 irq_reg_writel(gc, 1, AT91_AIC5_IDCR); 100 irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
97 gc->mask_cache &= ~d->mask; 101 gc->mask_cache &= ~d->mask;
98 irq_gc_unlock(gc); 102 irq_gc_unlock(bgc);
99} 103}
100 104
101static void aic5_unmask(struct irq_data *d) 105static void aic5_unmask(struct irq_data *d)
102{ 106{
103 struct irq_domain *domain = d->domain; 107 struct irq_domain *domain = d->domain;
104 struct irq_domain_chip_generic *dgc = domain->gc; 108 struct irq_domain_chip_generic *dgc = domain->gc;
105 struct irq_chip_generic *gc = dgc->gc[0]; 109 struct irq_chip_generic *bgc = dgc->gc[0];
110 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
106 111
107 /* Enable interrupt on AIC5 */ 112 /*
108 irq_gc_lock(gc); 113 * Enable interrupt on AIC5. We always take the lock of the
114 * first irq chip as all chips share the same registers.
115 */
116 irq_gc_lock(bgc);
109 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); 117 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
110 irq_reg_writel(gc, 1, AT91_AIC5_IECR); 118 irq_reg_writel(gc, 1, AT91_AIC5_IECR);
111 gc->mask_cache |= d->mask; 119 gc->mask_cache |= d->mask;
112 irq_gc_unlock(gc); 120 irq_gc_unlock(bgc);
113} 121}
114 122
115static int aic5_retrigger(struct irq_data *d) 123static int aic5_retrigger(struct irq_data *d)
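
The aic5 fix distinguishes the per-line chip (whose registers are written) from the base chip (whose lock must serialize the shared select/control registers). A userspace model of locking the shared bank, assuming a single select register feeding a disable register:

#include <pthread.h>
#include <stdio.h>

/* All logical chips funnel through one select/control register pair,
 * so every writer must take the base chip's lock, never its own.
 */
static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int ssr;	/* shared "select source" register     */
static unsigned int idcr;	/* shared "interrupt disable" register */

static void mask_irq(unsigned int hwirq)
{
	pthread_mutex_lock(&base_lock);	/* lock of dgc->gc[0] */
	ssr = hwirq;			/* select the line... */
	idcr = 1;			/* ...then disable it */
	pthread_mutex_unlock(&base_lock);
}

int main(void)
{
	mask_irq(42);
	printf("ssr=%u idcr=%u\n", ssr, idcr);
	return 0;
}
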
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d60c88df5234..4b3b6f8aff0c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
968 968
969/* 969/*
970 * Generate a new unfragmented bio with the given size 970 * Generate a new unfragmented bio with the given size
971 * This should never violate the device limitations 971 * This should never violate the device limitations (but only because
972 * max_segment_size is being constrained to PAGE_SIZE).
972 * 973 *
973 * This function may be called concurrently. If we allocate from the mempool 974 * This function may be called concurrently. If we allocate from the mempool
974 * concurrently, there is a possibility of deadlock. For example, if we have 975 * concurrently, there is a possibility of deadlock. For example, if we have
@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
2045 return fn(ti, cc->dev, cc->start, ti->len, data); 2046 return fn(ti, cc->dev, cc->start, ti->len, data);
2046} 2047}
2047 2048
2049static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
2050{
2051 /*
2052 * Unfortunate constraint that is required to avoid the potential
2053 * for exceeding underlying device's max_segments limits -- due to
2054 * crypt_alloc_buffer() possibly allocating pages for the encryption
2055 * bio that are not as physically contiguous as the original bio.
2056 */
2057 limits->max_segment_size = PAGE_SIZE;
2058}
2059
2048static struct target_type crypt_target = { 2060static struct target_type crypt_target = {
2049 .name = "crypt", 2061 .name = "crypt",
2050 .version = {1, 14, 0}, 2062 .version = {1, 14, 1},
2051 .module = THIS_MODULE, 2063 .module = THIS_MODULE,
2052 .ctr = crypt_ctr, 2064 .ctr = crypt_ctr,
2053 .dtr = crypt_dtr, 2065 .dtr = crypt_dtr,
@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = {
2058 .resume = crypt_resume, 2070 .resume = crypt_resume,
2059 .message = crypt_message, 2071 .message = crypt_message,
2060 .iterate_devices = crypt_iterate_devices, 2072 .iterate_devices = crypt_iterate_devices,
2073 .io_hints = crypt_io_hints,
2061}; 2074};
2062 2075
2063static int __init dm_crypt_init(void) 2076static int __init dm_crypt_init(void)
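
The new crypt_io_hints() caps max_segment_size at PAGE_SIZE because crypt_alloc_buffer() may hand back pages that are not physically contiguous; with page-sized segments the worst-case segment count of a bio becomes predictable. A quick illustration of that bound (PAGE_SIZE assumed 4 KiB here; it is arch-dependent in reality):

#include <stdio.h>

#define PAGE_SIZE 4096u		/* assumed for illustration */

/* A clone bio built from individually allocated pages can span up to
 * this many discontiguous segments; capping max_segment_size at
 * PAGE_SIZE keeps the count within the device's max_segments.
 */
static unsigned int worst_case_segments(unsigned int len)
{
	return (len + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	printf("1 MiB bio -> up to %u segments\n",
	       worst_case_segments(1024 * 1024));
	return 0;
}
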
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6578b7bc1fbb..6fcbfb063366 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4249,6 +4249,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4249{ 4249{
4250 struct thin_c *tc = ti->private; 4250 struct thin_c *tc = ti->private;
4251 struct pool *pool = tc->pool; 4251 struct pool *pool = tc->pool;
4252 struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
4253
4254 if (!pool_limits->discard_granularity)
4255 return; /* pool's discard support is disabled */
4252 4256
4253 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; 4257 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
4254 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ 4258 limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 25868c2ec03e..02006f7109a8 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -592,6 +592,8 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
592 592
 593 /* conditionally create and add the binary file for error info buffer */ 593
594 if (afu->eb_len) { 594 if (afu->eb_len) {
595 sysfs_attr_init(&afu->attr_eb.attr);
596
595 afu->attr_eb.attr.name = "afu_err_buff"; 597 afu->attr_eb.attr.name = "afu_err_buff";
596 afu->attr_eb.attr.mode = S_IRUGO; 598 afu->attr_eb.attr.mode = S_IRUGO;
597 afu->attr_eb.size = afu->eb_len; 599 afu->attr_eb.size = afu->eb_len;
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 4b469cf9e60f..8504dbeacd3b 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -204,6 +204,8 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
204 if (!dir) 204 if (!dir)
205 return -ENOMEM; 205 return -ENOMEM;
206 206
207 dev->dbgfs_dir = dir;
208
207 f = debugfs_create_file("meclients", S_IRUSR, dir, 209 f = debugfs_create_file("meclients", S_IRUSR, dir,
208 dev, &mei_dbgfs_fops_meclients); 210 dev, &mei_dbgfs_fops_meclients);
209 if (!f) { 211 if (!f) {
@@ -228,7 +230,6 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
228 dev_err(dev->dev, "allow_fixed_address: registration failed\n"); 230 dev_err(dev->dev, "allow_fixed_address: registration failed\n");
229 goto err; 231 goto err;
230 } 232 }
231 dev->dbgfs_dir = dir;
232 return 0; 233 return 0;
233err: 234err:
234 mei_dbgfs_deregister(dev); 235 mei_dbgfs_deregister(dev);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 10f71c732b59..816d0e94961c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -326,7 +326,7 @@ static void arcdev_setup(struct net_device *dev)
326 dev->type = ARPHRD_ARCNET; 326 dev->type = ARPHRD_ARCNET;
327 dev->netdev_ops = &arcnet_netdev_ops; 327 dev->netdev_ops = &arcnet_netdev_ops;
328 dev->header_ops = &arcnet_header_ops; 328 dev->header_ops = &arcnet_header_ops;
329 dev->hard_header_len = sizeof(struct archdr); 329 dev->hard_header_len = sizeof(struct arc_hardware);
330 dev->mtu = choose_mtu(); 330 dev->mtu = choose_mtu();
331 331
332 dev->addr_len = ARCNET_ALEN; 332 dev->addr_len = ARCNET_ALEN;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 6f13f7206762..f8baa897d1a0 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2000,6 +2000,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
2000 */ 2000 */
2001 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); 2001 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
2002 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { 2002 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
2003 reg &= ~PORT_PCS_CTRL_UNFORCED;
2003 reg |= PORT_PCS_CTRL_FORCE_LINK | 2004 reg |= PORT_PCS_CTRL_FORCE_LINK |
2004 PORT_PCS_CTRL_LINK_UP | 2005 PORT_PCS_CTRL_LINK_UP |
2005 PORT_PCS_CTRL_DUPLEX_FULL | 2006 PORT_PCS_CTRL_DUPLEX_FULL |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index cfa37041ab71..c4bb8027b3fb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -689,16 +689,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
689 netdev_dbg(ndev, "No phy-handle found in DT\n"); 689 netdev_dbg(ndev, "No phy-handle found in DT\n");
690 return -ENODEV; 690 return -ENODEV;
691 } 691 }
692 pdata->phy_dev = of_phy_find_device(phy_np);
693 }
694 692
695 phy_dev = pdata->phy_dev; 693 phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
694 0, pdata->phy_mode);
695 if (!phy_dev) {
696 netdev_err(ndev, "Could not connect to PHY\n");
697 return -ENODEV;
698 }
699
700 pdata->phy_dev = phy_dev;
701 } else {
702 phy_dev = pdata->phy_dev;
696 703
697 if (!phy_dev || 704 if (!phy_dev ||
698 phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link, 705 phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
699 pdata->phy_mode)) { 706 pdata->phy_mode)) {
700 netdev_err(ndev, "Could not connect to PHY\n"); 707 netdev_err(ndev, "Could not connect to PHY\n");
701 return -ENODEV; 708 return -ENODEV;
709 }
702 } 710 }
703 711
704 pdata->phy_speed = SPEED_UNKNOWN; 712 pdata->phy_speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index f9cb99bfb511..ffd180570920 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = {
78 { .compatible = "snps,arc-emac" }, 78 { .compatible = "snps,arc-emac" },
79 { /* Sentinel */ } 79 { /* Sentinel */ }
80}; 80};
81MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
81 82
82static struct platform_driver emac_arc_driver = { 83static struct platform_driver emac_arc_driver = {
83 .probe = emac_arc_probe, 84 .probe = emac_arc_probe,
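
This MODULE_DEVICE_TABLE(of, ...) addition recurs throughout the diff (emac_arc, bcmsysport, bcmgenet, gianfar_ptp, ks8851, moxart): without it the OF modalias is never exported and udev cannot autoload the module when a matching device-tree node appears. The minimal shape, with a hypothetical compatible string (kernel code, builds only in-tree):

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "vendor,demo-device" },	/* hypothetical */
	{ /* sentinel */ }
};
/* Exports the table into the module's modalias info so udev can
 * modprobe the driver when a matching DT node turns up.
 */
MODULE_DEVICE_TABLE(of, demo_dt_ids);
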
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9a5a97ed4dd..f1b5364f3521 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2079,6 +2079,7 @@ static const struct of_device_id bcm_sysport_of_match[] = {
2079 { .compatible = "brcm,systemport" }, 2079 { .compatible = "brcm,systemport" },
2080 { /* sentinel */ } 2080 { /* sentinel */ }
2081}; 2081};
2082MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
2082 2083
2083static struct platform_driver bcm_sysport_driver = { 2084static struct platform_driver bcm_sysport_driver = {
2084 .probe = bcm_sysport_probe, 2085 .probe = bcm_sysport_probe,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ba936635322a..b5e64b02200c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1946,6 +1946,7 @@ struct bnx2x {
1946 u16 vlan_cnt; 1946 u16 vlan_cnt;
1947 u16 vlan_credit; 1947 u16 vlan_credit;
1948 u16 vxlan_dst_port; 1948 u16 vxlan_dst_port;
1949 u8 vxlan_dst_port_count;
1949 bool accept_any_vlan; 1950 bool accept_any_vlan;
1950}; 1951};
1951 1952
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e3da2bddf143..f1d62d5dbaff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3705,16 +3705,14 @@ out:
3705 3705
3706void bnx2x_update_mfw_dump(struct bnx2x *bp) 3706void bnx2x_update_mfw_dump(struct bnx2x *bp)
3707{ 3707{
3708 struct timeval epoc;
3709 u32 drv_ver; 3708 u32 drv_ver;
3710 u32 valid_dump; 3709 u32 valid_dump;
3711 3710
3712 if (!SHMEM2_HAS(bp, drv_info)) 3711 if (!SHMEM2_HAS(bp, drv_info))
3713 return; 3712 return;
3714 3713
3715 /* Update Driver load time */ 3714 /* Update Driver load time, possibly broken in y2038 */
3716 do_gettimeofday(&epoc); 3715 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3717 SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
3718 3716
3719 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true); 3717 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3720 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver); 3718 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
@@ -10110,12 +10108,18 @@ static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
10110 if (!netif_running(bp->dev)) 10108 if (!netif_running(bp->dev))
10111 return; 10109 return;
10112 10110
10113 if (bp->vxlan_dst_port || !IS_PF(bp)) { 10111 if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
10112 bp->vxlan_dst_port_count++;
10113 return;
10114 }
10115
10116 if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
10114 DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); 10117 DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
10115 return; 10118 return;
10116 } 10119 }
10117 10120
10118 bp->vxlan_dst_port = port; 10121 bp->vxlan_dst_port = port;
10122 bp->vxlan_dst_port_count = 1;
10119 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); 10123 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
10120} 10124}
10121 10125
@@ -10130,10 +10134,14 @@ static void bnx2x_add_vxlan_port(struct net_device *netdev,
10130 10134
10131static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) 10135static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
10132{ 10136{
10133 if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) { 10137 if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
10138 !IS_PF(bp)) {
10134 DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); 10139 DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
10135 return; 10140 return;
10136 } 10141 }
 10142 bp->vxlan_dst_port_count--;
 10143 if (bp->vxlan_dst_port_count)
10144 return;
10137 10145
10138 if (netif_running(bp->dev)) { 10146 if (netif_running(bp->dev)) {
10139 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); 10147 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index c9bd7f16018e..ff702a707a91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4319,8 +4319,16 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4319 4319
4320 /* RSS keys */ 4320 /* RSS keys */
4321 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { 4321 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4322 memcpy(&data->rss_key[0], &p->rss_key[0], 4322 u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
4323 sizeof(data->rss_key)); 4323 const u8 *src = (const u8 *)p->rss_key;
4324 int i;
4325
4326 /* Apparently, bnx2x reads this array in reverse order
4327 * We need to byte swap rss_key to comply with Toeplitz specs.
4328 */
4329 for (i = 0; i < sizeof(data->rss_key); i++)
4330 *--dst = *src++;
4331
4324 caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4332 caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4325 } 4333 }
4326 4334
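
The RSS hunk replaces a straight memcpy with a byte-reversed copy because the device consumes the Toeplitz key back to front. The same reversal in plain C:

#include <stdio.h>

/* Copy len bytes of src into dst back to front. */
static void copy_reversed(unsigned char *dst, const unsigned char *src,
			  unsigned int len)
{
	unsigned char *p = dst + len;

	while (len--)
		*--p = *src++;
}

int main(void)
{
	const unsigned char key[4] = { 1, 2, 3, 4 };
	unsigned char out[4];

	copy_reversed(out, key, sizeof(out));
	printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]);
	return 0;
}
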
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fadbd0088d3e..3bc701e4c59e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3155,6 +3155,7 @@ static const struct of_device_id bcmgenet_match[] = {
3155 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 }, 3155 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
3156 { }, 3156 { },
3157}; 3157};
3158MODULE_DEVICE_TABLE(of, bcmgenet_match);
3158 3159
3159static int bcmgenet_probe(struct platform_device *pdev) 3160static int bcmgenet_probe(struct platform_device *pdev)
3160{ 3161{
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 5d0753cc7e73..04b0d16b210e 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2400 q0->rcb->id = 0; 2400 q0->rcb->id = 0;
2401 q0->rx_packets = q0->rx_bytes = 0; 2401 q0->rx_packets = q0->rx_bytes = 0;
2402 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; 2402 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2403 q0->rxbuf_map_failed = 0;
2403 2404
2404 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, 2405 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2405 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); 2406 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
@@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2428 : rx_cfg->q1_buf_size; 2429 : rx_cfg->q1_buf_size;
2429 q1->rx_packets = q1->rx_bytes = 0; 2430 q1->rx_packets = q1->rx_bytes = 0;
2430 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; 2431 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2432 q1->rxbuf_map_failed = 0;
2431 2433
2432 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, 2434 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2433 &hqpt_mem[i], &hsqpt_mem[i], 2435 &hqpt_mem[i], &hsqpt_mem[i],
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e0e797f2ea14..c438d032e8bf 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -587,6 +587,7 @@ struct bna_rxq {
587 u64 rx_bytes; 587 u64 rx_bytes;
588 u64 rx_packets_with_error; 588 u64 rx_packets_with_error;
589 u64 rxbuf_alloc_failed; 589 u64 rxbuf_alloc_failed;
590 u64 rxbuf_map_failed;
590}; 591};
591 592
592/* RxQ pair */ 593/* RxQ pair */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 506047c38607..21a0cfc3e7ec 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
399 } 399 }
400 400
401 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, 401 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
402 unmap_q->map_size, DMA_FROM_DEVICE); 402 unmap_q->map_size, DMA_FROM_DEVICE);
403 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
404 put_page(page);
405 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
406 rcb->rxq->rxbuf_map_failed++;
407 goto finishing;
408 }
403 409
404 unmap->page = page; 410 unmap->page = page;
405 unmap->page_offset = page_offset; 411 unmap->page_offset = page_offset;
@@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
454 rcb->rxq->rxbuf_alloc_failed++; 460 rcb->rxq->rxbuf_alloc_failed++;
455 goto finishing; 461 goto finishing;
456 } 462 }
463
457 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 464 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
458 buff_sz, DMA_FROM_DEVICE); 465 buff_sz, DMA_FROM_DEVICE);
466 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
467 dev_kfree_skb_any(skb);
468 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
469 rcb->rxq->rxbuf_map_failed++;
470 goto finishing;
471 }
459 472
460 unmap->skb = skb; 473 unmap->skb = skb;
461 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); 474 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
@@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3025 unmap = head_unmap; 3038 unmap = head_unmap;
3026 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 3039 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3027 len, DMA_TO_DEVICE); 3040 len, DMA_TO_DEVICE);
3041 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3042 dev_kfree_skb_any(skb);
3043 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3044 return NETDEV_TX_OK;
3045 }
3028 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); 3046 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3029 txqent->vector[0].length = htons(len); 3047 txqent->vector[0].length = htons(len);
3030 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); 3048 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
@@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3056 3074
3057 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, 3075 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3058 0, size, DMA_TO_DEVICE); 3076 0, size, DMA_TO_DEVICE);
3077 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3078 /* Undo the changes starting at tcb->producer_index */
3079 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3080 tcb->producer_index);
3081 dev_kfree_skb_any(skb);
3082 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3083 return NETDEV_TX_OK;
3084 }
3085
3059 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); 3086 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3060 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 3087 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3061 txqent->vector[vect_id].length = htons(size); 3088 txqent->vector[vect_id].length = htons(size);
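
The bnad hunks add the checks that every dma_map_single()/skb_frag_dma_map() call was missing: on a mapping failure, everything mapped so far for the packet must be unwound and the skb dropped rather than requeued. A sketch of that unwind shape (stubbed mapping functions; fragment 2 is made to fail for demonstration):

#include <stdio.h>

/* Stub: pretend mapping fragment 2 fails, as a real DMA map can. */
static int map_fragment(int i)
{
	return i != 2;
}

static void unmap_fragments(int n)
{
	printf("unwound %d mapped fragment(s)\n", n);
}

/* Map all fragments of a packet; on any failure unwind what was mapped
 * and report the packet as consumed (dropped), never as requeued.
 */
static int xmit(int nr_frags)
{
	int i;

	for (i = 0; i < nr_frags; i++) {
		if (!map_fragment(i)) {
			unmap_fragments(i);
			return -1;	/* drop skb, count the failure */
		}
	}
	return 0;
}

int main(void)
{
	printf("xmit: %d\n", xmit(4));
	return 0;
}
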
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index faedbf24777e..f4ed816b93ee 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -175,6 +175,7 @@ struct bnad_drv_stats {
175 u64 tx_skb_headlen_zero; 175 u64 tx_skb_headlen_zero;
176 u64 tx_skb_frag_zero; 176 u64 tx_skb_frag_zero;
177 u64 tx_skb_len_mismatch; 177 u64 tx_skb_len_mismatch;
178 u64 tx_skb_map_failed;
178 179
179 u64 hw_stats_updates; 180 u64 hw_stats_updates;
180 u64 netif_rx_dropped; 181 u64 netif_rx_dropped;
@@ -189,6 +190,7 @@ struct bnad_drv_stats {
189 u64 rx_unmap_q_alloc_failed; 190 u64 rx_unmap_q_alloc_failed;
190 191
191 u64 rxbuf_alloc_failed; 192 u64 rxbuf_alloc_failed;
193 u64 rxbuf_map_failed;
192}; 194};
193 195
194/* Complete driver stats */ 196/* Complete driver stats */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 2bdfc5dff4b1..0e4fdc3dd729 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
90 "tx_skb_headlen_zero", 90 "tx_skb_headlen_zero",
91 "tx_skb_frag_zero", 91 "tx_skb_frag_zero",
92 "tx_skb_len_mismatch", 92 "tx_skb_len_mismatch",
93 "tx_skb_map_failed",
93 "hw_stats_updates", 94 "hw_stats_updates",
94 "netif_rx_dropped", 95 "netif_rx_dropped",
95 96
@@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
102 "tx_unmap_q_alloc_failed", 103 "tx_unmap_q_alloc_failed",
103 "rx_unmap_q_alloc_failed", 104 "rx_unmap_q_alloc_failed",
104 "rxbuf_alloc_failed", 105 "rxbuf_alloc_failed",
106 "rxbuf_map_failed",
105 107
106 "mac_stats_clr_cnt", 108 "mac_stats_clr_cnt",
107 "mac_frame_64", 109 "mac_frame_64",
@@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
807 rx_packets_with_error; 809 rx_packets_with_error;
808 buf[bi++] = rcb->rxq-> 810 buf[bi++] = rcb->rxq->
809 rxbuf_alloc_failed; 811 rxbuf_alloc_failed;
812 buf[bi++] = rcb->rxq->rxbuf_map_failed;
810 buf[bi++] = rcb->producer_index; 813 buf[bi++] = rcb->producer_index;
811 buf[bi++] = rcb->consumer_index; 814 buf[bi++] = rcb->consumer_index;
812 } 815 }
@@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
821 rx_packets_with_error; 824 rx_packets_with_error;
822 buf[bi++] = rcb->rxq-> 825 buf[bi++] = rcb->rxq->
823 rxbuf_alloc_failed; 826 rxbuf_alloc_failed;
827 buf[bi++] = rcb->rxq->rxbuf_map_failed;
824 buf[bi++] = rcb->producer_index; 828 buf[bi++] = rcb->producer_index;
825 buf[bi++] = rcb->consumer_index; 829 buf[bi++] = rcb->consumer_index;
826 } 830 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 8353a6cbfcc2..03ed00c49823 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -157,6 +157,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
157 CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */ 157 CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
158 CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */ 158 CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
159 CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */ 159 CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
160 CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
161 CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
162 CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
163 CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
164 CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
160 165
161 /* T6 adapters: 166 /* T6 adapters:
162 */ 167 */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 0a27805cbbbd..821540913343 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -582,6 +582,7 @@ struct be_adapter {
582 u16 pvid; 582 u16 pvid;
583 __be16 vxlan_port; 583 __be16 vxlan_port;
584 int vxlan_port_count; 584 int vxlan_port_count;
585 int vxlan_port_aliases;
585 struct phy_info phy; 586 struct phy_info phy;
586 u8 wol_cap; 587 u8 wol_cap;
587 bool wol_en; 588 bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 12687bf52b95..7bf51a1a0a77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5176,6 +5176,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5176 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) 5176 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5177 return; 5177 return;
5178 5178
5179 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
5180 adapter->vxlan_port_aliases++;
5181 return;
5182 }
5183
5179 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { 5184 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
5180 dev_info(dev, 5185 dev_info(dev,
5181 "Only one UDP port supported for VxLAN offloads\n"); 5186 "Only one UDP port supported for VxLAN offloads\n");
@@ -5226,6 +5231,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5226 if (adapter->vxlan_port != port) 5231 if (adapter->vxlan_port != port)
5227 goto done; 5232 goto done;
5228 5233
5234 if (adapter->vxlan_port_aliases) {
5235 adapter->vxlan_port_aliases--;
5236 return;
5237 }
5238
5229 be_disable_vxlan_offloads(adapter); 5239 be_disable_vxlan_offloads(adapter);
5230 5240
5231 dev_info(&adapter->pdev->dev, 5241 dev_info(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b69d061d90f..710715fcb23d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1710,8 +1710,10 @@ static void gfar_configure_serdes(struct net_device *dev)
1710 * everything for us? Resetting it takes the link down and requires 1710 * everything for us? Resetting it takes the link down and requires
1711 * several seconds for it to come back. 1711 * several seconds for it to come back.
1712 */ 1712 */
1713 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) 1713 if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
1714 put_device(&tbiphy->dev);
1714 return; 1715 return;
1716 }
1715 1717
1716 /* Single clk mode, mii mode off(for serdes communication) */ 1718 /* Single clk mode, mii mode off(for serdes communication) */
1717 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); 1719 phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
@@ -1723,6 +1725,8 @@ static void gfar_configure_serdes(struct net_device *dev)
1723 phy_write(tbiphy, MII_BMCR, 1725 phy_write(tbiphy, MII_BMCR,
1724 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX | 1726 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
1725 BMCR_SPEED1000); 1727 BMCR_SPEED1000);
1728
1729 put_device(&tbiphy->dev);
1726} 1730}
1727 1731
1728static int __gfar_is_rx_idle(struct gfar_private *priv) 1732static int __gfar_is_rx_idle(struct gfar_private *priv)
@@ -1970,8 +1974,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1970 /* Install our interrupt handlers for Error, 1974 /* Install our interrupt handlers for Error,
1971 * Transmit, and Receive 1975 * Transmit, and Receive
1972 */ 1976 */
1973 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 1977 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
1974 IRQF_NO_SUSPEND,
1975 gfar_irq(grp, ER)->name, grp); 1978 gfar_irq(grp, ER)->name, grp);
1976 if (err < 0) { 1979 if (err < 0) {
1977 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 1980 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1979,6 +1982,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1979 1982
1980 goto err_irq_fail; 1983 goto err_irq_fail;
1981 } 1984 }
1985 enable_irq_wake(gfar_irq(grp, ER)->irq);
1986
1982 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0, 1987 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
1983 gfar_irq(grp, TX)->name, grp); 1988 gfar_irq(grp, TX)->name, grp);
1984 if (err < 0) { 1989 if (err < 0) {
@@ -1994,14 +1999,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
1994 goto rx_irq_fail; 1999 goto rx_irq_fail;
1995 } 2000 }
1996 } else { 2001 } else {
1997 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 2002 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
1998 IRQF_NO_SUSPEND,
1999 gfar_irq(grp, TX)->name, grp); 2003 gfar_irq(grp, TX)->name, grp);
2000 if (err < 0) { 2004 if (err < 0) {
2001 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2005 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2002 gfar_irq(grp, TX)->irq); 2006 gfar_irq(grp, TX)->irq);
2003 goto err_irq_fail; 2007 goto err_irq_fail;
2004 } 2008 }
2009 enable_irq_wake(gfar_irq(grp, TX)->irq);
2005 } 2010 }
2006 2011
2007 return 0; 2012 return 0;
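
The gianfar change swaps IRQF_NO_SUSPEND, which kept the handler running across suspend, for enable_irq_wake(), which arms the line as a wakeup source instead. A minimal kernel-style sketch of the resulting pattern (hypothetical handler and name, builds only in-tree):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_setup_wake_irq(unsigned int irq, void *dev)
{
	int err;

	/* Plain request: no IRQF_NO_SUSPEND... */
	err = request_irq(irq, demo_handler, 0, "demo", dev);
	if (err)
		return err;
	/* ...the line is armed as a wakeup source instead. */
	enable_irq_wake(irq);
	return 0;
}
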
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 8e3cd77aa347..664d0c261269 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = {
557 { .compatible = "fsl,etsec-ptp" }, 557 { .compatible = "fsl,etsec-ptp" },
558 {}, 558 {},
559}; 559};
560MODULE_DEVICE_TABLE(of, match_table);
560 561
561static struct platform_driver gianfar_ptp_driver = { 562static struct platform_driver gianfar_ptp_driver = {
562 .driver = { 563 .driver = {
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4dd40e057f40..650f7888e32b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1384 value = phy_read(tbiphy, ENET_TBI_MII_CR); 1384 value = phy_read(tbiphy, ENET_TBI_MII_CR);
1385 value &= ~0x1000; /* Turn off autonegotiation */ 1385 value &= ~0x1000; /* Turn off autonegotiation */
1386 phy_write(tbiphy, ENET_TBI_MII_CR, value); 1386 phy_write(tbiphy, ENET_TBI_MII_CR, value);
1387
1388 put_device(&tbiphy->dev);
1387 } 1389 }
1388 1390
1389 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); 1391 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev)
1702 * everything for us? Resetting it takes the link down and requires 1704 * everything for us? Resetting it takes the link down and requires
1703 * several seconds for it to come back. 1705 * several seconds for it to come back.
1704 */ 1706 */
1705 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) 1707 if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
1708 put_device(&tbiphy->dev);
1706 return; 1709 return;
1710 }
1707 1711
1708 /* Single clk mode, mii mode off(for serdes communication) */ 1712 /* Single clk mode, mii mode off(for serdes communication) */
1709 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); 1713 phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
@@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev)
1711 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); 1715 phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
1712 1716
1713 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); 1717 phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
1718
1719 put_device(&tbiphy->dev);
1714} 1720}
1715 1721
1716/* Configure the PHY for dev. 1722/* Configure the PHY for dev.
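
The put_device() additions in gianfar, ucc_geth and (below) mvneta all pair with of_phy_find_device(), which takes a reference on the PHY's struct device; every exit path, including the early return, must drop it. A kernel-style sketch of the balanced pattern (hypothetical function, builds only in-tree):

#include <linux/mii.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void demo_poke_tbi(struct device_node *np)
{
	struct phy_device *tbiphy = of_phy_find_device(np);

	if (!tbiphy)
		return;
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->dev);	/* early exit drops ref */
		return;
	}
	phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
	put_device(&tbiphy->dev);		/* normal exit drops ref */
}
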
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fe2299ac4f5c..514df76fc70f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1479,6 +1479,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1479 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); 1479 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1480 struct sk_buff *skb; 1480 struct sk_buff *skb;
1481 unsigned char *data; 1481 unsigned char *data;
1482 dma_addr_t phys_addr;
1482 u32 rx_status; 1483 u32 rx_status;
1483 int rx_bytes, err; 1484 int rx_bytes, err;
1484 1485
@@ -1486,6 +1487,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1486 rx_status = rx_desc->status; 1487 rx_status = rx_desc->status;
1487 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1488 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1488 data = (unsigned char *)rx_desc->buf_cookie; 1489 data = (unsigned char *)rx_desc->buf_cookie;
1490 phys_addr = rx_desc->buf_phys_addr;
1489 1491
1490 if (!mvneta_rxq_desc_is_first_last(rx_status) || 1492 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1491 (rx_status & MVNETA_RXD_ERR_SUMMARY)) { 1493 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
@@ -1534,7 +1536,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1534 if (!skb) 1536 if (!skb)
1535 goto err_drop_frame; 1537 goto err_drop_frame;
1536 1538
1537 dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, 1539 dma_unmap_single(dev->dev.parent, phys_addr,
1538 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1540 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1539 1541
1540 rcvd_pkts++; 1542 rcvd_pkts++;
@@ -3173,6 +3175,8 @@ static int mvneta_probe(struct platform_device *pdev)
3173 struct phy_device *phy = of_phy_find_device(dn); 3175 struct phy_device *phy = of_phy_find_device(dn);
3174 3176
3175 mvneta_fixed_link_update(pp, phy); 3177 mvneta_fixed_link_update(pp, phy);
3178
3179 put_device(&phy->dev);
3176 } 3180 }
3177 3181
3178 return 0; 3182 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4c7de8c44659..e7a5000aa12c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1270,8 +1270,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1270 rss_context->hash_fn = MLX4_RSS_HASH_TOP; 1270 rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1271 memcpy(rss_context->rss_key, priv->rss_key, 1271 memcpy(rss_context->rss_key, priv->rss_key,
1272 MLX4_EN_RSS_KEY_SIZE); 1272 MLX4_EN_RSS_KEY_SIZE);
1273 netdev_rss_key_fill(rss_context->rss_key,
1274 MLX4_EN_RSS_KEY_SIZE);
1275 } else { 1273 } else {
1276 en_err(priv, "Unknown RSS hash function requested\n"); 1274 en_err(priv, "Unknown RSS hash function requested\n");
1277 err = -EINVAL; 1275 err = -EINVAL;
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 66d4ab703f45..60f43ec22175 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1601,6 +1601,7 @@ static const struct of_device_id ks8851_match_table[] = {
1601 { .compatible = "micrel,ks8851" }, 1601 { .compatible = "micrel,ks8851" },
1602 { } 1602 { }
1603}; 1603};
1604MODULE_DEVICE_TABLE(of, ks8851_match_table);
1604 1605
1605static struct spi_driver ks8851_driver = { 1606static struct spi_driver ks8851_driver = {
1606 .driver = { 1607 .driver = {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index becbb5f1f5a7..a10c928bbd6b 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = {
552 { .compatible = "moxa,moxart-mac" }, 552 { .compatible = "moxa,moxart-mac" },
553 { } 553 { }
554}; 554};
555MODULE_DEVICE_TABLE(of, moxart_mac_match);
555 556
556static struct platform_driver moxart_mac_driver = { 557static struct platform_driver moxart_mac_driver = {
557 .probe = moxart_mac_probe, 558 .probe = moxart_mac_probe,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 06bcc734fe8d..d6696cfa11d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -536,6 +536,7 @@ struct qlcnic_hardware_context {
536 u8 extend_lb_time; 536 u8 extend_lb_time;
537 u8 phys_port_id[ETH_ALEN]; 537 u8 phys_port_id[ETH_ALEN];
538 u8 lb_mode; 538 u8 lb_mode;
539 u8 vxlan_port_count;
539 u16 vxlan_port; 540 u16 vxlan_port;
540 struct device *hwmon_dev; 541 struct device *hwmon_dev;
541 u32 post_mode; 542 u32 post_mode;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 8b08b20e8b30..d4481454b5f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
483 /* Adapter supports only one VXLAN port. Use very first port 483 /* Adapter supports only one VXLAN port. Use very first port
484 * for enabling offload 484 * for enabling offload
485 */ 485 */
486 if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port) 486 if (!qlcnic_encap_rx_offload(adapter))
487 return; 487 return;
488 if (!ahw->vxlan_port_count) {
489 ahw->vxlan_port_count = 1;
490 ahw->vxlan_port = ntohs(port);
491 adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
492 return;
493 }
494 if (ahw->vxlan_port == ntohs(port))
495 ahw->vxlan_port_count++;
488 496
489 ahw->vxlan_port = ntohs(port);
490 adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
491} 497}
492 498
493static void qlcnic_del_vxlan_port(struct net_device *netdev, 499static void qlcnic_del_vxlan_port(struct net_device *netdev,
@@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
496 struct qlcnic_adapter *adapter = netdev_priv(netdev); 502 struct qlcnic_adapter *adapter = netdev_priv(netdev);
497 struct qlcnic_hardware_context *ahw = adapter->ahw; 503 struct qlcnic_hardware_context *ahw = adapter->ahw;
498 504
499 if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port || 505 if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
500 (ahw->vxlan_port != ntohs(port))) 506 (ahw->vxlan_port != ntohs(port)))
501 return; 507 return;
502 508
503 adapter->flags |= QLCNIC_DEL_VXLAN_PORT; 509 ahw->vxlan_port_count--;
510 if (!ahw->vxlan_port_count)
511 adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
504} 512}
505 513
506static netdev_features_t qlcnic_features_check(struct sk_buff *skb, 514static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
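
Note: the qlcnic change above turns a single "is a port programmed?" flag into a reference count, so that repeated adds of the same VXLAN port are tracked and the hardware offload is only torn down when the last user deletes it. Below is a standalone user-space model of exactly that counting logic (names are mine; the `programmed` field stands in for the QLCNIC_ADD/DEL_VXLAN_PORT flags):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct hw_state {
	uint8_t  port_count;
	uint16_t port;
	int      programmed;   /* offload configured in hardware */
};

static void add_port(struct hw_state *hw, uint16_t port)
{
	if (!hw->port_count) {
		hw->port_count = 1;
		hw->port = port;
		hw->programmed = 1;
		return;
	}
	if (hw->port == port)
		hw->port_count++;
	/* a second, different port is silently ignored, as in the patch */
}

static void del_port(struct hw_state *hw, uint16_t port)
{
	if (!hw->port_count || hw->port != port)
		return;
	if (!--hw->port_count)
		hw->programmed = 0;
}

int main(void)
{
	struct hw_state hw = { 0 };

	add_port(&hw, 4789);
	add_port(&hw, 4789);      /* second user of the same port */
	del_port(&hw, 4789);
	assert(hw.programmed);    /* still one user left */
	del_port(&hw, 4789);
	assert(!hw.programmed);   /* last user gone, offload torn down */
	puts("refcount model ok");
	return 0;
}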
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b3c191..686334f4588d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
157 NWayAdvert = 0x66, /* MII ADVERTISE */ 157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */ 158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */ 159 NWayExpansion = 0x6A, /* MII Expansion */
160 TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */
160 Config5 = 0xD8, /* Config5 */ 161 Config5 = 0xD8, /* Config5 */
161 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */ 162 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
162 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */ 163 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
341 unsigned tx_tail; 342 unsigned tx_tail;
342 struct cp_desc *tx_ring; 343 struct cp_desc *tx_ring;
343 struct sk_buff *tx_skb[CP_TX_RING_SIZE]; 344 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
345 u32 tx_opts[CP_TX_RING_SIZE];
344 346
345 unsigned rx_buf_sz; 347 unsigned rx_buf_sz;
346 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */ 348 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp)
665 BUG_ON(!skb); 667 BUG_ON(!skb);
666 668
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), 669 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
668 le32_to_cpu(txd->opts1) & 0xffff, 670 cp->tx_opts[tx_tail] & 0xffff,
669 PCI_DMA_TODEVICE); 671 PCI_DMA_TODEVICE);
670 672
671 if (status & LastFrag) { 673 if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
733{ 735{
734 struct cp_private *cp = netdev_priv(dev); 736 struct cp_private *cp = netdev_priv(dev);
735 unsigned entry; 737 unsigned entry;
736 u32 eor, flags; 738 u32 eor, opts1;
737 unsigned long intr_flags; 739 unsigned long intr_flags;
738 __le32 opts2; 740 __le32 opts2;
739 int mss = 0; 741 int mss = 0;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
753 mss = skb_shinfo(skb)->gso_size; 755 mss = skb_shinfo(skb)->gso_size;
754 756
755 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb)); 757 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
758 opts1 = DescOwn;
759 if (mss)
760 opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
761 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
762 const struct iphdr *ip = ip_hdr(skb);
763 if (ip->protocol == IPPROTO_TCP)
764 opts1 |= IPCS | TCPCS;
765 else if (ip->protocol == IPPROTO_UDP)
766 opts1 |= IPCS | UDPCS;
767 else {
768 WARN_ONCE(1,
769 "Net bug: asked to checksum invalid Legacy IP packet\n");
770 goto out_dma_error;
771 }
772 }
756 773
757 if (skb_shinfo(skb)->nr_frags == 0) { 774 if (skb_shinfo(skb)->nr_frags == 0) {
758 struct cp_desc *txd = &cp->tx_ring[entry]; 775 struct cp_desc *txd = &cp->tx_ring[entry];
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
768 txd->addr = cpu_to_le64(mapping); 785 txd->addr = cpu_to_le64(mapping);
769 wmb(); 786 wmb();
770 787
771 flags = eor | len | DescOwn | FirstFrag | LastFrag; 788 opts1 |= eor | len | FirstFrag | LastFrag;
772
773 if (mss)
774 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
775 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
776 const struct iphdr *ip = ip_hdr(skb);
777 if (ip->protocol == IPPROTO_TCP)
778 flags |= IPCS | TCPCS;
779 else if (ip->protocol == IPPROTO_UDP)
780 flags |= IPCS | UDPCS;
781 else
782 WARN_ON(1); /* we need a WARN() */
783 }
784 789
785 txd->opts1 = cpu_to_le32(flags); 790 txd->opts1 = cpu_to_le32(opts1);
786 wmb(); 791 wmb();
787 792
788 cp->tx_skb[entry] = skb; 793 cp->tx_skb[entry] = skb;
789 entry = NEXT_TX(entry); 794 cp->tx_opts[entry] = opts1;
795 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
796 entry, skb->len);
790 } else { 797 } else {
791 struct cp_desc *txd; 798 struct cp_desc *txd;
792 u32 first_len, first_eor; 799 u32 first_len, first_eor, ctrl;
793 dma_addr_t first_mapping; 800 dma_addr_t first_mapping;
794 int frag, first_entry = entry; 801 int frag, first_entry = entry;
795 const struct iphdr *ip = ip_hdr(skb);
796 802
797 /* We must give this initial chunk to the device last. 803 /* We must give this initial chunk to the device last.
798 * Otherwise we could race with the device. 804 * Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
805 goto out_dma_error; 811 goto out_dma_error;
806 812
807 cp->tx_skb[entry] = skb; 813 cp->tx_skb[entry] = skb;
808 entry = NEXT_TX(entry);
809 814
810 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 815 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
811 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 816 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
812 u32 len; 817 u32 len;
813 u32 ctrl;
814 dma_addr_t mapping; 818 dma_addr_t mapping;
815 819
820 entry = NEXT_TX(entry);
821
816 len = skb_frag_size(this_frag); 822 len = skb_frag_size(this_frag);
817 mapping = dma_map_single(&cp->pdev->dev, 823 mapping = dma_map_single(&cp->pdev->dev,
818 skb_frag_address(this_frag), 824 skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
824 830
825 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; 831 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
826 832
827 ctrl = eor | len | DescOwn; 833 ctrl = opts1 | eor | len;
828
829 if (mss)
830 ctrl |= LargeSend |
831 ((mss & MSSMask) << MSSShift);
832 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
833 if (ip->protocol == IPPROTO_TCP)
834 ctrl |= IPCS | TCPCS;
835 else if (ip->protocol == IPPROTO_UDP)
836 ctrl |= IPCS | UDPCS;
837 else
838 BUG();
839 }
840 834
841 if (frag == skb_shinfo(skb)->nr_frags - 1) 835 if (frag == skb_shinfo(skb)->nr_frags - 1)
842 ctrl |= LastFrag; 836 ctrl |= LastFrag;
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
849 txd->opts1 = cpu_to_le32(ctrl); 843 txd->opts1 = cpu_to_le32(ctrl);
850 wmb(); 844 wmb();
851 845
846 cp->tx_opts[entry] = ctrl;
852 cp->tx_skb[entry] = skb; 847 cp->tx_skb[entry] = skb;
853 entry = NEXT_TX(entry);
854 } 848 }
855 849
856 txd = &cp->tx_ring[first_entry]; 850 txd = &cp->tx_ring[first_entry];
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
858 txd->addr = cpu_to_le64(first_mapping); 852 txd->addr = cpu_to_le64(first_mapping);
859 wmb(); 853 wmb();
860 854
861 if (skb->ip_summed == CHECKSUM_PARTIAL) { 855 ctrl = opts1 | first_eor | first_len | FirstFrag;
862 if (ip->protocol == IPPROTO_TCP) 856 txd->opts1 = cpu_to_le32(ctrl);
863 txd->opts1 = cpu_to_le32(first_eor | first_len |
864 FirstFrag | DescOwn |
865 IPCS | TCPCS);
866 else if (ip->protocol == IPPROTO_UDP)
867 txd->opts1 = cpu_to_le32(first_eor | first_len |
868 FirstFrag | DescOwn |
869 IPCS | UDPCS);
870 else
871 BUG();
872 } else
873 txd->opts1 = cpu_to_le32(first_eor | first_len |
874 FirstFrag | DescOwn);
875 wmb(); 857 wmb();
858
859 cp->tx_opts[first_entry] = ctrl;
860 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
861 first_entry, entry, skb->len);
876 } 862 }
877 cp->tx_head = entry; 863 cp->tx_head = NEXT_TX(entry);
878 864
879 netdev_sent_queue(dev, skb->len); 865 netdev_sent_queue(dev, skb->len);
880 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
881 entry, skb->len);
882 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) 866 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
883 netif_stop_queue(dev); 867 netif_stop_queue(dev);
884 868
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp)
1115{ 1099{
1116 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1100 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1117 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); 1101 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1102 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1118 1103
1119 cp_init_rings_index(cp); 1104 cp_init_rings_index(cp);
1120 1105
@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
1151 desc = cp->rx_ring + i; 1136 desc = cp->rx_ring + i;
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), 1137 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1153 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1138 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1154 dev_kfree_skb(cp->rx_skb[i]); 1139 dev_kfree_skb_any(cp->rx_skb[i]);
1155 } 1140 }
1156 } 1141 }
1157 1142
@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp)
1164 le32_to_cpu(desc->opts1) & 0xffff, 1149 le32_to_cpu(desc->opts1) & 0xffff,
1165 PCI_DMA_TODEVICE); 1150 PCI_DMA_TODEVICE);
1166 if (le32_to_cpu(desc->opts1) & LastFrag) 1151 if (le32_to_cpu(desc->opts1) & LastFrag)
1167 dev_kfree_skb(skb); 1152 dev_kfree_skb_any(skb);
1168 cp->dev->stats.tx_dropped++; 1153 cp->dev->stats.tx_dropped++;
1169 } 1154 }
1170 } 1155 }
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp)
1172 1157
1173 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); 1158 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1174 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1159 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1160 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1175 1161
1176 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); 1162 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1177 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); 1163 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev)
1249{ 1235{
1250 struct cp_private *cp = netdev_priv(dev); 1236 struct cp_private *cp = netdev_priv(dev);
1251 unsigned long flags; 1237 unsigned long flags;
1252 int rc; 1238 int rc, i;
1253 1239
1254 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n", 1240 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1255 cpr8(Cmd), cpr16(CpCmd), 1241 cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev)
1257 1243
1258 spin_lock_irqsave(&cp->lock, flags); 1244 spin_lock_irqsave(&cp->lock, flags);
1259 1245
1246 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1247 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
1248 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1249 netif_dbg(cp, tx_err, cp->dev,
1250 "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
1251 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
1252 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
1253 le64_to_cpu(cp->tx_ring[i].addr),
1254 cp->tx_skb[i]);
1255 }
1256
1260 cp_stop_hw(cp); 1257 cp_stop_hw(cp);
1261 cp_clean_rings(cp); 1258 cp_clean_rings(cp);
1262 rc = cp_init_rings(cp); 1259 rc = cp_init_rings(cp);
1263 cp_start_hw(cp); 1260 cp_start_hw(cp);
1264 cp_enable_irq(cp); 1261 __cp_set_rx_mode(dev);
1262 cpw16_f(IntrMask, cp_norx_intr_mask);
1265 1263
1266 netif_wake_queue(dev); 1264 netif_wake_queue(dev);
1265 napi_schedule_irqoff(&cp->napi);
1267 1266
1268 spin_unlock_irqrestore(&cp->lock, flags); 1267 spin_unlock_irqrestore(&cp->lock, flags);
1269} 1268}
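
Note: the central idea in the 8139cp diff above is the new tx_opts[] shadow array. Once DescOwn is set, the NIC owns the descriptor and apparently may rewrite opts1, so the driver keeps its own copy of the value it wrote, both for the dma_unmap_single() length at completion time and for the new timeout dump. A hedged kernel-style sketch of the pattern, with illustrative struct and field names (not the driver's):

#include <linux/dma-mapping.h>

#define RING_SIZE 64

struct tx_desc { __le32 opts1; __le32 opts2; __le64 addr; };

struct tx_ring_sw {
	struct tx_desc *ring;
	u32 opts[RING_SIZE];              /* software copy of opts1 */
};

static void queue_slot(struct tx_ring_sw *t, unsigned int e,
		       dma_addr_t mapping, u32 opts1)
{
	t->ring[e].addr = cpu_to_le64(mapping);
	t->opts[e] = opts1;               /* saved before ownership flips */
	wmb();                            /* address visible before DescOwn */
	t->ring[e].opts1 = cpu_to_le32(opts1);
}

static void complete_slot(struct device *d, struct tx_ring_sw *t,
			  unsigned int e)
{
	/* trust the saved copy, not the possibly-rewritten descriptor */
	dma_unmap_single(d, le64_to_cpu(t->ring[e].addr),
			 t->opts[e] & 0xffff, DMA_TO_DEVICE);
}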
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b735fa22ac95..ebf6abc4853f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus)
161 161
162 if (!gpio_request(reset_gpio, "mdio-reset")) { 162 if (!gpio_request(reset_gpio, "mdio-reset")) {
163 gpio_direction_output(reset_gpio, active_low ? 1 : 0); 163 gpio_direction_output(reset_gpio, active_low ? 1 : 0);
164 udelay(data->delays[0]); 164 if (data->delays[0])
165 msleep(DIV_ROUND_UP(data->delays[0], 1000));
166
165 gpio_set_value(reset_gpio, active_low ? 0 : 1); 167 gpio_set_value(reset_gpio, active_low ? 0 : 1);
166 udelay(data->delays[1]); 168 if (data->delays[1])
169 msleep(DIV_ROUND_UP(data->delays[1], 1000));
170
167 gpio_set_value(reset_gpio, active_low ? 1 : 0); 171 gpio_set_value(reset_gpio, active_low ? 1 : 0);
168 udelay(data->delays[2]); 172 if (data->delays[2])
173 msleep(DIV_ROUND_UP(data->delays[2], 1000));
169 } 174 }
170 } 175 }
171#endif 176#endif
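
Note: the stmmac change above replaces udelay() (a busy-wait, unsuitable for the long reset delays a device tree can specify in microseconds) with msleep(), guarding the zero case because a zero delay should be skipped entirely. DIV_ROUND_UP rounds toward longer, so the reset pulse is never shorter than requested. A quick runnable model of the conversion:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int delays_us[] = { 0, 100, 1000, 10500 };

	for (int i = 0; i < 4; i++) {
		if (!delays_us[i]) {
			printf("%6u us -> skipped\n", delays_us[i]);
			continue;
		}
		printf("%6u us -> msleep(%u)\n", delays_us[i],
		       DIV_ROUND_UP(delays_us[i], 1000));
	}
	return 0;
	/* 100 us rounds up to msleep(1): longer than asked, but the
	 * round-up direction errs on the safe side for a reset pulse */
}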
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 53fe200e0b79..cc106d892e29 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = {
1756#endif 1756#endif
1757}; 1757};
1758 1758
1759static struct vnet *vnet_new(const u64 *local_mac) 1759static struct vnet *vnet_new(const u64 *local_mac,
1760 struct vio_dev *vdev)
1760{ 1761{
1761 struct net_device *dev; 1762 struct net_device *dev;
1762 struct vnet *vp; 1763 struct vnet *vp;
@@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac)
1790 NETIF_F_HW_CSUM | NETIF_F_SG; 1791 NETIF_F_HW_CSUM | NETIF_F_SG;
1791 dev->features = dev->hw_features; 1792 dev->features = dev->hw_features;
1792 1793
1794 SET_NETDEV_DEV(dev, &vdev->dev);
1795
1793 err = register_netdev(dev); 1796 err = register_netdev(dev);
1794 if (err) { 1797 if (err) {
1795 pr_err("Cannot register net device, aborting\n"); 1798 pr_err("Cannot register net device, aborting\n");
@@ -1808,7 +1811,8 @@ err_out_free_dev:
1808 return ERR_PTR(err); 1811 return ERR_PTR(err);
1809} 1812}
1810 1813
1811static struct vnet *vnet_find_or_create(const u64 *local_mac) 1814static struct vnet *vnet_find_or_create(const u64 *local_mac,
1815 struct vio_dev *vdev)
1812{ 1816{
1813 struct vnet *iter, *vp; 1817 struct vnet *iter, *vp;
1814 1818
@@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1821 } 1825 }
1822 } 1826 }
1823 if (!vp) 1827 if (!vp)
1824 vp = vnet_new(local_mac); 1828 vp = vnet_new(local_mac, vdev);
1825 mutex_unlock(&vnet_list_mutex); 1829 mutex_unlock(&vnet_list_mutex);
1826 1830
1827 return vp; 1831 return vp;
@@ -1848,7 +1852,8 @@ static void vnet_cleanup(void)
1848static const char *local_mac_prop = "local-mac-address"; 1852static const char *local_mac_prop = "local-mac-address";
1849 1853
1850static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1854static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1851 u64 port_node) 1855 u64 port_node,
1856 struct vio_dev *vdev)
1852{ 1857{
1853 const u64 *local_mac = NULL; 1858 const u64 *local_mac = NULL;
1854 u64 a; 1859 u64 a;
@@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1869 if (!local_mac) 1874 if (!local_mac)
1870 return ERR_PTR(-ENODEV); 1875 return ERR_PTR(-ENODEV);
1871 1876
1872 return vnet_find_or_create(local_mac); 1877 return vnet_find_or_create(local_mac, vdev);
1873} 1878}
1874 1879
1875static struct ldc_channel_config vnet_ldc_cfg = { 1880static struct ldc_channel_config vnet_ldc_cfg = {
@@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1923 1928
1924 hp = mdesc_grab(); 1929 hp = mdesc_grab();
1925 1930
1926 vp = vnet_find_parent(hp, vdev->mp); 1931 vp = vnet_find_parent(hp, vdev->mp, vdev);
1927 if (IS_ERR(vp)) { 1932 if (IS_ERR(vp)) {
1928 pr_err("Cannot find port parent vnet\n"); 1933 pr_err("Cannot find port parent vnet\n");
1929 err = PTR_ERR(vp); 1934 err = PTR_ERR(vp);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1a5aca55ea9f..9f9832f0dea9 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -291,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
291 interface_list) { 291 interface_list) {
292 struct netcp_intf_modpriv *intf_modpriv; 292 struct netcp_intf_modpriv *intf_modpriv;
293 293
294 /* If interface not registered then register now */
295 if (!netcp_intf->netdev_registered)
296 ret = netcp_register_interface(netcp_intf);
297
298 if (ret)
299 return -ENODEV;
300
301 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), 294 intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
302 GFP_KERNEL); 295 GFP_KERNEL);
303 if (!intf_modpriv) 296 if (!intf_modpriv)
@@ -306,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
306 interface = of_parse_phandle(netcp_intf->node_interface, 299 interface = of_parse_phandle(netcp_intf->node_interface,
307 module->name, 0); 300 module->name, 0);
308 301
302 if (!interface) {
303 devm_kfree(dev, intf_modpriv);
304 continue;
305 }
306
309 intf_modpriv->netcp_priv = netcp_intf; 307 intf_modpriv->netcp_priv = netcp_intf;
310 intf_modpriv->netcp_module = module; 308 intf_modpriv->netcp_module = module;
311 list_add_tail(&intf_modpriv->intf_list, 309 list_add_tail(&intf_modpriv->intf_list,
@@ -323,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
323 continue; 321 continue;
324 } 322 }
325 } 323 }
324
325 /* Now register the interface with netdev */
326 list_for_each_entry(netcp_intf,
327 &netcp_device->interface_head,
328 interface_list) {
329 /* If interface not registered then register now */
330 if (!netcp_intf->netdev_registered) {
331 ret = netcp_register_interface(netcp_intf);
332 if (ret)
333 return -ENODEV;
334 }
335 }
326 return 0; 336 return 0;
327} 337}
328 338
@@ -357,7 +367,6 @@ int netcp_register_module(struct netcp_module *module)
357 if (ret < 0) 367 if (ret < 0)
358 goto fail; 368 goto fail;
359 } 369 }
360
361 mutex_unlock(&netcp_modules_lock); 370 mutex_unlock(&netcp_modules_lock);
362 return 0; 371 return 0;
363 372
@@ -796,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp)
796 netcp->rx_pool = NULL; 805 netcp->rx_pool = NULL;
797} 806}
798 807
799static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) 808static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
800{ 809{
801 struct knav_dma_desc *hwdesc; 810 struct knav_dma_desc *hwdesc;
802 unsigned int buf_len, dma_sz; 811 unsigned int buf_len, dma_sz;
@@ -810,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
810 hwdesc = knav_pool_desc_get(netcp->rx_pool); 819 hwdesc = knav_pool_desc_get(netcp->rx_pool);
811 if (IS_ERR_OR_NULL(hwdesc)) { 820 if (IS_ERR_OR_NULL(hwdesc)) {
812 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); 821 dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
813 return; 822 return -ENOMEM;
814 } 823 }
815 824
816 if (likely(fdq == 0)) { 825 if (likely(fdq == 0)) {
@@ -862,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
862 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, 871 knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
863 &dma_sz); 872 &dma_sz);
864 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); 873 knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
865 return; 874 return 0;
866 875
867fail: 876fail:
868 knav_pool_desc_put(netcp->rx_pool, hwdesc); 877 knav_pool_desc_put(netcp->rx_pool, hwdesc);
878 return -ENOMEM;
869} 879}
870 880
871/* Refill Rx FDQ with descriptors & attached buffers */ 881/* Refill Rx FDQ with descriptors & attached buffers */
872static void netcp_rxpool_refill(struct netcp_intf *netcp) 882static void netcp_rxpool_refill(struct netcp_intf *netcp)
873{ 883{
874 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; 884 u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
875 int i; 885 int i, ret = 0;
876 886
877 /* Calculate the FDQ deficit and refill */ 887 /* Calculate the FDQ deficit and refill */
878 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { 888 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
879 fdq_deficit[i] = netcp->rx_queue_depths[i] - 889 fdq_deficit[i] = netcp->rx_queue_depths[i] -
880 knav_queue_get_count(netcp->rx_fdq[i]); 890 knav_queue_get_count(netcp->rx_fdq[i]);
881 891
882 while (fdq_deficit[i]--) 892 while (fdq_deficit[i]-- && !ret)
883 netcp_allocate_rx_buf(netcp, i); 893 ret = netcp_allocate_rx_buf(netcp, i);
884 } /* end for fdqs */ 894 } /* end for fdqs */
885} 895}
886 896
@@ -893,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
893 903
894 packets = netcp_process_rx_packets(netcp, budget); 904 packets = netcp_process_rx_packets(netcp, budget);
895 905
906 netcp_rxpool_refill(netcp);
896 if (packets < budget) { 907 if (packets < budget) {
897 napi_complete(&netcp->rx_napi); 908 napi_complete(&netcp->rx_napi);
898 knav_queue_enable_notify(netcp->rx_queue); 909 knav_queue_enable_notify(netcp->rx_queue);
899 } 910 }
900 911
901 netcp_rxpool_refill(netcp);
902 return packets; 912 return packets;
903} 913}
904 914
@@ -1384,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1384 continue; 1394 continue;
1385 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", 1395 dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
1386 naddr->addr, naddr->type); 1396 naddr->addr, naddr->type);
1387 mutex_lock(&netcp_modules_lock);
1388 for_each_module(netcp, priv) { 1397 for_each_module(netcp, priv) {
1389 module = priv->netcp_module; 1398 module = priv->netcp_module;
1390 if (!module->del_addr) 1399 if (!module->del_addr)
@@ -1393,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
1393 naddr); 1402 naddr);
1394 WARN_ON(error); 1403 WARN_ON(error);
1395 } 1404 }
1396 mutex_unlock(&netcp_modules_lock);
1397 netcp_addr_del(netcp, naddr); 1405 netcp_addr_del(netcp, naddr);
1398 } 1406 }
1399} 1407}
@@ -1410,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1410 continue; 1418 continue;
1411 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", 1419 dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
1412 naddr->addr, naddr->type); 1420 naddr->addr, naddr->type);
1413 mutex_lock(&netcp_modules_lock); 1421
1414 for_each_module(netcp, priv) { 1422 for_each_module(netcp, priv) {
1415 module = priv->netcp_module; 1423 module = priv->netcp_module;
1416 if (!module->add_addr) 1424 if (!module->add_addr)
@@ -1418,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
1418 error = module->add_addr(priv->module_priv, naddr); 1426 error = module->add_addr(priv->module_priv, naddr);
1419 WARN_ON(error); 1427 WARN_ON(error);
1420 } 1428 }
1421 mutex_unlock(&netcp_modules_lock);
1422 } 1429 }
1423} 1430}
1424 1431
@@ -1432,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
1432 ndev->flags & IFF_ALLMULTI || 1439 ndev->flags & IFF_ALLMULTI ||
1433 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); 1440 netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
1434 1441
1442 spin_lock(&netcp->lock);
1435 /* first clear all marks */ 1443 /* first clear all marks */
1436 netcp_addr_clear_mark(netcp); 1444 netcp_addr_clear_mark(netcp);
1437 1445
@@ -1450,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
1450 /* finally sweep and callout into modules */ 1458 /* finally sweep and callout into modules */
1451 netcp_addr_sweep_del(netcp); 1459 netcp_addr_sweep_del(netcp);
1452 netcp_addr_sweep_add(netcp); 1460 netcp_addr_sweep_add(netcp);
1461 spin_unlock(&netcp->lock);
1453} 1462}
1454 1463
1455static void netcp_free_navigator_resources(struct netcp_intf *netcp) 1464static void netcp_free_navigator_resources(struct netcp_intf *netcp)
@@ -1614,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev)
1614 goto fail; 1623 goto fail;
1615 } 1624 }
1616 1625
1617 mutex_lock(&netcp_modules_lock);
1618 for_each_module(netcp, intf_modpriv) { 1626 for_each_module(netcp, intf_modpriv) {
1619 module = intf_modpriv->netcp_module; 1627 module = intf_modpriv->netcp_module;
1620 if (module->open) { 1628 if (module->open) {
@@ -1625,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev)
1625 } 1633 }
1626 } 1634 }
1627 } 1635 }
1628 mutex_unlock(&netcp_modules_lock);
1629 1636
1630 napi_enable(&netcp->rx_napi); 1637 napi_enable(&netcp->rx_napi);
1631 napi_enable(&netcp->tx_napi); 1638 napi_enable(&netcp->tx_napi);
@@ -1642,7 +1649,6 @@ fail_open:
1642 if (module->close) 1649 if (module->close)
1643 module->close(intf_modpriv->module_priv, ndev); 1650 module->close(intf_modpriv->module_priv, ndev);
1644 } 1651 }
1645 mutex_unlock(&netcp_modules_lock);
1646 1652
1647fail: 1653fail:
1648 netcp_free_navigator_resources(netcp); 1654 netcp_free_navigator_resources(netcp);
@@ -1666,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
1666 napi_disable(&netcp->rx_napi); 1672 napi_disable(&netcp->rx_napi);
1667 napi_disable(&netcp->tx_napi); 1673 napi_disable(&netcp->tx_napi);
1668 1674
1669 mutex_lock(&netcp_modules_lock);
1670 for_each_module(netcp, intf_modpriv) { 1675 for_each_module(netcp, intf_modpriv) {
1671 module = intf_modpriv->netcp_module; 1676 module = intf_modpriv->netcp_module;
1672 if (module->close) { 1677 if (module->close) {
@@ -1675,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
1675 dev_err(netcp->ndev_dev, "Close failed\n"); 1680 dev_err(netcp->ndev_dev, "Close failed\n");
1676 } 1681 }
1677 } 1682 }
1678 mutex_unlock(&netcp_modules_lock);
1679 1683
1680 /* Recycle Rx descriptors from completion queue */ 1684 /* Recycle Rx descriptors from completion queue */
1681 netcp_empty_rx_queue(netcp); 1685 netcp_empty_rx_queue(netcp);
@@ -1703,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
1703 if (!netif_running(ndev)) 1707 if (!netif_running(ndev))
1704 return -EINVAL; 1708 return -EINVAL;
1705 1709
1706 mutex_lock(&netcp_modules_lock);
1707 for_each_module(netcp, intf_modpriv) { 1710 for_each_module(netcp, intf_modpriv) {
1708 module = intf_modpriv->netcp_module; 1711 module = intf_modpriv->netcp_module;
1709 if (!module->ioctl) 1712 if (!module->ioctl)
@@ -1719,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
1719 } 1722 }
1720 1723
1721out: 1724out:
1722 mutex_unlock(&netcp_modules_lock);
1723 return (ret == 0) ? 0 : err; 1725 return (ret == 0) ? 0 : err;
1724} 1726}
1725 1727
@@ -1754,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1754 struct netcp_intf *netcp = netdev_priv(ndev); 1756 struct netcp_intf *netcp = netdev_priv(ndev);
1755 struct netcp_intf_modpriv *intf_modpriv; 1757 struct netcp_intf_modpriv *intf_modpriv;
1756 struct netcp_module *module; 1758 struct netcp_module *module;
1759 unsigned long flags;
1757 int err = 0; 1760 int err = 0;
1758 1761
1759 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); 1762 dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1760 1763
1761 mutex_lock(&netcp_modules_lock); 1764 spin_lock_irqsave(&netcp->lock, flags);
1762 for_each_module(netcp, intf_modpriv) { 1765 for_each_module(netcp, intf_modpriv) {
1763 module = intf_modpriv->netcp_module; 1766 module = intf_modpriv->netcp_module;
1764 if ((module->add_vid) && (vid != 0)) { 1767 if ((module->add_vid) && (vid != 0)) {
@@ -1770,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1770 } 1773 }
1771 } 1774 }
1772 } 1775 }
1773 mutex_unlock(&netcp_modules_lock); 1776 spin_unlock_irqrestore(&netcp->lock, flags);
1777
1774 return err; 1778 return err;
1775} 1779}
1776 1780
@@ -1779,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1779 struct netcp_intf *netcp = netdev_priv(ndev); 1783 struct netcp_intf *netcp = netdev_priv(ndev);
1780 struct netcp_intf_modpriv *intf_modpriv; 1784 struct netcp_intf_modpriv *intf_modpriv;
1781 struct netcp_module *module; 1785 struct netcp_module *module;
1786 unsigned long flags;
1782 int err = 0; 1787 int err = 0;
1783 1788
1784 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); 1789 dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1785 1790
1786 mutex_lock(&netcp_modules_lock); 1791 spin_lock_irqsave(&netcp->lock, flags);
1787 for_each_module(netcp, intf_modpriv) { 1792 for_each_module(netcp, intf_modpriv) {
1788 module = intf_modpriv->netcp_module; 1793 module = intf_modpriv->netcp_module;
1789 if (module->del_vid) { 1794 if (module->del_vid) {
@@ -1795,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1795 } 1800 }
1796 } 1801 }
1797 } 1802 }
1798 mutex_unlock(&netcp_modules_lock); 1803 spin_unlock_irqrestore(&netcp->lock, flags);
1799 return err; 1804 return err;
1800} 1805}
1801 1806
@@ -2040,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev)
2040 struct device_node *child, *interfaces; 2045 struct device_node *child, *interfaces;
2041 struct netcp_device *netcp_device; 2046 struct netcp_device *netcp_device;
2042 struct device *dev = &pdev->dev; 2047 struct device *dev = &pdev->dev;
2043 struct netcp_module *module;
2044 int ret; 2048 int ret;
2045 2049
2046 if (!node) { 2050 if (!node) {
@@ -2087,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev)
2087 /* Add the device instance to the list */ 2091 /* Add the device instance to the list */
2088 list_add_tail(&netcp_device->device_list, &netcp_devices); 2092 list_add_tail(&netcp_device->device_list, &netcp_devices);
2089 2093
2090 /* Probe & attach any modules already registered */
2091 mutex_lock(&netcp_modules_lock);
2092 for_each_netcp_module(module) {
2093 ret = netcp_module_probe(netcp_device, module);
2094 if (ret < 0)
2095 dev_err(dev, "module(%s) probe failed\n", module->name);
2096 }
2097 mutex_unlock(&netcp_modules_lock);
2098 return 0; 2094 return 0;
2099 2095
2100probe_quit_interface: 2096probe_quit_interface:
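
Note: most of the netcp_core diff swaps the netcp_modules_lock mutex for the per-interface spinlock on paths that can run in atomic context (ndo_set_rx_mode and the vid add/kill callbacks), and defers both interface registration and module probing so they happen in a sane order. The smaller behavioural change is netcp_allocate_rx_buf() now returning an error, so the refill loop stops at the first pool exhaustion instead of iterating the whole deficit against an empty pool. A runnable model of that loop shape:

#include <stdio.h>

#define POOL 4

static int pool_left = POOL;

static int allocate_rx_buf(void)
{
	if (!pool_left)
		return -1;          /* stands in for -ENOMEM */
	pool_left--;
	return 0;
}

static int refill(int deficit)
{
	int pushed = 0, ret = 0;

	while (deficit-- && !ret)   /* same shape as the patched loop */
		if (!(ret = allocate_rx_buf()))
			pushed++;
	return pushed;
}

int main(void)
{
	printf("pushed %d of 10 requested\n", refill(10)); /* pushed 4 */
	return 0;
}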
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 6f16d6aaf7b7..6bff8d82ceab 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -77,6 +77,7 @@
77#define GBENU_ALE_OFFSET 0x1e000 77#define GBENU_ALE_OFFSET 0x1e000
78#define GBENU_HOST_PORT_NUM 0 78#define GBENU_HOST_PORT_NUM 0
79#define GBENU_NUM_ALE_ENTRIES 1024 79#define GBENU_NUM_ALE_ENTRIES 1024
80#define GBENU_SGMII_MODULE_SIZE 0x100
80 81
81/* 10G Ethernet SS defines */ 82/* 10G Ethernet SS defines */
82#define XGBE_MODULE_NAME "netcp-xgbe" 83#define XGBE_MODULE_NAME "netcp-xgbe"
@@ -149,8 +150,8 @@
149#define XGBE_STATS2_MODULE 2 150#define XGBE_STATS2_MODULE 2
150 151
151/* s: 0-based slave_port */ 152/* s: 0-based slave_port */
152#define SGMII_BASE(s) \ 153#define SGMII_BASE(d, s) \
153 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) 154 (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
154 155
155#define GBE_TX_QUEUE 648 156#define GBE_TX_QUEUE 648
156#define GBE_TXHOOK_ORDER 0 157#define GBE_TXHOOK_ORDER 0
@@ -1997,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1997 return; 1998 return;
1998 1999
1999 if (!SLAVE_LINK_IS_XGMII(slave)) { 2000 if (!SLAVE_LINK_IS_XGMII(slave)) {
2000 if (gbe_dev->ss_version == GBE_SS_VERSION_14) 2001 sgmii_link_state =
2001 sgmii_link_state = 2002 netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2002 netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
2003 else
2004 sgmii_link_state =
2005 netcp_sgmii_get_port_link(
2006 gbe_dev->sgmii_port_regs, sp);
2007 } 2003 }
2008 2004
2009 phy_link_state = gbe_phy_link_status(slave); 2005 phy_link_state = gbe_phy_link_status(slave);
@@ -2100,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2100static void gbe_sgmii_rtreset(struct gbe_priv *priv, 2096static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2101 struct gbe_slave *slave, bool set) 2097 struct gbe_slave *slave, bool set)
2102{ 2098{
2103 void __iomem *sgmii_port_regs;
2104
2105 if (SLAVE_LINK_IS_XGMII(slave)) 2099 if (SLAVE_LINK_IS_XGMII(slave))
2106 return; 2100 return;
2107 2101
2108 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) 2102 netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2109 sgmii_port_regs = priv->sgmii_port34_regs; 2103 slave->slave_num, set);
2110 else
2111 sgmii_port_regs = priv->sgmii_port_regs;
2112
2113 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
2114} 2104}
2115 2105
2116static void gbe_slave_stop(struct gbe_intf *intf) 2106static void gbe_slave_stop(struct gbe_intf *intf)
@@ -2136,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf)
2136 2126
2137static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) 2127static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2138{ 2128{
2139 void __iomem *sgmii_port_regs; 2129 if (SLAVE_LINK_IS_XGMII(slave))
2140 2130 return;
2141 sgmii_port_regs = priv->sgmii_port_regs;
2142 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
2143 sgmii_port_regs = priv->sgmii_port34_regs;
2144 2131
2145 if (!SLAVE_LINK_IS_XGMII(slave)) { 2132 netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2146 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); 2133 netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2147 netcp_sgmii_config(sgmii_port_regs, slave->slave_num, 2134 slave->link_interface);
2148 slave->link_interface);
2149 }
2150} 2135}
2151 2136
2152static int gbe_slave_open(struct gbe_intf *gbe_intf) 2137static int gbe_slave_open(struct gbe_intf *gbe_intf)
@@ -2997,6 +2982,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2997 gbe_dev->switch_regs = regs; 2982 gbe_dev->switch_regs = regs;
2998 2983
2999 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; 2984 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2985
2986 /* Although sgmii modules are mem mapped to one contiguous
2987 * region on GBENU devices, setting sgmii_port34_regs allows
2988 * consistent code when accessing sgmii api
2989 */
2990 gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
2991 (2 * GBENU_SGMII_MODULE_SIZE);
2992
3000 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; 2993 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3001 2994
3002 for (i = 0; i < (gbe_dev->max_num_ports); i++) 2995 for (i = 0; i < (gbe_dev->max_num_ports); i++)
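
Note: the netcp_ethss change parameterizes SGMII_BASE() on the device and, per the comment in the last hunk, points sgmii_port34_regs two module-sizes past the base on GBENU even though the region is contiguous there, so one macro serves both SS versions and the version checks disappear from three call sites. A small runnable model of the base selection (the struct is a stand-in; the offsets follow the patch):

#include <stdint.h>
#include <stdio.h>

#define GBENU_SGMII_MODULE_SIZE 0x100

struct gbe_priv {
	uintptr_t sgmii_port_regs;
	uintptr_t sgmii_port34_regs;
};

#define SGMII_BASE(d, s) \
	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)

int main(void)
{
	struct gbe_priv gbenu = { .sgmii_port_regs = 0x1000 };

	gbenu.sgmii_port34_regs =
		gbenu.sgmii_port_regs + 2 * GBENU_SGMII_MODULE_SIZE;

	for (int s = 0; s < 4; s++)
		printf("slave %d -> base %#lx\n", s,
		       (unsigned long)SGMII_BASE(&gbenu, s));
	return 0;
}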
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 2f1264b882b9..d3d094742a7e 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_VIA
17 17
18config VIA_RHINE 18config VIA_RHINE
19 tristate "VIA Rhine support" 19 tristate "VIA Rhine support"
20 depends on (PCI || OF_IRQ) 20 depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
21 depends on HAS_DMA 21 depends on HAS_DMA
22 select CRC32 22 select CRC32
23 select MII 23 select MII
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 6008eee01a33..cf468c87ce57 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
828 if (!phydev) 828 if (!phydev)
829 dev_info(dev, 829 dev_info(dev,
830 "MDIO of the phy is not registered yet\n"); 830 "MDIO of the phy is not registered yet\n");
831 else
832 put_device(&phydev->dev);
831 return 0; 833 return 0;
832 } 834 }
833 835
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b5f4a78da828..2d3848c9dc35 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -1011,11 +1011,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
1011 set_bit(epidx, &irq_bit); 1011 set_bit(epidx, &irq_bit);
1012 break; 1012 break;
1013 } 1013 }
1014 }
1015
1016 hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
1017 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1018 1014
1015 hw->ep_shm_info[epidx].es_status =
1016 info[epidx].es_status;
1017 hw->ep_shm_info[epidx].zone = info[epidx].zone;
1018 }
1019 break; 1019 break;
1020 } 1020 }
1021 1021
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index da3259ce7c8d..8f5c02eed47d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -126,6 +126,8 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
126 __be32 addr; 126 __be32 addr;
127 int err; 127 int err;
128 128
129 iph = ip_hdr(skb); /* outer IP header... */
130
129 if (gs->collect_md) { 131 if (gs->collect_md) {
130 static u8 zero_vni[3]; 132 static u8 zero_vni[3];
131 133
@@ -133,7 +135,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
133 addr = 0; 135 addr = 0;
134 } else { 136 } else {
135 vni = gnvh->vni; 137 vni = gnvh->vni;
136 iph = ip_hdr(skb); /* Still outer IP header... */
137 addr = iph->saddr; 138 addr = iph->saddr;
138 } 139 }
139 140
@@ -178,7 +179,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
178 179
179 skb_reset_network_header(skb); 180 skb_reset_network_header(skb);
180 181
181 iph = ip_hdr(skb); /* Now inner IP header... */
182 err = IP_ECN_decapsulate(iph, skb); 182 err = IP_ECN_decapsulate(iph, skb);
183 183
184 if (unlikely(err)) { 184 if (unlikely(err)) {
@@ -626,6 +626,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
626 struct geneve_sock *gs = geneve->sock; 626 struct geneve_sock *gs = geneve->sock;
627 struct ip_tunnel_info *info = NULL; 627 struct ip_tunnel_info *info = NULL;
628 struct rtable *rt = NULL; 628 struct rtable *rt = NULL;
629 const struct iphdr *iip; /* interior IP header */
629 struct flowi4 fl4; 630 struct flowi4 fl4;
630 __u8 tos, ttl; 631 __u8 tos, ttl;
631 __be16 sport; 632 __be16 sport;
@@ -653,6 +654,8 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
653 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 654 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
654 skb_reset_mac_header(skb); 655 skb_reset_mac_header(skb);
655 656
657 iip = ip_hdr(skb);
658
656 if (info) { 659 if (info) {
657 const struct ip_tunnel_key *key = &info->key; 660 const struct ip_tunnel_key *key = &info->key;
658 u8 *opts = NULL; 661 u8 *opts = NULL;
@@ -668,19 +671,16 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
668 if (unlikely(err)) 671 if (unlikely(err))
669 goto err; 672 goto err;
670 673
671 tos = key->tos; 674 tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
672 ttl = key->ttl; 675 ttl = key->ttl;
673 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 676 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
674 } else { 677 } else {
675 const struct iphdr *iip; /* interior IP header */
676
677 udp_csum = false; 678 udp_csum = false;
678 err = geneve_build_skb(rt, skb, 0, geneve->vni, 679 err = geneve_build_skb(rt, skb, 0, geneve->vni,
679 0, NULL, udp_csum); 680 0, NULL, udp_csum);
680 if (unlikely(err)) 681 if (unlikely(err))
681 goto err; 682 goto err;
682 683
683 iip = ip_hdr(skb);
684 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); 684 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
685 ttl = geneve->ttl; 685 ttl = geneve->ttl;
686 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) 686 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
@@ -748,12 +748,8 @@ static void geneve_setup(struct net_device *dev)
748 dev->features |= NETIF_F_RXCSUM; 748 dev->features |= NETIF_F_RXCSUM;
749 dev->features |= NETIF_F_GSO_SOFTWARE; 749 dev->features |= NETIF_F_GSO_SOFTWARE;
750 750
751 dev->vlan_features = dev->features;
752 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
753
754 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 751 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
755 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 752 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
756 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
757 753
758 netif_keep_dst(dev); 754 netif_keep_dst(dev);
759 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 755 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
@@ -819,7 +815,7 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
819 815
820static int geneve_configure(struct net *net, struct net_device *dev, 816static int geneve_configure(struct net *net, struct net_device *dev,
821 __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos, 817 __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
822 __u16 dst_port, bool metadata) 818 __be16 dst_port, bool metadata)
823{ 819{
824 struct geneve_net *gn = net_generic(net, geneve_net_id); 820 struct geneve_net *gn = net_generic(net, geneve_net_id);
825 struct geneve_dev *t, *geneve = netdev_priv(dev); 821 struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -844,10 +840,10 @@ static int geneve_configure(struct net *net, struct net_device *dev,
844 840
845 geneve->ttl = ttl; 841 geneve->ttl = ttl;
846 geneve->tos = tos; 842 geneve->tos = tos;
847 geneve->dst_port = htons(dst_port); 843 geneve->dst_port = dst_port;
848 geneve->collect_md = metadata; 844 geneve->collect_md = metadata;
849 845
850 t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni, 846 t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
851 &tun_on_same_port, &tun_collect_md); 847 &tun_on_same_port, &tun_collect_md);
852 if (t) 848 if (t)
853 return -EBUSY; 849 return -EBUSY;
@@ -871,7 +867,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
871static int geneve_newlink(struct net *net, struct net_device *dev, 867static int geneve_newlink(struct net *net, struct net_device *dev,
872 struct nlattr *tb[], struct nlattr *data[]) 868 struct nlattr *tb[], struct nlattr *data[])
873{ 869{
874 __u16 dst_port = GENEVE_UDP_PORT; 870 __be16 dst_port = htons(GENEVE_UDP_PORT);
875 __u8 ttl = 0, tos = 0; 871 __u8 ttl = 0, tos = 0;
876 bool metadata = false; 872 bool metadata = false;
877 __be32 rem_addr; 873 __be32 rem_addr;
@@ -890,7 +886,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
890 tos = nla_get_u8(data[IFLA_GENEVE_TOS]); 886 tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
891 887
892 if (data[IFLA_GENEVE_PORT]) 888 if (data[IFLA_GENEVE_PORT])
893 dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]); 889 dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
894 890
895 if (data[IFLA_GENEVE_COLLECT_METADATA]) 891 if (data[IFLA_GENEVE_COLLECT_METADATA])
896 metadata = true; 892 metadata = true;
@@ -913,7 +909,7 @@ static size_t geneve_get_size(const struct net_device *dev)
913 nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */ 909 nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
914 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ 910 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
915 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ 911 nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
916 nla_total_size(sizeof(__u16)) + /* IFLA_GENEVE_PORT */ 912 nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
917 nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ 913 nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
918 0; 914 0;
919} 915}
@@ -935,7 +931,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
935 nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) 931 nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
936 goto nla_put_failure; 932 goto nla_put_failure;
937 933
938 if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port))) 934 if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
939 goto nla_put_failure; 935 goto nla_put_failure;
940 936
941 if (geneve->collect_md) { 937 if (geneve->collect_md) {
@@ -975,7 +971,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
975 if (IS_ERR(dev)) 971 if (IS_ERR(dev))
976 return dev; 972 return dev;
977 973
978 err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true); 974 err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
979 if (err) { 975 if (err) {
980 free_netdev(dev); 976 free_netdev(dev);
981 return ERR_PTR(err); 977 return ERR_PTR(err);
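
Note: besides hoisting the inner-IP-header lookup so ECN encapsulation also works in the collect_md path, the geneve diff switches dst_port from __u16 to __be16 end to end, so the value stays in network byte order from netlink (nla_get_be16) through storage to nla_put_be16, with exactly one conversion at each true boundary. The annotated type also lets sparse flag a missing or doubled htons/ntohs at build time. A standalone illustration, with the kernel annotation reduced to a typedef:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t be16;            /* kernel: __be16, checked by sparse */

struct tunnel { be16 dst_port; }; /* stored in wire order */

static void configure(struct tunnel *t, be16 port) { t->dst_port = port; }

int main(void)
{
	struct tunnel t;

	configure(&t, htons(6081));   /* GENEVE_UDP_PORT, converted once */
	printf("wire 0x%04x, host %u\n",
	       (unsigned)t.dst_port, ntohs(t.dst_port));
	return 0;
}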
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58ae11a14bb6..64bb44d5d867 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1031,7 +1031,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
1031static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed) 1031static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1032{ 1032{
1033 struct ali_ircc_cb *self = priv; 1033 struct ali_ircc_cb *self = priv;
1034 unsigned long flags;
1035 int iobase; 1034 int iobase;
1036 int fcr; /* FIFO control reg */ 1035 int fcr; /* FIFO control reg */
1037 int lcr; /* Line control reg */ 1036 int lcr; /* Line control reg */
@@ -1061,8 +1060,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1061 /* Update accounting for new speed */ 1060 /* Update accounting for new speed */
1062 self->io.speed = speed; 1061 self->io.speed = speed;
1063 1062
1064 spin_lock_irqsave(&self->lock, flags);
1065
1066 divisor = 115200/speed; 1063 divisor = 115200/speed;
1067 1064
1068 fcr = UART_FCR_ENABLE_FIFO; 1065 fcr = UART_FCR_ENABLE_FIFO;
@@ -1089,9 +1086,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1089 /* without this, the connection will be broken after come back from FIR speed, 1086 /* without this, the connection will be broken after come back from FIR speed,
1090 but with this, the SIR connection is harder to established */ 1087 but with this, the SIR connection is harder to established */
1091 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR); 1088 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
1092
1093 spin_unlock_irqrestore(&self->lock, flags);
1094
1095} 1089}
1096 1090
1097static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed) 1091static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index edd77342773a..248478c6f6e4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1111,10 +1111,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1111 return 0; 1111 return 0;
1112 1112
1113 case TUNSETSNDBUF: 1113 case TUNSETSNDBUF:
1114 if (get_user(u, up)) 1114 if (get_user(s, sp))
1115 return -EFAULT; 1115 return -EFAULT;
1116 1116
1117 q->sk.sk_sndbuf = u; 1117 q->sk.sk_sndbuf = s;
1118 return 0; 1118 return 0;
1119 1119
1120 case TUNGETVNETHDRSZ: 1120 case TUNGETVNETHDRSZ:
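
Note: the declarations of u/up and s/sp are outside this macvtap hunk; judging by the switch to a plain int read, the old code pulled the TUNSETSNDBUF argument through a narrower unsigned variable, which truncated requested buffer sizes above 64k before they reached sk_sndbuf. A tiny runnable model of the truncation (get_user() reduced to an assignment):

#include <stdio.h>

int main(void)
{
	int requested = 131072;          /* 128 KiB sndbuf via ioctl */
	unsigned short u = requested;    /* old path: wraps to 0 */
	int s = requested;               /* new path: full value kept */

	printf("old sk_sndbuf = %u, new sk_sndbuf = %d\n",
	       (unsigned)u, s);
	return 0;
}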
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fb1299c6326e..e23bf5b90e17 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -220,7 +220,7 @@ int fixed_phy_update_state(struct phy_device *phydev,
220 struct fixed_mdio_bus *fmb = &platform_fmb; 220 struct fixed_mdio_bus *fmb = &platform_fmb;
221 struct fixed_phy *fp; 221 struct fixed_phy *fp;
222 222
223 if (!phydev || !phydev->bus) 223 if (!phydev || phydev->bus != fmb->mii_bus)
224 return -EINVAL; 224 return -EINVAL;
225 225
226 list_for_each_entry(fp, &fmb->phys, node) { 226 list_for_each_entry(fp, &fmb->phys, node) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e6897b6a8a53..5de8d5827536 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -785,6 +785,7 @@ static int marvell_read_status(struct phy_device *phydev)
785 int adv; 785 int adv;
786 int err; 786 int err;
787 int lpa; 787 int lpa;
788 int lpagb;
788 int status = 0; 789 int status = 0;
789 790
790 /* Update the link, but return if there 791 /* Update the link, but return if there
@@ -802,10 +803,17 @@ static int marvell_read_status(struct phy_device *phydev)
802 if (lpa < 0) 803 if (lpa < 0)
803 return lpa; 804 return lpa;
804 805
806 lpagb = phy_read(phydev, MII_STAT1000);
807 if (lpagb < 0)
808 return lpagb;
809
805 adv = phy_read(phydev, MII_ADVERTISE); 810 adv = phy_read(phydev, MII_ADVERTISE);
806 if (adv < 0) 811 if (adv < 0)
807 return adv; 812 return adv;
808 813
814 phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
815 mii_lpa_to_ethtool_lpa_t(lpa);
816
809 lpa &= adv; 817 lpa &= adv;
810 818
811 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) 819 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
@@ -853,6 +861,7 @@ static int marvell_read_status(struct phy_device *phydev)
853 phydev->speed = SPEED_10; 861 phydev->speed = SPEED_10;
854 862
855 phydev->pause = phydev->asym_pause = 0; 863 phydev->pause = phydev->asym_pause = 0;
864 phydev->lp_advertising = 0;
856 } 865 }
857 866
858 return 0; 867 return 0;
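
Note: the marvell change fills in phydev->lp_advertising, which ethtool reports as the link partner's advertised modes: the 10/100 abilities come from MII_LPA, the gigabit ones from MII_STAT1000, and the two are OR-ed together (and cleared again when the link is down). A rough user-space model of that merge; the register bit values follow the 802.3 layout as in linux/mii.h, while the output mask bits here are only stand-ins for the ethtool mode bits:

#include <stdio.h>

#define LPA_100FULL   0x0100   /* in MII_LPA */
#define LPA_LPACK     0x4000   /* in MII_LPA */
#define LPA_1000FULL  0x0800   /* in MII_STAT1000 */

int main(void)
{
	unsigned lpa = LPA_LPACK | LPA_100FULL;
	unsigned stat1000 = LPA_1000FULL;
	unsigned long lp_advertising = 0;

	if (lpa & LPA_100FULL)
		lp_advertising |= 1 << 0;   /* ~ 100baseT/Full */
	if (stat1000 & LPA_1000FULL)
		lp_advertising |= 1 << 1;   /* ~ 1000baseT/Full */

	printf("lp_advertising mask: %#lx\n", lp_advertising);
	return 0;
}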
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 6a52a7f0fa0d..4bde5e728fe0 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -244,6 +244,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
244 { .compatible = "brcm,unimac-mdio", }, 244 { .compatible = "brcm,unimac-mdio", },
245 { /* sentinel */ }, 245 { /* sentinel */ },
246}; 246};
247MODULE_DEVICE_TABLE(of, unimac_mdio_ids);
247 248
248static struct platform_driver unimac_mdio_driver = { 249static struct platform_driver unimac_mdio_driver = {
249 .driver = { 250 .driver = {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7dc21e56a7aa..3bc9f03349f3 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -261,6 +261,7 @@ static const struct of_device_id mdio_gpio_of_match[] = {
261 { .compatible = "virtual,mdio-gpio", }, 261 { .compatible = "virtual,mdio-gpio", },
262 { /* sentinel */ } 262 { /* sentinel */ }
263}; 263};
264MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);
264 265
265static struct platform_driver mdio_gpio_driver = { 266static struct platform_driver mdio_gpio_driver = {
266 .probe = mdio_gpio_probe, 267 .probe = mdio_gpio_probe,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 4d4d25efc1e1..280c7c311f72 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -113,18 +113,18 @@ int mdio_mux_init(struct device *dev,
113 if (!parent_bus_node) 113 if (!parent_bus_node)
114 return -ENODEV; 114 return -ENODEV;
115 115
116 parent_bus = of_mdio_find_bus(parent_bus_node);
117 if (parent_bus == NULL) {
118 ret_val = -EPROBE_DEFER;
119 goto err_parent_bus;
120 }
121
122 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); 116 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
123 if (pb == NULL) { 117 if (pb == NULL) {
124 ret_val = -ENOMEM; 118 ret_val = -ENOMEM;
125 goto err_parent_bus; 119 goto err_parent_bus;
126 } 120 }
127 121
122 parent_bus = of_mdio_find_bus(parent_bus_node);
123 if (parent_bus == NULL) {
124 ret_val = -EPROBE_DEFER;
125 goto err_parent_bus;
126 }
127
128 pb->switch_data = data; 128 pb->switch_data = data;
129 pb->switch_fn = switch_fn; 129 pb->switch_fn = switch_fn;
130 pb->current_child = -1; 130 pb->current_child = -1;
@@ -173,6 +173,10 @@ int mdio_mux_init(struct device *dev,
173 dev_info(dev, "Version " DRV_VERSION "\n"); 173 dev_info(dev, "Version " DRV_VERSION "\n");
174 return 0; 174 return 0;
175 } 175 }
176
177 /* balance the reference of_mdio_find_bus() took */
178 put_device(&pb->mii_bus->dev);
179
176err_parent_bus: 180err_parent_bus:
177 of_node_put(parent_bus_node); 181 of_node_put(parent_bus_node);
178 return ret_val; 182 return ret_val;
@@ -189,6 +193,9 @@ void mdio_mux_uninit(void *mux_handle)
189 mdiobus_free(cb->mii_bus); 193 mdiobus_free(cb->mii_bus);
190 cb = cb->next; 194 cb = cb->next;
191 } 195 }
196
197 /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */
198 put_device(&pb->mii_bus->dev);
192} 199}
193EXPORT_SYMBOL_GPL(mdio_mux_uninit); 200EXPORT_SYMBOL_GPL(mdio_mux_uninit);
194 201
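
Note: the mdio-mux fix is about reference discipline. of_mdio_find_bus() now documents that it takes a device reference, so the mux must drop that reference on its failure path and, on success, hold it until mdio_mux_uninit(); reordering the allocation before the lookup also keeps the error path from leaking. A runnable user-space model of the invariant, with a plain counter standing in for the kref inside struct device:

#include <assert.h>
#include <stdio.h>

struct bus { int refs; };
static struct bus parent = { .refs = 1 };

static struct bus *find_bus(void) { parent.refs++; return &parent; }
static void put_bus(struct bus *b)  { b->refs--; }

static struct bus *held;

static int mux_init(int children_ok)
{
	struct bus *b = find_bus();      /* lookup takes a reference */

	if (!children_ok) {
		put_bus(b);              /* error path: drop it here */
		return -1;
	}
	held = b;                        /* success: hold until uninit */
	return 0;
}

static void mux_uninit(void)
{
	put_bus(held);                   /* the put the patch adds */
}

int main(void)
{
	assert(mux_init(0) < 0 && parent.refs == 1);
	assert(mux_init(1) == 0 && parent.refs == 2);
	mux_uninit();
	assert(parent.refs == 1);
	puts("lookup reference balanced on every path");
	return 0;
}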
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 02a4615b65f8..12f44c53cc8e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -167,7 +167,9 @@ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
167 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. 167 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
168 * @mdio_bus_np: Pointer to the mii_bus. 168 * @mdio_bus_np: Pointer to the mii_bus.
169 * 169 *
170 * Returns a pointer to the mii_bus, or NULL if none found. 170 * Returns a reference to the mii_bus, or NULL if none found. The
171 * embedded struct device will have its reference count incremented,
172 * and this must be put once the bus is finished with.
171 * 173 *
172 * Because the association of a device_node and mii_bus is made via 174 * Because the association of a device_node and mii_bus is made via
173 * of_mdiobus_register(), the mii_bus cannot be found before it is 175 * of_mdiobus_register(), the mii_bus cannot be found before it is
@@ -234,15 +236,18 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
234#endif 236#endif
235 237
236/** 238/**
237 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus 239 * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
238 * @bus: target mii_bus 240 * @bus: target mii_bus
241 * @owner: module containing bus accessor functions
239 * 242 *
240 * Description: Called by a bus driver to bring up all the PHYs 243 * Description: Called by a bus driver to bring up all the PHYs
241 * on a given bus, and attach them to the bus. 244 * on a given bus, and attach them to the bus. Drivers should use
245 * mdiobus_register() rather than __mdiobus_register() unless they
246 * need to pass a specific owner module.
242 * 247 *
243 * Returns 0 on success or < 0 on error. 248 * Returns 0 on success or < 0 on error.
244 */ 249 */
245int mdiobus_register(struct mii_bus *bus) 250int __mdiobus_register(struct mii_bus *bus, struct module *owner)
246{ 251{
247 int i, err; 252 int i, err;
248 253
@@ -253,6 +258,7 @@ int mdiobus_register(struct mii_bus *bus)
253 BUG_ON(bus->state != MDIOBUS_ALLOCATED && 258 BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
254 bus->state != MDIOBUS_UNREGISTERED); 259 bus->state != MDIOBUS_UNREGISTERED);
255 260
261 bus->owner = owner;
256 bus->dev.parent = bus->parent; 262 bus->dev.parent = bus->parent;
257 bus->dev.class = &mdio_bus_class; 263 bus->dev.class = &mdio_bus_class;
258 bus->dev.groups = NULL; 264 bus->dev.groups = NULL;
@@ -288,13 +294,16 @@ int mdiobus_register(struct mii_bus *bus)
288 294
289error: 295error:
290 while (--i >= 0) { 296 while (--i >= 0) {
291 if (bus->phy_map[i]) 297 struct phy_device *phydev = bus->phy_map[i];
292 device_unregister(&bus->phy_map[i]->dev); 298 if (phydev) {
299 phy_device_remove(phydev);
300 phy_device_free(phydev);
301 }
293 } 302 }
294 device_del(&bus->dev); 303 device_del(&bus->dev);
295 return err; 304 return err;
296} 305}
297EXPORT_SYMBOL(mdiobus_register); 306EXPORT_SYMBOL(__mdiobus_register);
298 307
299void mdiobus_unregister(struct mii_bus *bus) 308void mdiobus_unregister(struct mii_bus *bus)
300{ 309{
@@ -304,9 +313,11 @@ void mdiobus_unregister(struct mii_bus *bus)
304 bus->state = MDIOBUS_UNREGISTERED; 313 bus->state = MDIOBUS_UNREGISTERED;
305 314
306 for (i = 0; i < PHY_MAX_ADDR; i++) { 315 for (i = 0; i < PHY_MAX_ADDR; i++) {
307 if (bus->phy_map[i]) 316 struct phy_device *phydev = bus->phy_map[i];
308 device_unregister(&bus->phy_map[i]->dev); 317 if (phydev) {
309 bus->phy_map[i] = NULL; 318 phy_device_remove(phydev);
319 phy_device_free(phydev);
320 }
310 } 321 }
311 device_del(&bus->dev); 322 device_del(&bus->dev);
312} 323}
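
Note: renaming mdiobus_register() to __mdiobus_register(bus, owner) only pays off because of a wrapper in the header (outside this diff), which by the usual kernel convention would look like the sketch below. THIS_MODULE expands in the calling bus driver, so each bus records the module that implements its accessors, and phy_attach_direct() in the next file can pin it. This is an assumption about the header side, not a quote of it:

/* header-side sketch (assumed shape, not shown in this diff) */
struct mii_bus;
struct module;

int __mdiobus_register(struct mii_bus *bus, struct module *owner);

/* expands in the *caller*, so bus->owner is the bus driver: */
#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)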
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0f211127274..f761288abe66 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -384,6 +384,24 @@ int phy_device_register(struct phy_device *phydev)
384EXPORT_SYMBOL(phy_device_register); 384EXPORT_SYMBOL(phy_device_register);
385 385
386/** 386/**
387 * phy_device_remove - Remove a previously registered phy device from the MDIO bus
388 * @phydev: phy_device structure to remove
389 *
390 * This doesn't free the phy_device itself, it merely reverses the effects
391 * of phy_device_register(). Use phy_device_free() to free the device
392 * after calling this function.
393 */
394void phy_device_remove(struct phy_device *phydev)
395{
396 struct mii_bus *bus = phydev->bus;
397 int addr = phydev->addr;
398
399 device_del(&phydev->dev);
400 bus->phy_map[addr] = NULL;
401}
402EXPORT_SYMBOL(phy_device_remove);
403
404/**
387 * phy_find_first - finds the first PHY device on the bus 405 * phy_find_first - finds the first PHY device on the bus
388 * @bus: the target MII bus 406 * @bus: the target MII bus
389 */ 407 */
@@ -578,14 +596,22 @@ EXPORT_SYMBOL(phy_init_hw);
578 * generic driver is used. The phy_device is given a ptr to 596 * generic driver is used. The phy_device is given a ptr to
579 * the attaching device, and given a callback for link status 597 * the attaching device, and given a callback for link status
580 * change. The phy_device is returned to the attaching driver. 598 * change. The phy_device is returned to the attaching driver.
599 * This function takes a reference on the phy device.
581 */ 600 */
582int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, 601int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
583 u32 flags, phy_interface_t interface) 602 u32 flags, phy_interface_t interface)
584{ 603{
604 struct mii_bus *bus = phydev->bus;
585 struct device *d = &phydev->dev; 605 struct device *d = &phydev->dev;
586 struct module *bus_module;
587 int err; 606 int err;
588 607
608 if (!try_module_get(bus->owner)) {
609 dev_err(&dev->dev, "failed to get the bus module\n");
610 return -EIO;
611 }
612
613 get_device(d);
614
589 /* Assume that if there is no driver, that it doesn't 615 /* Assume that if there is no driver, that it doesn't
590 * exist, and we should use the genphy driver. 616 * exist, and we should use the genphy driver.
591 */ 617 */
@@ -600,20 +626,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
600 err = device_bind_driver(d); 626 err = device_bind_driver(d);
601 627
602 if (err) 628 if (err)
603 return err; 629 goto error;
604 } 630 }
605 631
606 if (phydev->attached_dev) { 632 if (phydev->attached_dev) {
607 dev_err(&dev->dev, "PHY already attached\n"); 633 dev_err(&dev->dev, "PHY already attached\n");
608 return -EBUSY; 634 err = -EBUSY;
609 } 635 goto error;
610
611 /* Increment the bus module reference count */
612 bus_module = phydev->bus->dev.driver ?
613 phydev->bus->dev.driver->owner : NULL;
614 if (!try_module_get(bus_module)) {
615 dev_err(&dev->dev, "failed to get the bus module\n");
616 return -EIO;
617 } 636 }
618 637
619 phydev->attached_dev = dev; 638 phydev->attached_dev = dev;
@@ -636,6 +655,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
636 phy_resume(phydev); 655 phy_resume(phydev);
637 656
638 return err; 657 return err;
658
659error:
660 put_device(d);
661 module_put(bus->owner);
662 return err;
639} 663}
640EXPORT_SYMBOL(phy_attach_direct); 664EXPORT_SYMBOL(phy_attach_direct);
641 665
@@ -677,14 +701,15 @@ EXPORT_SYMBOL(phy_attach);
677/** 701/**
678 * phy_detach - detach a PHY device from its network device 702 * phy_detach - detach a PHY device from its network device
679 * @phydev: target phy_device struct 703 * @phydev: target phy_device struct
704 *
705 * This detaches the phy device from its network device and the phy
706 * driver, and drops the reference count taken in phy_attach_direct().
680 */ 707 */
681void phy_detach(struct phy_device *phydev) 708void phy_detach(struct phy_device *phydev)
682{ 709{
710 struct mii_bus *bus;
683 int i; 711 int i;
684 712
685 if (phydev->bus->dev.driver)
686 module_put(phydev->bus->dev.driver->owner);
687
688 phydev->attached_dev->phydev = NULL; 713 phydev->attached_dev->phydev = NULL;
689 phydev->attached_dev = NULL; 714 phydev->attached_dev = NULL;
690 phy_suspend(phydev); 715 phy_suspend(phydev);
@@ -700,6 +725,15 @@ void phy_detach(struct phy_device *phydev)
700 break; 725 break;
701 } 726 }
702 } 727 }
728
729 /*
730 * The phydev might go away on the put_device() below, so avoid
731 * a use-after-free bug by reading the underlying bus first.
732 */
733 bus = phydev->bus;
734
735 put_device(&phydev->dev);
736 module_put(bus->owner);
703} 737}
704EXPORT_SYMBOL(phy_detach); 738EXPORT_SYMBOL(phy_detach);
705 739
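
Taken together, the phy_attach_direct()/phy_detach() halves give MAC drivers a simple lifetime rule: the phy_device and its bus module stay pinned from attach to detach, and the new error path unwinds both references instead of leaking them. A sketch of the pairing under that rule (the example_* names are hypothetical):

    static int example_attach(struct net_device *ndev, struct mii_bus *bus)
    {
            struct phy_device *phydev = phy_find_first(bus);

            if (!phydev)
                    return -ENODEV;

            /* Takes get_device() on phydev and try_module_get() on
             * bus->owner; both are held until phy_detach(). */
            return phy_attach_direct(ndev, phydev, 0, PHY_INTERFACE_MODE_MII);
    }

    static void example_detach(struct net_device *ndev)
    {
            /* Drops the device reference and the bus module reference
             * taken at attach time. */
            phy_detach(ndev->phydev);
    }
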
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 17cad185169d..76cad712ddb2 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,7 +66,6 @@
66#define PHY_ID_VSC8244 0x000fc6c0 66#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 67#define PHY_ID_VSC8514 0x00070670
68#define PHY_ID_VSC8574 0x000704a0 68#define PHY_ID_VSC8574 0x000704a0
69#define PHY_ID_VSC8641 0x00070431
70#define PHY_ID_VSC8662 0x00070660 69#define PHY_ID_VSC8662 0x00070660
71#define PHY_ID_VSC8221 0x000fc550 70#define PHY_ID_VSC8221 0x000fc550
72#define PHY_ID_VSC8211 0x000fc4b0 71#define PHY_ID_VSC8211 0x000fc4b0
@@ -273,18 +272,6 @@ static struct phy_driver vsc82xx_driver[] = {
273 .config_intr = &vsc82xx_config_intr, 272 .config_intr = &vsc82xx_config_intr,
274 .driver = { .owner = THIS_MODULE,}, 273 .driver = { .owner = THIS_MODULE,},
275}, { 274}, {
276 .phy_id = PHY_ID_VSC8641,
277 .name = "Vitesse VSC8641",
278 .phy_id_mask = 0x000ffff0,
279 .features = PHY_GBIT_FEATURES,
280 .flags = PHY_HAS_INTERRUPT,
281 .config_init = &vsc824x_config_init,
282 .config_aneg = &vsc82x4_config_aneg,
283 .read_status = &genphy_read_status,
284 .ack_interrupt = &vsc824x_ack_interrupt,
285 .config_intr = &vsc82xx_config_intr,
286 .driver = { .owner = THIS_MODULE,},
287}, {
288 .phy_id = PHY_ID_VSC8662, 275 .phy_id = PHY_ID_VSC8662,
289 .name = "Vitesse VSC8662", 276 .name = "Vitesse VSC8662",
290 .phy_id_mask = 0x000ffff0, 277 .phy_id_mask = 0x000ffff0,
@@ -331,7 +318,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
331 { PHY_ID_VSC8244, 0x000fffc0 }, 318 { PHY_ID_VSC8244, 0x000fffc0 },
332 { PHY_ID_VSC8514, 0x000ffff0 }, 319 { PHY_ID_VSC8514, 0x000ffff0 },
333 { PHY_ID_VSC8574, 0x000ffff0 }, 320 { PHY_ID_VSC8574, 0x000ffff0 },
334 { PHY_ID_VSC8641, 0x000ffff0 },
335 { PHY_ID_VSC8662, 0x000ffff0 }, 321 { PHY_ID_VSC8662, 0x000ffff0 },
336 { PHY_ID_VSC8221, 0x000ffff0 }, 322 { PHY_ID_VSC8221, 0x000ffff0 },
337 { PHY_ID_VSC8211, 0x000ffff0 }, 323 { PHY_ID_VSC8211, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0481daf9201a..ed00446759b2 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2755,6 +2755,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2755 */ 2755 */
2756 dev_net_set(dev, net); 2756 dev_net_set(dev, net);
2757 2757
2758 rtnl_lock();
2758 mutex_lock(&pn->all_ppp_mutex); 2759 mutex_lock(&pn->all_ppp_mutex);
2759 2760
2760 if (unit < 0) { 2761 if (unit < 0) {
@@ -2785,7 +2786,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2785 ppp->file.index = unit; 2786 ppp->file.index = unit;
2786 sprintf(dev->name, "ppp%d", unit); 2787 sprintf(dev->name, "ppp%d", unit);
2787 2788
2788 ret = register_netdev(dev); 2789 ret = register_netdevice(dev);
2789 if (ret != 0) { 2790 if (ret != 0) {
2790 unit_put(&pn->units_idr, unit); 2791 unit_put(&pn->units_idr, unit);
2791 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", 2792 netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
@@ -2797,6 +2798,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2797 2798
2798 atomic_inc(&ppp_unit_count); 2799 atomic_inc(&ppp_unit_count);
2799 mutex_unlock(&pn->all_ppp_mutex); 2800 mutex_unlock(&pn->all_ppp_mutex);
2801 rtnl_unlock();
2800 2802
2801 *retp = 0; 2803 *retp = 0;
2802 return ppp; 2804 return ppp;
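
The register_netdev() -> register_netdevice() switch is forced by the new rtnl_lock(): register_netdev() is only a convenience wrapper that takes the RTNL itself, so calling it with the lock already held would deadlock. From memory, the wrapper in net/core/dev.c is essentially:

    int register_netdev(struct net_device *dev)
    {
            int err;

            rtnl_lock();
            err = register_netdevice(dev);
            rtnl_unlock();
            return err;
    }

Taking the RTNL before all_ppp_mutex here presumably also fixes the lock ordering against paths that already hold the RTNL when they take the mutex.
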
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1610b79ae386..fbb9325d1f6e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -583,4 +583,15 @@ config USB_VL600
583 583
584 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17 584 http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
585 585
586config USB_NET_CH9200
587 tristate "QinHeng CH9200 USB ethernet support"
588 depends on USB_USBNET
589 select MII
590 help
591 Choose this option if you have a USB ethernet adapter with a QinHeng
592 CH9200 chipset.
593
594 To compile this driver as a module, choose M here: the
595 module will be called ch9200.
596
586endif # USB_NET_DRIVERS 597endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index cf6a0e610a7f..b5f04068dbe4 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -38,4 +38,4 @@ obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o
38obj-$(CONFIG_USB_VL600) += lg-vl600.o 38obj-$(CONFIG_USB_VL600) += lg-vl600.o
39obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o 39obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
40obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o 40obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
41 41obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
new file mode 100644
index 000000000000..5e151e6a3e09
--- /dev/null
+++ b/drivers/net/usb/ch9200.c
@@ -0,0 +1,432 @@
1/*
2 * USB 10M/100M ethernet adapter
3 *
4 * This file is licensed under the terms of the GNU General Public License
5 * version 2. This program is licensed "as is" without any warranty of any
6 * kind, whether express or implied
7 *
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/sched.h>
13#include <linux/stddef.h>
14#include <linux/init.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/ethtool.h>
18#include <linux/mii.h>
19#include <linux/usb.h>
20#include <linux/crc32.h>
21#include <linux/usb/usbnet.h>
22#include <linux/slab.h>
23
24#define CH9200_VID 0x1A86
25#define CH9200_PID_E092 0xE092
26
27#define CTRL_TIMEOUT_MS 1000
28
29#define CONTROL_TIMEOUT_MS 1000
30
31#define REQUEST_READ 0x0E
32#define REQUEST_WRITE 0x0F
33
34/* Address space:
35 * 00-63 : MII
36 * 64-128: MAC
37 *
38 * Note: all accesses must be 16-bit
39 */
40
41#define MAC_REG_CTRL 64
42#define MAC_REG_STATUS 66
43#define MAC_REG_INTERRUPT_MASK 68
44#define MAC_REG_PHY_COMMAND 70
45#define MAC_REG_PHY_DATA 72
46#define MAC_REG_STATION_L 74
47#define MAC_REG_STATION_M 76
48#define MAC_REG_STATION_H 78
49#define MAC_REG_HASH_L 80
50#define MAC_REG_HASH_M1 82
51#define MAC_REG_HASH_M2 84
52#define MAC_REG_HASH_H 86
53#define MAC_REG_THRESHOLD 88
54#define MAC_REG_FIFO_DEPTH 90
55#define MAC_REG_PAUSE 92
56#define MAC_REG_FLOW_CONTROL 94
57
58/* Control register bits
59 *
60 * Note: bits 13 and 15 are reserved
61 */
62#define LOOPBACK (0x01 << 14)
63#define BASE100X (0x01 << 12)
64#define MBPS_10 (0x01 << 11)
65#define DUPLEX_MODE (0x01 << 10)
66#define PAUSE_FRAME (0x01 << 9)
67#define PROMISCUOUS (0x01 << 8)
68#define MULTICAST (0x01 << 7)
69#define BROADCAST (0x01 << 6)
70#define HASH (0x01 << 5)
71#define APPEND_PAD (0x01 << 4)
72#define APPEND_CRC (0x01 << 3)
73#define TRANSMITTER_ACTION (0x01 << 2)
74#define RECEIVER_ACTION (0x01 << 1)
75#define DMA_ACTION (0x01 << 0)
76
77/* Status register bits
78 *
79 * Note: bits 7-15 are reserved
80 */
81#define ALIGNMENT (0x01 << 6)
82#define FIFO_OVER_RUN (0x01 << 5)
83#define FIFO_UNDER_RUN (0x01 << 4)
84#define RX_ERROR (0x01 << 3)
85#define RX_COMPLETE (0x01 << 2)
86#define TX_ERROR (0x01 << 1)
87#define TX_COMPLETE (0x01 << 0)
88
89/* FIFO depth register bits
90 *
91 * Note: bits 6 and 14 are reserved
92 */
93
94#define ETH_TXBD (0x01 << 15)
95#define ETN_TX_FIFO_DEPTH (0x01 << 8)
96#define ETH_RXBD (0x01 << 7)
97#define ETH_RX_FIFO_DEPTH (0x01 << 0)
98
99static int control_read(struct usbnet *dev,
100 unsigned char request, unsigned short value,
101 unsigned short index, void *data, unsigned short size,
102 int timeout)
103{
104 unsigned char *buf = NULL;
105 unsigned char request_type;
106 int err = 0;
107
108 if (request == REQUEST_READ)
109 request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER);
110 else
111 request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
112 USB_RECIP_DEVICE);
113
114 netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
115 index, size);
116
117 buf = kmalloc(size, GFP_KERNEL);
118 if (!buf) {
119 err = -ENOMEM;
120 goto err_out;
121 }
122
123 err = usb_control_msg(dev->udev,
124 usb_rcvctrlpipe(dev->udev, 0),
125 request, request_type, value, index, buf, size,
126 timeout);
127 if (err == size)
128 memcpy(data, buf, size);
129 else if (err >= 0)
130 err = -EINVAL;
131 kfree(buf);
132
133 return err;
134
135err_out:
136 return err;
137}
138
139static int control_write(struct usbnet *dev, unsigned char request,
140 unsigned short value, unsigned short index,
141 void *data, unsigned short size, int timeout)
142{
143 unsigned char *buf = NULL;
144 unsigned char request_type;
145 int err = 0;
146
147 if (request == REQUEST_WRITE)
148 request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
149 USB_RECIP_OTHER);
150 else
151 request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
152 USB_RECIP_DEVICE);
153
154 netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
155 index, size);
156
157 if (data) {
158 buf = kmalloc(size, GFP_KERNEL);
159 if (!buf) {
160 err = -ENOMEM;
161 goto err_out;
162 }
163 memcpy(buf, data, size);
164 }
165
166 err = usb_control_msg(dev->udev,
167 usb_sndctrlpipe(dev->udev, 0),
168 request, request_type, value, index, buf, size,
169 timeout);
170 if (err >= 0 && err < size)
171 err = -EINVAL;
172 kfree(buf);
173
174 return 0;
175
176err_out:
177 return err;
178}
179
180static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
181{
182 struct usbnet *dev = netdev_priv(netdev);
183 unsigned char buff[2];
184
185 netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
186 phy_id, loc);
187
188 if (phy_id != 0)
189 return -ENODEV;
190
191 control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
192 CONTROL_TIMEOUT_MS);
193
194 return (buff[0] | buff[1] << 8);
195}
196
197static void ch9200_mdio_write(struct net_device *netdev,
198 int phy_id, int loc, int val)
199{
200 struct usbnet *dev = netdev_priv(netdev);
201 unsigned char buff[2];
202
203 netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
204 phy_id, loc);
205
206 if (phy_id != 0)
207 return;
208
209 buff[0] = (unsigned char)val;
210 buff[1] = (unsigned char)(val >> 8);
211
212 control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02,
213 CONTROL_TIMEOUT_MS);
214}
215
216static int ch9200_link_reset(struct usbnet *dev)
217{
218 struct ethtool_cmd ecmd;
219
220 mii_check_media(&dev->mii, 1, 1);
221 mii_ethtool_gset(&dev->mii, &ecmd);
222
223 netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
224 ecmd.speed, ecmd.duplex);
225
226 return 0;
227}
228
229static void ch9200_status(struct usbnet *dev, struct urb *urb)
230{
231 int link;
232 unsigned char *buf;
233
234 if (urb->actual_length < 16)
235 return;
236
237 buf = urb->transfer_buffer;
238 link = !!(buf[0] & 0x01);
239
240 if (link) {
241 netif_carrier_on(dev->net);
242 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
243 } else {
244 netif_carrier_off(dev->net);
245 }
246}
247
248static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
249 gfp_t flags)
250{
251 int i = 0;
252 int len = 0;
253 int tx_overhead = 0;
254
255 tx_overhead = 0x40;
256
257 len = skb->len;
258 if (skb_headroom(skb) < tx_overhead) {
259 struct sk_buff *skb2;
260
261 skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
262 dev_kfree_skb_any(skb);
263 skb = skb2;
264 if (!skb)
265 return NULL;
266 }
267
268 __skb_push(skb, tx_overhead);
269 /* usbnet adds padding if length is a multiple of packet size
270 * if so, adjust length value in header
271 */
272 if ((skb->len % dev->maxpacket) == 0)
273 len++;
274
275 skb->data[0] = len;
276 skb->data[1] = len >> 8;
277 skb->data[2] = 0x00;
278 skb->data[3] = 0x80;
279
280 for (i = 4; i < 48; i++)
281 skb->data[i] = 0x00;
282
283 skb->data[48] = len;
284 skb->data[49] = len >> 8;
285 skb->data[50] = 0x00;
286 skb->data[51] = 0x80;
287
288 for (i = 52; i < 64; i++)
289 skb->data[i] = 0x00;
290
291 return skb;
292}
293
294static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
295{
296 int len = 0;
297 int rx_overhead = 0;
298
299 rx_overhead = 64;
300
301 if (unlikely(skb->len < rx_overhead)) {
302 dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
303 return 0;
304 }
305
306 len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8);
307 skb_trim(skb, len);
308
309 return 1;
310}
311
312static int get_mac_address(struct usbnet *dev, unsigned char *data)
313{
314 int err = 0;
315 unsigned char mac_addr[0x06];
316 int rd_mac_len = 0;
317
318 netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
319 dev->udev->descriptor.idVendor,
320 dev->udev->descriptor.idProduct);
321
322 memset(mac_addr, 0, sizeof(mac_addr));
323 rd_mac_len = control_read(dev, REQUEST_READ, 0,
324 MAC_REG_STATION_L, mac_addr, 0x02,
325 CONTROL_TIMEOUT_MS);
326 rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M,
327 mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS);
328 rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H,
329 mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS);
330 if (rd_mac_len != ETH_ALEN)
331 err = -EINVAL;
332
333 data[0] = mac_addr[5];
334 data[1] = mac_addr[4];
335 data[2] = mac_addr[3];
336 data[3] = mac_addr[2];
337 data[4] = mac_addr[1];
338 data[5] = mac_addr[0];
339
340 return err;
341}
342
343static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
344{
345 int retval = 0;
346 unsigned char data[2];
347
348 retval = usbnet_get_endpoints(dev, intf);
349 if (retval)
350 return retval;
351
352 dev->mii.dev = dev->net;
353 dev->mii.mdio_read = ch9200_mdio_read;
354 dev->mii.mdio_write = ch9200_mdio_write;
355 dev->mii.reg_num_mask = 0x1f;
356
357 dev->mii.phy_id_mask = 0x1f;
358
359 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
360 dev->rx_urb_size = 24 * 64 + 16;
361 mii_nway_restart(&dev->mii);
362
363 data[0] = 0x01;
364 data[1] = 0x0F;
365 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data,
366 0x02, CONTROL_TIMEOUT_MS);
367
368 data[0] = 0xA0;
369 data[1] = 0x90;
370 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data,
371 0x02, CONTROL_TIMEOUT_MS);
372
373 data[0] = 0x30;
374 data[1] = 0x00;
375 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data,
376 0x02, CONTROL_TIMEOUT_MS);
377
378 data[0] = 0x17;
379 data[1] = 0xD8;
380 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL,
381 data, 0x02, CONTROL_TIMEOUT_MS);
382
383 /* Undocumented register */
384 data[0] = 0x01;
385 data[1] = 0x00;
386 retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02,
387 CONTROL_TIMEOUT_MS);
388
389 data[0] = 0x5F;
390 data[1] = 0x0D;
391 retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
392 CONTROL_TIMEOUT_MS);
393
394 retval = get_mac_address(dev, dev->net->dev_addr);
395
396 return retval;
397}
398
399static const struct driver_info ch9200_info = {
400 .description = "CH9200 USB to Network Adaptor",
401 .flags = FLAG_ETHER,
402 .bind = ch9200_bind,
403 .rx_fixup = ch9200_rx_fixup,
404 .tx_fixup = ch9200_tx_fixup,
405 .status = ch9200_status,
406 .link_reset = ch9200_link_reset,
407 .reset = ch9200_link_reset,
408};
409
410static const struct usb_device_id ch9200_products[] = {
411 {
412 USB_DEVICE(0x1A86, 0xE092),
413 .driver_info = (unsigned long)&ch9200_info,
414 },
415 {},
416};
417
418MODULE_DEVICE_TABLE(usb, ch9200_products);
419
420static struct usb_driver ch9200_driver = {
421 .name = "ch9200",
422 .id_table = ch9200_products,
423 .probe = usbnet_probe,
424 .disconnect = usbnet_disconnect,
425 .suspend = usbnet_suspend,
426 .resume = usbnet_resume,
427};
428
429module_usb_driver(ch9200_driver);
430
431MODULE_DESCRIPTION("QinHeng CH9200 USB Network device");
432MODULE_LICENSE("GPL");
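
The register-map comment near the top of the file (MII registers at 0-63, MAC registers from 64 up, all accesses 16-bit) is what the loc * 2 scaling in ch9200_mdio_read()/ch9200_mdio_write() encodes: each 16-bit register occupies two bytes of the vendor-request address space, and values travel little-endian. An illustrative, non-upstream helper built on the driver's own transfer routine:

    /* Sketch only: fetch the 16-bit MAC status word the same way the
     * driver's other register reads work. */
    static int example_read_mac_status(struct usbnet *dev, u16 *status)
    {
            unsigned char buf[2];
            int err;

            err = control_read(dev, REQUEST_READ, 0, MAC_REG_STATUS,
                               buf, 0x02, CONTROL_TIMEOUT_MS);
            if (err < 0)
                    return err;

            *status = buf[0] | (buf[1] << 8);   /* as in ch9200_mdio_read() */
            return 0;
    }
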
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index e7094fbd7568..488c6f50df73 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -193,7 +193,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
193 .flowi4_oif = vrf_dev->ifindex, 193 .flowi4_oif = vrf_dev->ifindex,
194 .flowi4_iif = LOOPBACK_IFINDEX, 194 .flowi4_iif = LOOPBACK_IFINDEX,
195 .flowi4_tos = RT_TOS(ip4h->tos), 195 .flowi4_tos = RT_TOS(ip4h->tos),
196 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC, 196 .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC |
197 FLOWI_FLAG_SKIP_NH_OIF,
197 .daddr = ip4h->daddr, 198 .daddr = ip4h->daddr,
198 }; 199 };
199 200
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index cf8b7f0473b3..bbac1d35ed4e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2392,10 +2392,6 @@ static void vxlan_setup(struct net_device *dev)
2392 2392
2393 eth_hw_addr_random(dev); 2393 eth_hw_addr_random(dev);
2394 ether_setup(dev); 2394 ether_setup(dev);
2395 if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
2396 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2397 else
2398 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2399 2395
2400 dev->netdev_ops = &vxlan_netdev_ops; 2396 dev->netdev_ops = &vxlan_netdev_ops;
2401 dev->destructor = free_netdev; 2397 dev->destructor = free_netdev;
@@ -2640,8 +2636,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2640 dst->remote_ip.sa.sa_family = AF_INET; 2636 dst->remote_ip.sa.sa_family = AF_INET;
2641 2637
2642 if (dst->remote_ip.sa.sa_family == AF_INET6 || 2638 if (dst->remote_ip.sa.sa_family == AF_INET6 ||
2643 vxlan->cfg.saddr.sa.sa_family == AF_INET6) 2639 vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
2640 if (!IS_ENABLED(CONFIG_IPV6))
2641 return -EPFNOSUPPORT;
2644 use_ipv6 = true; 2642 use_ipv6 = true;
2643 }
2645 2644
2646 if (conf->remote_ifindex) { 2645 if (conf->remote_ifindex) {
2647 struct net_device *lowerdev 2646 struct net_device *lowerdev
@@ -2670,8 +2669,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2670 2669
2671 dev->needed_headroom = lowerdev->hard_header_len + 2670 dev->needed_headroom = lowerdev->hard_header_len +
2672 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2671 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2673 } else if (use_ipv6) 2672 } else if (use_ipv6) {
2674 vxlan->flags |= VXLAN_F_IPV6; 2673 vxlan->flags |= VXLAN_F_IPV6;
2674 dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
2675 } else {
2676 dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
2677 }
2675 2678
2676 memcpy(&vxlan->cfg, conf, sizeof(*conf)); 2679 memcpy(&vxlan->cfg, conf, sizeof(*conf));
2677 if (!vxlan->cfg.dst_port) 2680 if (!vxlan->cfg.dst_port)
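
Moving the needed_headroom assignment out of vxlan_setup() fixes an ordering problem: setup runs before the link configuration is applied, so the address-family test there could never see the real remote address. After this hunk, each branch of vxlan_dev_configure() picks the budget once the family is actually known. For reference, the headroom constants cover the whole outer encapsulation; their definitions (quoted from memory, treat as a sketch) are:

    /* Outer overhead in front of the inner frame:
     * IP header + UDP header + VXLAN header + outer Ethernet header. */
    #define VXLAN_HEADROOM  (20 + 8 + 8 + 14)    /* IPv4 outer header */
    #define VXLAN6_HEADROOM (40 + 8 + 8 + 14)    /* IPv6 outer header */
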
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1350fa25cdb0..a87a868fed64 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -197,7 +197,8 @@ static int of_phy_match(struct device *dev, void *phy_np)
197 * of_phy_find_device - Give a PHY node, find the phy_device 197 * of_phy_find_device - Give a PHY node, find the phy_device
198 * @phy_np: Pointer to the phy's device tree node 198 * @phy_np: Pointer to the phy's device tree node
199 * 199 *
200 * Returns a pointer to the phy_device. 200 * If successful, returns a pointer to the phy_device with the embedded
201 * struct device refcount incremented by one, or NULL on failure.
201 */ 202 */
202struct phy_device *of_phy_find_device(struct device_node *phy_np) 203struct phy_device *of_phy_find_device(struct device_node *phy_np)
203{ 204{
@@ -217,7 +218,9 @@ EXPORT_SYMBOL(of_phy_find_device);
217 * @hndlr: Link state callback for the network device 218 * @hndlr: Link state callback for the network device
218 * @iface: PHY data interface type 219 * @iface: PHY data interface type
219 * 220 *
220 * Returns a pointer to the phy_device if successful. NULL otherwise 221 * If successful, returns a pointer to the phy_device with the embedded
222 * struct device refcount incremented by one, or NULL on failure. The
223 * refcount must be dropped by calling phy_disconnect() or phy_detach().
221 */ 224 */
222struct phy_device *of_phy_connect(struct net_device *dev, 225struct phy_device *of_phy_connect(struct net_device *dev,
223 struct device_node *phy_np, 226 struct device_node *phy_np,
@@ -225,13 +228,19 @@ struct phy_device *of_phy_connect(struct net_device *dev,
225 phy_interface_t iface) 228 phy_interface_t iface)
226{ 229{
227 struct phy_device *phy = of_phy_find_device(phy_np); 230 struct phy_device *phy = of_phy_find_device(phy_np);
231 int ret;
228 232
229 if (!phy) 233 if (!phy)
230 return NULL; 234 return NULL;
231 235
232 phy->dev_flags = flags; 236 phy->dev_flags = flags;
233 237
234 return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy; 238 ret = phy_connect_direct(dev, phy, hndlr, iface);
239
240 /* refcount is held by phy_connect_direct() on success */
241 put_device(&phy->dev);
242
243 return ret ? NULL : phy;
235} 244}
236EXPORT_SYMBOL(of_phy_connect); 245EXPORT_SYMBOL(of_phy_connect);
237 246
@@ -241,17 +250,27 @@ EXPORT_SYMBOL(of_phy_connect);
241 * @phy_np: Node pointer for the PHY 250 * @phy_np: Node pointer for the PHY
242 * @flags: flags to pass to the PHY 251 * @flags: flags to pass to the PHY
243 * @iface: PHY data interface type 252 * @iface: PHY data interface type
253 *
254 * If successful, returns a pointer to the phy_device with the embedded
255 * struct device refcount incremented by one, or NULL on failure. The
256 * refcount must be dropped by calling phy_disconnect() or phy_detach().
244 */ 257 */
245struct phy_device *of_phy_attach(struct net_device *dev, 258struct phy_device *of_phy_attach(struct net_device *dev,
246 struct device_node *phy_np, u32 flags, 259 struct device_node *phy_np, u32 flags,
247 phy_interface_t iface) 260 phy_interface_t iface)
248{ 261{
249 struct phy_device *phy = of_phy_find_device(phy_np); 262 struct phy_device *phy = of_phy_find_device(phy_np);
263 int ret;
250 264
251 if (!phy) 265 if (!phy)
252 return NULL; 266 return NULL;
253 267
254 return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy; 268 ret = phy_attach_direct(dev, phy, flags, iface);
269
270 /* refcount is held by phy_attach_direct() on success */
271 put_device(&phy->dev);
272
273 return ret ? NULL : phy;
255} 274}
256EXPORT_SYMBOL(of_phy_attach); 275EXPORT_SYMBOL(of_phy_attach);
257 276
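
The new kernel-doc pins down the refcount contract: of_phy_find_device() returns the phydev with its struct device reference raised; on success that reference is kept by phy_connect_direct()/phy_attach_direct(), so of_phy_connect() and of_phy_attach() always drop their own temporary one. A consumer therefore balances everything with a single phy_disconnect() (or phy_detach()); a sketch with a hypothetical adjust-link callback:

    static int example_connect_phy(struct net_device *ndev,
                                   struct device_node *np)
    {
            struct device_node *phy_np;
            struct phy_device *phydev;

            phy_np = of_parse_phandle(np, "phy-handle", 0);
            if (!phy_np)
                    return -ENODEV;

            phydev = of_phy_connect(ndev, phy_np, example_adjust_link, 0,
                                    PHY_INTERFACE_MODE_RGMII);
            of_node_put(phy_np);
            if (!phydev)
                    return -ENODEV;

            /* On teardown, phy_disconnect(phydev) drops the reference
             * held since the connect. */
            return 0;
    }
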
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 1710d9dc7fc2..2306313c0029 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -38,8 +38,8 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
38 */ 38 */
39 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); 39 rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
40 if (rc != 0) 40 if (rc != 0)
41 return rc; 41 goto err;
42 /* No pin, exit */ 42 /* No pin, exit with no error message. */
43 if (pin == 0) 43 if (pin == 0)
44 return -ENODEV; 44 return -ENODEV;
45 45
@@ -53,8 +53,10 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
53 ppnode = pci_bus_to_OF_node(pdev->bus); 53 ppnode = pci_bus_to_OF_node(pdev->bus);
54 54
55 /* No node for host bridge ? give up */ 55 /* No node for host bridge ? give up */
56 if (ppnode == NULL) 56 if (ppnode == NULL) {
57 return -EINVAL; 57 rc = -EINVAL;
58 goto err;
59 }
58 } else { 60 } else {
59 /* We found a P2P bridge, check if it has a node */ 61 /* We found a P2P bridge, check if it has a node */
60 ppnode = pci_device_to_OF_node(ppdev); 62 ppnode = pci_device_to_OF_node(ppdev);
@@ -86,7 +88,13 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
86 out_irq->args[0] = pin; 88 out_irq->args[0] = pin;
87 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); 89 laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
88 laddr[1] = laddr[2] = cpu_to_be32(0); 90 laddr[1] = laddr[2] = cpu_to_be32(0);
89 return of_irq_parse_raw(laddr, out_irq); 91 rc = of_irq_parse_raw(laddr, out_irq);
92 if (rc)
93 goto err;
94 return 0;
95err:
96 dev_err(&pdev->dev, "of_irq_parse_pci() failed with rc=%d\n", rc);
97 return rc;
90} 98}
91EXPORT_SYMBOL_GPL(of_irq_parse_pci); 99EXPORT_SYMBOL_GPL(of_irq_parse_pci);
92 100
@@ -105,10 +113,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
105 int ret; 113 int ret;
106 114
107 ret = of_irq_parse_pci(dev, &oirq); 115 ret = of_irq_parse_pci(dev, &oirq);
108 if (ret) { 116 if (ret)
109 dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret);
110 return 0; /* Proper return code 0 == NO_IRQ */ 117 return 0; /* Proper return code 0 == NO_IRQ */
111 }
112 118
113 return irq_create_of_mapping(&oirq); 119 return irq_create_of_mapping(&oirq);
114} 120}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index baec33c4e698..a0580afe1713 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -560,6 +560,9 @@ dino_fixup_bus(struct pci_bus *bus)
560 } else if (bus->parent) { 560 } else if (bus->parent) {
561 int i; 561 int i;
562 562
563 pci_read_bridge_bases(bus);
564
565
563 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { 566 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
564 if((bus->self->resource[i].flags & 567 if((bus->self->resource[i].flags &
565 (IORESOURCE_IO | IORESOURCE_MEM)) == 0) 568 (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 7b9e89ba0465..a32c1f6c252c 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -693,6 +693,7 @@ lba_fixup_bus(struct pci_bus *bus)
693 if (bus->parent) { 693 if (bus->parent) {
694 int i; 694 int i;
695 /* PCI-PCI Bridge */ 695 /* PCI-PCI Bridge */
696 pci_read_bridge_bases(bus);
696 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) 697 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
697 pci_claim_bridge_resource(bus->self, i); 698 pci_claim_bridge_resource(bus->self, i);
698 } else { 699 } else {
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 769f7e35f1a2..59ac36fe7c42 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
442static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count, 442static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
443 void *arg) 443 void *arg)
444{ 444{
445 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); 445 struct pci_dev *tdev = pci_get_slot(dev->bus,
446 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
446 ssize_t ret; 447 ssize_t ret;
447 448
448 if (!tdev) 449 if (!tdev)
@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
456static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count, 457static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
457 const void *arg) 458 const void *arg)
458{ 459{
459 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn)); 460 struct pci_dev *tdev = pci_get_slot(dev->bus,
461 PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
460 ssize_t ret; 462 ssize_t ret;
461 463
462 if (!tdev) 464 if (!tdev)
@@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
473 .release = pci_vpd_pci22_release, 475 .release = pci_vpd_pci22_release,
474}; 476};
475 477
476static int pci_vpd_f0_dev_check(struct pci_dev *dev)
477{
478 struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
479 int ret = 0;
480
481 if (!tdev)
482 return -ENODEV;
483 if (!tdev->vpd || !tdev->multifunction ||
484 dev->class != tdev->class || dev->vendor != tdev->vendor ||
485 dev->device != tdev->device)
486 ret = -ENODEV;
487
488 pci_dev_put(tdev);
489 return ret;
490}
491
492int pci_vpd_pci22_init(struct pci_dev *dev) 478int pci_vpd_pci22_init(struct pci_dev *dev)
493{ 479{
494 struct pci_vpd_pci22 *vpd; 480 struct pci_vpd_pci22 *vpd;
@@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
497 cap = pci_find_capability(dev, PCI_CAP_ID_VPD); 483 cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
498 if (!cap) 484 if (!cap)
499 return -ENODEV; 485 return -ENODEV;
500 if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
501 int ret = pci_vpd_f0_dev_check(dev);
502 486
503 if (ret)
504 return ret;
505 }
506 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC); 487 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
507 if (!vpd) 488 if (!vpd)
508 return -ENOMEM; 489 return -ENOMEM;
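
The one-line change here is easy to misread: pci_get_slot() expects a devfn, not a bare slot number. devfn packs slot and function into one byte, so passing PCI_SLOT(dev->devfn) looked up the wrong device whenever the slot was non-zero, rather than function 0 of the same slot. The encoding macros (from include/uapi/linux/pci.h) make the fix plain:

    #define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)        ((devfn) & 0x07)

    /* e.g. devfn 0x19 is slot 3 / function 1; function 0 of that slot is
     * PCI_DEVFN(PCI_SLOT(0x19), 0) == 0x18, not PCI_SLOT(0x19) == 0x03. */
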
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 6fbd3f2b5992..d3346d23963b 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
256 256
257 res->start = start; 257 res->start = start;
258 res->end = end; 258 res->end = end;
259 res->flags &= ~IORESOURCE_UNSET;
260 orig_res.flags &= ~IORESOURCE_UNSET;
259 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n", 261 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
260 &orig_res, res); 262 &orig_res, res);
261 263
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 367e28fa7564..c4f64bfee551 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
362static struct of_device_id rcar_pci_of_match[] = { 362static struct of_device_id rcar_pci_of_match[] = {
363 { .compatible = "renesas,pci-r8a7790", }, 363 { .compatible = "renesas,pci-r8a7790", },
364 { .compatible = "renesas,pci-r8a7791", }, 364 { .compatible = "renesas,pci-r8a7791", },
365 { .compatible = "renesas,pci-r8a7794", },
365 { }, 366 { },
366}; 367};
367 368
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0b2be174d981..8361d27e5eca 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -676,15 +676,20 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
676static void pci_set_bus_msi_domain(struct pci_bus *bus) 676static void pci_set_bus_msi_domain(struct pci_bus *bus)
677{ 677{
678 struct irq_domain *d; 678 struct irq_domain *d;
679 struct pci_bus *b;
679 680
680 /* 681 /*
681 * Either bus is the root, and we must obtain it from the 682 * The bus can be a root bus, a subordinate bus, or a virtual bus
682 * firmware, or we inherit it from the bridge device. 683 * created by an SR-IOV device. Walk up to the first bridge device
684 * found or derive the domain from the host bridge.
683 */ 685 */
684 if (pci_is_root_bus(bus)) 686 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
685 d = pci_host_bridge_msi_domain(bus); 687 if (b->self)
686 else 688 d = dev_get_msi_domain(&b->self->dev);
687 d = dev_get_msi_domain(&bus->self->dev); 689 }
690
691 if (!d)
692 d = pci_host_bridge_msi_domain(b);
688 693
689 dev_set_msi_domain(&bus->dev, d); 694 dev_set_msi_domain(&bus->dev, d);
690} 695}
@@ -855,9 +860,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
855 child->bridge_ctl = bctl; 860 child->bridge_ctl = bctl;
856 } 861 }
857 862
858 /* Read and initialize bridge resources */
859 pci_read_bridge_bases(child);
860
861 cmax = pci_scan_child_bus(child); 863 cmax = pci_scan_child_bus(child);
862 if (cmax > subordinate) 864 if (cmax > subordinate)
863 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n", 865 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
@@ -918,9 +920,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
918 920
919 if (!is_cardbus) { 921 if (!is_cardbus) {
920 child->bridge_ctl = bctl; 922 child->bridge_ctl = bctl;
921
922 /* Read and initialize bridge resources */
923 pci_read_bridge_bases(child);
924 max = pci_scan_child_bus(child); 923 max = pci_scan_child_bus(child);
925 } else { 924 } else {
926 /* 925 /*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6a30252cd79f..b03373fd05ca 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1907,11 +1907,27 @@ static void quirk_netmos(struct pci_dev *dev)
1907DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, 1907DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
1908 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); 1908 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
1909 1909
1910/*
1911 * Quirk non-zero PCI functions to route VPD access through function 0 for
1912 * devices that share VPD resources between functions. The functions are
1913 * expected to be identical devices.
1914 */
1910static void quirk_f0_vpd_link(struct pci_dev *dev) 1915static void quirk_f0_vpd_link(struct pci_dev *dev)
1911{ 1916{
1912 if (!dev->multifunction || !PCI_FUNC(dev->devfn)) 1917 struct pci_dev *f0;
1918
1919 if (!PCI_FUNC(dev->devfn))
1913 return; 1920 return;
1914 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; 1921
1922 f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
1923 if (!f0)
1924 return;
1925
1926 if (f0->vpd && dev->class == f0->class &&
1927 dev->vendor == f0->vendor && dev->device == f0->device)
1928 dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
1929
1930 pci_dev_put(f0);
1915} 1931}
1916DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, 1932DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1917 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); 1933 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 738adfa5332b..52ea605f8130 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -318,6 +318,7 @@ static const struct of_device_id of_anatop_regulator_match_tbl[] = {
318 { .compatible = "fsl,anatop-regulator", }, 318 { .compatible = "fsl,anatop-regulator", },
319 { /* end */ } 319 { /* end */ }
320}; 320};
321MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl);
321 322
322static struct platform_driver anatop_regulator_driver = { 323static struct platform_driver anatop_regulator_driver = {
323 .driver = { 324 .driver = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7a85ac9e32c5..7849187d91ae 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1394,15 +1394,15 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1394 return 0; 1394 return 0;
1395 1395
1396 r = regulator_dev_lookup(dev, rdev->supply_name, &ret); 1396 r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
1397 if (ret == -ENODEV) {
1398 /*
1399 * No supply was specified for this regulator and
1400 * there will never be one.
1401 */
1402 return 0;
1403 }
1404
1405 if (!r) { 1397 if (!r) {
1398 if (ret == -ENODEV) {
1399 /*
1400 * No supply was specified for this regulator and
1401 * there will never be one.
1402 */
1403 return 0;
1404 }
1405
1406 if (have_full_constraints()) { 1406 if (have_full_constraints()) {
1407 r = dummy_regulator_rdev; 1407 r = dummy_regulator_rdev;
1408 } else { 1408 } else {
@@ -1422,11 +1422,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1422 return ret; 1422 return ret;
1423 1423
1424 /* Cascade always-on state to supply */ 1424 /* Cascade always-on state to supply */
1425 if (_regulator_is_enabled(rdev)) { 1425 if (_regulator_is_enabled(rdev) && rdev->supply) {
1426 ret = regulator_enable(rdev->supply); 1426 ret = regulator_enable(rdev->supply);
1427 if (ret < 0) { 1427 if (ret < 0) {
1428 if (rdev->supply) 1428 _regulator_put(rdev->supply);
1429 _regulator_put(rdev->supply);
1430 return ret; 1429 return ret;
1431 } 1430 }
1432 } 1431 }
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 464018de7e97..7bba8b747f30 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -394,6 +394,7 @@ static const struct of_device_id regulator_gpio_of_match[] = {
394 { .compatible = "regulator-gpio", }, 394 { .compatible = "regulator-gpio", },
395 {}, 395 {},
396}; 396};
397MODULE_DEVICE_TABLE(of, regulator_gpio_of_match);
397#endif 398#endif
398 399
399static struct platform_driver gpio_regulator_driver = { 400static struct platform_driver gpio_regulator_driver = {
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 4fa7bcaf454e..f9d74d63be7c 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -45,6 +45,10 @@ struct pbias_regulator_data {
45 int voltage; 45 int voltage;
46}; 46};
47 47
48struct pbias_of_data {
49 unsigned int offset;
50};
51
48static const unsigned int pbias_volt_table[] = { 52static const unsigned int pbias_volt_table[] = {
49 1800000, 53 1800000,
50 3000000 54 3000000
@@ -102,8 +106,35 @@ static struct of_regulator_match pbias_matches[] = {
102}; 106};
103#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches) 107#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches)
104 108
109/* Offset from SCM general area (and syscon) base */
110
111static const struct pbias_of_data pbias_of_data_omap2 = {
112 .offset = 0x230,
113};
114
115static const struct pbias_of_data pbias_of_data_omap3 = {
116 .offset = 0x2b0,
117};
118
119static const struct pbias_of_data pbias_of_data_omap4 = {
120 .offset = 0x60,
121};
122
123static const struct pbias_of_data pbias_of_data_omap5 = {
124 .offset = 0x60,
125};
126
127static const struct pbias_of_data pbias_of_data_dra7 = {
128 .offset = 0xe00,
129};
130
105static const struct of_device_id pbias_of_match[] = { 131static const struct of_device_id pbias_of_match[] = {
106 { .compatible = "ti,pbias-omap", }, 132 { .compatible = "ti,pbias-omap", },
133 { .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, },
134 { .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, },
135 { .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, },
136 { .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, },
137 { .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, },
107 {}, 138 {},
108}; 139};
109MODULE_DEVICE_TABLE(of, pbias_of_match); 140MODULE_DEVICE_TABLE(of, pbias_of_match);
@@ -118,6 +149,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
118 const struct pbias_reg_info *info; 149 const struct pbias_reg_info *info;
119 int ret = 0; 150 int ret = 0;
120 int count, idx, data_idx = 0; 151 int count, idx, data_idx = 0;
152 const struct of_device_id *match;
153 const struct pbias_of_data *data;
154 unsigned int offset;
121 155
122 count = of_regulator_match(&pdev->dev, np, pbias_matches, 156 count = of_regulator_match(&pdev->dev, np, pbias_matches,
123 PBIAS_NUM_REGS); 157 PBIAS_NUM_REGS);
@@ -133,6 +167,20 @@ static int pbias_regulator_probe(struct platform_device *pdev)
133 if (IS_ERR(syscon)) 167 if (IS_ERR(syscon))
134 return PTR_ERR(syscon); 168 return PTR_ERR(syscon);
135 169
170 match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev);
171 if (match && match->data) {
172 data = match->data;
173 offset = data->offset;
174 } else {
175 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
176 if (!res)
177 return -EINVAL;
178
179 offset = res->start;
180 dev_WARN(&pdev->dev,
181 "using legacy dt data for pbias offset\n");
182 }
183
136 cfg.regmap = syscon; 184 cfg.regmap = syscon;
137 cfg.dev = &pdev->dev; 185 cfg.dev = &pdev->dev;
138 186
@@ -145,10 +193,6 @@ static int pbias_regulator_probe(struct platform_device *pdev)
145 if (!info) 193 if (!info)
146 return -ENODEV; 194 return -ENODEV;
147 195
148 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
149 if (!res)
150 return -EINVAL;
151
152 drvdata[data_idx].syscon = syscon; 196 drvdata[data_idx].syscon = syscon;
153 drvdata[data_idx].info = info; 197 drvdata[data_idx].info = info;
154 drvdata[data_idx].desc.name = info->name; 198 drvdata[data_idx].desc.name = info->name;
@@ -158,9 +202,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
158 drvdata[data_idx].desc.volt_table = pbias_volt_table; 202 drvdata[data_idx].desc.volt_table = pbias_volt_table;
159 drvdata[data_idx].desc.n_voltages = 2; 203 drvdata[data_idx].desc.n_voltages = 2;
160 drvdata[data_idx].desc.enable_time = info->enable_time; 204 drvdata[data_idx].desc.enable_time = info->enable_time;
161 drvdata[data_idx].desc.vsel_reg = res->start; 205 drvdata[data_idx].desc.vsel_reg = offset;
162 drvdata[data_idx].desc.vsel_mask = info->vmode; 206 drvdata[data_idx].desc.vsel_mask = info->vmode;
163 drvdata[data_idx].desc.enable_reg = res->start; 207 drvdata[data_idx].desc.enable_reg = offset;
164 drvdata[data_idx].desc.enable_mask = info->enable_mask; 208 drvdata[data_idx].desc.enable_mask = info->enable_mask;
165 drvdata[data_idx].desc.enable_val = info->enable; 209 drvdata[data_idx].desc.enable_val = info->enable;
166 drvdata[data_idx].desc.disable_val = info->disable_val; 210 drvdata[data_idx].desc.disable_val = info->disable_val;
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 7f97223f95c5..a02c1b961039 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -73,7 +73,7 @@ static const struct regulator_linear_range dcdc4_ranges[] = {
73}; 73};
74 74
75static struct tps_info tps65218_pmic_regs[] = { 75static struct tps_info tps65218_pmic_regs[] = {
76 TPS65218_INFO(DCDC1, "DCDC1", 850000, 167500), 76 TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
77 TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000), 77 TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
78 TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000), 78 TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
79 TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000), 79 TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index bed9d3ee4198..c810cbbd463f 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -103,6 +103,7 @@ static const struct of_device_id vexpress_regulator_of_match[] = {
103 { .compatible = "arm,vexpress-volt", }, 103 { .compatible = "arm,vexpress-volt", },
104 { } 104 { }
105}; 105};
106MODULE_DEVICE_TABLE(of, vexpress_regulator_of_match);
106 107
107static struct platform_driver vexpress_regulator_driver = { 108static struct platform_driver vexpress_regulator_driver = {
108 .probe = vexpress_regulator_probe, 109 .probe = vexpress_regulator_probe,
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index d3d1891cda3c..25abd4eb7d10 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -35,20 +35,11 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { 37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
38 if (!of_machine_is_compatible("renesas,emev2") && 38 if (!of_find_compatible_node(NULL, NULL,
39 !of_machine_is_compatible("renesas,r7s72100") && 39 "renesas,cpg-mstp-clocks"))
40#ifndef CONFIG_PM_GENERIC_DOMAINS_OF 40 return 0;
41 !of_machine_is_compatible("renesas,r8a73a4") && 41 if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) &&
42 !of_machine_is_compatible("renesas,r8a7740") && 42 of_find_node_with_property(NULL, "#power-domain-cells"))
43 !of_machine_is_compatible("renesas,sh73a0") &&
44#endif
45 !of_machine_is_compatible("renesas,r8a7778") &&
46 !of_machine_is_compatible("renesas,r8a7779") &&
47 !of_machine_is_compatible("renesas,r8a7790") &&
48 !of_machine_is_compatible("renesas,r8a7791") &&
49 !of_machine_is_compatible("renesas,r8a7792") &&
50 !of_machine_is_compatible("renesas,r8a7793") &&
51 !of_machine_is_compatible("renesas,r8a7794"))
52 return 0; 43 return 0;
53 } 44 }
54 45
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index bf9ed380bb1c..63318e2afba1 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1720,6 +1720,7 @@ static int atmel_spi_runtime_resume(struct device *dev)
1720 return clk_prepare_enable(as->clk); 1720 return clk_prepare_enable(as->clk);
1721} 1721}
1722 1722
1723#ifdef CONFIG_PM_SLEEP
1723static int atmel_spi_suspend(struct device *dev) 1724static int atmel_spi_suspend(struct device *dev)
1724{ 1725{
1725 struct spi_master *master = dev_get_drvdata(dev); 1726 struct spi_master *master = dev_get_drvdata(dev);
@@ -1756,6 +1757,7 @@ static int atmel_spi_resume(struct device *dev)
1756 1757
1757 return ret; 1758 return ret;
1758} 1759}
1760#endif
1759 1761
1760static const struct dev_pm_ops atmel_spi_pm_ops = { 1762static const struct dev_pm_ops atmel_spi_pm_ops = {
1761 SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume) 1763 SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
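
The new #ifdef works because SET_SYSTEM_SLEEP_PM_OPS() only emits its initializers when CONFIG_PM_SLEEP is set; without the guard, the suspend/resume callbacks become unreferenced on !CONFIG_PM_SLEEP builds and trigger unused-function warnings. The include/linux/pm.h definition is roughly (shape from memory):

    #ifdef CONFIG_PM_SLEEP
    #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
            .suspend  = suspend_fn, \
            .resume   = resume_fn,  \
            .freeze   = suspend_fn, \
            .thaw     = resume_fn,  \
            .poweroff = suspend_fn, \
            .restore  = resume_fn,
    #else
    #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
    #endif
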
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index e7874a6171ec..3e8eeb23d4e9 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
386 /* otherwise we only allow transfers within the same page 386 /* otherwise we only allow transfers within the same page
387 * to avoid wasting time on dma_mapping when it is not practical 387 * to avoid wasting time on dma_mapping when it is not practical
388 */ 388 */
389 if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { 389 if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
390 dev_warn_once(&spi->dev, 390 dev_warn_once(&spi->dev,
391 "Unaligned spi tx-transfer bridging page\n"); 391 "Unaligned spi tx-transfer bridging page\n");
392 return false; 392 return false;
393 } 393 }
394 if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) { 394 if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
395 dev_warn_once(&spi->dev, 395 dev_warn_once(&spi->dev,
396 "Unaligned spi tx-transfer bridging page\n"); 396 "Unaligned spi rx-transfer bridging page\n");
397 return false; 397 return false;
398 } 398 }
399 399
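
PAGE_MASK is ~(PAGE_SIZE - 1): it keeps the page-number bits, so "addr & PAGE_MASK" yields the page base (a huge value for any kernel virtual address) and the old test fired for nearly every buffer. What the check wants is the offset within the page, i.e. "addr & (PAGE_SIZE - 1)". A hypothetical helper expressing the corrected test:

    static inline bool spans_page_boundary(const void *buf, size_t len)
    {
            size_t offset = (size_t)buf & (PAGE_SIZE - 1);

            /* True only when offset + len actually spills past the end
             * of the page, which is the case the DMA path must avoid. */
            return offset + len > PAGE_SIZE;
    }
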
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 5468fc70dbf8..2465259f6241 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -444,6 +444,7 @@ static const struct of_device_id meson_spifc_dt_match[] = {
444 { .compatible = "amlogic,meson6-spifc", }, 444 { .compatible = "amlogic,meson6-spifc", },
445 { }, 445 { },
446}; 446};
447MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
447 448
448static struct platform_driver meson_spifc_driver = { 449static struct platform_driver meson_spifc_driver = {
449 .probe = meson_spifc_probe, 450 .probe = meson_spifc_probe,
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 5f6315c47920..ecb6c58238c4 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -85,7 +85,7 @@ struct mtk_spi {
85 void __iomem *base; 85 void __iomem *base;
86 u32 state; 86 u32 state;
87 u32 pad_sel; 87 u32 pad_sel;
88 struct clk *spi_clk, *parent_clk; 88 struct clk *parent_clk, *sel_clk, *spi_clk;
89 struct spi_transfer *cur_transfer; 89 struct spi_transfer *cur_transfer;
90 u32 xfer_len; 90 u32 xfer_len;
91 struct scatterlist *tx_sgl, *rx_sgl; 91 struct scatterlist *tx_sgl, *rx_sgl;
@@ -173,22 +173,6 @@ static void mtk_spi_config(struct mtk_spi *mdata,
173 writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG); 173 writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
174} 174}
175 175
176static int mtk_spi_prepare_hardware(struct spi_master *master)
177{
178 struct spi_transfer *trans;
179 struct mtk_spi *mdata = spi_master_get_devdata(master);
180 struct spi_message *msg = master->cur_msg;
181
182 trans = list_first_entry(&msg->transfers, struct spi_transfer,
183 transfer_list);
184 if (!trans->cs_change) {
185 mdata->state = MTK_SPI_IDLE;
186 mtk_spi_reset(mdata);
187 }
188
189 return 0;
190}
191
192static int mtk_spi_prepare_message(struct spi_master *master, 176static int mtk_spi_prepare_message(struct spi_master *master,
193 struct spi_message *msg) 177 struct spi_message *msg)
194{ 178{
@@ -228,11 +212,15 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
228 struct mtk_spi *mdata = spi_master_get_devdata(spi->master); 212 struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
229 213
230 reg_val = readl(mdata->base + SPI_CMD_REG); 214 reg_val = readl(mdata->base + SPI_CMD_REG);
231 if (!enable) 215 if (!enable) {
232 reg_val |= SPI_CMD_PAUSE_EN; 216 reg_val |= SPI_CMD_PAUSE_EN;
233 else 217 writel(reg_val, mdata->base + SPI_CMD_REG);
218 } else {
234 reg_val &= ~SPI_CMD_PAUSE_EN; 219 reg_val &= ~SPI_CMD_PAUSE_EN;
235 writel(reg_val, mdata->base + SPI_CMD_REG); 220 writel(reg_val, mdata->base + SPI_CMD_REG);
221 mdata->state = MTK_SPI_IDLE;
222 mtk_spi_reset(mdata);
223 }
236} 224}
237 225
238static void mtk_spi_prepare_transfer(struct spi_master *master, 226static void mtk_spi_prepare_transfer(struct spi_master *master,
@@ -509,7 +497,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
509 master->mode_bits = SPI_CPOL | SPI_CPHA; 497 master->mode_bits = SPI_CPOL | SPI_CPHA;
510 498
511 master->set_cs = mtk_spi_set_cs; 499 master->set_cs = mtk_spi_set_cs;
512 master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
513 master->prepare_message = mtk_spi_prepare_message; 500 master->prepare_message = mtk_spi_prepare_message;
514 master->transfer_one = mtk_spi_transfer_one; 501 master->transfer_one = mtk_spi_transfer_one;
515 master->can_dma = mtk_spi_can_dma; 502 master->can_dma = mtk_spi_can_dma;
@@ -576,13 +563,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
576 goto err_put_master; 563 goto err_put_master;
577 } 564 }
578 565
579 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
580 if (IS_ERR(mdata->spi_clk)) {
581 ret = PTR_ERR(mdata->spi_clk);
582 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
583 goto err_put_master;
584 }
585
586 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); 566 mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
587 if (IS_ERR(mdata->parent_clk)) { 567 if (IS_ERR(mdata->parent_clk)) {
588 ret = PTR_ERR(mdata->parent_clk); 568 ret = PTR_ERR(mdata->parent_clk);
@@ -590,13 +570,27 @@ static int mtk_spi_probe(struct platform_device *pdev)
590 goto err_put_master; 570 goto err_put_master;
591 } 571 }
592 572
573 mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
574 if (IS_ERR(mdata->sel_clk)) {
575 ret = PTR_ERR(mdata->sel_clk);
576 dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
577 goto err_put_master;
578 }
579
580 mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
581 if (IS_ERR(mdata->spi_clk)) {
582 ret = PTR_ERR(mdata->spi_clk);
583 dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
584 goto err_put_master;
585 }
586
593 ret = clk_prepare_enable(mdata->spi_clk); 587 ret = clk_prepare_enable(mdata->spi_clk);
594 if (ret < 0) { 588 if (ret < 0) {
595 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); 589 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
596 goto err_put_master; 590 goto err_put_master;
597 } 591 }
598 592
599 ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk); 593 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
600 if (ret < 0) { 594 if (ret < 0) {
601 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); 595 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
602 goto err_disable_clk; 596 goto err_disable_clk;
@@ -630,7 +624,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
630 pm_runtime_disable(&pdev->dev); 624 pm_runtime_disable(&pdev->dev);
631 625
632 mtk_spi_reset(mdata); 626 mtk_spi_reset(mdata);
633 clk_disable_unprepare(mdata->spi_clk);
634 spi_master_put(master); 627 spi_master_put(master);
635 628
636 return 0; 629 return 0;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fdd791977041..a8ef38ebb9c9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -654,6 +654,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
654 if (!(sccr1_reg & SSCR1_TIE)) 654 if (!(sccr1_reg & SSCR1_TIE))
655 mask &= ~SSSR_TFS; 655 mask &= ~SSSR_TFS;
656 656
657 /* Ignore RX timeout interrupt if it is disabled */
658 if (!(sccr1_reg & SSCR1_TINTE))
659 mask &= ~SSSR_TINT;
660
657 if (!(status & mask)) 661 if (!(status & mask))
658 return IRQ_NONE; 662 return IRQ_NONE;
659 663
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 2e32ea2f194f..be6155cba9de 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -34,13 +34,13 @@ struct xtfpga_spi {
34static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi, 34static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
35 unsigned addr, u32 val) 35 unsigned addr, u32 val)
36{ 36{
37 iowrite32(val, spi->regs + addr); 37 __raw_writel(val, spi->regs + addr);
38} 38}
39 39
40static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi, 40static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
41 unsigned addr) 41 unsigned addr)
42{ 42{
43 return ioread32(spi->regs + addr); 43 return __raw_readl(spi->regs + addr);
44} 44}
45 45
46static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi) 46static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
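iowrite32()/ioread32() perform little-endian accesses and therefore byte-swap on big-endian CPUs, while __raw_writel()/__raw_readl() use the CPU's native endianness and imply no barriers; switching to the raw accessors suggests the xtfpga registers are wired native-endian. The distinction, in one illustrative fragment:

	#include <linux/io.h>

	iowrite32(val, spi->regs + addr);	/* LE access: swaps on BE cores */
	__raw_writel(val, spi->regs + addr);	/* native order, no barriers */
	val = __raw_readl(spi->regs + addr);	/* same for the read side */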
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3abb3903f2ad..a5f53de813d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1610,8 +1610,7 @@ static struct class spi_master_class = {
1610 * 1610 *
1611 * The caller is responsible for assigning the bus number and initializing 1611 * The caller is responsible for assigning the bus number and initializing
1612 * the master's methods before calling spi_register_master(); and (after errors 1612 * the master's methods before calling spi_register_master(); and (after errors
1613 * adding the device) calling spi_master_put() and kfree() to prevent a memory 1613 * adding the device) calling spi_master_put() to prevent a memory leak.
1614 * leak.
1615 */ 1614 */
1616struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1615struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1617{ 1616{
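The comment fix reflects that a struct spi_master is refcounted through its embedded struct device: spi_master_put() drops the last reference and the class release callback frees the allocation, so a separate kfree() would be a double free. A sketch of the probe-side contract under that model (struct my_priv is illustrative):

	master = spi_alloc_master(&pdev->dev, sizeof(struct my_priv));
	if (!master)
		return -ENOMEM;

	/* ... fill in bus_num, transfer ops, mode bits ... */

	ret = spi_register_master(master);
	if (ret) {
		spi_master_put(master);	/* release hook kfree()s master */
		return ret;
	}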
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index fba92a526531..ef008e52f953 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,7 +651,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
651 kfree(spidev->rx_buffer); 651 kfree(spidev->rx_buffer);
652 spidev->rx_buffer = NULL; 652 spidev->rx_buffer = NULL;
653 653
654 spidev->speed_hz = spidev->spi->max_speed_hz; 654 if (spidev->spi)
655 spidev->speed_hz = spidev->spi->max_speed_hz;
655 656
656 /* ... after we unbound from the underlying device? */ 657 /* ... after we unbound from the underlying device? */
657 spin_lock_irq(&spidev->spi_lock); 658 spin_lock_irq(&spidev->spi_lock);
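The spidev guard matters because release() can run after the underlying SPI device has been unbound, at which point spidev->spi has been cleared under spi_lock. Roughly how the release path then decides who frees the private data (a simplified sketch of the surrounding logic):

	int dofree;

	if (spidev->spi)	/* NULL once the device is unbound */
		spidev->speed_hz = spidev->spi->max_speed_hz;

	spin_lock_irq(&spidev->spi_lock);
	dofree = (spidev->spi == NULL);
	spin_unlock_irq(&spidev->spi_lock);

	if (dofree)
		kfree(spidev);	/* last closer frees the orphaned state */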
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 20288fc53946..8f3ac37bfe12 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,5 +5,25 @@ TODO:
5 - add proper arch dependencies as needed 5 - add proper arch dependencies as needed
6 - audit userspace interfaces to make sure they are sane 6 - audit userspace interfaces to make sure they are sane
7 7
8
9ion/
10 - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
11 interface on top of dma-buf. flush_for_device needs to be added to dma-buf
12 first.
13 - Remove ION_IOC_CUSTOM: Atm used for cache flushing for cpu access in some
14 vendor trees. Should be replaced with an ioctl on the dma-buf to expose the
15 begin/end_cpu_access hooks to userspace.
16 - Clarify the tricks ion plays with explicitly managing coherency behind the
17 dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
18 explicit coherency management mode to flush_for_device to be used by drivers
19 which want to manage caches themselves and which indicates whether cpu caches
20 need flushing.
21 - With those removed there's probably no use for ION_IOC_IMPORT anymore either
22 since ion would just be the central allocator for shared buffers.
23 - Add dt-binding to expose cma regions as ion heaps, with the rule that any
24 such cma regions must already be used by some device for dma. I.e. ion only
25 exposes existing cma regions and doesn't unnecessarily reserve memory when
26 booting a system which doesn't use ion.
27
8Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: 28Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
9Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> 29Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 217aa537c4eb..6e8d8392ca38 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1179 mutex_unlock(&client->lock); 1179 mutex_unlock(&client->lock);
1180 goto end; 1180 goto end;
1181 } 1181 }
1182 mutex_unlock(&client->lock);
1183 1182
1184 handle = ion_handle_create(client, buffer); 1183 handle = ion_handle_create(client, buffer);
1185 if (IS_ERR(handle)) 1184 if (IS_ERR(handle)) {
1185 mutex_unlock(&client->lock);
1186 goto end; 1186 goto end;
1187 }
1187 1188
1188 mutex_lock(&client->lock);
1189 ret = ion_handle_add(client, handle); 1189 ret = ion_handle_add(client, handle);
1190 mutex_unlock(&client->lock); 1190 mutex_unlock(&client->lock);
1191 if (ret) { 1191 if (ret) {
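The ion fix widens the client->lock critical section so the lookup for an existing handle and the insertion of a new one happen atomically; previously a second importer could race in between and create a duplicate handle for the same buffer. The shape of the fix, with made-up helper names standing in for the lookup/create/insert calls:

	mutex_lock(&client->lock);
	handle = lookup_handle(client, buffer);		/* illustrative */
	if (!handle) {
		handle = create_handle(client, buffer);	/* illustrative */
		if (!IS_ERR(handle))
			add_handle(client, handle);	/* lookup+insert atomic */
	}
	mutex_unlock(&client->lock);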
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 32f3a9d921d6..5cafa50d1fac 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -76,7 +76,7 @@ static int init_display(struct fbtft_par *par)
76 76
77 /* Set CS active high */ 77 /* Set CS active high */
78 par->spi->mode |= SPI_CS_HIGH; 78 par->spi->mode |= SPI_CS_HIGH;
79 ret = par->spi->master->setup(par->spi); 79 ret = spi_setup(par->spi);
80 if (ret) { 80 if (ret) {
81 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); 81 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
82 return ret; 82 return ret;
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 88fb2c0132d5..8eae6ef25846 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -169,7 +169,7 @@ static int init_display(struct fbtft_par *par)
169 /* enable SPI interface by having CS and MOSI low during reset */ 169 /* enable SPI interface by having CS and MOSI low during reset */
170 save_mode = par->spi->mode; 170 save_mode = par->spi->mode;
171 par->spi->mode |= SPI_CS_HIGH; 171 par->spi->mode |= SPI_CS_HIGH;
172 ret = par->spi->master->setup(par->spi); /* set CS inactive low */ 172 ret = spi_setup(par->spi); /* set CS inactive low */
173 if (ret) { 173 if (ret) {
174 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n"); 174 dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
175 return ret; 175 return ret;
@@ -180,7 +180,7 @@ static int init_display(struct fbtft_par *par)
180 par->fbtftops.reset(par); 180 par->fbtftops.reset(par);
181 mdelay(1000); 181 mdelay(1000);
182 par->spi->mode = save_mode; 182 par->spi->mode = save_mode;
183 ret = par->spi->master->setup(par->spi); 183 ret = spi_setup(par->spi);
184 if (ret) { 184 if (ret) {
185 dev_err(par->info->device, "Could not restore SPI mode\n"); 185 dev_err(par->info->device, "Could not restore SPI mode\n");
186 return ret; 186 return ret;
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 23392eb6799e..7f5fa3d1cab0 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1436,15 +1436,11 @@ int fbtft_probe_common(struct fbtft_display *display,
1436 1436
1437 /* 9-bit SPI setup */ 1437 /* 9-bit SPI setup */
1438 if (par->spi && display->buswidth == 9) { 1438 if (par->spi && display->buswidth == 9) {
1439 par->spi->bits_per_word = 9; 1439 if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
1440 ret = par->spi->master->setup(par->spi); 1440 par->spi->bits_per_word = 9;
1441 if (ret) { 1441 } else {
1442 dev_warn(&par->spi->dev, 1442 dev_warn(&par->spi->dev,
1443 "9-bit SPI not available, emulating using 8-bit.\n"); 1443 "9-bit SPI not available, emulating using 8-bit.\n");
1444 par->spi->bits_per_word = 8;
1445 ret = par->spi->master->setup(par->spi);
1446 if (ret)
1447 goto out_release;
1448 /* allocate buffer with room for dc bits */ 1444 /* allocate buffer with room for dc bits */
1449 par->extra = devm_kzalloc(par->info->device, 1445 par->extra = devm_kzalloc(par->info->device,
1450 par->txbuf.len + (par->txbuf.len / 8) + 8, 1446 par->txbuf.len + (par->txbuf.len / 8) + 8,
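Instead of probing for 9-bit support by calling ->setup() and reacting to its failure, the reworked fbtft code consults the controller's advertised capability mask up front; SPI_BPW_MASK(n) expands to BIT(n - 1). A minimal sketch:

	#include <linux/spi/spi.h>

	if (spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
		spi->bits_per_word = 9;		/* native 9-bit transfers */
	} else {
		spi->bits_per_word = 8;		/* emulate with 8-bit words */
		/* allocate the extra buffer for the dc bits here */
	}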
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index c763efc5de7d..3f380a0086c3 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -463,15 +463,12 @@ static int flexfb_probe_common(struct spi_device *sdev,
463 } 463 }
464 par->fbtftops.write_register = fbtft_write_reg8_bus9; 464 par->fbtftops.write_register = fbtft_write_reg8_bus9;
465 par->fbtftops.write_vmem = fbtft_write_vmem16_bus9; 465 par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
466 sdev->bits_per_word = 9; 466 if (par->spi->master->bits_per_word_mask
467 ret = sdev->master->setup(sdev); 467 & SPI_BPW_MASK(9)) {
468 if (ret) { 468 par->spi->bits_per_word = 9;
469 } else {
469 dev_warn(dev, 470 dev_warn(dev,
470 "9-bit SPI not available, emulating using 8-bit.\n"); 471 "9-bit SPI not available, emulating using 8-bit.\n");
471 sdev->bits_per_word = 8;
472 ret = sdev->master->setup(sdev);
473 if (ret)
474 goto out_release;
475 /* allocate buffer with room for dc bits */ 472 /* allocate buffer with room for dc bits */
476 par->extra = devm_kzalloc(par->info->device, 473 par->extra = devm_kzalloc(par->info->device,
477 par->txbuf.len + (par->txbuf.len / 8) + 8, 474 par->txbuf.len + (par->txbuf.len / 8) + 8,
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt
index cf0ca50ff83b..0676243eea9e 100644
--- a/drivers/staging/lustre/README.txt
+++ b/drivers/staging/lustre/README.txt
@@ -14,10 +14,8 @@ Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS),
14Lustre has independent Metadata and Data servers that clients can access 14Lustre has independent Metadata and Data servers that clients can access
15in parallel to maximize performance. 15in parallel to maximize performance.
16 16
17In order to use Lustre client you will need to download lustre client 17In order to use Lustre client you will need to download the "lustre-client"
18tools from 18package that contains the userspace tools from http://lustre.org/download/
19https://downloads.hpdd.intel.com/public/lustre/latest-feature-release/
20the package name is lustre-client.
21 19
22You will need to install and configure your Lustre servers separately. 20You will need to install and configure your Lustre servers separately.
23 21
@@ -76,12 +74,10 @@ Mount Options
76 74
77More Information 75More Information
78================ 76================
79You can get more information at 77You can get more information at the Lustre website: http://wiki.lustre.org/
80OpenSFS website: http://lustre.opensfs.org/about/
81Intel HPDD wiki: https://wiki.hpdd.intel.com
82 78
83Out of tree Lustre client and server code is available at: 79Source for the userspace tools and out-of-tree client and server code
84http://git.whamcloud.com/fs/lustre-release.git 80is available at: http://git.hpdd.intel.com/fs/lustre-release.git
85 81
86Latest binary packages: 82Latest binary packages:
87http://lustre.opensfs.org/download-lustre/ 83http://lustre.org/download/
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index d50de03de7b9..0b9b9b539f70 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,5 +1,6 @@
1menuconfig MOST 1menuconfig MOST
2 tristate "MOST driver" 2 tristate "MOST driver"
3 depends on HAS_DMA
3 select MOSTCORE 4 select MOSTCORE
4 default n 5 default n
5 ---help--- 6 ---help---
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
index 1d4ad1d67758..fc548769479b 100644
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -5,6 +5,7 @@
5config HDM_DIM2 5config HDM_DIM2
6 tristate "DIM2 HDM" 6 tristate "DIM2 HDM"
7 depends on AIM_NETWORK 7 depends on AIM_NETWORK
8 depends on HAS_IOMEM
8 9
9 ---help--- 10 ---help---
10 Say Y here if you want to connect via MediaLB to network transceiver. 11 Say Y here if you want to connect via MediaLB to network transceiver.
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
index a482c3fdf34b..ec1546312ee6 100644
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -4,7 +4,7 @@
4 4
5config HDM_USB 5config HDM_USB
6 tristate "USB HDM" 6 tristate "USB HDM"
7 depends on USB 7 depends on USB && NET
8 select AIM_NETWORK 8 select AIM_NETWORK
9 ---help--- 9 ---help---
10 Say Y here if you want to connect via USB to network transceiver. 10 Say Y here if you want to connect via USB to network transceiver.
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
index 38abf1b21b66..47172546d728 100644
--- a/drivers/staging/most/mostcore/Kconfig
+++ b/drivers/staging/most/mostcore/Kconfig
@@ -4,6 +4,7 @@
4 4
5config MOSTCORE 5config MOSTCORE
6 tristate "MOST Core" 6 tristate "MOST Core"
7 depends on HAS_DMA
7 8
8 ---help--- 9 ---help---
9 Say Y here if you want to enable MOST support. 10 Say Y here if you want to enable MOST support.
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
index fa27ee5f336c..fc790e7592fc 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -10,4 +10,3 @@ visorbus-y += visorchipset.o
10visorbus-y += periodic_work.o 10visorbus-y += periodic_work.o
11 11
12ccflags-y += -Idrivers/staging/unisys/include 12ccflags-y += -Idrivers/staging/unisys/include
13ccflags-y += -Idrivers/staging/unisys/visorutil
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 2309f5f2b238..a272b48bab28 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -37,6 +37,8 @@ static int visorbus_debugref;
37#define POLLJIFFIES_TESTWORK 100 37#define POLLJIFFIES_TESTWORK 100
38#define POLLJIFFIES_NORMALCHANNEL 10 38#define POLLJIFFIES_NORMALCHANNEL 10
39 39
40static int busreg_rc = -ENODEV; /* stores the result from bus registration */
41
40static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env); 42static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env);
41static int visorbus_match(struct device *xdev, struct device_driver *xdrv); 43static int visorbus_match(struct device *xdev, struct device_driver *xdrv);
42static void fix_vbus_dev_info(struct visor_device *visordev); 44static void fix_vbus_dev_info(struct visor_device *visordev);
@@ -863,6 +865,9 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
863{ 865{
864 int rc = 0; 866 int rc = 0;
865 867
868 if (busreg_rc < 0)
869 return -ENODEV; /* can't register on a nonexistent bus */
870
866 drv->driver.name = drv->name; 871 drv->driver.name = drv->name;
867 drv->driver.bus = &visorbus_type; 872 drv->driver.bus = &visorbus_type;
868 drv->driver.probe = visordriver_probe_device; 873 drv->driver.probe = visordriver_probe_device;
@@ -885,6 +890,8 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
885 if (rc < 0) 890 if (rc < 0)
886 return rc; 891 return rc;
887 rc = register_driver_attributes(drv); 892 rc = register_driver_attributes(drv);
893 if (rc < 0)
894 driver_unregister(&drv->driver);
888 return rc; 895 return rc;
889} 896}
890EXPORT_SYMBOL_GPL(visorbus_register_visor_driver); 897EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
@@ -1260,10 +1267,8 @@ remove_bus_instance(struct visor_device *dev)
1260static int 1267static int
1261create_bus_type(void) 1268create_bus_type(void)
1262{ 1269{
1263 int rc = 0; 1270 busreg_rc = bus_register(&visorbus_type);
1264 1271 return busreg_rc;
1265 rc = bus_register(&visorbus_type);
1266 return rc;
1267} 1272}
1268 1273
1269/** Remove the one-and-only one instance of the visor bus type (visorbus_type). 1274/** Remove the one-and-only one instance of the visor bus type (visorbus_type).
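Taken together, the visorbus_main.c hunks make driver registration fail-safe in two directions: drivers cannot register against a bus that never came up, and a half-registered driver is unwound if its attributes cannot be created. Condensed from the hunks above (register_driver_attributes is the driver's own helper):

	static int busreg_rc = -ENODEV;	/* result of bus_register() */

	static int create_bus_type(void)
	{
		busreg_rc = bus_register(&visorbus_type);
		return busreg_rc;
	}

	int visorbus_register_visor_driver(struct visor_driver *drv)
	{
		int rc;

		if (busreg_rc < 0)
			return -ENODEV;	/* no bus to register on */

		/* ... fill in drv->driver ... */
		rc = driver_register(&drv->driver);
		if (rc < 0)
			return rc;
		rc = register_driver_attributes(drv);
		if (rc < 0)
			driver_unregister(&drv->driver);	/* unwind */
		return rc;
	}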
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 8c9da7ea7845..9d3c1e282062 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1189,16 +1189,16 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
1189 spin_lock_irqsave(&devdata->priv_lock, flags); 1189 spin_lock_irqsave(&devdata->priv_lock, flags);
1190 atomic_dec(&devdata->num_rcvbuf_in_iovm); 1190 atomic_dec(&devdata->num_rcvbuf_in_iovm);
1191 1191
1192 /* update rcv stats - call it with priv_lock held */
1193 devdata->net_stats.rx_packets++;
1194 devdata->net_stats.rx_bytes = skb->len;
1195
1196 /* set length to how much was ACTUALLY received - 1192 /* set length to how much was ACTUALLY received -
1197 * NOTE: rcv_done_len includes actual length of data rcvd 1193 * NOTE: rcv_done_len includes actual length of data rcvd
1198 * including ethhdr 1194 * including ethhdr
1199 */ 1195 */
1200 skb->len = cmdrsp->net.rcv.rcv_done_len; 1196 skb->len = cmdrsp->net.rcv.rcv_done_len;
1201 1197
1198 /* update rcv stats - call it with priv_lock held */
1199 devdata->net_stats.rx_packets++;
1200 devdata->net_stats.rx_bytes += skb->len;
1201
1202 /* test enabled while holding lock */ 1202 /* test enabled while holding lock */
1203 if (!(devdata->enabled && devdata->enab_dis_acked)) { 1203 if (!(devdata->enabled && devdata->enab_dis_acked)) {
1204 /* don't process it unless we're in enable mode and until 1204 /* don't process it unless we're in enable mode and until
@@ -1924,13 +1924,16 @@ static int visornic_probe(struct visor_device *dev)
1924 "%s debugfs_create_dir %s failed\n", 1924 "%s debugfs_create_dir %s failed\n",
1925 __func__, netdev->name); 1925 __func__, netdev->name);
1926 err = -ENOMEM; 1926 err = -ENOMEM;
1927 goto cleanup_xmit_cmdrsp; 1927 goto cleanup_register_netdev;
1928 } 1928 }
1929 1929
1930 dev_info(&dev->device, "%s success netdev=%s\n", 1930 dev_info(&dev->device, "%s success netdev=%s\n",
1931 __func__, netdev->name); 1931 __func__, netdev->name);
1932 return 0; 1932 return 0;
1933 1933
1934cleanup_register_netdev:
1935 unregister_netdev(netdev);
1936
1934cleanup_napi_add: 1937cleanup_napi_add:
1935 del_timer_sync(&devdata->irq_poll_timer); 1938 del_timer_sync(&devdata->irq_poll_timer);
1936 netif_napi_del(&devdata->napi); 1939 netif_napi_del(&devdata->napi);
@@ -2128,8 +2131,9 @@ static int visornic_init(void)
2128 if (!dev_num_pool) 2131 if (!dev_num_pool)
2129 goto cleanup_workqueue; 2132 goto cleanup_workqueue;
2130 2133
2131 visorbus_register_visor_driver(&visornic_driver); 2134 err = visorbus_register_visor_driver(&visornic_driver);
2132 return 0; 2135 if (!err)
2136 return 0;
2133 2137
2134cleanup_workqueue: 2138cleanup_workqueue:
2135 if (visornic_timeout_reset_workqueue) { 2139 if (visornic_timeout_reset_workqueue) {
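With visorbus_register_visor_driver() now able to fail, the module init path has to propagate its result and unwind earlier allocations instead of unconditionally returning success. A minimal sketch of that idiom (the setup/teardown helpers are illustrative):

	static int __init example_init(void)
	{
		int err;

		err = setup_resources();	/* illustrative */
		if (err)
			return err;

		err = visorbus_register_visor_driver(&visornic_driver);
		if (err)
			goto cleanup;	/* registration may fail now */

		return 0;

	cleanup:
		teardown_resources();	/* illustrative, reverse order */
		return err;
	}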
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e8a52f7d6204..51d1734d5390 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -407,6 +407,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
407 TYPERANGE_UTF8, USE_INITIAL_ONLY); 407 TYPERANGE_UTF8, USE_INITIAL_ONLY);
408 if (!param) 408 if (!param)
409 goto out; 409 goto out;
410
410 /* 411 /*
411 * Extra parameters for ISER from RFC-5046 412 * Extra parameters for ISER from RFC-5046
412 */ 413 */
@@ -496,9 +497,9 @@ int iscsi_set_keys_to_negotiate(
496 } else if (!strcmp(param->name, SESSIONTYPE)) { 497 } else if (!strcmp(param->name, SESSIONTYPE)) {
497 SET_PSTATE_NEGOTIATE(param); 498 SET_PSTATE_NEGOTIATE(param);
498 } else if (!strcmp(param->name, IFMARKER)) { 499 } else if (!strcmp(param->name, IFMARKER)) {
499 SET_PSTATE_NEGOTIATE(param); 500 SET_PSTATE_REJECT(param);
500 } else if (!strcmp(param->name, OFMARKER)) { 501 } else if (!strcmp(param->name, OFMARKER)) {
501 SET_PSTATE_NEGOTIATE(param); 502 SET_PSTATE_REJECT(param);
502 } else if (!strcmp(param->name, IFMARKINT)) { 503 } else if (!strcmp(param->name, IFMARKINT)) {
503 SET_PSTATE_REJECT(param); 504 SET_PSTATE_REJECT(param);
504 } else if (!strcmp(param->name, OFMARKINT)) { 505 } else if (!strcmp(param->name, OFMARKINT)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index dcc424ac35d4..88ea4e4f124b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -62,22 +62,13 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
62 struct se_session *se_sess = se_cmd->se_sess; 62 struct se_session *se_sess = se_cmd->se_sess;
63 struct se_node_acl *nacl = se_sess->se_node_acl; 63 struct se_node_acl *nacl = se_sess->se_node_acl;
64 struct se_dev_entry *deve; 64 struct se_dev_entry *deve;
65 sense_reason_t ret = TCM_NO_SENSE;
65 66
66 rcu_read_lock(); 67 rcu_read_lock();
67 deve = target_nacl_find_deve(nacl, unpacked_lun); 68 deve = target_nacl_find_deve(nacl, unpacked_lun);
68 if (deve) { 69 if (deve) {
69 atomic_long_inc(&deve->total_cmds); 70 atomic_long_inc(&deve->total_cmds);
70 71
71 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
72 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
73 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
74 " Access for 0x%08llx\n",
75 se_cmd->se_tfo->get_fabric_name(),
76 unpacked_lun);
77 rcu_read_unlock();
78 return TCM_WRITE_PROTECTED;
79 }
80
81 if (se_cmd->data_direction == DMA_TO_DEVICE) 72 if (se_cmd->data_direction == DMA_TO_DEVICE)
82 atomic_long_add(se_cmd->data_length, 73 atomic_long_add(se_cmd->data_length,
83 &deve->write_bytes); 74 &deve->write_bytes);
@@ -93,6 +84,17 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
93 84
94 percpu_ref_get(&se_lun->lun_ref); 85 percpu_ref_get(&se_lun->lun_ref);
95 se_cmd->lun_ref_active = true; 86 se_cmd->lun_ref_active = true;
87
88 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
89 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
90 pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
91 " Access for 0x%08llx\n",
92 se_cmd->se_tfo->get_fabric_name(),
93 unpacked_lun);
94 rcu_read_unlock();
95 ret = TCM_WRITE_PROTECTED;
96 goto ref_dev;
97 }
96 } 98 }
97 rcu_read_unlock(); 99 rcu_read_unlock();
98 100
@@ -109,12 +111,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
109 unpacked_lun); 111 unpacked_lun);
110 return TCM_NON_EXISTENT_LUN; 112 return TCM_NON_EXISTENT_LUN;
111 } 113 }
112 /*
113 * Force WRITE PROTECT for virtual LUN 0
114 */
115 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
116 (se_cmd->data_direction != DMA_NONE))
117 return TCM_WRITE_PROTECTED;
118 114
119 se_lun = se_sess->se_tpg->tpg_virt_lun0; 115 se_lun = se_sess->se_tpg->tpg_virt_lun0;
120 se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0; 116 se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
@@ -123,6 +119,15 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
123 119
124 percpu_ref_get(&se_lun->lun_ref); 120 percpu_ref_get(&se_lun->lun_ref);
125 se_cmd->lun_ref_active = true; 121 se_cmd->lun_ref_active = true;
122
123 /*
124 * Force WRITE PROTECT for virtual LUN 0
125 */
126 if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
127 (se_cmd->data_direction != DMA_NONE)) {
128 ret = TCM_WRITE_PROTECTED;
129 goto ref_dev;
130 }
126 } 131 }
127 /* 132 /*
128 * RCU reference protected by percpu se_lun->lun_ref taken above that 133 * RCU reference protected by percpu se_lun->lun_ref taken above that
@@ -130,6 +135,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
130 * pointer can be kfree_rcu() by the final se_lun->lun_group put via 135 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
131 * target_core_fabric_configfs.c:target_fabric_port_release 136 * target_core_fabric_configfs.c:target_fabric_port_release
132 */ 137 */
138ref_dev:
133 se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); 139 se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
134 atomic_long_inc(&se_cmd->se_dev->num_cmds); 140 atomic_long_inc(&se_cmd->se_dev->num_cmds);
135 141
@@ -140,7 +146,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
140 atomic_long_add(se_cmd->data_length, 146 atomic_long_add(se_cmd->data_length,
141 &se_cmd->se_dev->read_bytes); 147 &se_cmd->se_dev->read_bytes);
142 148
143 return 0; 149 return ret;
144} 150}
145EXPORT_SYMBOL(transport_lookup_cmd_lun); 151EXPORT_SYMBOL(transport_lookup_cmd_lun);
146 152
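The transport_lookup_cmd_lun() rework changes write-protect handling from an early return into "record the sense code, then fall through": the LUN reference is taken first, and both the read-only check and the virtual LUN 0 check jump to ref_dev so the command is still attached to and accounted against the device. The resulting shape, heavily simplified (write_protected() is an illustrative predicate):

	sense_reason_t ret = TCM_NO_SENSE;

	percpu_ref_get(&se_lun->lun_ref);	/* held in every outcome */
	se_cmd->lun_ref_active = true;

	if (write_protected(se_cmd)) {
		ret = TCM_WRITE_PROTECTED;
		goto ref_dev;			/* still attach and account */
	}
	/* ... */
	ref_dev:
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	atomic_long_inc(&se_cmd->se_dev->num_cmds);
	return ret;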
@@ -427,8 +433,6 @@ void core_disable_device_list_for_node(
427 433
428 hlist_del_rcu(&orig->link); 434 hlist_del_rcu(&orig->link);
429 clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags); 435 clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
430 rcu_assign_pointer(orig->se_lun, NULL);
431 rcu_assign_pointer(orig->se_lun_acl, NULL);
432 orig->lun_flags = 0; 436 orig->lun_flags = 0;
433 orig->creation_time = 0; 437 orig->creation_time = 0;
434 orig->attach_count--; 438 orig->attach_count--;
@@ -439,6 +443,9 @@ void core_disable_device_list_for_node(
439 kref_put(&orig->pr_kref, target_pr_kref_release); 443 kref_put(&orig->pr_kref, target_pr_kref_release);
440 wait_for_completion(&orig->pr_comp); 444 wait_for_completion(&orig->pr_comp);
441 445
446 rcu_assign_pointer(orig->se_lun, NULL);
447 rcu_assign_pointer(orig->se_lun_acl, NULL);
448
442 kfree_rcu(orig, rcu_head); 449 kfree_rcu(orig, rcu_head);
443 450
444 core_scsi3_free_pr_reg_from_nacl(dev, nacl); 451 core_scsi3_free_pr_reg_from_nacl(dev, nacl);
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 9522960c7fdd..22390e0e046c 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -187,5 +187,5 @@ core_delete_hba(struct se_hba *hba)
187 187
188bool target_sense_desc_format(struct se_device *dev) 188bool target_sense_desc_format(struct se_device *dev)
189{ 189{
190 return dev->transport->get_blocks(dev) > U32_MAX; 190 return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
191} 191}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a9982f5d5d6..0f19e11acac2 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -105,6 +105,8 @@ static int iblock_configure_device(struct se_device *dev)
105 mode = FMODE_READ|FMODE_EXCL; 105 mode = FMODE_READ|FMODE_EXCL;
106 if (!ib_dev->ibd_readonly) 106 if (!ib_dev->ibd_readonly)
107 mode |= FMODE_WRITE; 107 mode |= FMODE_WRITE;
108 else
109 dev->dev_flags |= DF_READ_ONLY;
108 110
109 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); 111 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
110 if (IS_ERR(bd)) { 112 if (IS_ERR(bd)) {
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5ab7100de17e..e7933115087a 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -618,7 +618,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
618 struct se_device *dev, 618 struct se_device *dev,
619 struct se_node_acl *nacl, 619 struct se_node_acl *nacl,
620 struct se_lun *lun, 620 struct se_lun *lun,
621 struct se_dev_entry *deve, 621 struct se_dev_entry *dest_deve,
622 u64 mapped_lun, 622 u64 mapped_lun,
623 unsigned char *isid, 623 unsigned char *isid,
624 u64 sa_res_key, 624 u64 sa_res_key,
@@ -640,7 +640,29 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
640 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list); 640 INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
641 atomic_set(&pr_reg->pr_res_holders, 0); 641 atomic_set(&pr_reg->pr_res_holders, 0);
642 pr_reg->pr_reg_nacl = nacl; 642 pr_reg->pr_reg_nacl = nacl;
643 pr_reg->pr_reg_deve = deve; 643 /*
644 * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
645 * the se_dev_entry->pr_ref will have been already obtained by
646 * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
647 *
648 * Otherwise, locate se_dev_entry now and obtain a reference until
649 * registration completes in __core_scsi3_add_registration().
650 */
651 if (dest_deve) {
652 pr_reg->pr_reg_deve = dest_deve;
653 } else {
654 rcu_read_lock();
655 pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
656 if (!pr_reg->pr_reg_deve) {
657 rcu_read_unlock();
658 pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
659 nacl->initiatorname, mapped_lun);
660 kmem_cache_free(t10_pr_reg_cache, pr_reg);
661 return NULL;
662 }
663 kref_get(&pr_reg->pr_reg_deve->pr_kref);
664 rcu_read_unlock();
665 }
644 pr_reg->pr_res_mapped_lun = mapped_lun; 666 pr_reg->pr_res_mapped_lun = mapped_lun;
645 pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; 667 pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
646 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; 668 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
@@ -936,17 +958,29 @@ static int __core_scsi3_check_aptpl_registration(
936 !(strcmp(pr_reg->pr_tport, t_port)) && 958 !(strcmp(pr_reg->pr_tport, t_port)) &&
937 (pr_reg->pr_reg_tpgt == tpgt) && 959 (pr_reg->pr_reg_tpgt == tpgt) &&
938 (pr_reg->pr_aptpl_target_lun == target_lun)) { 960 (pr_reg->pr_aptpl_target_lun == target_lun)) {
961 /*
962 * Obtain the ->pr_reg_deve pointer + reference, that
963 * is released by __core_scsi3_add_registration() below.
964 */
965 rcu_read_lock();
966 pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
967 if (!pr_reg->pr_reg_deve) {
968 pr_err("Unable to locate PR APTPL %s mapped_lun:"
969 " %llu\n", nacl->initiatorname, mapped_lun);
970 rcu_read_unlock();
971 continue;
972 }
973 kref_get(&pr_reg->pr_reg_deve->pr_kref);
974 rcu_read_unlock();
939 975
940 pr_reg->pr_reg_nacl = nacl; 976 pr_reg->pr_reg_nacl = nacl;
941 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; 977 pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
942
943 list_del(&pr_reg->pr_reg_aptpl_list); 978 list_del(&pr_reg->pr_reg_aptpl_list);
944 spin_unlock(&pr_tmpl->aptpl_reg_lock); 979 spin_unlock(&pr_tmpl->aptpl_reg_lock);
945 /* 980 /*
946 * At this point all of the pointers in *pr_reg will 981 * At this point all of the pointers in *pr_reg will
947 * be setup, so go ahead and add the registration. 982 * be setup, so go ahead and add the registration.
948 */ 983 */
949
950 __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); 984 __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
951 /* 985 /*
952 * If this registration is the reservation holder, 986 * If this registration is the reservation holder,
@@ -1044,18 +1078,11 @@ static void __core_scsi3_add_registration(
1044 1078
1045 __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); 1079 __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
1046 spin_unlock(&pr_tmpl->registration_lock); 1080 spin_unlock(&pr_tmpl->registration_lock);
1047
1048 rcu_read_lock();
1049 deve = pr_reg->pr_reg_deve;
1050 if (deve)
1051 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
1052 rcu_read_unlock();
1053
1054 /* 1081 /*
1055 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. 1082 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
1056 */ 1083 */
1057 if (!pr_reg->pr_reg_all_tg_pt || register_move) 1084 if (!pr_reg->pr_reg_all_tg_pt || register_move)
1058 return; 1085 goto out;
1059 /* 1086 /*
1060 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 1087 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
1061 * allocated in __core_scsi3_alloc_registration() 1088 * allocated in __core_scsi3_alloc_registration()
@@ -1075,19 +1102,31 @@ static void __core_scsi3_add_registration(
1075 __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, 1102 __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
1076 register_type); 1103 register_type);
1077 spin_unlock(&pr_tmpl->registration_lock); 1104 spin_unlock(&pr_tmpl->registration_lock);
1078 1105 /*
1106 * Drop configfs group dependency reference and deve->pr_kref
1107 * obtained from __core_scsi3_alloc_registration() code.
1108 */
1079 rcu_read_lock(); 1109 rcu_read_lock();
1080 deve = pr_reg_tmp->pr_reg_deve; 1110 deve = pr_reg_tmp->pr_reg_deve;
1081 if (deve) 1111 if (deve) {
1082 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags); 1112 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
1113 core_scsi3_lunacl_undepend_item(deve);
1114 pr_reg_tmp->pr_reg_deve = NULL;
1115 }
1083 rcu_read_unlock(); 1116 rcu_read_unlock();
1084
1085 /*
1086 * Drop configfs group dependency reference from
1087 * __core_scsi3_alloc_registration()
1088 */
1089 core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
1090 } 1117 }
1118out:
1119 /*
1120 * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
1121 */
1122 rcu_read_lock();
1123 deve = pr_reg->pr_reg_deve;
1124 if (deve) {
1125 set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
1126 kref_put(&deve->pr_kref, target_pr_kref_release);
1127 pr_reg->pr_reg_deve = NULL;
1128 }
1129 rcu_read_unlock();
1091} 1130}
1092 1131
1093static int core_scsi3_alloc_registration( 1132static int core_scsi3_alloc_registration(
@@ -1785,9 +1824,11 @@ core_scsi3_decode_spec_i_port(
1785 dest_node_acl->initiatorname, i_buf, (dest_se_deve) ? 1824 dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
1786 dest_se_deve->mapped_lun : 0); 1825 dest_se_deve->mapped_lun : 0);
1787 1826
1788 if (!dest_se_deve) 1827 if (!dest_se_deve) {
1828 kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
1829 target_pr_kref_release);
1789 continue; 1830 continue;
1790 1831 }
1791 core_scsi3_lunacl_undepend_item(dest_se_deve); 1832 core_scsi3_lunacl_undepend_item(dest_se_deve);
1792 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1833 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1793 core_scsi3_tpg_undepend_item(dest_tpg); 1834 core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1823,9 +1864,11 @@ out:
1823 1864
1824 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); 1865 kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
1825 1866
1826 if (!dest_se_deve) 1867 if (!dest_se_deve) {
1868 kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
1869 target_pr_kref_release);
1827 continue; 1870 continue;
1828 1871 }
1829 core_scsi3_lunacl_undepend_item(dest_se_deve); 1872 core_scsi3_lunacl_undepend_item(dest_se_deve);
1830 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1873 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1831 core_scsi3_tpg_undepend_item(dest_tpg); 1874 core_scsi3_tpg_undepend_item(dest_tpg);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2d0381dd105c..5fb9dd7f08bb 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -668,7 +668,10 @@ int core_tpg_add_lun(
668 list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list); 668 list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
669 spin_unlock(&dev->se_port_lock); 669 spin_unlock(&dev->se_port_lock);
670 670
671 lun->lun_access = lun_access; 671 if (dev->dev_flags & DF_READ_ONLY)
672 lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
673 else
674 lun->lun_access = lun_access;
672 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) 675 if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
673 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist); 676 hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
674 mutex_unlock(&tpg->tpg_lun_mutex); 677 mutex_unlock(&tpg->tpg_lun_mutex);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 039004400987..5aabc4bc0d75 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -163,7 +163,7 @@ config THERMAL_EMULATION
163 163
164config HISI_THERMAL 164config HISI_THERMAL
165 tristate "Hisilicon thermal driver" 165 tristate "Hisilicon thermal driver"
166 depends on ARCH_HISI && CPU_THERMAL && OF 166 depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
167 help 167 help
168 Enable this to plug hisilicon's thermal sensor driver into the Linux 168 Enable this to plug hisilicon's thermal sensor driver into the Linux
169 thermal framework. cpufreq is used as the cooling device to throttle 169 thermal framework. cpufreq is used as the cooling device to throttle
@@ -182,7 +182,7 @@ config IMX_THERMAL
182 182
183config SPEAR_THERMAL 183config SPEAR_THERMAL
184 bool "SPEAr thermal sensor driver" 184 bool "SPEAr thermal sensor driver"
185 depends on PLAT_SPEAR 185 depends on PLAT_SPEAR || COMPILE_TEST
186 depends on OF 186 depends on OF
187 help 187 help
188 Enable this to plug the SPEAr thermal sensor driver into the Linux 188 Enable this to plug the SPEAr thermal sensor driver into the Linux
@@ -190,7 +190,7 @@ config SPEAR_THERMAL
190 190
191config ROCKCHIP_THERMAL 191config ROCKCHIP_THERMAL
192 tristate "Rockchip thermal driver" 192 tristate "Rockchip thermal driver"
193 depends on ARCH_ROCKCHIP 193 depends on ARCH_ROCKCHIP || COMPILE_TEST
194 depends on RESET_CONTROLLER 194 depends on RESET_CONTROLLER
195 help 195 help
196 Rockchip thermal driver provides support for Temperature sensor 196 Rockchip thermal driver provides support for Temperature sensor
@@ -208,7 +208,7 @@ config RCAR_THERMAL
208 208
209config KIRKWOOD_THERMAL 209config KIRKWOOD_THERMAL
210 tristate "Temperature sensor on Marvell Kirkwood SoCs" 210 tristate "Temperature sensor on Marvell Kirkwood SoCs"
211 depends on MACH_KIRKWOOD 211 depends on MACH_KIRKWOOD || COMPILE_TEST
212 depends on OF 212 depends on OF
213 help 213 help
214 Support for the Kirkwood thermal sensor driver into the Linux thermal 214 Support for the Kirkwood thermal sensor driver into the Linux thermal
@@ -216,7 +216,7 @@ config KIRKWOOD_THERMAL
216 216
217config DOVE_THERMAL 217config DOVE_THERMAL
218 tristate "Temperature sensor on Marvell Dove SoCs" 218 tristate "Temperature sensor on Marvell Dove SoCs"
219 depends on ARCH_DOVE || MACH_DOVE 219 depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST
220 depends on OF 220 depends on OF
221 help 221 help
222 Support for the Dove thermal sensor driver in the Linux thermal 222 Support for the Dove thermal sensor driver in the Linux thermal
@@ -234,7 +234,7 @@ config DB8500_THERMAL
234 234
235config ARMADA_THERMAL 235config ARMADA_THERMAL
236 tristate "Armada 370/XP thermal management" 236 tristate "Armada 370/XP thermal management"
237 depends on ARCH_MVEBU 237 depends on ARCH_MVEBU || COMPILE_TEST
238 depends on OF 238 depends on OF
239 help 239 help
240 Enable this option if you want to have support for thermal management 240 Enable this option if you want to have support for thermal management
@@ -349,11 +349,12 @@ config INTEL_PCH_THERMAL
349 programmable trip points and other information. 349 programmable trip points and other information.
350 350
351menu "Texas Instruments thermal drivers" 351menu "Texas Instruments thermal drivers"
352depends on ARCH_HAS_BANDGAP || COMPILE_TEST
352source "drivers/thermal/ti-soc-thermal/Kconfig" 353source "drivers/thermal/ti-soc-thermal/Kconfig"
353endmenu 354endmenu
354 355
355menu "Samsung thermal drivers" 356menu "Samsung thermal drivers"
356depends on ARCH_EXYNOS 357depends on ARCH_EXYNOS || COMPILE_TEST
357source "drivers/thermal/samsung/Kconfig" 358source "drivers/thermal/samsung/Kconfig"
358endmenu 359endmenu
359 360
@@ -364,7 +365,7 @@ endmenu
364 365
365config QCOM_SPMI_TEMP_ALARM 366config QCOM_SPMI_TEMP_ALARM
366 tristate "Qualcomm SPMI PMIC Temperature Alarm" 367 tristate "Qualcomm SPMI PMIC Temperature Alarm"
367 depends on OF && SPMI && IIO 368 depends on OF && (SPMI || COMPILE_TEST) && IIO
368 select REGMAP_SPMI 369 select REGMAP_SPMI
369 help 370 help
370 This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) 371 This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 620dcd405ff6..42c6f71bdcc1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -262,7 +262,9 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
262 * efficiently. Power is stored in mW, frequency in KHz. The 262 * efficiently. Power is stored in mW, frequency in KHz. The
263 * resulting table is in ascending order. 263 * resulting table is in ascending order.
264 * 264 *
265 * Return: 0 on success, -E* on error. 265 * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
266 * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
267 * added/enabled while the function was executing.
266 */ 268 */
267static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, 269static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
268 u32 capacitance) 270 u32 capacitance)
@@ -273,8 +275,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
273 int num_opps = 0, cpu, i, ret = 0; 275 int num_opps = 0, cpu, i, ret = 0;
274 unsigned long freq; 276 unsigned long freq;
275 277
276 rcu_read_lock();
277
278 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { 278 for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
279 dev = get_cpu_device(cpu); 279 dev = get_cpu_device(cpu);
280 if (!dev) { 280 if (!dev) {
@@ -284,24 +284,20 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
284 } 284 }
285 285
286 num_opps = dev_pm_opp_get_opp_count(dev); 286 num_opps = dev_pm_opp_get_opp_count(dev);
287 if (num_opps > 0) { 287 if (num_opps > 0)
288 break; 288 break;
289 } else if (num_opps < 0) { 289 else if (num_opps < 0)
290 ret = num_opps; 290 return num_opps;
291 goto unlock;
292 }
293 } 291 }
294 292
295 if (num_opps == 0) { 293 if (num_opps == 0)
296 ret = -EINVAL; 294 return -EINVAL;
297 goto unlock;
298 }
299 295
300 power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); 296 power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
301 if (!power_table) { 297 if (!power_table)
302 ret = -ENOMEM; 298 return -ENOMEM;
303 goto unlock; 299
304 } 300 rcu_read_lock();
305 301
306 for (freq = 0, i = 0; 302 for (freq = 0, i = 0;
307 opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); 303 opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
@@ -309,6 +305,12 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
309 u32 freq_mhz, voltage_mv; 305 u32 freq_mhz, voltage_mv;
310 u64 power; 306 u64 power;
311 307
308 if (i >= num_opps) {
309 rcu_read_unlock();
310 ret = -EAGAIN;
311 goto free_power_table;
312 }
313
312 freq_mhz = freq / 1000000; 314 freq_mhz = freq / 1000000;
313 voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; 315 voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
314 316
@@ -326,17 +328,22 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
326 power_table[i].power = power; 328 power_table[i].power = power;
327 } 329 }
328 330
329 if (i == 0) { 331 rcu_read_unlock();
332
333 if (i != num_opps) {
330 ret = PTR_ERR(opp); 334 ret = PTR_ERR(opp);
331 goto unlock; 335 goto free_power_table;
332 } 336 }
333 337
334 cpufreq_device->cpu_dev = dev; 338 cpufreq_device->cpu_dev = dev;
335 cpufreq_device->dyn_power_table = power_table; 339 cpufreq_device->dyn_power_table = power_table;
336 cpufreq_device->dyn_power_table_entries = i; 340 cpufreq_device->dyn_power_table_entries = i;
337 341
338unlock: 342 return 0;
339 rcu_read_unlock(); 343
344free_power_table:
345 kfree(power_table);
346
340 return ret; 347 return ret;
341} 348}
342 349
@@ -847,7 +854,7 @@ __cpufreq_cooling_register(struct device_node *np,
847 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); 854 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
848 if (ret) { 855 if (ret) {
849 cool_dev = ERR_PTR(ret); 856 cool_dev = ERR_PTR(ret);
850 goto free_table; 857 goto free_power_table;
851 } 858 }
852 859
853 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", 860 snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -889,6 +896,8 @@ __cpufreq_cooling_register(struct device_node *np,
889 896
890remove_idr: 897remove_idr:
891 release_idr(&cpufreq_idr, cpufreq_dev->id); 898 release_idr(&cpufreq_idr, cpufreq_dev->id);
899free_power_table:
900 kfree(cpufreq_dev->dyn_power_table);
892free_table: 901free_table:
893 kfree(cpufreq_dev->freq_table); 902 kfree(cpufreq_dev->freq_table);
894free_time_in_idle_timestamp: 903free_time_in_idle_timestamp:
@@ -1039,6 +1048,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1039 1048
1040 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1049 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
1041 release_idr(&cpufreq_idr, cpufreq_dev->id); 1050 release_idr(&cpufreq_idr, cpufreq_dev->id);
1051 kfree(cpufreq_dev->dyn_power_table);
1042 kfree(cpufreq_dev->time_in_idle_timestamp); 1052 kfree(cpufreq_dev->time_in_idle_timestamp);
1043 kfree(cpufreq_dev->time_in_idle); 1053 kfree(cpufreq_dev->time_in_idle);
1044 kfree(cpufreq_dev->freq_table); 1054 kfree(cpufreq_dev->freq_table);
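The build_dyn_power_table() rework exists because kcalloc(..., GFP_KERNEL) may sleep and therefore must not run under rcu_read_lock(); the RCU section is narrowed to just the OPP iteration (which required it in this era of the OPP API), and the walk is bounded by the previously counted num_opps so a concurrently added OPP yields -EAGAIN rather than an overrun. Condensed from the hunks above:

	num_opps = dev_pm_opp_get_opp_count(dev);
	if (num_opps <= 0)
		return num_opps ? num_opps : -EINVAL;

	power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
	if (!power_table)
		return -ENOMEM;		/* allocated outside the RCU section */

	rcu_read_lock();		/* protects the OPP traversal */
	for (freq = 0, i = 0;
	     opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
	     freq++, i++) {
		if (i >= num_opps) {	/* table grew under us */
			rcu_read_unlock();
			kfree(power_table);
			return -EAGAIN;
		}
		/* ... record frequency and power for entry i ... */
	}
	rcu_read_unlock();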
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 607b62c7e611..e58bd0b658b5 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -72,6 +72,7 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
72 { .compatible = "stericsson,db8500-cpufreq-cooling" }, 72 { .compatible = "stericsson,db8500-cpufreq-cooling" },
73 {}, 73 {},
74}; 74};
75MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);
75#endif 76#endif
76 77
77static struct platform_driver db8500_cpufreq_cooling_driver = { 78static struct platform_driver db8500_cpufreq_cooling_driver = {
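Adding MODULE_DEVICE_TABLE(of, ...) publishes the compatible strings in the module's alias table, which is what lets userspace (udev/modprobe) autoload the module when a matching device tree node appears; without it the driver only binds if loaded by hand. The complete pattern:

	#include <linux/mod_devicetable.h>
	#include <linux/of.h>

	static const struct of_device_id db8500_cpufreq_cooling_match[] = {
		{ .compatible = "stericsson,db8500-cpufreq-cooling" },
		{},	/* sentinel */
	};
	/* emits an "of:N*T*Cstericsson,db8500-cpufreq-cooling" alias */
	MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);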
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 9c8a7aad0252..7ff96270c933 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -24,6 +24,8 @@
24 24
25#include "thermal_core.h" 25#include "thermal_core.h"
26 26
27#define INVALID_TRIP -1
28
27#define FRAC_BITS 10 29#define FRAC_BITS 10
28#define int_to_frac(x) ((x) << FRAC_BITS) 30#define int_to_frac(x) ((x) << FRAC_BITS)
29#define frac_to_int(x) ((x) >> FRAC_BITS) 31#define frac_to_int(x) ((x) >> FRAC_BITS)
@@ -56,16 +58,21 @@ static inline s64 div_frac(s64 x, s64 y)
56 58
57/** 59/**
58 * struct power_allocator_params - parameters for the power allocator governor 60 * struct power_allocator_params - parameters for the power allocator governor
61 * @allocated_tzp: whether we have allocated tzp for this thermal zone and
62 * it needs to be freed on unbind
59 * @err_integral: accumulated error in the PID controller. 63 * @err_integral: accumulated error in the PID controller.
60 * @prev_err: error in the previous iteration of the PID controller. 64 * @prev_err: error in the previous iteration of the PID controller.
61 * Used to calculate the derivative term. 65 * Used to calculate the derivative term.
62 * @trip_switch_on: first passive trip point of the thermal zone. The 66 * @trip_switch_on: first passive trip point of the thermal zone. The
63 * governor switches on when this trip point is crossed. 67 * governor switches on when this trip point is crossed.
68 * If the thermal zone only has one passive trip point,
69 * @trip_switch_on should be INVALID_TRIP.
64 * @trip_max_desired_temperature: last passive trip point of the thermal 70 * @trip_max_desired_temperature: last passive trip point of the thermal
65 * zone. The temperature we are 71 * zone. The temperature we are
66 * controlling for. 72 * controlling for.
67 */ 73 */
68struct power_allocator_params { 74struct power_allocator_params {
75 bool allocated_tzp;
69 s64 err_integral; 76 s64 err_integral;
70 s32 prev_err; 77 s32 prev_err;
71 int trip_switch_on; 78 int trip_switch_on;
@@ -73,6 +80,88 @@ struct power_allocator_params {
73}; 80};
74 81
75/** 82/**
83 * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
84 * @tz: thermal zone we are operating in
85 *
86 * For thermal zones that don't provide a sustainable_power in their
87 * thermal_zone_params, estimate one. Calculate it using the minimum
88 * power of all the cooling devices, as that yields a valid value that
89 * still provides some degree of functionality. For optimal performance of
90 * this governor, provide a sustainable_power in the thermal zone's
91 * thermal_zone_params.
92 */
93static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
94{
95 u32 sustainable_power = 0;
96 struct thermal_instance *instance;
97 struct power_allocator_params *params = tz->governor_data;
98
99 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
100 struct thermal_cooling_device *cdev = instance->cdev;
101 u32 min_power;
102
103 if (instance->trip != params->trip_max_desired_temperature)
104 continue;
105
106 if (power_actor_get_min_power(cdev, tz, &min_power))
107 continue;
108
109 sustainable_power += min_power;
110 }
111
112 return sustainable_power;
113}
114
115/**
116 * estimate_pid_constants() - Estimate the constants for the PID controller
117 * @tz: thermal zone for which to estimate the constants
118 * @sustainable_power: sustainable power for the thermal zone
119 * @trip_switch_on: trip point number for the switch on temperature
120 * @control_temp: target temperature for the power allocator governor
121 * @force: whether to force the update of the constants
122 *
123 * This function is used to update the estimation of the PID
124 * controller constants in struct thermal_zone_parameters.
125 * Sustainable power is provided in case it was estimated. The
126 * estimated sustainable_power should not be stored in the
127 * thermal_zone_parameters so it has to be passed explicitly to this
128 * function.
129 *
130 * If @force is not set, the values in the thermal zone's parameters
131 * are preserved if they are not zero. If @force is set, the values
132 * in thermal zone's parameters are overwritten.
133 */
134static void estimate_pid_constants(struct thermal_zone_device *tz,
135 u32 sustainable_power, int trip_switch_on,
136 int control_temp, bool force)
137{
138 int ret;
139 int switch_on_temp;
140 u32 temperature_threshold;
141
142 ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp);
143 if (ret)
144 switch_on_temp = 0;
145
146 temperature_threshold = control_temp - switch_on_temp;
147
148 if (!tz->tzp->k_po || force)
149 tz->tzp->k_po = int_to_frac(sustainable_power) /
150 temperature_threshold;
151
152 if (!tz->tzp->k_pu || force)
153 tz->tzp->k_pu = int_to_frac(2 * sustainable_power) /
154 temperature_threshold;
155
156 if (!tz->tzp->k_i || force)
157 tz->tzp->k_i = int_to_frac(10) / 1000;
158 /*
159 * The default for k_d and integral_cutoff is 0, so we can
160 * leave them as they are.
161 */
162}
163
164/**
76 * pid_controller() - PID controller 165 * pid_controller() - PID controller
77 * @tz: thermal zone we are operating in 166 * @tz: thermal zone we are operating in
78 * @current_temp: the current temperature in millicelsius 167 * @current_temp: the current temperature in millicelsius
@@ -98,10 +187,20 @@ static u32 pid_controller(struct thermal_zone_device *tz,
98{ 187{
99 s64 p, i, d, power_range; 188 s64 p, i, d, power_range;
100 s32 err, max_power_frac; 189 s32 err, max_power_frac;
190 u32 sustainable_power;
101 struct power_allocator_params *params = tz->governor_data; 191 struct power_allocator_params *params = tz->governor_data;
102 192
103 max_power_frac = int_to_frac(max_allocatable_power); 193 max_power_frac = int_to_frac(max_allocatable_power);
104 194
195 if (tz->tzp->sustainable_power) {
196 sustainable_power = tz->tzp->sustainable_power;
197 } else {
198 sustainable_power = estimate_sustainable_power(tz);
199 estimate_pid_constants(tz, sustainable_power,
200 params->trip_switch_on, control_temp,
201 true);
202 }
203
105 err = control_temp - current_temp; 204 err = control_temp - current_temp;
106 err = int_to_frac(err); 205 err = int_to_frac(err);
107 206
@@ -139,7 +238,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
139 power_range = p + i + d; 238 power_range = p + i + d;
140 239
141 /* feed-forward the known sustainable dissipatable power */ 240 /* feed-forward the known sustainable dissipatable power */
142 power_range = tz->tzp->sustainable_power + frac_to_int(power_range); 241 power_range = sustainable_power + frac_to_int(power_range);
143 242
144 power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power); 243 power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);
145 244
@@ -247,6 +346,11 @@ static int allocate_power(struct thermal_zone_device *tz,
247 } 346 }
248 } 347 }
249 348
349 if (!num_actors) {
350 ret = -ENODEV;
351 goto unlock;
352 }
353
250 /* 354 /*
251 * We need to allocate five arrays of the same size: 355 * We need to allocate five arrays of the same size:
252 * req_power, max_power, granted_power, extra_actor_power and 356 * req_power, max_power, granted_power, extra_actor_power and
@@ -340,43 +444,66 @@ unlock:
340 return ret; 444 return ret;
341} 445}
342 446
343static int get_governor_trips(struct thermal_zone_device *tz, 447/**
344 struct power_allocator_params *params) 448 * get_governor_trips() - get the number of the two trip points that are key for this governor
449 * @tz: thermal zone to operate on
450 * @params: pointer to private data for this governor
451 *
452 * The power allocator governor works optimally with two trip points:
453 * a "switch on" trip point and a "maximum desired temperature". These
454 * are defined as the first and last passive trip points.
455 *
456 * If there is only one trip point, then that's considered to be the
457 * "maximum desired temperature" trip point and the governor is always
458 * on. If there are no passive or active trip points, then the
459 * governor won't do anything. In fact, its throttle function
460 * won't be called at all.
461 */
462static void get_governor_trips(struct thermal_zone_device *tz,
463 struct power_allocator_params *params)
345{ 464{
346 int i, ret, last_passive; 465 int i, last_active, last_passive;
347 bool found_first_passive; 466 bool found_first_passive;
348 467
349 found_first_passive = false; 468 found_first_passive = false;
350 last_passive = -1; 469 last_active = INVALID_TRIP;
351 ret = -EINVAL; 470 last_passive = INVALID_TRIP;
352 471
353 for (i = 0; i < tz->trips; i++) { 472 for (i = 0; i < tz->trips; i++) {
354 enum thermal_trip_type type; 473 enum thermal_trip_type type;
474 int ret;
355 475
356 ret = tz->ops->get_trip_type(tz, i, &type); 476 ret = tz->ops->get_trip_type(tz, i, &type);
357 if (ret) 477 if (ret) {
358 return ret; 478 dev_warn(&tz->device,
479 "Failed to get trip point %d type: %d\n", i,
480 ret);
481 continue;
482 }
359 483
360 if (!found_first_passive) { 484 if (type == THERMAL_TRIP_PASSIVE) {
361 if (type == THERMAL_TRIP_PASSIVE) { 485 if (!found_first_passive) {
362 params->trip_switch_on = i; 486 params->trip_switch_on = i;
363 found_first_passive = true; 487 found_first_passive = true;
488 } else {
489 last_passive = i;
364 } 490 }
365 } else if (type == THERMAL_TRIP_PASSIVE) { 491 } else if (type == THERMAL_TRIP_ACTIVE) {
366 last_passive = i; 492 last_active = i;
367 } else { 493 } else {
368 break; 494 break;
369 } 495 }
370 } 496 }
371 497
372 if (last_passive != -1) { 498 if (last_passive != INVALID_TRIP) {
373 params->trip_max_desired_temperature = last_passive; 499 params->trip_max_desired_temperature = last_passive;
374 ret = 0; 500 } else if (found_first_passive) {
501 params->trip_max_desired_temperature = params->trip_switch_on;
502 params->trip_switch_on = INVALID_TRIP;
375 } else { 503 } else {
376 ret = -EINVAL; 504 params->trip_switch_on = INVALID_TRIP;
505 params->trip_max_desired_temperature = last_active;
377 } 506 }
378
379 return ret;
380} 507}
381 508
382static void reset_pid_controller(struct power_allocator_params *params) 509static void reset_pid_controller(struct power_allocator_params *params)
@@ -405,60 +532,45 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
  * power_allocator_bind() - bind the power_allocator governor to a thermal zone
  * @tz: thermal zone to bind it to
  *
- * Check that the thermal zone is valid for this governor, that is, it
- * has two thermal trips.  If so, initialize the PID controller
- * parameters and bind it to the thermal zone.
+ * Initialize the PID controller parameters and bind it to the thermal
+ * zone.
  *
- * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM
- * if we ran out of memory.
+ * Return: 0 on success, or -ENOMEM if we ran out of memory.
  */
 static int power_allocator_bind(struct thermal_zone_device *tz)
 {
 	int ret;
 	struct power_allocator_params *params;
-	int switch_on_temp, control_temp;
-	u32 temperature_threshold;
-
-	if (!tz->tzp || !tz->tzp->sustainable_power) {
-		dev_err(&tz->device,
-			"power_allocator: missing sustainable_power\n");
-		return -EINVAL;
-	}
+	int control_temp;
 
 	params = kzalloc(sizeof(*params), GFP_KERNEL);
 	if (!params)
 		return -ENOMEM;
 
-	ret = get_governor_trips(tz, params);
-	if (ret) {
-		dev_err(&tz->device,
-			"thermal zone %s has wrong trip setup for power allocator\n",
-			tz->type);
-		goto free;
-	}
+	if (!tz->tzp) {
+		tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
+		if (!tz->tzp) {
+			ret = -ENOMEM;
+			goto free_params;
+		}
 
-	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
-				     &switch_on_temp);
-	if (ret)
-		goto free;
+		params->allocated_tzp = true;
+	}
 
-	ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
-				     &control_temp);
-	if (ret)
-		goto free;
+	if (!tz->tzp->sustainable_power)
+		dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
 
-	temperature_threshold = control_temp - switch_on_temp;
+	get_governor_trips(tz, params);
 
-	tz->tzp->k_po = tz->tzp->k_po ?:
-		int_to_frac(tz->tzp->sustainable_power) / temperature_threshold;
-	tz->tzp->k_pu = tz->tzp->k_pu ?:
-		int_to_frac(2 * tz->tzp->sustainable_power) /
-		temperature_threshold;
-	tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000;
-	/*
-	 * The default for k_d and integral_cutoff is 0, so we can
-	 * leave them as they are.
-	 */
+	if (tz->trips > 0) {
+		ret = tz->ops->get_trip_temp(tz,
+					params->trip_max_desired_temperature,
+					&control_temp);
+		if (!ret)
+			estimate_pid_constants(tz, tz->tzp->sustainable_power,
+					       params->trip_switch_on,
+					       control_temp, false);
+	}
 
 	reset_pid_controller(params);
 
@@ -466,14 +578,23 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
 
 	return 0;
 
-free:
+free_params:
 	kfree(params);
+
 	return ret;
 }
 
 static void power_allocator_unbind(struct thermal_zone_device *tz)
 {
+	struct power_allocator_params *params = tz->governor_data;
+
 	dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
+
+	if (params->allocated_tzp) {
+		kfree(tz->tzp);
+		tz->tzp = NULL;
+	}
+
 	kfree(tz->governor_data);
 	tz->governor_data = NULL;
 }
@@ -499,13 +620,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
 
 	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
 				     &switch_on_temp);
-	if (ret) {
-		dev_warn(&tz->device,
-			 "Failed to get switch on temperature: %d\n", ret);
-		return ret;
-	}
-
-	if (current_temp < switch_on_temp) {
+	if (!ret && (current_temp < switch_on_temp)) {
 		tz->passive = 0;
 		reset_pid_controller(params);
 		allow_maximum_power(tz);
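The fallback logic above is easier to follow in isolation. Below is a minimal userspace sketch of the same trip selection; INVALID_TRIP, the trip array and the output parameters are stand-ins for the thermal framework types, not the driver's own definitions:

    #include <stdio.h>

    #define INVALID_TRIP -1

    enum trip_type { TRIP_ACTIVE, TRIP_PASSIVE, TRIP_OTHER };

    /* Prefer two passive trips, fall back to a single passive trip,
     * then to the last active trip, as get_governor_trips() now does. */
    static void pick_trips(const enum trip_type *trips, int n,
                           int *switch_on, int *max_desired)
    {
            int i, first_passive = INVALID_TRIP;
            int last_passive = INVALID_TRIP, last_active = INVALID_TRIP;

            for (i = 0; i < n; i++) {
                    if (trips[i] == TRIP_PASSIVE) {
                            if (first_passive == INVALID_TRIP)
                                    first_passive = i;
                            else
                                    last_passive = i;
                    } else if (trips[i] == TRIP_ACTIVE) {
                            last_active = i;
                    } else {
                            break;
                    }
            }

            if (last_passive != INVALID_TRIP) {
                    *switch_on = first_passive;
                    *max_desired = last_passive;
            } else if (first_passive != INVALID_TRIP) {
                    *switch_on = INVALID_TRIP;
                    *max_desired = first_passive;
            } else {
                    *switch_on = INVALID_TRIP;
                    *max_desired = last_active;
            }
    }

    int main(void)
    {
            enum trip_type trips[] = { TRIP_ACTIVE, TRIP_PASSIVE, TRIP_PASSIVE };
            int on, max;

            pick_trips(trips, 3, &on, &max);
            printf("switch_on=%d max_desired=%d\n", on, max); /* 1 and 2 */
            return 0;
    }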
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 5e5fc7015c7f..d9e525cc9c1c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1013,6 +1013,34 @@ int power_actor_get_max_power(struct thermal_cooling_device *cdev,
 }
 
 /**
+ * power_actor_get_min_power() - get the minimum power that a cdev can consume
+ * @cdev:	pointer to &thermal_cooling_device
+ * @tz:		a valid thermal zone device pointer
+ * @min_power:	pointer in which to store the minimum power
+ *
+ * Calculate the minimum power consumption in milliwatts that the
+ * cooling device can currently consume and store it in @min_power.
+ *
+ * Return: 0 on success, -EINVAL if @cdev doesn't support the
+ * power_actor API or -E* on other error.
+ */
+int power_actor_get_min_power(struct thermal_cooling_device *cdev,
+			      struct thermal_zone_device *tz, u32 *min_power)
+{
+	unsigned long max_state;
+	int ret;
+
+	if (!cdev_is_power_actor(cdev))
+		return -EINVAL;
+
+	ret = cdev->ops->get_max_state(cdev, &max_state);
+	if (ret)
+		return ret;
+
+	return cdev->ops->state2power(cdev, tz, max_state, min_power);
+}
+
+/**
  * power_actor_set_power() - limit the maximum power that a cooling device can consume
  * @cdev:	pointer to &thermal_cooling_device
  * @instance:	thermal instance to update
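A caller would typically pair the new helper with power_actor_get_max_power() to learn a cooling device's full power range. A hedged sketch of such a caller (error handling elided; the three-argument get_max_power signature is assumed from the hunk context above):

    u32 min_power, max_power;

    if (!power_actor_get_min_power(cdev, tz, &min_power) &&
        !power_actor_get_max_power(cdev, tz, &max_power))
            dev_dbg(&cdev->device, "power range: %u..%u mW\n",
                    min_power, max_power);

Note the naming: the "minimum" power corresponds to the deepest cooling state (max_state), i.e. the least the device can be made to consume.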
diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig
index bd4c7beba679..cb6686ff09ae 100644
--- a/drivers/thermal/ti-soc-thermal/Kconfig
+++ b/drivers/thermal/ti-soc-thermal/Kconfig
@@ -1,7 +1,5 @@
 config TI_SOC_THERMAL
 	tristate "Texas Instruments SoCs temperature sensor driver"
-	depends on THERMAL
-	depends on ARCH_HAS_BANDGAP
 	help
 	  If you say yes here you get support for the Texas Instruments
 	  OMAP4460+ on die bandgap temperature sensor support. The register
@@ -24,7 +22,7 @@ config TI_THERMAL
 config OMAP4_THERMAL
 	bool "Texas Instruments OMAP4 thermal support"
 	depends on TI_SOC_THERMAL
-	depends on ARCH_OMAP4
+	depends on ARCH_OMAP4 || COMPILE_TEST
 	help
 	  If you say yes here you get thermal support for the Texas Instruments
 	  OMAP4 SoC family. The current chip supported are:
@@ -38,7 +36,7 @@ config OMAP4_THERMAL
 config OMAP5_THERMAL
 	bool "Texas Instruments OMAP5 thermal support"
 	depends on TI_SOC_THERMAL
-	depends on SOC_OMAP5
+	depends on SOC_OMAP5 || COMPILE_TEST
 	help
 	  If you say yes here you get thermal support for the Texas Instruments
 	  OMAP5 SoC family. The current chip supported are:
@@ -50,7 +48,7 @@ config OMAP5_THERMAL
 config DRA752_THERMAL
 	bool "Texas Instruments DRA752 thermal support"
 	depends on TI_SOC_THERMAL
-	depends on SOC_DRA7XX
+	depends on SOC_DRA7XX || COMPILE_TEST
 	help
 	  If you say yes here you get thermal support for the Texas Instruments
 	  DRA752 SoC family. The current chip supported are:
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index c68fe1222c16..20a41f7de76f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -643,7 +643,7 @@ static struct pci_device_id nhi_ids[] = {
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
 		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
-		.subvendor = 0x2222, .subdevice = 0x1111,
+		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 	},
 	{ 0,}
 };
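PCI_ANY_ID is the standard wildcard in pci_device_id tables; matching on vendor/device while wildcarding the subsystem IDs can also be written with the PCI_DEVICE() helper. A sketch of the equivalent entry (the class filter from the table above is omitted for brevity):

    static const struct pci_device_id example_ids[] = {
            /* expands to a vendor/device match with PCI_ANY_ID subsystem IDs */
            { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x156c) },
            { 0, }
    };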
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 54e6c8ddef5d..b1e0ba3e525b 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2910,3 +2910,5 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
 }
 
 #endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 867e9f3f3859..dcc50c878159 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -61,7 +61,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
 	{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
 	{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
 	{ .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
-	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
+	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 9eae1a16cef9..4456d2cf80ff 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -12,6 +12,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/usb/chipidea.h>
@@ -30,18 +31,36 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
 	.flags		= CI_HDRC_DISABLE_STREAMING,
 };
 
+static struct ci_hdrc_platform_data ci_zynq_pdata = {
+	.capoffset	= DEF_CAPOFFSET,
+};
+
+static const struct of_device_id ci_hdrc_usb2_of_match[] = {
+	{ .compatible = "chipidea,usb2"},
+	{ .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
+
 static int ci_hdrc_usb2_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ci_hdrc_usb2_priv *priv;
 	struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
 	int ret;
+	const struct of_device_id *match;
 
 	if (!ci_pdata) {
 		ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
 		*ci_pdata = ci_default_pdata;	/* struct copy */
 	}
 
+	match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
+	if (match && match->data) {
+		/* struct copy */
+		*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+	}
+
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
@@ -96,12 +115,6 @@ static int ci_hdrc_usb2_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id ci_hdrc_usb2_of_match[] = {
-	{ .compatible = "chipidea,usb2" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
-
 static struct platform_driver ci_hdrc_usb2_driver = {
 	.probe	= ci_hdrc_usb2_probe,
 	.remove	= ci_hdrc_usb2_remove,
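The probe change above follows the common pattern of overriding default platform data with per-compatible OF match data. Reduced to its core (assuming the table and pdata definitions from the diff):

    const struct of_device_id *match;

    match = of_match_device(ci_hdrc_usb2_of_match, dev);
    if (match && match->data)       /* e.g. "xlnx,zynq-usb-2.20a" */
            *ci_pdata = *(const struct ci_hdrc_platform_data *)match->data;

Moving the table above the probe function is what makes this lookup possible without a forward declaration.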
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index a637da25dda0..8223fe73ea85 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -656,6 +656,44 @@ __acquires(hwep->lock)
 	return 0;
 }
 
+static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
+{
+	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	if (ep == NULL || hwep->ep.desc == NULL)
+		return -EINVAL;
+
+	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(hwep->lock, flags);
+
+	if (value && hwep->dir == TX && check_transfer &&
+	    !list_empty(&hwep->qh.queue) &&
+	    !usb_endpoint_xfer_control(hwep->ep.desc)) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return -EAGAIN;
+	}
+
+	direction = hwep->dir;
+	do {
+		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
+
+		if (!value)
+			hwep->wedge = 0;
+
+		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+			hwep->dir = (hwep->dir == TX) ? RX : TX;
+
+	} while (hwep->dir != direction);
+
+	spin_unlock_irqrestore(hwep->lock, flags);
+	return retval;
+}
+
+
 /**
  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
  * @gadget: gadget
@@ -1051,7 +1089,7 @@ __acquires(ci->lock)
 		num += ci->hw_ep_max / 2;
 
 	spin_unlock(&ci->lock);
-	err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+	err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
 	spin_lock(&ci->lock);
 	if (!err)
 		isr_setup_status_phase(ci);
@@ -1117,8 +1155,8 @@ delegate:
 
 	if (err < 0) {
 		spin_unlock(&ci->lock);
-		if (usb_ep_set_halt(&hwep->ep))
-			dev_err(ci->dev, "error: ep_set_halt\n");
+		if (_ep_set_halt(&hwep->ep, 1, false))
+			dev_err(ci->dev, "error: _ep_set_halt\n");
 		spin_lock(&ci->lock);
 	}
 }
@@ -1149,9 +1187,9 @@ __acquires(ci->lock)
 			err = isr_setup_status_phase(ci);
 			if (err < 0) {
 				spin_unlock(&ci->lock);
-				if (usb_ep_set_halt(&hwep->ep))
+				if (_ep_set_halt(&hwep->ep, 1, false))
 					dev_err(ci->dev,
-						"error: ep_set_halt\n");
+						"error: _ep_set_halt\n");
 				spin_lock(&ci->lock);
 			}
 		}
@@ -1397,41 +1435,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
  */
 static int ep_set_halt(struct usb_ep *ep, int value)
 {
-	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
-	int direction, retval = 0;
-	unsigned long flags;
-
-	if (ep == NULL || hwep->ep.desc == NULL)
-		return -EINVAL;
-
-	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
-		return -EOPNOTSUPP;
-
-	spin_lock_irqsave(hwep->lock, flags);
-
-#ifndef STALL_IN
-	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-	if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
-	    !list_empty(&hwep->qh.queue)) {
-		spin_unlock_irqrestore(hwep->lock, flags);
-		return -EAGAIN;
-	}
-#endif
-
-	direction = hwep->dir;
-	do {
-		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
-
-		if (!value)
-			hwep->wedge = 0;
-
-		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
-			hwep->dir = (hwep->dir == TX) ? RX : TX;
-
-	} while (hwep->dir != direction);
-
-	spin_unlock_irqrestore(hwep->lock, flags);
-	return retval;
+	return _ep_set_halt(ep, value, true);
 }
 
 /**
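The refactor keeps the gadget-facing semantics intact while letting the UDC core bypass the pending-transfer check when it must stall ep0 during SETUP handling. The two flavours of the call, side by side (a sketch of the call sites above, not new API):

    /* gadget API: refuse to halt an IN endpoint with queued data (-EAGAIN) */
    err = _ep_set_halt(ep, value, true);

    /* UDC core during SETUP errors: stall unconditionally */
    err = _ep_set_halt(&hwep->ep, 1, false);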
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b2a540b43f97..b9ddf0c1ffe5 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		ep->ss_ep_comp.bmAttributes = 16;
 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
-			desc->bmAttributes > 2) {
+			USB_SS_MULT(desc->bmAttributes) > 3) {
 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
 				"config %d interface %d altsetting %d ep %d: "
 				"setting to 3\n", desc->bmAttributes + 1,
@@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	}
 
 	if (usb_endpoint_xfer_isoc(&ep->desc))
-		max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+		max_tx = (desc->bMaxBurst + 1) *
+			(USB_SS_MULT(desc->bmAttributes)) *
 			usb_endpoint_maxp(&ep->desc);
 	else if (usb_endpoint_xfer_int(&ep->desc))
 		max_tx = usb_endpoint_maxp(&ep->desc) *
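USB_SS_MULT() is defined in ch9.h as (1 + ((p) & 0x3)), so the old expression (desc->bmAttributes + 1) overcounted whenever reserved bits of bmAttributes were set, and the old sanity check could misfire on valid descriptors for the same reason. A worked example of the corrected computation, assuming bMaxBurst = 1, a Mult field of 2 and a 1024-byte endpoint:

    /* USB_SS_MULT(2) == 3 packets per service opportunity */
    max_tx = (1 + 1) * USB_SS_MULT(2) * 1024;   /* 2 * 3 * 1024 = 6144 bytes */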
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a5a1b7c45743..22e9606d8e08 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -514,8 +514,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 		goto err1;
 	}
 
-	dwc3_omap_enable_irqs(omap);
-
 	ret = dwc3_omap_extcon_register(omap);
 	if (ret < 0)
 		goto err2;
@@ -526,6 +524,8 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 		goto err3;
 	}
 
+	dwc3_omap_enable_irqs(omap);
+
 	return 0;
 
 err3:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0c25704dcb6b..1e8bdf817811 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2665,8 +2665,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
 	int i;
 	irqreturn_t ret = IRQ_NONE;
 
-	spin_lock(&dwc->lock);
-
 	for (i = 0; i < dwc->num_event_buffers; i++) {
 		irqreturn_t status;
 
@@ -2675,8 +2673,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
 			ret = status;
 	}
 
-	spin_unlock(&dwc->lock);
-
 	return ret;
 }
 
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 978435a51038..6399c106a3a5 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -186,6 +186,7 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
 
 	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
 		ep->claimed = false;
+		ep->driver_data = NULL;
 	}
 	gadget->in_epnum = 0;
 	gadget->out_epnum = 0;
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index fdacddb18c00..175ca93fe5e2 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -3138,8 +3138,8 @@ static void udc_pci_remove(struct pci_dev *pdev)
 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 	if (dev->irq_registered)
 		free_irq(pdev->irq, dev);
-	if (dev->regs)
-		iounmap(dev->regs);
+	if (dev->virt_addr)
+		iounmap(dev->virt_addr);
 	if (dev->mem_region)
 		release_mem_region(pci_resource_start(pdev, 0),
 				pci_resource_len(pdev, 0));
@@ -3226,17 +3226,13 @@ static int udc_pci_probe(
 
 	/* init */
 	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
-	if (!dev) {
-		retval = -ENOMEM;
-		goto finished;
-	}
+	if (!dev)
+		return -ENOMEM;
 
 	/* pci setup */
 	if (pci_enable_device(pdev) < 0) {
-		kfree(dev);
-		dev = NULL;
 		retval = -ENODEV;
-		goto finished;
+		goto err_pcidev;
 	}
 	dev->active = 1;
 
@@ -3246,28 +3242,22 @@ static int udc_pci_probe(
 
 	if (!request_mem_region(resource, len, name)) {
 		dev_dbg(&pdev->dev, "pci device used already\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -EBUSY;
-		goto finished;
+		goto err_memreg;
 	}
 	dev->mem_region = 1;
 
 	dev->virt_addr = ioremap_nocache(resource, len);
 	if (dev->virt_addr == NULL) {
 		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -EFAULT;
-		goto finished;
+		goto err_ioremap;
 	}
 
 	if (!pdev->irq) {
 		dev_err(&pdev->dev, "irq not set\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -ENODEV;
-		goto finished;
+		goto err_irq;
 	}
 
 	spin_lock_init(&dev->lock);
@@ -3283,10 +3273,8 @@ static int udc_pci_probe(
 
 	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
-		kfree(dev);
-		dev = NULL;
 		retval = -EBUSY;
-		goto finished;
+		goto err_irq;
 	}
 	dev->irq_registered = 1;
 
@@ -3314,8 +3302,17 @@ static int udc_pci_probe(
 	return 0;
 
 finished:
-	if (dev)
-		udc_pci_remove(pdev);
+	udc_pci_remove(pdev);
+	return retval;
+
+err_irq:
+	iounmap(dev->virt_addr);
+err_ioremap:
+	release_mem_region(resource, len);
+err_memreg:
+	pci_disable_device(pdev);
+err_pcidev:
+	kfree(dev);
 	return retval;
 }
 
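The rework replaces the single catch-all cleanup label with the usual kernel unwind ladder: each failure jumps to a label that releases only what was acquired so far, in reverse order. The shape of the idiom, with hypothetical acquire_a()/acquire_b() stand-ins:

    a = acquire_a();
    if (!a)
            return -ENOMEM;

    if (acquire_b() < 0) {
            retval = -ENODEV;
            goto err_a;             /* only 'a' exists so far */
    }

    return 0;

    err_a:
            release_a(a);
            return retval;

This avoids both double-frees and the NULL-pointer games the old code played with 'dev'.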
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 3dfada8d6061..f0f2b066ac08 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2002,6 +2002,17 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
 		ep->udc = udc;
 		INIT_LIST_HEAD(&ep->queue);
 
+		if (ep->index == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = ep->can_isoc;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
+
 		if (i)
 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 5c8f4effb62a..ccb9c213cc9f 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -324,8 +324,7 @@ static void bdc_mem_free(struct bdc *bdc)
 			  bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
 
 	/* Destroy the dma pools */
-	if (bdc->bd_table_pool)
-		dma_pool_destroy(bdc->bd_table_pool);
+	dma_pool_destroy(bdc->bd_table_pool);
 
 	/* Free the bdc_ep array */
 	kfree(bdc->bdc_ep_array);
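This and the similar cleanups in gr_udc, mv_u3d_core and mv_udc_core below rely on dma_pool_destroy() recently gaining the same NULL-tolerance as kfree(), so the explicit guard is dead weight:

    dma_pool_destroy(NULL);     /* no-op, mirroring kfree(NULL) */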
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 1379ad40d864..27af0f008b57 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1348,6 +1348,7 @@ static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
 {
 	struct dummy *dum = dum_hcd->dum;
 	struct dummy_request *req;
+	int sent = 0;
 
 top:
 	/* if there's no request queued, the device is NAKing; return */
@@ -1385,12 +1386,15 @@ top:
 		if (len == 0)
 			break;
 
-		/* use an extra pass for the final short packet */
-		if (len > ep->ep.maxpacket) {
-			rescan = 1;
-			len -= (len % ep->ep.maxpacket);
+		/* send multiple of maxpacket first, then remainder */
+		if (len >= ep->ep.maxpacket) {
+			is_short = 0;
+			if (len % ep->ep.maxpacket)
+				rescan = 1;
+			len -= len % ep->ep.maxpacket;
+		} else {
+			is_short = 1;
 		}
-		is_short = (len % ep->ep.maxpacket) != 0;
 
 		len = dummy_perform_transfer(urb, req, len);
 
@@ -1399,6 +1403,7 @@ top:
 			req->req.status = len;
 		} else {
 			limit -= len;
+			sent += len;
 			urb->actual_length += len;
 			req->req.actual += len;
 		}
@@ -1421,7 +1426,7 @@ top:
 					*status = -EOVERFLOW;
 				else
 					*status = 0;
-			} else if (!to_host) {
+			} else {
 				*status = 0;
 				if (host_len > dev_len)
 					req->req.status = -EOVERFLOW;
@@ -1429,15 +1434,24 @@ top:
 					req->req.status = 0;
 			}
 
-		/* many requests terminate without a short packet */
+		/*
+		 * many requests terminate without a short packet.
+		 * send a zlp if demanded by flags.
+		 */
 		} else {
-			if (req->req.length == req->req.actual
-					&& !req->req.zero)
-				req->req.status = 0;
-			if (urb->transfer_buffer_length == urb->actual_length
-					&& !(urb->transfer_flags
-						& URB_ZERO_PACKET))
-				*status = 0;
+			if (req->req.length == req->req.actual) {
+				if (req->req.zero && to_host)
+					rescan = 1;
+				else
+					req->req.status = 0;
+			}
+			if (urb->transfer_buffer_length == urb->actual_length) {
+				if (urb->transfer_flags & URB_ZERO_PACKET &&
+				    !to_host)
+					rescan = 1;
+				else
+					*status = 0;
+			}
 		}
 
 		/* device side completion --> continuable */
@@ -1460,7 +1474,7 @@ top:
 		if (rescan)
 			goto top;
 	}
-	return limit;
+	return sent;
 }
 
 static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
@@ -1890,7 +1904,7 @@ restart:
 	default:
 treat_control_like_bulk:
 		ep->last_io = jiffies;
-		total = transfer(dum_hcd, urb, ep, limit, &status);
+		total -= transfer(dum_hcd, urb, ep, limit, &status);
 		break;
 	}
 
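Worked through for a hypothetical 3000-byte request on a 512-byte bulk endpoint, the new split sends the maxpacket-aligned part first and the remainder on a rescan pass (a sketch of the arithmetic only, not driver code):

    int len = 3000, maxp = 512;

    /* pass 1: len >= maxp and a remainder exists, so rescan = 1,
     * is_short = 0, and len - (len % maxp) = 2560 bytes are sent */
    len -= len % maxp;

    /* pass 2: the remaining 440 bytes < maxp, so is_short = 1 and
     * they go out as the terminating short packet */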
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 8aa2593c2c36..b9429bc42511 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2117,8 +2117,7 @@ static int gr_remove(struct platform_device *pdev)
 		return -EBUSY;
 
 	gr_dfs_delete(dev);
-	if (dev->desc_pool)
-		dma_pool_destroy(dev->desc_pool);
+	dma_pool_destroy(dev->desc_pool);
 	platform_set_drvdata(pdev, NULL);
 
 	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 4c489692745e..dafe74eb9ade 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1767,8 +1767,7 @@ static int mv_u3d_remove(struct platform_device *dev)
 	usb_del_gadget_udc(&u3d->gadget);
 
 	/* free memory allocated in probe */
-	if (u3d->trb_pool)
-		dma_pool_destroy(u3d->trb_pool);
+	dma_pool_destroy(u3d->trb_pool);
 
 	if (u3d->ep_context)
 		dma_free_coherent(&dev->dev, u3d->ep_context_size,
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 339af51df57d..81b6229c7805 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2100,8 +2100,7 @@ static int mv_udc_remove(struct platform_device *pdev)
 	}
 
 	/* free memory allocated in probe */
-	if (udc->dtd_pool)
-		dma_pool_destroy(udc->dtd_pool);
+	dma_pool_destroy(udc->dtd_pool);
 
 	if (udc->ep_dqh)
 		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9a8c936cd42c..41f841fa6c4d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
 	 *
-	 * xHCI 1.0 specification indicates that the Average TRB Length should
-	 * be set to 8 for control endpoints.
+	 * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+	 * should be set to 8 for control endpoints.
 	 */
-	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
 		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
 	else
 		ep_ctx->tx_info |=
@@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int size;
 	int i, j, num_ports;
 
-	if (timer_pending(&xhci->cmd_timer))
-		del_timer_sync(&xhci->cmd_timer);
+	del_timer_sync(&xhci->cmd_timer);
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	INIT_LIST_HEAD(&xhci->cmd_list);
 
+	/* init command timeout timer */
+	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+		    (unsigned long)xhci);
+
 	page_size = readl(&xhci->op_regs->page_size);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"Supported page size register = 0x%x", page_size);
@@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			"Wrote ERST address to ir_set 0.");
 	xhci_print_ir_set(xhci, 0);
 
-	/* init command timeout timer */
-	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
-		    (unsigned long)xhci);
-
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
 	 * something other than the default (~1ms minimum between interrupts).
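Initialising the timer at the top of xhci_mem_init() means every error path that falls through to xhci_mem_cleanup() finds an initialised (if never armed) timer, which the now-unconditional del_timer_sync() above handles safely. The ordering in miniature (a sketch, not the full function):

    setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
                (unsigned long)xhci);       /* before anything can fail */

    /* ... allocations that may bail out to the cleanup path ... */

    del_timer_sync(&xhci->cmd_timer);       /* safe even if never armed */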
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5590eac2b22d..c79d33676672 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -180,51 +180,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 				"QUIRK: Resetting on resume");
 }
 
-/*
- * In some Intel xHCI controllers, in order to get D3 working,
- * through a vendor specific SSIC CONFIG register at offset 0x883c,
- * SSIC PORT need to be marked as "unused" before putting xHCI
- * into D3. After D3 exit, the SSIC port need to be marked as "used".
- * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
- */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
-{
-	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
-	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
-	u32 val;
-	void __iomem *reg;
-
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
-		reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
-		/* Notify SSIC that SSIC profile programming is not done */
-		val = readl(reg) & ~PROG_DONE;
-		writel(val, reg);
-
-		/* Mark SSIC port as unused(suspend) or used(resume) */
-		val = readl(reg);
-		if (suspend)
-			val |= SSIC_PORT_UNUSED;
-		else
-			val &= ~SSIC_PORT_UNUSED;
-		writel(val, reg);
-
-		/* Notify SSIC that SSIC profile programming is done */
-		val = readl(reg) | PROG_DONE;
-		writel(val, reg);
-		readl(reg);
-	}
-
-	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
-	val = readl(reg);
-	writel(val | BIT(28), reg);
-	readl(reg);
-}
-
 #ifdef CONFIG_ACPI
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 {
@@ -345,6 +300,51 @@ static void xhci_pci_remove(struct pci_dev *dev)
 }
 
 #ifdef CONFIG_PM
+/*
+ * In some Intel xHCI controllers, in order to get D3 working,
+ * through a vendor specific SSIC CONFIG register at offset 0x883c,
+ * SSIC PORT need to be marked as "unused" before putting xHCI
+ * into D3. After D3 exit, the SSIC port need to be marked as "used".
+ * Without this change, xHCI might not enter D3 state.
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	u32 val;
+	void __iomem *reg;
+
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+		reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+		/* Notify SSIC that SSIC profile programming is not done */
+		val = readl(reg) & ~PROG_DONE;
+		writel(val, reg);
+
+		/* Mark SSIC port as unused(suspend) or used(resume) */
+		val = readl(reg);
+		if (suspend)
+			val |= SSIC_PORT_UNUSED;
+		else
+			val &= ~SSIC_PORT_UNUSED;
+		writel(val, reg);
+
+		/* Notify SSIC that SSIC profile programming is done */
+		val = readl(reg) | PROG_DONE;
+		writel(val, reg);
+		readl(reg);
+	}
+
+	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+	val = readl(reg);
+	writel(val | BIT(28), reg);
+	readl(reg);
+}
+
 static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a47a1e897086..43291f93afeb 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 	if (ret < 0) {
+		/* we are about to kill xhci, give it one more chance */
+		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			      &xhci->op_regs->cmd_ring);
+		udelay(1000);
+		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+		if (ret == 0)
+			return 0;
+
 		xhci_err(xhci, "Stopped the command ring failed, "
 				"maybe the host is dead\n");
 		xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3461,8 +3470,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (start_cycle == 0)
 		field |= 0x1;
 
-	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
-	if (xhci->hci_version == 0x100) {
+	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+	if (xhci->hci_version >= 0x100) {
 		if (urb->transfer_buffer_length > 0) {
 			if (setup->bRequestType & USB_DIR_IN)
 				field |= TRB_TX_TYPE(TRB_DATA_IN);
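The handshake waits are in microseconds, so the abort path now allows a 5 s initial window, a 1 ms settling delay, and a 3 s retry window before declaring the host dead. Consolidated (a sketch of the combined flow from the hunk above):

    /* wait up to 5 s for CMD_RING_RUNNING to clear */
    ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                         CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
    if (ret < 0) {
            /* re-issue the abort and give the HC 3 more seconds */
            xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                          &xhci->op_regs->cmd_ring);
            udelay(1000);
            if (!xhci_handshake(&xhci->op_regs->cmd_ring,
                                CMD_RING_RUNNING, 0, 3 * 1000 * 1000))
                    return 0;
            /* otherwise fall through and mark the host dying */
    }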
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6b0f4a47e402..9957bd96d4bc 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
 				"waited %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
 	if (!ret)
-		xhci->xhc_state &= ~XHCI_STATE_HALTED;
+		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
 	return ret;
 }
 
@@ -654,15 +655,6 @@ int xhci_run(struct usb_hcd *hcd)
 }
 EXPORT_SYMBOL_GPL(xhci_run);
 
-static void xhci_only_stop_hcd(struct usb_hcd *hcd)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
-	spin_lock_irq(&xhci->lock);
-	xhci_halt(xhci);
-	spin_unlock_irq(&xhci->lock);
-}
-
 /*
  * Stop xHCI driver.
  *
@@ -677,12 +669,14 @@ void xhci_stop(struct usb_hcd *hcd)
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-	if (!usb_hcd_is_primary_hcd(hcd)) {
-		xhci_only_stop_hcd(xhci->shared_hcd);
+	if (xhci->xhc_state & XHCI_STATE_HALTED)
 		return;
-	}
 
+	mutex_lock(&xhci->mutex);
 	spin_lock_irq(&xhci->lock);
+	xhci->xhc_state |= XHCI_STATE_HALTED;
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
 	/* Make sure the xHC is halted for a USB3 roothub
 	 * (xhci_stop() could be called as part of failed init).
 	 */
@@ -717,6 +711,7 @@ void xhci_stop(struct usb_hcd *hcd)
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"xhci_stop completed - status = %x",
 			readl(&xhci->op_regs->status));
+	mutex_unlock(&xhci->mutex);
 }
 
 /*
@@ -3793,6 +3788,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
 
 	mutex_lock(&xhci->mutex);
 
+	if (xhci->xhc_state)	/* dying or halted */
+		goto out;
+
 	if (!udev->slot_id) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 				"Bad Slot ID %d", udev->slot_id);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 514a6cdaeff6..4a518ff12310 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1051,6 +1051,7 @@ void musb_start(struct musb *musb)
 	 * (c) peripheral initiates, using SRP
 	 */
 	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+	    musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
 	    (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
 		musb->is_active = 1;
 	} else {
@@ -2448,6 +2449,9 @@ static int musb_suspend(struct device *dev)
 	struct musb	*musb = dev_to_musb(dev);
 	unsigned long	flags;
 
+	musb_platform_disable(musb);
+	musb_generic_disable(musb);
+
 	spin_lock_irqsave(&musb->lock, flags);
 
 	if (is_peripheral_active(musb)) {
@@ -2501,6 +2505,9 @@ static int musb_resume(struct device *dev)
 	pm_runtime_disable(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
+
+	musb_start(musb);
+
 	return 0;
 }
 
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d07cafb7d5f5..e499b862a946 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -551,6 +551,9 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
 	} else {
 		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
 
+		/* delay to drain to cppi dma pipeline for isoch */
+		udelay(250);
+
 		csr = musb_readw(epio, MUSB_RXCSR);
 		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
 		musb_writew(epio, MUSB_RXCSR, csr);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index a0cfead6150f..84512d1d5eee 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb)
 
 	dsps_writel(reg_base, wrp->epintr_set, epmask);
 	dsps_writel(reg_base, wrp->coreintr_set, coremask);
-	/* start polling for ID change. */
-	mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
+	/* start polling for ID change in dual-role idle mode */
+	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
+	    musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+		mod_timer(&glue->timer, jiffies +
+			  msecs_to_jiffies(wrp->poll_timeout));
 	dsps_musb_try_idle(musb, 0);
 }
 
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 39168fe9b406..b2685e75a683 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -379,6 +379,8 @@ static const struct of_device_id ux500_match[] = {
 	{}
 };
 
+MODULE_DEVICE_TABLE(of, ux500_match);
+
 static struct platform_driver ux500_driver = {
 	.probe		= ux500_probe,
 	.remove		= ux500_remove,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7d3beee2a587..173132416170 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -155,7 +155,7 @@ config USB_MSM_OTG
 config USB_QCOM_8X16_PHY
 	tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
 	depends on ARCH_QCOM || COMPILE_TEST
-	depends on RESET_CONTROLLER
+	depends on RESET_CONTROLLER && EXTCON
 	select USB_PHY
 	select USB_ULPI_VIEWPORT
 	help
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index ec6ecd03269c..5320cb8642cb 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -232,7 +232,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
 		clk_rate = pdata->clk_rate;
 		needs_vcc = pdata->needs_vcc;
 		if (gpio_is_valid(pdata->gpio_reset)) {
-			err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
-						    dev_name(dev));
+			err = devm_gpio_request_one(dev, pdata->gpio_reset,
+						    GPIOF_ACTIVE_LOW,
+						    dev_name(dev));
 			if (!err)
 				nop->gpiod_reset =
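GPIOF_ACTIVE_LOW makes the legacy-GPIO request record the line's polarity, so the gpiod handle derived from it inverts values at the pin and consumers deal purely in logical levels. Assuming the reset line is wired active-low, as the flag asserts:

    gpiod_set_value(nop->gpiod_reset, 1);   /* assert reset (pin driven low) */
    gpiod_set_value(nop->gpiod_reset, 0);   /* release reset (pin driven high) */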
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index 8a55b37d1a02..db68156568e6 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -31,6 +31,7 @@ static const struct i2c_device_id isp1301_id[] = {
 	{ "isp1301", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
 static struct i2c_client *isp1301_i2c_client;
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6d1941a2396a..6956c4f62216 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
 #define ZTE_PRODUCT_MF622			0x0001
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
+#define ZTE_PRODUCT_ZM8620_X			0x0396
+#define ZTE_PRODUCT_ME3620_MBIM			0x0426
+#define ZTE_PRODUCT_ME3620_X			0x1432
+#define ZTE_PRODUCT_ME3620_L			0x1433
 #define ZTE_PRODUCT_AC2726			0xfff1
 #define ZTE_PRODUCT_MG880			0xfffd
 #define ZTE_PRODUCT_CDMA_TECH			0xfffe
@@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
 	.sendsetup = BIT(1) | BIT(2) | BIT(3),
 };
 
+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+	.reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
 	.reserved = BIT(1) | BIT(2),
 };
@@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
 	  .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+	  .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+	  .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+	  .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+	  .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 6c3734d2b45a..d3ea90bef84d 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
 static int  whiteheat_firmware_attach(struct usb_serial *serial);
 
 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int  whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id);
 static int  whiteheat_attach(struct usb_serial *serial);
 static void whiteheat_release(struct usb_serial *serial);
 static int  whiteheat_port_probe(struct usb_serial_port *port);
@@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = {
 	.description =		"Connect Tech - WhiteHEAT",
 	.id_table =		id_table_std,
 	.num_ports =		4,
+	.probe =		whiteheat_probe,
 	.attach =		whiteheat_attach,
 	.release =		whiteheat_release,
 	.port_probe =		whiteheat_port_probe,
@@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
 /*****************************************************************************
  * Connect Tech's White Heat serial driver functions
  *****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id)
+{
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	size_t num_bulk_in = 0;
+	size_t num_bulk_out = 0;
+	size_t min_num_bulk;
+	unsigned int i;
+
+	iface_desc = serial->interface->cur_altsetting;
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+		endpoint = &iface_desc->endpoint[i].desc;
+		if (usb_endpoint_is_bulk_in(endpoint))
+			++num_bulk_in;
+		if (usb_endpoint_is_bulk_out(endpoint))
+			++num_bulk_out;
+	}
+
+	min_num_bulk = COMMAND_PORT + 1;
+	if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+		return -ENODEV;
+
+	return 0;
+}
+
 static int whiteheat_attach(struct usb_serial *serial)
 {
 	struct usb_serial_port *command_port;
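Rejecting under-provisioned interfaces here, before attach() runs, prevents later out-of-bounds indexing of the bulk endpoint arrays up to COMMAND_PORT. The ch9.h helpers used in the probe reduce to direction and transfer-type tests; roughly (a paraphrase of the real inline helpers, not new API):

    static inline int example_is_bulk_in(const struct usb_endpoint_descriptor *epd)
    {
            return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd);
    }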
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 81220b2203c6..0ef5cc13fae2 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,8 +44,6 @@
 #define BTRFS_INODE_IN_DELALLOC_LIST		9
 #define BTRFS_INODE_READDIO_NEED_LOCK		10
 #define BTRFS_INODE_HAS_PROPS		        11
-/* DIO is ready to submit */
-#define BTRFS_INODE_DIO_READY		        12
 /*
  * The following 3 bits are meant only for the btree inode.
  * When any of them is set, it means an error happened while writing an
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0d98aee34fee..295795aebe0b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3765,9 +3765,7 @@ void close_ctree(struct btrfs_root *root)
 	 * block groups queued for removal, the deletion will be
 	 * skipped when we quit the cleaner thread.
 	 */
-	mutex_lock(&root->fs_info->cleaner_mutex);
 	btrfs_delete_unused_bgs(root->fs_info);
-	mutex_unlock(&root->fs_info->cleaner_mutex);
 
 	ret = btrfs_commit_super(root);
 	if (ret)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5411f0ab5683..9f9604201333 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3742,10 +3742,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->bytes_reserved = 0;
 	found->bytes_readonly = 0;
 	found->bytes_may_use = 0;
-	if (total_bytes > 0)
-		found->full = 0;
-	else
-		found->full = 1;
+	found->full = 0;
 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
 	found->chunk_alloc = 0;
 	found->flush = 0;
@@ -8668,7 +8665,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	}
 
 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
-		btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
+		btrfs_add_dropped_root(trans, root);
 	} else {
 		free_extent_buffer(root->node);
 		free_extent_buffer(root->commit_root);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f1018cfbfefa..e2357e31609a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2798,7 +2798,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 			      bio_end_io_t end_io_func,
 			      int mirror_num,
 			      unsigned long prev_bio_flags,
-			      unsigned long bio_flags)
+			      unsigned long bio_flags,
+			      bool force_bio_submit)
 {
 	int ret = 0;
 	struct bio *bio;
@@ -2814,6 +2815,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 		contig = bio_end_sector(bio) == sector;
 
 	if (prev_bio_flags != bio_flags || !contig ||
+	    force_bio_submit ||
 	    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
 	    bio_add_page(bio, page, page_size, offset) < page_size) {
 		ret = submit_one_bio(rw, bio, mirror_num,
@@ -2910,7 +2912,8 @@ static int __do_readpage(struct extent_io_tree *tree,
 			 get_extent_t *get_extent,
 			 struct extent_map **em_cached,
 			 struct bio **bio, int mirror_num,
-			 unsigned long *bio_flags, int rw)
+			 unsigned long *bio_flags, int rw,
+			 u64 *prev_em_start)
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = page_offset(page);
@@ -2958,6 +2961,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 	}
 	while (cur <= end) {
 		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+		bool force_bio_submit = false;
 
 		if (cur >= last_byte) {
 			char *userpage;
@@ -3008,6 +3012,49 @@ static int __do_readpage(struct extent_io_tree *tree,
 		block_start = em->block_start;
 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
 			block_start = EXTENT_MAP_HOLE;
+
+		/*
+		 * If we have a file range that points to a compressed extent
+		 * and it's followed by a consecutive file range that points
+		 * to the same compressed extent (possibly with a different
+		 * offset and/or length, so it either points to the whole extent
+		 * or only part of it), we must make sure we do not submit a
+		 * single bio to populate the pages for the 2 ranges because
+		 * this makes the compressed extent read zero out the pages
+		 * belonging to the 2nd range. Imagine the following scenario:
+		 *
+		 *  File layout
+		 *  [0 - 8K]                     [8K - 24K]
+		 *     |                             |
+		 *     |                             |
+		 * points to extent X,         points to extent X,
+		 * offset 4K, length of 8K     offset 0, length 16K
+		 *
+		 * [extent X, compressed length = 4K uncompressed length = 16K]
+		 *
+		 * If the bio to read the compressed extent covers both ranges,
+		 * it will decompress extent X into the pages belonging to the
+		 * first range and then it will stop, zeroing out the remaining
+		 * pages that belong to the other range that points to extent X.
+		 * So here we make sure we submit 2 bios, one for the first
+		 * range and another one for the second range. Both will target
+		 * the same physical extent from disk, but we can't currently
+		 * make the compressed bio endio callback populate the pages
+		 * for both ranges because each compressed bio is tightly
+		 * coupled with a single extent map, and each range can have
+		 * an extent map with a different offset value relative to the
+		 * uncompressed data of our extent and different lengths. This
+		 * is a corner case so we prioritize correctness over
+		 * non-optimal behavior (submitting 2 bios for the same extent).
+		 */
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
+		    prev_em_start && *prev_em_start != (u64)-1 &&
+		    *prev_em_start != em->orig_start)
+			force_bio_submit = true;
+
+		if (prev_em_start)
+			*prev_em_start = em->orig_start;
+
 		free_extent_map(em);
 		em = NULL;
 
@@ -3057,7 +3104,8 @@ static int __do_readpage(struct extent_io_tree *tree,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
-					 this_bio_flag);
+					 this_bio_flag,
+					 force_bio_submit);
 		if (!ret) {
 			nr++;
 			*bio_flags = this_bio_flag;
@@ -3089,6 +3137,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	struct inode *inode;
 	struct btrfs_ordered_extent *ordered;
 	int index;
+	u64 prev_em_start = (u64)-1;
 
 	inode = pages[0]->mapping->host;
 	while (1) {
@@ -3104,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 
 	for (index = 0; index < nr_pages; index++) {
 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
-			      mirror_num, bio_flags, rw);
+			      mirror_num, bio_flags, rw, &prev_em_start);
 		page_cache_release(pages[index]);
 	}
 }
@@ -3172,7 +3221,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	}
 
 	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
-			    bio_flags, rw);
+			    bio_flags, rw, NULL);
 	return ret;
 }
 
@@ -3198,7 +3247,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
 	int ret;
 
 	ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
-			    &bio_flags, READ);
+			    &bio_flags, READ, NULL);
 	if (bio)
 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
 	return ret;
@@ -3451,7 +3500,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 						 sector, iosize, pg_offset,
 						 bdev, &epd->bio, max_nr,
 						 end_bio_extent_writepage,
-						 0, 0, 0);
+						 0, 0, 0, false);
 			if (ret)
 				SetPageError(page);
 		}
@@ -3754,7 +3803,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
-					 0, epd->bio_flags, bio_flags);
+					 0, epd->bio_flags, bio_flags, false);
 		epd->bio_flags = bio_flags;
 		if (ret) {
 			set_btree_ioerr(p);
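The extent_io.c change above threads a force_bio_submit flag from __do_readpage() down to submit_extent_page(). As a minimal standalone illustration of the rule the new flag encodes (assumed names, not the kernel API):

    /* Two consecutive file ranges backed by the same compressed extent
     * must not share a bio when their extent maps differ, because the
     * decompression end_io fills pages for exactly one extent map.
     */
    #include <stdbool.h>
    #include <stdint.h>

    #define PREV_EM_NONE ((uint64_t)-1)

    static bool must_force_submit(bool extent_compressed,
                                  const uint64_t *prev_em_start,
                                  uint64_t em_orig_start)
    {
            return extent_compressed && prev_em_start &&
                   *prev_em_start != PREV_EM_NONE &&
                   *prev_em_start != em_orig_start;
    }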
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a0fa7253a2d7..611b66d73e80 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5084,7 +5084,8 @@ void btrfs_evict_inode(struct inode *inode)
 		goto no_delete;
 	}
 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
-	btrfs_wait_ordered_range(inode, 0, (u64)-1);
+	if (!special_file(inode->i_mode))
+		btrfs_wait_ordered_range(inode, 0, (u64)-1);
 
 	btrfs_free_io_failure_record(inode, 0, (u64)-1);
 
@@ -7408,6 +7409,10 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
 	return em;
 }
 
+struct btrfs_dio_data {
+	u64 outstanding_extents;
+	u64 reserve;
+};
 
 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 				   struct buffer_head *bh_result, int create)
@@ -7415,10 +7420,10 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	struct extent_map *em;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct extent_state *cached_state = NULL;
+	struct btrfs_dio_data *dio_data = NULL;
 	u64 start = iblock << inode->i_blkbits;
 	u64 lockstart, lockend;
 	u64 len = bh_result->b_size;
-	u64 *outstanding_extents = NULL;
 	int unlock_bits = EXTENT_LOCKED;
 	int ret = 0;
 
@@ -7436,7 +7441,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 		 * that anything that needs to check if there's a transction doesn't get
 		 * confused.
 		 */
-		outstanding_extents = current->journal_info;
+		dio_data = current->journal_info;
 		current->journal_info = NULL;
 	}
 
@@ -7568,17 +7573,18 @@ unlock:
 		 * within our reservation, otherwise we need to adjust our inode
 		 * counter appropriately.
 		 */
-		if (*outstanding_extents) {
-			(*outstanding_extents)--;
+		if (dio_data->outstanding_extents) {
+			(dio_data->outstanding_extents)--;
 		} else {
 			spin_lock(&BTRFS_I(inode)->lock);
 			BTRFS_I(inode)->outstanding_extents++;
 			spin_unlock(&BTRFS_I(inode)->lock);
 		}
 
-		current->journal_info = outstanding_extents;
 		btrfs_free_reserved_data_space(inode, len);
-		set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags);
+		WARN_ON(dio_data->reserve < len);
+		dio_data->reserve -= len;
+		current->journal_info = dio_data;
 	}
 
 	/*
@@ -7601,8 +7607,8 @@ unlock:
 unlock_err:
 	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
-	if (outstanding_extents)
-		current->journal_info = outstanding_extents;
+	if (dio_data)
+		current->journal_info = dio_data;
 	return ret;
 }
 
@@ -8329,7 +8335,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
-	u64 outstanding_extents = 0;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_dio_data dio_data = { 0 };
 	size_t count = 0;
 	int flags = 0;
 	bool wakeup = true;
@@ -8367,7 +8374,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		ret = btrfs_delalloc_reserve_space(inode, count);
 		if (ret)
 			goto out;
-		outstanding_extents = div64_u64(count +
+		dio_data.outstanding_extents = div64_u64(count +
 						BTRFS_MAX_EXTENT_SIZE - 1,
 						BTRFS_MAX_EXTENT_SIZE);
 
@@ -8376,7 +8383,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 		 * do the accounting properly if we go over the number we
 		 * originally calculated. Abuse current->journal_info for this.
 		 */
-		current->journal_info = &outstanding_extents;
+		dio_data.reserve = round_up(count, root->sectorsize);
+		current->journal_info = &dio_data;
 	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
 			    &BTRFS_I(inode)->runtime_flags)) {
 		inode_dio_end(inode);
@@ -8391,16 +8399,9 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 	if (iov_iter_rw(iter) == WRITE) {
 		current->journal_info = NULL;
 		if (ret < 0 && ret != -EIOCBQUEUED) {
-			/*
-			 * If the error comes from submitting stage,
-			 * btrfs_get_blocsk_direct() has free'd data space,
-			 * and metadata space will be handled by
-			 * finish_ordered_fn, don't do that again to make
-			 * sure bytes_may_use is correct.
-			 */
-			if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,
-					&BTRFS_I(inode)->runtime_flags))
-				btrfs_delalloc_release_space(inode, count);
+			if (dio_data.reserve)
+				btrfs_delalloc_release_space(inode,
+							     dio_data.reserve);
 		} else if (ret >= 0 && (size_t)ret < count)
 			btrfs_delalloc_release_space(inode,
 						     count - (size_t)ret);
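The btrfs_dio_data hunks consolidate two counters behind the existing current->journal_info side channel. A hedged sketch of that hand-off pattern in isolation (illustrative names; not the btrfs code):

    #include <stddef.h>

    struct dio_state {
            unsigned long long outstanding_extents;
            unsigned long long reserve;
    };

    static void *journal_slot;      /* stand-in for current->journal_info */

    static void get_blocks_callback(void)
    {
            struct dio_state *state = journal_slot;

            if (state && state->outstanding_extents)
                    state->outstanding_extents--;   /* consume one extent */
    }

    static void direct_io_caller(void)
    {
            struct dio_state state = { 0 };

            journal_slot = &state;          /* publish for the callback */
            get_blocks_callback();          /* normally invoked indirectly */
            journal_slot = NULL;            /* always cleared on the way out */
    }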
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2b07b3581781..11d1eab9234d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1658,9 +1658,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 	 * groups on disk until we're mounted read-write again
 	 * unless we clean them up here.
 	 */
-	mutex_lock(&root->fs_info->cleaner_mutex);
 	btrfs_delete_unused_bgs(fs_info);
-	mutex_unlock(&root->fs_info->cleaner_mutex);
 
 	btrfs_dev_replace_suspend_for_unmount(fs_info);
 	btrfs_scrub_cancel(fs_info);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8f259b3a66b3..74bc3338418b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -117,6 +117,18 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans,
 		btrfs_unpin_free_ino(root);
 		clear_btree_io_tree(&root->dirty_log_pages);
 	}
+
+	/* We can free old roots now. */
+	spin_lock(&trans->dropped_roots_lock);
+	while (!list_empty(&trans->dropped_roots)) {
+		root = list_first_entry(&trans->dropped_roots,
+					struct btrfs_root, root_list);
+		list_del_init(&root->root_list);
+		spin_unlock(&trans->dropped_roots_lock);
+		btrfs_drop_and_free_fs_root(fs_info, root);
+		spin_lock(&trans->dropped_roots_lock);
+	}
+	spin_unlock(&trans->dropped_roots_lock);
 	up_write(&fs_info->commit_root_sem);
 }
 
@@ -255,11 +267,13 @@ loop:
 	INIT_LIST_HEAD(&cur_trans->pending_ordered);
 	INIT_LIST_HEAD(&cur_trans->dirty_bgs);
 	INIT_LIST_HEAD(&cur_trans->io_bgs);
+	INIT_LIST_HEAD(&cur_trans->dropped_roots);
 	mutex_init(&cur_trans->cache_write_mutex);
 	cur_trans->num_dirty_bgs = 0;
 	spin_lock_init(&cur_trans->dirty_bgs_lock);
 	INIT_LIST_HEAD(&cur_trans->deleted_bgs);
 	spin_lock_init(&cur_trans->deleted_bgs_lock);
+	spin_lock_init(&cur_trans->dropped_roots_lock);
 	list_add_tail(&cur_trans->list, &fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
 			     fs_info->btree_inode->i_mapping);
@@ -336,6 +350,24 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
 }
 
 
+void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root)
+{
+	struct btrfs_transaction *cur_trans = trans->transaction;
+
+	/* Add ourselves to the transaction dropped list */
+	spin_lock(&cur_trans->dropped_roots_lock);
+	list_add_tail(&root->root_list, &cur_trans->dropped_roots);
+	spin_unlock(&cur_trans->dropped_roots_lock);
+
+	/* Make sure we don't try to update the root at commit time */
+	spin_lock(&root->fs_info->fs_roots_radix_lock);
+	radix_tree_tag_clear(&root->fs_info->fs_roots_radix,
+			     (unsigned long)root->root_key.objectid,
+			     BTRFS_ROOT_TRANS_TAG);
+	spin_unlock(&root->fs_info->fs_roots_radix_lock);
+}
+
 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root)
 {
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index edc2fbc262d7..87964bf8892d 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -65,6 +65,7 @@ struct btrfs_transaction {
 	struct list_head switch_commits;
 	struct list_head dirty_bgs;
 	struct list_head io_bgs;
+	struct list_head dropped_roots;
 	u64 num_dirty_bgs;
 
 	/*
@@ -76,6 +77,7 @@ struct btrfs_transaction {
 	spinlock_t dirty_bgs_lock;
 	struct list_head deleted_bgs;
 	spinlock_t deleted_bgs_lock;
+	spinlock_t dropped_roots_lock;
 	struct btrfs_delayed_ref_root delayed_refs;
 	int aborted;
 	int dirty_bg_run;
@@ -216,5 +218,6 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info);
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
 void btrfs_put_transaction(struct btrfs_transaction *transaction);
 void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
-
+void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root);
 #endif
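Taken together, the transaction.c/transaction.h hunks defer btrfs_drop_and_free_fs_root() to commit time via a locked list. A generic sketch of the defer-and-drain idiom they rely on (pthread-based, purely illustrative):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    /* pthread_spin_init(&lock, 0) is assumed to run at startup */
    static pthread_spinlock_t lock;
    static struct node *dropped;    /* stand-in for the dropped_roots list */

    static void add_dropped(struct node *n)
    {
            pthread_spin_lock(&lock);
            n->next = dropped;
            dropped = n;
            pthread_spin_unlock(&lock);
    }

    static void drain_dropped(void)
    {
            pthread_spin_lock(&lock);
            while (dropped) {
                    struct node *n = dropped;

                    dropped = n->next;
                    pthread_spin_unlock(&lock);     /* free outside the lock */
                    free(n);
                    pthread_spin_lock(&lock);
            }
            pthread_spin_unlock(&lock);
    }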
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index aa0dc2573374..afa09fce8151 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -444,6 +444,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
 	return 0;
 }
 
+/* Server has provided av pairs/target info in the type 2 challenge
+ * packet and we have plucked it and stored within smb session.
+ * We parse that blob here to find the server given timestamp
+ * as part of ntlmv2 authentication (or local current time as
+ * default in case of failure)
+ */
+static __le64
+find_timestamp(struct cifs_ses *ses)
+{
+	unsigned int attrsize;
+	unsigned int type;
+	unsigned int onesize = sizeof(struct ntlmssp2_name);
+	unsigned char *blobptr;
+	unsigned char *blobend;
+	struct ntlmssp2_name *attrptr;
+
+	if (!ses->auth_key.len || !ses->auth_key.response)
+		return 0;
+
+	blobptr = ses->auth_key.response;
+	blobend = blobptr + ses->auth_key.len;
+
+	while (blobptr + onesize < blobend) {
+		attrptr = (struct ntlmssp2_name *) blobptr;
+		type = le16_to_cpu(attrptr->type);
+		if (type == NTLMSSP_AV_EOL)
+			break;
+		blobptr += 2; /* advance attr type */
+		attrsize = le16_to_cpu(attrptr->length);
+		blobptr += 2; /* advance attr size */
+		if (blobptr + attrsize > blobend)
+			break;
+		if (type == NTLMSSP_AV_TIMESTAMP) {
+			if (attrsize == sizeof(u64))
+				return *((__le64 *)blobptr);
+		}
+		blobptr += attrsize; /* advance attr value */
+	}
+
+	return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+}
+
 static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
 			const struct nls_table *nls_cp)
 {
@@ -641,6 +683,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
 	struct ntlmv2_resp *ntlmv2;
 	char ntlmv2_hash[16];
 	unsigned char *tiblob = NULL; /* target info blob */
+	__le64 rsp_timestamp;
 
 	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
 		if (!ses->domainName) {
@@ -659,6 +702,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
 		}
 	}
 
+	/* Must be within 5 minutes of the server (or in range +/-2h
+	 * in case of Mac OS X), so simply carry over server timestamp
+	 * (as Windows 7 does)
+	 */
+	rsp_timestamp = find_timestamp(ses);
+
 	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
 	tilen = ses->auth_key.len;
 	tiblob = ses->auth_key.response;
@@ -675,8 +724,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
 			(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
 	ntlmv2->blob_signature = cpu_to_le32(0x00000101);
 	ntlmv2->reserved = 0;
-	/* Must be within 5 minutes of the server */
-	ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+	ntlmv2->time = rsp_timestamp;
+
 	get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
 	ntlmv2->reserved2 = 0;
 
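find_timestamp() above walks NTLMSSP AV pairs, a flat type/length/value list terminated by AV_EOL. A standalone sketch of the same walk over a raw buffer (names and the host-endian shortcut are assumptions; the real wire format is little-endian):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Each AV pair: 16-bit type, 16-bit length, then 'length' value bytes. */
    static int find_av(const uint8_t *blob, size_t len, uint16_t want,
                       const uint8_t **val, uint16_t *vlen)
    {
            size_t off = 0;

            while (off + 4 <= len) {
                    uint16_t type, alen;

                    memcpy(&type, blob + off, 2);
                    memcpy(&alen, blob + off + 2, 2);
                    off += 4;
                    if (type == 0)                  /* AV_EOL ends the list */
                            break;
                    if (off + alen > len)           /* truncated pair */
                            break;
                    if (type == want) {
                            *val = blob + off;
                            *vlen = alen;
                            return 0;
                    }
                    off += alen;                    /* skip the value */
            }
            return -1;                              /* caller falls back */
    }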
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index df91bcf56d67..18da19f4f811 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -50,9 +50,13 @@ change_conf(struct TCP_Server_Info *server)
 		break;
 	default:
 		server->echoes = true;
-		server->oplocks = true;
+		if (enable_oplocks) {
+			server->oplocks = true;
+			server->oplock_credits = 1;
+		} else
+			server->oplocks = false;
+
 		server->echo_credits = 1;
-		server->oplock_credits = 1;
 	}
 	server->credits -= server->echo_credits + server->oplock_credits;
 	return 0;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 070fb2ad85ce..ce83e2edbe0a 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -46,6 +46,7 @@
 #include "smb2status.h"
 #include "smb2glob.h"
 #include "cifspdu.h"
+#include "cifs_spnego.h"
 
 /*
  * The following table defines the expected "StructureSize" of SMB2 requests
@@ -486,19 +487,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 		cifs_dbg(FYI, "missing security blob on negprot\n");
 
 	rc = cifs_enable_signing(server, ses->sign);
-#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
 	if (rc)
 		goto neg_exit;
-	if (blob_length)
+	if (blob_length) {
 		rc = decode_negTokenInit(security_blob, blob_length, server);
 		if (rc == 1)
 			rc = 0;
-		else if (rc == 0) {
+		else if (rc == 0)
 			rc = -EIO;
-			goto neg_exit;
 	}
-#endif
-
 neg_exit:
 	free_rsp_buf(resp_buftype, rsp);
 	return rc;
@@ -592,7 +589,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
 	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
 	struct TCP_Server_Info *server = ses->server;
 	u16 blob_length = 0;
-	char *security_blob;
+	struct key *spnego_key = NULL;
+	char *security_blob = NULL;
 	char *ntlmssp_blob = NULL;
 	bool use_spnego = false; /* else use raw ntlmssp */
 
@@ -620,7 +618,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
 	ses->ntlmssp->sesskey_per_smbsess = true;
 
 	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
-	ses->sectype = RawNTLMSSP;
+	if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
+		ses->sectype = RawNTLMSSP;
 
 ssetup_ntlmssp_authenticate:
 	if (phase == NtLmChallenge)
@@ -649,7 +648,48 @@ ssetup_ntlmssp_authenticate:
 	iov[0].iov_base = (char *)req;
 	/* 4 for rfc1002 length field and 1 for pad */
 	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
-	if (phase == NtLmNegotiate) {
+
+	if (ses->sectype == Kerberos) {
+#ifdef CONFIG_CIFS_UPCALL
+		struct cifs_spnego_msg *msg;
+
+		spnego_key = cifs_get_spnego_key(ses);
+		if (IS_ERR(spnego_key)) {
+			rc = PTR_ERR(spnego_key);
+			spnego_key = NULL;
+			goto ssetup_exit;
+		}
+
+		msg = spnego_key->payload.data;
+		/*
+		 * check version field to make sure that cifs.upcall is
+		 * sending us a response in an expected form
+		 */
+		if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+			cifs_dbg(VFS,
+				  "bad cifs.upcall version. Expected %d got %d",
+				  CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+			rc = -EKEYREJECTED;
+			goto ssetup_exit;
+		}
+		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+						 GFP_KERNEL);
+		if (!ses->auth_key.response) {
+			cifs_dbg(VFS,
+				"Kerberos can't allocate (%u bytes) memory",
+				msg->sesskey_len);
+			rc = -ENOMEM;
+			goto ssetup_exit;
+		}
+		ses->auth_key.len = msg->sesskey_len;
+		blob_length = msg->secblob_len;
+		iov[1].iov_base = msg->data + msg->sesskey_len;
+		iov[1].iov_len = blob_length;
+#else
+		rc = -EOPNOTSUPP;
+		goto ssetup_exit;
+#endif /* CONFIG_CIFS_UPCALL */
+	} else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
 		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
 				       GFP_KERNEL);
 		if (ntlmssp_blob == NULL) {
@@ -672,6 +712,8 @@ ssetup_ntlmssp_authenticate:
 			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
 			security_blob = ntlmssp_blob;
 		}
+		iov[1].iov_base = security_blob;
+		iov[1].iov_len = blob_length;
 	} else if (phase == NtLmAuthenticate) {
 		req->hdr.SessionId = ses->Suid;
 		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
@@ -699,6 +741,8 @@ ssetup_ntlmssp_authenticate:
 		} else {
 			security_blob = ntlmssp_blob;
 		}
+		iov[1].iov_base = security_blob;
+		iov[1].iov_len = blob_length;
 	} else {
 		cifs_dbg(VFS, "illegal ntlmssp phase\n");
 		rc = -EIO;
@@ -710,8 +754,6 @@ ssetup_ntlmssp_authenticate:
 			cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
 				    1 /* pad */ - 4 /* rfc1001 len */);
 	req->SecurityBufferLength = cpu_to_le16(blob_length);
-	iov[1].iov_base = security_blob;
-	iov[1].iov_len = blob_length;
 
 	inc_rfc1001_len(req, blob_length - 1 /* pad */);
 
@@ -722,6 +764,7 @@ ssetup_ntlmssp_authenticate:
 
 	kfree(security_blob);
 	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
+	ses->Suid = rsp->hdr.SessionId;
 	if (resp_buftype != CIFS_NO_BUFFER &&
 	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
 		if (phase != NtLmNegotiate) {
@@ -739,7 +782,6 @@ ssetup_ntlmssp_authenticate:
 		/* NTLMSSP Negotiate sent now processing challenge (response) */
 		phase = NtLmChallenge; /* process ntlmssp challenge */
 		rc = 0; /* MORE_PROCESSING is not an error here but expected */
-		ses->Suid = rsp->hdr.SessionId;
 		rc = decode_ntlmssp_challenge(rsp->Buffer,
 				le16_to_cpu(rsp->SecurityBufferLength), ses);
 	}
@@ -796,6 +838,10 @@ keygen_exit:
 		kfree(ses->auth_key.response);
 		ses->auth_key.response = NULL;
 	}
+	if (spnego_key) {
+		key_invalidate(spnego_key);
+		key_put(spnego_key);
+	}
 	kfree(ses->ntlmssp);
 
 	return rc;
@@ -876,6 +922,12 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 	if (tcon && tcon->bad_network_name)
 		return -ENOENT;
 
+	if ((tcon->seal) &&
+	    ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
+		cifs_dbg(VFS, "encryption requested but no server support");
+		return -EOPNOTSUPP;
+	}
+
 	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
 	if (unc_path == NULL)
 		return -ENOMEM;
@@ -955,6 +1007,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
 	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
 		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
 	init_copy_chunk_defaults(tcon);
+	if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)
+		cifs_dbg(VFS, "Encrypted shares not supported");
 	if (tcon->ses->server->ops->validate_negotiate)
 		rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
 tcon_exit:
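The Kerberos branch added above trusts cifs.upcall only after checking its version field. A compact sketch of that guard pattern (struct layout and constant are illustrative, not the cifs.upcall ABI):

    #include <errno.h>

    struct upcall_msg {
            unsigned int version;
            unsigned int sesskey_len;
            unsigned int secblob_len;
            unsigned char data[];   /* session key followed by blob */
    };

    #define UPCALL_VERSION 2        /* what this code was built against */

    static int check_upcall(const struct upcall_msg *msg)
    {
            if (msg->version != UPCALL_VERSION)
                    return -EKEYREJECTED;   /* refuse a mismatched helper */
            return 0;
    }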
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 2714ef835bdd..be806ead7f4d 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -113,7 +113,8 @@ out:
 	return status;
 }
 
-static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
+static int nfs_delegation_claim_opens(struct inode *inode,
+		const nfs4_stateid *stateid, fmode_t type)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_open_context *ctx;
@@ -140,7 +141,7 @@ again:
 		/* Block nfs4_proc_unlck */
 		mutex_lock(&sp->so_delegreturn_mutex);
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
-		err = nfs4_open_delegation_recall(ctx, state, stateid);
+		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
 		if (!err)
 			err = nfs_delegation_claim_locks(ctx, state, stateid);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -411,7 +412,8 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
 	do {
 		if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
 			break;
-		err = nfs_delegation_claim_opens(inode, &delegation->stateid);
+		err = nfs_delegation_claim_opens(inode, &delegation->stateid,
+				delegation->type);
 		if (!issync || err != -EAGAIN)
 			break;
 		/*
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index a44829173e57..333063e032f0 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -54,7 +54,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
 
 /* NFSv4 delegation-related procedures */
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
 bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 38678d9a5cc4..4b1d08f56aba 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -166,8 +166,11 @@ nfs_direct_select_verf(struct nfs_direct_req *dreq,
 	struct nfs_writeverf *verfp = &dreq->verf;
 
 #ifdef CONFIG_NFS_V4_1
-	if (ds_clp) {
-		/* pNFS is in use, use the DS verf */
+	/*
+	 * pNFS is in use, use the DS verf except commit_through_mds is set
+	 * for layout segment where nbuckets is zero.
+	 */
+	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
 		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
 			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
 		else
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index b34f2e228601..02ec07973bc4 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -629,23 +629,18 @@ out_put:
 	goto out;
 }
 
-static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
+static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
 {
 	int i;
 
-	for (i = 0; i < fl->num_fh; i++) {
-		if (!fl->fh_array[i])
-			break;
-		kfree(fl->fh_array[i]);
+	if (fl->fh_array) {
+		for (i = 0; i < fl->num_fh; i++) {
+			if (!fl->fh_array[i])
+				break;
+			kfree(fl->fh_array[i]);
+		}
+		kfree(fl->fh_array);
 	}
-	kfree(fl->fh_array);
-	fl->fh_array = NULL;
-}
-
-static void
-_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
-{
-	filelayout_free_fh_array(fl);
 	kfree(fl);
 }
 
@@ -716,21 +711,21 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
 		/* Do we want to use a mempool here? */
 		fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
 		if (!fl->fh_array[i])
-			goto out_err_free;
+			goto out_err;
 
 		p = xdr_inline_decode(&stream, 4);
 		if (unlikely(!p))
-			goto out_err_free;
+			goto out_err;
 		fl->fh_array[i]->size = be32_to_cpup(p++);
 		if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
 			printk(KERN_ERR "NFS: Too big fh %d received %d\n",
 			       i, fl->fh_array[i]->size);
-			goto out_err_free;
+			goto out_err;
 		}
 
 		p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
 		if (unlikely(!p))
-			goto out_err_free;
+			goto out_err;
 		memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
 		dprintk("DEBUG: %s: fh len %d\n", __func__,
 			fl->fh_array[i]->size);
@@ -739,8 +734,6 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
 	__free_page(scratch);
 	return 0;
 
-out_err_free:
-	filelayout_free_fh_array(fl);
 out_err:
 	__free_page(scratch);
 	return -EIO;
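The consolidation above makes the segment destructor safe on half-built objects: it tolerates a NULL fh_array and stops at the first unallocated slot. The same idiom in generic form (illustrative sketch):

    #include <stdlib.h>

    /* Free an array of pointers that may be NULL or only partially
     * filled; allocation stopped at the first NULL slot, so freeing
     * can stop there too.
     */
    static void free_ptr_array(void **arr, int n)
    {
            int i;

            if (!arr)
                    return;         /* nothing was ever allocated */
            for (i = 0; i < n; i++) {
                    if (!arr[i])
                            break;  /* first hole marks the end */
                    free(arr[i]);
            }
            free(arr);
    }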
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index d731bbf974aa..0f020e4d8421 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -175,10 +175,12 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
 {
 	struct nfs_server *server = NFS_SERVER(file_inode(filep));
 	struct nfs4_exception exception = { };
-	int err;
+	loff_t err;
 
 	do {
 		err = _nfs42_proc_llseek(filep, offset, whence);
+		if (err >= 0)
+			break;
 		if (err == -ENOTSUPP)
 			return -EOPNOTSUPP;
 		err = nfs4_handle_exception(server, err, &exception);
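The int-to-loff_t change matters because _nfs42_proc_llseek() returns a 64-bit offset on success; an int result wraps for offsets at or beyond 2 GiB, and the added early break stops the loop from re-issuing an already successful SEEK. A small standalone demonstration of the truncation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t offset = INT64_C(6) * 1024 * 1024 * 1024; /* 6 GiB */
            int truncated = (int)offset;    /* what 'int err' would keep */

            /* 6442450944 wraps to -2147483648: a fake error value */
            printf("%lld -> %d\n", (long long)offset, truncated);
            return 0;
    }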
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 693b903b48bd..f93b9cdb4934 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1127,6 +1127,21 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
 	return ret;
 }
 
+static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
+		fmode_t fmode)
+{
+	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
+	case FMODE_READ|FMODE_WRITE:
+		return state->n_rdwr != 0;
+	case FMODE_WRITE:
+		return state->n_wronly != 0;
+	case FMODE_READ:
+		return state->n_rdonly != 0;
+	}
+	WARN_ON_ONCE(1);
+	return false;
+}
+
 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
 {
 	int ret = 0;
@@ -1571,17 +1586,13 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
 	return opendata;
 }
 
-static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
+static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
+		fmode_t fmode)
 {
 	struct nfs4_state *newstate;
 	int ret;
 
-	if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
-	     opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
-	    (opendata->o_arg.u.delegation_type & fmode) != fmode)
-		/* This mode can't have been delegated, so we must have
-		 * a valid open_stateid to cover it - not need to reclaim.
-		 */
+	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
 		return 0;
 	opendata->o_arg.open_flags = 0;
 	opendata->o_arg.fmode = fmode;
@@ -1597,14 +1608,14 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod
 	newstate = nfs4_opendata_to_nfs4_state(opendata);
 	if (IS_ERR(newstate))
 		return PTR_ERR(newstate);
+	if (newstate != opendata->state)
+		ret = -ESTALE;
 	nfs4_close_state(newstate, fmode);
-	*res = newstate;
-	return 0;
+	return ret;
 }
 
 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
 {
-	struct nfs4_state *newstate;
 	int ret;
 
 	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
@@ -1615,27 +1626,15 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
 	clear_bit(NFS_OPEN_STATE, &state->flags);
 	smp_rmb();
-	if (state->n_rdwr != 0) {
-		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
-		if (ret != 0)
-			return ret;
-		if (newstate != state)
-			return -ESTALE;
-	}
-	if (state->n_wronly != 0) {
-		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
-		if (ret != 0)
-			return ret;
-		if (newstate != state)
-			return -ESTALE;
-	}
-	if (state->n_rdonly != 0) {
-		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
-		if (ret != 0)
-			return ret;
-		if (newstate != state)
-			return -ESTALE;
-	}
+	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
+	if (ret != 0)
+		return ret;
+	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
+	if (ret != 0)
+		return ret;
+	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
+	if (ret != 0)
+		return ret;
 	/*
 	 * We may have performed cached opens for all three recoveries.
 	 * Check if we need to update the current stateid.
@@ -1759,18 +1758,32 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
 	return err;
 }
 
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
+		struct nfs4_state *state, const nfs4_stateid *stateid,
+		fmode_t type)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
 	struct nfs4_opendata *opendata;
-	int err;
+	int err = 0;
 
 	opendata = nfs4_open_recoverdata_alloc(ctx, state,
 			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
 	if (IS_ERR(opendata))
 		return PTR_ERR(opendata);
 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-	err = nfs4_open_recover(opendata, state);
+	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	switch (type & (FMODE_READ|FMODE_WRITE)) {
+	case FMODE_READ|FMODE_WRITE:
+	case FMODE_WRITE:
+		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
+		if (err)
+			break;
+		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
+		if (err)
+			break;
+	case FMODE_READ:
+		err = nfs4_open_recover_helper(opendata, FMODE_READ);
+	}
 	nfs4_opendata_put(opendata);
 	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
 }
@@ -2645,6 +2658,15 @@ out:
 	return err;
 }
 
+static bool
+nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
+{
+	if (inode == NULL || !nfs_have_layout(inode))
+		return false;
+
+	return pnfs_wait_on_layoutreturn(inode, task);
+}
+
 struct nfs4_closedata {
 	struct inode *inode;
 	struct nfs4_state *state;
@@ -2763,6 +2785,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
 		goto out_no_action;
 	}
 
+	if (nfs4_wait_on_layoutreturn(inode, task)) {
+		nfs_release_seqid(calldata->arg.seqid);
+		goto out_wait;
+	}
+
 	if (calldata->arg.fmode == 0)
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 	if (calldata->roc)
@@ -5308,6 +5335,9 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 
 	d_data = (struct nfs4_delegreturndata *)data;
 
+	if (nfs4_wait_on_layoutreturn(d_data->inode, task))
+		return;
+
 	if (d_data->roc)
 		pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
 
@@ -7800,39 +7830,46 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
 			dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
 				__func__, delay);
 			rpc_delay(task, delay);
-			task->tk_status = 0;
-			rpc_restart_call_prepare(task);
-			goto out; /* Do not call nfs4_async_handle_error() */
+			/* Do not call nfs4_async_handle_error() */
+			goto out_restart;
 		}
 		break;
 	case -NFS4ERR_EXPIRED:
 	case -NFS4ERR_BAD_STATEID:
 		spin_lock(&inode->i_lock);
-		lo = NFS_I(inode)->layout;
-		if (!lo || list_empty(&lo->plh_segs)) {
+		if (nfs4_stateid_match(&lgp->args.stateid,
+					&lgp->args.ctx->state->stateid)) {
 			spin_unlock(&inode->i_lock);
 			/* If the open stateid was bad, then recover it. */
 			state = lgp->args.ctx->state;
-		} else {
+			break;
+		}
+		lo = NFS_I(inode)->layout;
+		if (lo && nfs4_stateid_match(&lgp->args.stateid,
+					&lo->plh_stateid)) {
 			LIST_HEAD(head);
 
 			/*
 			 * Mark the bad layout state as invalid, then retry
 			 * with the current stateid.
 			 */
+			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
 			spin_unlock(&inode->i_lock);
 			pnfs_free_lseg_list(&head);
-
-			task->tk_status = 0;
-			rpc_restart_call_prepare(task);
-		}
+		} else
+			spin_unlock(&inode->i_lock);
+		goto out_restart;
 	}
 	if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
-		rpc_restart_call_prepare(task);
+		goto out_restart;
 out:
 	dprintk("<-- %s\n", __func__);
 	return;
+out_restart:
+	task->tk_status = 0;
+	rpc_restart_call_prepare(task);
+	return;
 out_overflow:
 	task->tk_status = -EOVERFLOW;
 	goto out;
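The delegation-recall rework above leans on a deliberate switch fall-through: recalling a write delegation reclaims R|W, then W, then R, while a read delegation reclaims only R. The same dispatch in isolation (helper stubbed, names illustrative):

    #define FMODE_READ  0x1
    #define FMODE_WRITE 0x2

    static int recover(int fmode) { (void)fmode; return 0; } /* stub */

    static int recall(int type)
    {
            int err = 0;

            switch (type & (FMODE_READ | FMODE_WRITE)) {
            case FMODE_READ | FMODE_WRITE:
            case FMODE_WRITE:
                    err = recover(FMODE_READ | FMODE_WRITE);
                    if (err)
                            break;
                    err = recover(FMODE_WRITE);
                    if (err)
                            break;
                    /* deliberate fall through: writers also reclaim read */
            case FMODE_READ:
                    err = recover(FMODE_READ);
            }
            return err;
    }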
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index da73bc443238..5db324635e92 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1481,7 +1481,7 @@ restart:
 				spin_unlock(&state->state_lock);
 			}
 			nfs4_put_open_state(state);
-			clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
+			clear_bit(NFS_STATE_RECLAIM_NOGRACE,
 				&state->flags);
 			spin_lock(&sp->so_lock);
 			goto restart;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 7c5718ba625e..fe3ddd20ff89 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -508,7 +508,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
 	 * for it without upsetting the slab allocator.
 	 */
 	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
-			sizeof(struct page) > PAGE_SIZE)
+			sizeof(struct page *) > PAGE_SIZE)
 		return 0;
 
 	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
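The one-word fix above corrects the bound from the size of a page descriptor to the size of a page pointer, since what the I/O path actually sizes here is an array of struct page pointers. The difference is easy to see (sketch with a stand-in struct):

    #include <stdio.h>

    struct page { unsigned long flags, priv, extra[6]; }; /* stand-in */

    int main(void)
    {
            /* Bounding an array of pointers by sizeof(struct page)
             * rejects requests roughly sizeof(struct page) / sizeof(void *)
             * times too early.
             */
            printf("struct: %zu bytes, pointer: %zu bytes\n",
                   sizeof(struct page), sizeof(struct page *));
            return 0;
    }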
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index ba1246433794..8abe27165ad0 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1104,20 +1104,15 @@ bool pnfs_roc(struct inode *ino)
 			mark_lseg_invalid(lseg, &tmp_list);
 			found = true;
 		}
-	/* pnfs_prepare_layoutreturn() grabs lo ref and it will be put
-	 * in pnfs_roc_release(). We don't really send a layoutreturn but
-	 * still want others to view us like we are sending one!
-	 *
-	 * If pnfs_prepare_layoutreturn() fails, it means someone else is doing
-	 * LAYOUTRETURN, so we proceed like there are no layouts to return.
-	 *
-	 * ROC in three conditions:
+	/* ROC in two conditions:
 	 * 1. there are ROC lsegs
 	 * 2. we don't send layoutreturn
-	 * 3. no others are sending layoutreturn
 	 */
-	if (found && !layoutreturn && pnfs_prepare_layoutreturn(lo))
+	if (found && !layoutreturn) {
+		/* lo ref dropped in pnfs_roc_release() */
+		pnfs_get_layout_hdr(lo);
 		roc = true;
+	}
 
 out_noroc:
 	spin_unlock(&ino->i_lock);
@@ -1172,6 +1167,26 @@ void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
 	spin_unlock(&ino->i_lock);
 }
 
+bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
+{
+	struct nfs_inode *nfsi = NFS_I(ino);
+	struct pnfs_layout_hdr *lo;
+	bool sleep = false;
+
+	/* we might not have grabbed lo reference. so need to check under
+	 * i_lock */
+	spin_lock(&ino->i_lock);
+	lo = nfsi->layout;
+	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+		sleep = true;
+	spin_unlock(&ino->i_lock);
+
+	if (sleep)
+		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+
+	return sleep;
+}
+
 /*
  * Compare two layout segments for sorting into layout cache.
  * We want to preferentially return RW over RO layouts, so ensure those
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 78c9351ff117..d1990e90e7a0 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -270,6 +270,7 @@ bool pnfs_roc(struct inode *ino);
 void pnfs_roc_release(struct inode *ino);
 void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
 void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier);
+bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task);
 void pnfs_set_layoutcommit(struct inode *, struct pnfs_layout_segment *, loff_t);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
 int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
@@ -639,6 +640,12 @@ pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
 {
 }
 
+static inline bool
+pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
+{
+	return false;
+}
+
 static inline void set_pnfs_layoutdriver(struct nfs_server *s,
 					 const struct nfs_fh *mntfh, u32 id)
 {
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index ae0ff7a11b40..01b8cc8e8cfc 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -72,6 +72,9 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
 {
 	struct nfs_pgio_mirror *mirror;
 
+	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
+		pgio->pg_ops->pg_cleanup(pgio);
+
 	pgio->pg_ops = &nfs_pgio_rw_ops;
 
 	/* read path should never have more than one mirror */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 388f48079c43..72624dc4a623 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1351,6 +1351,9 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
 {
 	struct nfs_pgio_mirror *mirror;
 
+	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
+		pgio->pg_ops->pg_cleanup(pgio);
+
 	pgio->pg_ops = &nfs_pgio_rw_ops;
 
 	nfs_pageio_stop_mirroring(pgio);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 46b8b2bbc95a..ee5aa4daaea0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1439 int found, ret; 1439 int found, ret;
1440 int set_maybe; 1440 int set_maybe;
1441 int dispatch_assert = 0; 1441 int dispatch_assert = 0;
1442 int dispatched = 0;
1442 1443
1443 if (!dlm_grab(dlm)) 1444 if (!dlm_grab(dlm))
1444 return DLM_MASTER_RESP_NO; 1445 return DLM_MASTER_RESP_NO;
@@ -1658,15 +1659,18 @@ send_response:
1658 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1659 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1659 response = DLM_MASTER_RESP_ERROR; 1660 response = DLM_MASTER_RESP_ERROR;
1660 dlm_lockres_put(res); 1661 dlm_lockres_put(res);
1661 } else 1662 } else {
1663 dispatched = 1;
1662 __dlm_lockres_grab_inflight_worker(dlm, res); 1664 __dlm_lockres_grab_inflight_worker(dlm, res);
1665 }
1663 spin_unlock(&res->spinlock); 1666 spin_unlock(&res->spinlock);
1664 } else { 1667 } else {
1665 if (res) 1668 if (res)
1666 dlm_lockres_put(res); 1669 dlm_lockres_put(res);
1667 } 1670 }
1668 1671
1669 dlm_put(dlm); 1672 if (!dispatched)
1673 dlm_put(dlm);
1670 return response; 1674 return response;
1671} 1675}
1672 1676
@@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2090 2094
2091 2095
2092 /* queue up work for dlm_assert_master_worker */ 2096 /* queue up work for dlm_assert_master_worker */
2093 dlm_grab(dlm); /* get an extra ref for the work item */
2094 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); 2097 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2095 item->u.am.lockres = res; /* already have a ref */ 2098 item->u.am.lockres = res; /* already have a ref */
2096 /* can optionally ignore node numbers higher than this node */ 2099 /* can optionally ignore node numbers higher than this node */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index ce12e0b1a31f..3d90ad7ff91f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1694 unsigned int hash; 1694 unsigned int hash;
1695 int master = DLM_LOCK_RES_OWNER_UNKNOWN; 1695 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1696 u32 flags = DLM_ASSERT_MASTER_REQUERY; 1696 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1697 int dispatched = 0;
1697 1698
1698 if (!dlm_grab(dlm)) { 1699 if (!dlm_grab(dlm)) {
1699 /* since the domain has gone away on this 1700 /* since the domain has gone away on this
@@ -1719,8 +1720,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1719 dlm_put(dlm); 1720 dlm_put(dlm);
1720 /* sender will take care of this and retry */ 1721 /* sender will take care of this and retry */
1721 return ret; 1722 return ret;
1722 } else 1723 } else {
1724 dispatched = 1;
1723 __dlm_lockres_grab_inflight_worker(dlm, res); 1725 __dlm_lockres_grab_inflight_worker(dlm, res);
1726 }
1724 spin_unlock(&res->spinlock); 1727 spin_unlock(&res->spinlock);
1725 } else { 1728 } else {
1726 /* put.. in case we are not the master */ 1729 /* put.. in case we are not the master */
@@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1730 } 1733 }
1731 spin_unlock(&dlm->spinlock); 1734 spin_unlock(&dlm->spinlock);
1732 1735
1733 dlm_put(dlm); 1736 if (!dispatched)
1737 dlm_put(dlm);
1734 return master; 1738 return master;
1735} 1739}
1736 1740
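Both dlm handlers follow the same ownership rule for the reference taken
by dlm_grab(): once work is dispatched, the worker inherits that
reference and becomes responsible for the matching dlm_put(). Reduced to
its shape, with the dispatch helper invented for illustration:

	/* Sketch of the hand-off, not the real handler: exactly one of
	 * the handler or the queued worker drops the domain reference. */
	static int example_handler(struct dlm_ctxt *dlm)
	{
		int dispatched = 0;

		if (!dlm_grab(dlm))			/* take a domain ref */
			return -EINVAL;

		if (example_queue_work(dlm) == 0)	/* hypothetical helper */
			dispatched = 1;			/* worker owns the ref now */

		if (!dispatched)
			dlm_put(dlm);			/* nothing queued: drop it here */
		return 0;
	}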
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index f9aeb40a7197..50311703135b 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -467,8 +467,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
467 * the fault_*wqh. 467 * the fault_*wqh.
468 */ 468 */
469 spin_lock(&ctx->fault_pending_wqh.lock); 469 spin_lock(&ctx->fault_pending_wqh.lock);
470 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, &range); 470 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
471 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, &range); 471 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
472 spin_unlock(&ctx->fault_pending_wqh.lock); 472 spin_unlock(&ctx->fault_pending_wqh.lock);
473 473
474 wake_up_poll(&ctx->fd_wqh, POLLHUP); 474 wake_up_poll(&ctx->fd_wqh, POLLHUP);
@@ -650,10 +650,10 @@ static void __wake_userfault(struct userfaultfd_ctx *ctx,
650 spin_lock(&ctx->fault_pending_wqh.lock); 650 spin_lock(&ctx->fault_pending_wqh.lock);
651 /* wake all in the range and autoremove */ 651 /* wake all in the range and autoremove */
652 if (waitqueue_active(&ctx->fault_pending_wqh)) 652 if (waitqueue_active(&ctx->fault_pending_wqh))
653 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, 0, 653 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
654 range); 654 range);
655 if (waitqueue_active(&ctx->fault_wqh)) 655 if (waitqueue_active(&ctx->fault_wqh))
656 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, 0, range); 656 __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
657 spin_unlock(&ctx->fault_pending_wqh.lock); 657 spin_unlock(&ctx->fault_pending_wqh.lock);
658} 658}
659 659
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5a5d79ee256f..d5eb4ad1c534 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/blkdev.h> 14#include <linux/blkdev.h>
15#include <linux/writeback.h> 15#include <linux/writeback.h>
16#include <linux/memcontrol.h>
16#include <linux/blk-cgroup.h> 17#include <linux/blk-cgroup.h>
17#include <linux/backing-dev-defs.h> 18#include <linux/backing-dev-defs.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
@@ -252,13 +253,19 @@ int inode_congested(struct inode *inode, int cong_bits);
252 * @inode: inode of interest 253 * @inode: inode of interest
253 * 254 *
254 * cgroup writeback requires support from both the bdi and filesystem. 255 * cgroup writeback requires support from both the bdi and filesystem.
255 * Test whether @inode has both. 256 * Also, both memcg and iocg have to be on the default hierarchy. Test
257 * whether all conditions are met.
258 *
259 * Note that the test result may change dynamically on the same inode
260 * depending on how memcg and iocg are configured.
256 */ 261 */
257static inline bool inode_cgwb_enabled(struct inode *inode) 262static inline bool inode_cgwb_enabled(struct inode *inode)
258{ 263{
259 struct backing_dev_info *bdi = inode_to_bdi(inode); 264 struct backing_dev_info *bdi = inode_to_bdi(inode);
260 265
261 return bdi_cap_account_dirty(bdi) && 266 return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
267 cgroup_on_dfl(blkcg_root_css->cgroup) &&
268 bdi_cap_account_dirty(bdi) &&
262 (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) && 269 (bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
263 (inode->i_sb->s_iflags & SB_I_CGROUPWB); 270 (inode->i_sb->s_iflags & SB_I_CGROUPWB);
264} 271}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 4d8fcf2187dc..8492721b39be 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -473,31 +473,8 @@ struct cgroup_subsys {
473 unsigned int depends_on; 473 unsigned int depends_on;
474}; 474};
475 475
476extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; 476void cgroup_threadgroup_change_begin(struct task_struct *tsk);
477 477void cgroup_threadgroup_change_end(struct task_struct *tsk);
478/**
479 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
480 * @tsk: target task
481 *
482 * Called from threadgroup_change_begin() and allows cgroup operations to
483 * synchronize against threadgroup changes using a percpu_rw_semaphore.
484 */
485static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
486{
487 percpu_down_read(&cgroup_threadgroup_rwsem);
488}
489
490/**
491 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
492 * @tsk: target task
493 *
494 * Called from threadgroup_change_end(). Counterpart of
495 * cgroup_threadcgroup_change_begin().
496 */
497static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
498{
499 percpu_up_read(&cgroup_threadgroup_rwsem);
500}
501 478
502#else /* CONFIG_CGROUPS */ 479#else /* CONFIG_CGROUPS */
503 480
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d0b380ee7d67..e38681f4912d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,6 +25,13 @@
25extern struct files_struct init_files; 25extern struct files_struct init_files;
26extern struct fs_struct init_fs; 26extern struct fs_struct init_fs;
27 27
28#ifdef CONFIG_CGROUPS
29#define INIT_GROUP_RWSEM(sig) \
30 .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
31#else
32#define INIT_GROUP_RWSEM(sig)
33#endif
34
28#ifdef CONFIG_CPUSETS 35#ifdef CONFIG_CPUSETS
29#define INIT_CPUSET_SEQ(tsk) \ 36#define INIT_CPUSET_SEQ(tsk) \
30 .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq), 37 .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -57,6 +64,7 @@ extern struct fs_struct init_fs;
57 INIT_PREV_CPUTIME(sig) \ 64 INIT_PREV_CPUTIME(sig) \
58 .cred_guard_mutex = \ 65 .cred_guard_mutex = \
59 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ 66 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
67 INIT_GROUP_RWSEM(sig) \
60} 68}
61 69
62extern struct nsproxy init_nsproxy; 70extern struct nsproxy init_nsproxy;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 88a00694eda5..2d15e3831440 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -507,6 +507,7 @@ static inline void napi_enable(struct napi_struct *n)
507 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 507 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
508 smp_mb__before_atomic(); 508 smp_mb__before_atomic();
509 clear_bit(NAPI_STATE_SCHED, &n->state); 509 clear_bit(NAPI_STATE_SCHED, &n->state);
510 clear_bit(NAPI_STATE_NPSVC, &n->state);
510} 511}
511 512
512#ifdef CONFIG_SMP 513#ifdef CONFIG_SMP
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 962387a192f1..4a4e3a092337 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -19,6 +19,7 @@
19#include <linux/spinlock.h> 19#include <linux/spinlock.h>
20#include <linux/ethtool.h> 20#include <linux/ethtool.h>
21#include <linux/mii.h> 21#include <linux/mii.h>
22#include <linux/module.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/workqueue.h> 24#include <linux/workqueue.h>
24#include <linux/mod_devicetable.h> 25#include <linux/mod_devicetable.h>
@@ -153,6 +154,7 @@ struct sk_buff;
153 * PHYs should register using this structure 154 * PHYs should register using this structure
154 */ 155 */
155struct mii_bus { 156struct mii_bus {
157 struct module *owner;
156 const char *name; 158 const char *name;
157 char id[MII_BUS_ID_SIZE]; 159 char id[MII_BUS_ID_SIZE];
158 void *priv; 160 void *priv;
@@ -198,7 +200,8 @@ static inline struct mii_bus *mdiobus_alloc(void)
198 return mdiobus_alloc_size(0); 200 return mdiobus_alloc_size(0);
199} 201}
200 202
201int mdiobus_register(struct mii_bus *bus); 203int __mdiobus_register(struct mii_bus *bus, struct module *owner);
204#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
202void mdiobus_unregister(struct mii_bus *bus); 205void mdiobus_unregister(struct mii_bus *bus);
203void mdiobus_free(struct mii_bus *bus); 206void mdiobus_free(struct mii_bus *bus);
204struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); 207struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -742,6 +745,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
742 struct phy_c45_device_ids *c45_ids); 745 struct phy_c45_device_ids *c45_ids);
743struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); 746struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
744int phy_device_register(struct phy_device *phy); 747int phy_device_register(struct phy_device *phy);
748void phy_device_remove(struct phy_device *phydev);
745int phy_init_hw(struct phy_device *phydev); 749int phy_init_hw(struct phy_device *phydev);
746int phy_suspend(struct phy_device *phydev); 750int phy_suspend(struct phy_device *phydev);
747int phy_resume(struct phy_device *phydev); 751int phy_resume(struct phy_device *phydev);
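Since mdiobus_register() is now a macro expanding to
__mdiobus_register(bus, THIS_MODULE), existing drivers record their
owning module without any source change, and the phy core can pin that
module while a PHY is attached. A hedged registration sketch with
invented device and accessor names:

	/* Illustrative probe only; example_mdio_read/write are assumed
	 * bus accessors, not real functions. */
	static int example_mdio_probe(struct platform_device *pdev)
	{
		struct mii_bus *bus = devm_mdiobus_alloc_size(&pdev->dev, 0);

		if (!bus)
			return -ENOMEM;

		bus->name = "example-mdio";
		snprintf(bus->id, MII_BUS_ID_SIZE, "example-%d", pdev->id);
		bus->read = example_mdio_read;
		bus->write = example_mdio_write;

		return mdiobus_register(bus);	/* owner = THIS_MODULE */
	}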
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a4ab9daa387c..b7b9501b41af 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -762,6 +762,18 @@ struct signal_struct {
762 unsigned audit_tty_log_passwd; 762 unsigned audit_tty_log_passwd;
763 struct tty_audit_buf *tty_audit_buf; 763 struct tty_audit_buf *tty_audit_buf;
764#endif 764#endif
765#ifdef CONFIG_CGROUPS
766 /*
767 * group_rwsem prevents new tasks from entering the threadgroup and
768	 * member tasks from exiting, more specifically, setting of
769 * PF_EXITING. fork and exit paths are protected with this rwsem
770 * using threadgroup_change_begin/end(). Users which require
771 * threadgroup to remain stable should use threadgroup_[un]lock()
772 * which also takes care of exec path. Currently, cgroup is the
773 * only user.
774 */
775 struct rw_semaphore group_rwsem;
776#endif
765 777
766 oom_flags_t oom_flags; 778 oom_flags_t oom_flags;
767 short oom_score_adj; /* OOM kill score adjustment */ 779 short oom_score_adj; /* OOM kill score adjustment */
diff --git a/include/linux/security.h b/include/linux/security.h
index 79d85ddf8093..2f4c1f7aa7db 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
946 unsigned long arg4, 946 unsigned long arg4,
947 unsigned long arg5) 947 unsigned long arg5)
948{ 948{
949 return cap_task_prctl(option, arg2, arg3, arg3, arg5); 949 return cap_task_prctl(option, arg2, arg3, arg4, arg5);
950} 950}
951 951
952static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) 952static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2738d355cdf9..2b0a30a6e31c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -179,6 +179,9 @@ struct nf_bridge_info {
179 u8 bridged_dnat:1; 179 u8 bridged_dnat:1;
180 __u16 frag_max_size; 180 __u16 frag_max_size;
181 struct net_device *physindev; 181 struct net_device *physindev;
182
183 /* always valid & non-NULL from FORWARD on, for physdev match */
184 struct net_device *physoutdev;
182 union { 185 union {
183 /* prerouting: detect dnat in orig/reply direction */ 186 /* prerouting: detect dnat in orig/reply direction */
184 __be32 ipv4_daddr; 187 __be32 ipv4_daddr;
@@ -189,9 +192,6 @@ struct nf_bridge_info {
189 * skb is out in neigh layer. 192 * skb is out in neigh layer.
190 */ 193 */
191 char neigh_header[8]; 194 char neigh_header[8];
192
193 /* always valid & non-NULL from FORWARD on, for physdev match */
194 struct net_device *physoutdev;
195 }; 195 };
196}; 196};
197#endif 197#endif
@@ -2707,6 +2707,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
2707{ 2707{
2708 if (skb->ip_summed == CHECKSUM_COMPLETE) 2708 if (skb->ip_summed == CHECKSUM_COMPLETE)
2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); 2709 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2710 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2711 skb_checksum_start_offset(skb) <= len)
2712 skb->ip_summed = CHECKSUM_NONE;
2710} 2713}
2711 2714
2712unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); 2715unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
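With the skb_postpull_rcsum() change above, a CHECKSUM_PARTIAL skb whose
checksum start lies inside the pulled region degrades to CHECKSUM_NONE
instead of leaving csum_start pointing at bytes that were pulled away. A
sketch of the usual pull-then-fixup pairing on the caller side:

	/* Pull len header bytes, then repair the checksum state for
	 * whichever ip_summed mode the skb is in. */
	static void example_pull_header(struct sk_buff *skb, unsigned int len)
	{
		const void *start = skb->data;

		__skb_pull(skb, len);
		skb_postpull_rcsum(skb, start, len);
	}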
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 269e8afd3e2a..6b00f18f5e6b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -34,7 +34,7 @@ extern struct bus_type spi_bus_type;
34 34
35/** 35/**
36 * struct spi_statistics - statistics for spi transfers 36 * struct spi_statistics - statistics for spi transfers
37 * @clock: lock protecting this structure 37 * @lock: lock protecting this structure
38 * 38 *
39 * @messages: number of spi-messages handled 39 * @messages: number of spi-messages handled
40 * @transfers: number of spi_transfers handled 40 * @transfers: number of spi_transfers handled
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 7591788e9fbf..357e44c1a46b 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -42,6 +42,7 @@ struct sock_xprt {
42 /* 42 /*
43 * Connection of transports 43 * Connection of transports
44 */ 44 */
45 unsigned long sock_state;
45 struct delayed_work connect_worker; 46 struct delayed_work connect_worker;
46 struct sockaddr_storage srcaddr; 47 struct sockaddr_storage srcaddr;
47 unsigned short srcport; 48 unsigned short srcport;
@@ -76,6 +77,8 @@ struct sock_xprt {
76 */ 77 */
77#define TCP_RPC_REPLY (1UL << 6) 78#define TCP_RPC_REPLY (1UL << 6)
78 79
80#define XPRT_SOCK_CONNECTING 1U
81
79#endif /* __KERNEL__ */ 82#endif /* __KERNEL__ */
80 83
81#endif /* _LINUX_SUNRPC_XPRTSOCK_H */ 84#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 17292fee8686..157d366e761b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -360,7 +360,7 @@ static inline struct thermal_zone_device *
360thermal_zone_of_sensor_register(struct device *dev, int id, void *data, 360thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
361 const struct thermal_zone_of_device_ops *ops) 361 const struct thermal_zone_of_device_ops *ops)
362{ 362{
363 return NULL; 363 return ERR_PTR(-ENODEV);
364} 364}
365 365
366static inline 366static inline
@@ -380,6 +380,8 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
380 380
381int power_actor_get_max_power(struct thermal_cooling_device *, 381int power_actor_get_max_power(struct thermal_cooling_device *,
382 struct thermal_zone_device *tz, u32 *max_power); 382 struct thermal_zone_device *tz, u32 *max_power);
383int power_actor_get_min_power(struct thermal_cooling_device *,
384 struct thermal_zone_device *tz, u32 *min_power);
383int power_actor_set_power(struct thermal_cooling_device *, 385int power_actor_set_power(struct thermal_cooling_device *,
384 struct thermal_instance *, u32); 386 struct thermal_instance *, u32);
385struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 387struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
@@ -415,6 +417,10 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
415static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, 417static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev,
416 struct thermal_zone_device *tz, u32 *max_power) 418 struct thermal_zone_device *tz, u32 *max_power)
417{ return 0; } 419{ return 0; }
420static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev,
421 struct thermal_zone_device *tz,
422 u32 *min_power)
423{ return -ENODEV; }
418static inline int power_actor_set_power(struct thermal_cooling_device *cdev, 424static inline int power_actor_set_power(struct thermal_cooling_device *cdev,
419 struct thermal_instance *tz, u32 power) 425 struct thermal_instance *tz, u32 power)
420{ return 0; } 426{ return 0; }
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d3d077228d4c..1e1bf9f963a9 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -147,8 +147,7 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
147 147
148typedef int wait_bit_action_f(struct wait_bit_key *); 148typedef int wait_bit_action_f(struct wait_bit_key *);
149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 149void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, 150void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
151 void *key);
152void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 151void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
153void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); 152void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
154void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 153void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
@@ -180,7 +179,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
180#define wake_up_poll(x, m) \ 179#define wake_up_poll(x, m) \
181 __wake_up(x, TASK_NORMAL, 1, (void *) (m)) 180 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
182#define wake_up_locked_poll(x, m) \ 181#define wake_up_locked_poll(x, m) \
183 __wake_up_locked_key((x), TASK_NORMAL, 1, (void *) (m)) 182 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
184#define wake_up_interruptible_poll(x, m) \ 183#define wake_up_interruptible_poll(x, m) \
185 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) 184 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
186#define wake_up_interruptible_sync_poll(x, m) \ 185#define wake_up_interruptible_sync_poll(x, m) \
diff --git a/include/net/flow.h b/include/net/flow.h
index acd6a096250e..9b85db85f13c 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -35,6 +35,7 @@ struct flowi_common {
35#define FLOWI_FLAG_ANYSRC 0x01 35#define FLOWI_FLAG_ANYSRC 0x01
36#define FLOWI_FLAG_KNOWN_NH 0x02 36#define FLOWI_FLAG_KNOWN_NH 0x02
37#define FLOWI_FLAG_VRFSRC 0x04 37#define FLOWI_FLAG_VRFSRC 0x04
38#define FLOWI_FLAG_SKIP_NH_OIF 0x08
38 __u32 flowic_secid; 39 __u32 flowic_secid;
39 struct flowi_tunnel flowic_tun_key; 40 struct flowi_tunnel flowic_tun_key;
40}; 41};
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 879d6e5a973b..186f3a1e1b1f 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -110,7 +110,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
110void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, 110void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
111 struct inet_hashinfo *hashinfo); 111 struct inet_hashinfo *hashinfo);
112 112
113void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo); 113void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
114 bool rearm);
115
116static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
117{
118 __inet_twsk_schedule(tw, timeo, false);
119}
120
121static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
122{
123 __inet_twsk_schedule(tw, timeo, true);
124}
125
114void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); 126void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
115 127
116void inet_twsk_purge(struct inet_hashinfo *hashinfo, 128void inet_twsk_purge(struct inet_hashinfo *hashinfo,
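The split wrappers make the caller's intent explicit:
inet_twsk_schedule() arms the timewait timer for the first time, while
inet_twsk_reschedule() extends one that is already pending. A hedged
sketch of the reschedule case, e.g. refreshing TIME-WAIT on further
traffic for the pair (control flow illustrative only):

	static void example_tw_refresh(struct inet_timewait_sock *tw)
	{
		/* Extend the already-armed timer rather than re-arm it. */
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
	}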
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 063d30474cf6..aaf9700fc9e5 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -275,7 +275,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
275 struct nl_info *info, struct mx6_config *mxc); 275 struct nl_info *info, struct mx6_config *mxc);
276int fib6_del(struct rt6_info *rt, struct nl_info *info); 276int fib6_del(struct rt6_info *rt, struct nl_info *info);
277 277
278void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info); 278void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
279 unsigned int flags);
279 280
280void fib6_run_gc(unsigned long expires, struct net *net, bool force); 281void fib6_run_gc(unsigned long expires, struct net *net, bool force);
281 282
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index b8529aa1dae7..fa915fa0f703 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -32,6 +32,12 @@ struct __ip6_tnl_parm {
32 __be32 o_key; 32 __be32 o_key;
33}; 33};
34 34
35struct ip6_tnl_dst {
36 seqlock_t lock;
37 struct dst_entry __rcu *dst;
38 u32 cookie;
39};
40
35/* IPv6 tunnel */ 41/* IPv6 tunnel */
36struct ip6_tnl { 42struct ip6_tnl {
37 struct ip6_tnl __rcu *next; /* next tunnel in list */ 43 struct ip6_tnl __rcu *next; /* next tunnel in list */
@@ -39,8 +45,7 @@ struct ip6_tnl {
39 struct net *net; /* netns for packet i/o */ 45 struct net *net; /* netns for packet i/o */
40 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */ 46 struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
41 struct flowi fl; /* flowi template for xmit */ 47 struct flowi fl; /* flowi template for xmit */
42 struct dst_entry *dst_cache; /* cached dst */ 48 struct ip6_tnl_dst __percpu *dst_cache; /* cached dst */
43 u32 dst_cookie;
44 49
45 int err_count; 50 int err_count;
46 unsigned long err_time; 51 unsigned long err_time;
@@ -60,9 +65,11 @@ struct ipv6_tlv_tnl_enc_lim {
60 __u8 encap_limit; /* tunnel encapsulation limit */ 65 __u8 encap_limit; /* tunnel encapsulation limit */
61} __packed; 66} __packed;
62 67
63struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t); 68struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t);
69int ip6_tnl_dst_init(struct ip6_tnl *t);
70void ip6_tnl_dst_destroy(struct ip6_tnl *t);
64void ip6_tnl_dst_reset(struct ip6_tnl *t); 71void ip6_tnl_dst_reset(struct ip6_tnl *t);
65void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst); 72void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst);
66int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 73int ip6_tnl_rcv_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
67 const struct in6_addr *raddr); 74 const struct in6_addr *raddr);
68int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr, 75int ip6_tnl_xmit_ctl(struct ip6_tnl *t, const struct in6_addr *laddr,
@@ -79,7 +86,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
79 struct net_device_stats *stats = &dev->stats; 86 struct net_device_stats *stats = &dev->stats;
80 int pkt_len, err; 87 int pkt_len, err;
81 88
82 pkt_len = skb->len; 89 pkt_len = skb->len - skb_inner_network_offset(skb);
83 err = ip6_local_out_sk(sk, skb); 90 err = ip6_local_out_sk(sk, skb);
84 91
85 if (net_xmit_eval(err) == 0) { 92 if (net_xmit_eval(err) == 0) {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a37d0432bebd..727d6e9a9685 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -236,8 +236,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
236 rcu_read_lock(); 236 rcu_read_lock();
237 237
238 tb = fib_get_table(net, RT_TABLE_MAIN); 238 tb = fib_get_table(net, RT_TABLE_MAIN);
239 if (tb && !fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF)) 239 if (tb)
240 err = 0; 240 err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);
241
242 if (err == -EAGAIN)
243 err = -ENETUNREACH;
241 244
242 rcu_read_unlock(); 245 rcu_read_unlock();
243 246
@@ -258,7 +261,7 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
258 struct fib_result *res, unsigned int flags) 261 struct fib_result *res, unsigned int flags)
259{ 262{
260 struct fib_table *tb; 263 struct fib_table *tb;
261 int err; 264 int err = -ENETUNREACH;
262 265
263 flags |= FIB_LOOKUP_NOREF; 266 flags |= FIB_LOOKUP_NOREF;
264 if (net->ipv4.fib_has_custom_rules) 267 if (net->ipv4.fib_has_custom_rules)
@@ -268,15 +271,20 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
268 271
269 res->tclassid = 0; 272 res->tclassid = 0;
270 273
271 for (err = 0; !err; err = -ENETUNREACH) { 274 tb = rcu_dereference_rtnl(net->ipv4.fib_main);
272 tb = rcu_dereference_rtnl(net->ipv4.fib_main); 275 if (tb)
273 if (tb && !fib_table_lookup(tb, flp, res, flags)) 276 err = fib_table_lookup(tb, flp, res, flags);
274 break; 277
278 if (!err)
279 goto out;
280
281 tb = rcu_dereference_rtnl(net->ipv4.fib_default);
282 if (tb)
283 err = fib_table_lookup(tb, flp, res, flags);
275 284
276 tb = rcu_dereference_rtnl(net->ipv4.fib_default); 285out:
277 if (tb && !fib_table_lookup(tb, flp, res, flags)) 286 if (err == -EAGAIN)
278 break; 287 err = -ENETUNREACH;
279 }
280 288
281 rcu_read_unlock(); 289 rcu_read_unlock();
282 290
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9a6a3ba888e8..f6dafec9102c 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -276,6 +276,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
276int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, 276int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
277 __be32 src, __be32 dst, u8 proto, 277 __be32 src, __be32 dst, u8 proto,
278 u8 tos, u8 ttl, __be16 df, bool xnet); 278 u8 tos, u8 ttl, __be16 df, bool xnet);
279struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
280 gfp_t flags);
279 281
280struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, 282struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
281 int gso_type_mask); 283 int gso_type_mask);
diff --git a/include/net/route.h b/include/net/route.h
index cc61cb95f059..f46af256880c 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -255,7 +255,7 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
255 flow_flags |= FLOWI_FLAG_ANYSRC; 255 flow_flags |= FLOWI_FLAG_ANYSRC;
256 256
257 if (netif_index_is_vrf(sock_net(sk), oif)) 257 if (netif_index_is_vrf(sock_net(sk), oif))
258 flow_flags |= FLOWI_FLAG_VRFSRC; 258 flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF;
259 259
260 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, 260 flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
261 protocol, flow_flags, dst, src, dport, sport); 261 protocol, flow_flags, dst, src, dport, sport);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 7845fae6f2df..e1f65e204d37 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -137,6 +137,8 @@ enum ib_device_cap_flags {
137 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22), 137 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
138 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23), 138 IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
139 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24), 139 IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
140 IB_DEVICE_RC_IP_CSUM = (1<<25),
141 IB_DEVICE_RAW_IP_CSUM = (1<<26),
140 IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29), 142 IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29),
141 IB_DEVICE_SIGNATURE_HANDOVER = (1<<30), 143 IB_DEVICE_SIGNATURE_HANDOVER = (1<<30),
142 IB_DEVICE_ON_DEMAND_PAGING = (1<<31), 144 IB_DEVICE_ON_DEMAND_PAGING = (1<<31),
@@ -873,7 +875,6 @@ enum ib_qp_create_flags {
873 IB_QP_CREATE_RESERVED_END = 1 << 31, 875 IB_QP_CREATE_RESERVED_END = 1 << 31,
874}; 876};
875 877
876
877/* 878/*
878 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler 879 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
879 * callback to destroy the passed in QP. 880 * callback to destroy the passed in QP.
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index ac9bf1c0e42d..5f48754dc36a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -730,6 +730,7 @@ struct se_device {
730#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004 730#define DF_EMULATED_VPD_UNIT_SERIAL 0x00000004
731#define DF_USING_UDEV_PATH 0x00000008 731#define DF_USING_UDEV_PATH 0x00000008
732#define DF_USING_ALIAS 0x00000010 732#define DF_USING_ALIAS 0x00000010
733#define DF_READ_ONLY 0x00000020
733 /* Physical device queue depth */ 734 /* Physical device queue depth */
734 u32 queue_depth; 735 u32 queue_depth;
735 /* Used for SPC-2 reservations enforce of ISIDs */ 736 /* Used for SPC-2 reservations enforce of ISIDs */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 8da542a2874d..ee124009e12a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -709,17 +709,19 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
709__SYSCALL(__NR_bpf, sys_bpf) 709__SYSCALL(__NR_bpf, sys_bpf)
710#define __NR_execveat 281 710#define __NR_execveat 281
711__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat) 711__SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
712#define __NR_membarrier 282 712#define __NR_userfaultfd 282
713__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
714#define __NR_membarrier 283
713__SYSCALL(__NR_membarrier, sys_membarrier) 715__SYSCALL(__NR_membarrier, sys_membarrier)
714 716
715#undef __NR_syscalls 717#undef __NR_syscalls
716#define __NR_syscalls 283 718#define __NR_syscalls 284
717 719
718/* 720/*
719 * All syscalls below here should go away really, 721 * All syscalls below here should go away really,
720 * these are provided for both review and as a porting 722 * these are provided for both review and as a porting
721 * help for the C library version. 723 * help for the C library version.
722* 724 *
723 * Last chance: are any of these important enough to 725 * Last chance: are any of these important enough to
724 * enable by default? 726 * enable by default?
725 */ 727 */
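Until libc grows a wrapper, user space can reach the new entry point by
number. A minimal sketch against the asm-generic table above — the 282
value holds only for architectures that use this unistd.h; others wire
up their own numbers:

	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef __NR_userfaultfd
	#define __NR_userfaultfd 282	/* asm-generic value from the hunk above */
	#endif

	static int example_open_uffd(void)
	{
		return (int)syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	}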
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index 34141a5dfe74..f8b01887a495 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -21,8 +21,6 @@ enum lwtunnel_ip_t {
21 LWTUNNEL_IP_SRC, 21 LWTUNNEL_IP_SRC,
22 LWTUNNEL_IP_TTL, 22 LWTUNNEL_IP_TTL,
23 LWTUNNEL_IP_TOS, 23 LWTUNNEL_IP_TOS,
24 LWTUNNEL_IP_SPORT,
25 LWTUNNEL_IP_DPORT,
26 LWTUNNEL_IP_FLAGS, 24 LWTUNNEL_IP_FLAGS,
27 __LWTUNNEL_IP_MAX, 25 __LWTUNNEL_IP_MAX,
28}; 26};
@@ -36,8 +34,6 @@ enum lwtunnel_ip6_t {
36 LWTUNNEL_IP6_SRC, 34 LWTUNNEL_IP6_SRC,
37 LWTUNNEL_IP6_HOPLIMIT, 35 LWTUNNEL_IP6_HOPLIMIT,
38 LWTUNNEL_IP6_TC, 36 LWTUNNEL_IP6_TC,
39 LWTUNNEL_IP6_SPORT,
40 LWTUNNEL_IP6_DPORT,
41 LWTUNNEL_IP6_FLAGS, 37 LWTUNNEL_IP6_FLAGS,
42 __LWTUNNEL_IP6_MAX, 38 __LWTUNNEL_IP6_MAX,
43}; 39};
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2cf0f79f1fc9..2c9eae6ad970 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -46,7 +46,6 @@
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/spinlock.h> 47#include <linux/spinlock.h>
48#include <linux/rwsem.h> 48#include <linux/rwsem.h>
49#include <linux/percpu-rwsem.h>
50#include <linux/string.h> 49#include <linux/string.h>
51#include <linux/sort.h> 50#include <linux/sort.h>
52#include <linux/kmod.h> 51#include <linux/kmod.h>
@@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
104 */ 103 */
105static DEFINE_SPINLOCK(release_agent_path_lock); 104static DEFINE_SPINLOCK(release_agent_path_lock);
106 105
107struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
108
109#define cgroup_assert_mutex_or_rcu_locked() \ 106#define cgroup_assert_mutex_or_rcu_locked() \
110 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ 107 RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
111 !lockdep_is_held(&cgroup_mutex), \ 108 !lockdep_is_held(&cgroup_mutex), \
@@ -874,6 +871,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
874 return cset; 871 return cset;
875} 872}
876 873
874void cgroup_threadgroup_change_begin(struct task_struct *tsk)
875{
876 down_read(&tsk->signal->group_rwsem);
877}
878
879void cgroup_threadgroup_change_end(struct task_struct *tsk)
880{
881 up_read(&tsk->signal->group_rwsem);
882}
883
884/**
885 * threadgroup_lock - lock threadgroup
886 * @tsk: member task of the threadgroup to lock
887 *
888 * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
889 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
890 * change ->group_leader/pid. This is useful for cases where the threadgroup
891 * needs to stay stable across blockable operations.
892 *
893 * fork and exit explicitly call threadgroup_change_{begin|end}() for
894 * synchronization. While held, no new task will be added to threadgroup
895 * and no existing live task will have its PF_EXITING set.
896 *
897 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
898 * sub-thread becomes a new leader.
899 */
900static void threadgroup_lock(struct task_struct *tsk)
901{
902 down_write(&tsk->signal->group_rwsem);
903}
904
905/**
906 * threadgroup_unlock - unlock threadgroup
907 * @tsk: member task of the threadgroup to unlock
908 *
909 * Reverse threadgroup_lock().
910 */
911static inline void threadgroup_unlock(struct task_struct *tsk)
912{
913 up_write(&tsk->signal->group_rwsem);
914}
915
877static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) 916static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
878{ 917{
879 struct cgroup *root_cgrp = kf_root->kn->priv; 918 struct cgroup *root_cgrp = kf_root->kn->priv;
@@ -2074,9 +2113,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
2074 lockdep_assert_held(&css_set_rwsem); 2113 lockdep_assert_held(&css_set_rwsem);
2075 2114
2076 /* 2115 /*
2077 * We are synchronized through cgroup_threadgroup_rwsem against 2116 * We are synchronized through threadgroup_lock() against PF_EXITING
2078 * PF_EXITING setting such that we can't race against cgroup_exit() 2117 * setting such that we can't race against cgroup_exit() changing the
2079 * changing the css_set to init_css_set and dropping the old one. 2118 * css_set to init_css_set and dropping the old one.
2080 */ 2119 */
2081 WARN_ON_ONCE(tsk->flags & PF_EXITING); 2120 WARN_ON_ONCE(tsk->flags & PF_EXITING);
2082 old_cset = task_css_set(tsk); 2121 old_cset = task_css_set(tsk);
@@ -2133,11 +2172,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2133 * @src_cset and add it to @preloaded_csets, which should later be cleaned 2172 * @src_cset and add it to @preloaded_csets, which should later be cleaned
2134 * up by cgroup_migrate_finish(). 2173 * up by cgroup_migrate_finish().
2135 * 2174 *
2136 * This function may be called without holding cgroup_threadgroup_rwsem 2175 * This function may be called without holding threadgroup_lock even if the
2137 * even if the target is a process. Threads may be created and destroyed 2176 * target is a process. Threads may be created and destroyed but as long
2138 * but as long as cgroup_mutex is not dropped, no new css_set can be put 2177 * as cgroup_mutex is not dropped, no new css_set can be put into play and
2139 * into play and the preloaded css_sets are guaranteed to cover all 2178 * the preloaded css_sets are guaranteed to cover all migrations.
2140 * migrations.
2141 */ 2179 */
2142static void cgroup_migrate_add_src(struct css_set *src_cset, 2180static void cgroup_migrate_add_src(struct css_set *src_cset,
2143 struct cgroup *dst_cgrp, 2181 struct cgroup *dst_cgrp,
@@ -2240,7 +2278,7 @@ err:
2240 * @threadgroup: whether @leader points to the whole process or a single task 2278 * @threadgroup: whether @leader points to the whole process or a single task
2241 * 2279 *
2242 * Migrate a process or task denoted by @leader to @cgrp. If migrating a 2280 * Migrate a process or task denoted by @leader to @cgrp. If migrating a
2243 * process, the caller must be holding cgroup_threadgroup_rwsem. The 2281 * process, the caller must be holding threadgroup_lock of @leader. The
2244 * caller is also responsible for invoking cgroup_migrate_add_src() and 2282 * caller is also responsible for invoking cgroup_migrate_add_src() and
2245 * cgroup_migrate_prepare_dst() on the targets before invoking this 2283 * cgroup_migrate_prepare_dst() on the targets before invoking this
2246 * function and following up with cgroup_migrate_finish(). 2284 * function and following up with cgroup_migrate_finish().
@@ -2368,7 +2406,7 @@ out_release_tset:
2368 * @leader: the task or the leader of the threadgroup to be attached 2406 * @leader: the task or the leader of the threadgroup to be attached
2369 * @threadgroup: attach the whole threadgroup? 2407 * @threadgroup: attach the whole threadgroup?
2370 * 2408 *
2371 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem. 2409 * Call holding cgroup_mutex and threadgroup_lock of @leader.
2372 */ 2410 */
2373static int cgroup_attach_task(struct cgroup *dst_cgrp, 2411static int cgroup_attach_task(struct cgroup *dst_cgrp,
2374 struct task_struct *leader, bool threadgroup) 2412 struct task_struct *leader, bool threadgroup)
@@ -2460,13 +2498,14 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2460 if (!cgrp) 2498 if (!cgrp)
2461 return -ENODEV; 2499 return -ENODEV;
2462 2500
2463 percpu_down_write(&cgroup_threadgroup_rwsem); 2501retry_find_task:
2464 rcu_read_lock(); 2502 rcu_read_lock();
2465 if (pid) { 2503 if (pid) {
2466 tsk = find_task_by_vpid(pid); 2504 tsk = find_task_by_vpid(pid);
2467 if (!tsk) { 2505 if (!tsk) {
2506 rcu_read_unlock();
2468 ret = -ESRCH; 2507 ret = -ESRCH;
2469 goto out_unlock_rcu; 2508 goto out_unlock_cgroup;
2470 } 2509 }
2471 } else { 2510 } else {
2472 tsk = current; 2511 tsk = current;
@@ -2482,23 +2521,37 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2482 */ 2521 */
2483 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { 2522 if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
2484 ret = -EINVAL; 2523 ret = -EINVAL;
2485 goto out_unlock_rcu; 2524 rcu_read_unlock();
2525 goto out_unlock_cgroup;
2486 } 2526 }
2487 2527
2488 get_task_struct(tsk); 2528 get_task_struct(tsk);
2489 rcu_read_unlock(); 2529 rcu_read_unlock();
2490 2530
2531 threadgroup_lock(tsk);
2532 if (threadgroup) {
2533 if (!thread_group_leader(tsk)) {
2534 /*
2535 * a race with de_thread from another thread's exec()
2536 * may strip us of our leadership; if this happens,
2537 * there is no choice but to throw this task away and
2538 * try again; this is
2539 * "double-double-toil-and-trouble-check locking".
2540 */
2541 threadgroup_unlock(tsk);
2542 put_task_struct(tsk);
2543 goto retry_find_task;
2544 }
2545 }
2546
2491 ret = cgroup_procs_write_permission(tsk, cgrp, of); 2547 ret = cgroup_procs_write_permission(tsk, cgrp, of);
2492 if (!ret) 2548 if (!ret)
2493 ret = cgroup_attach_task(cgrp, tsk, threadgroup); 2549 ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2494 2550
2495 put_task_struct(tsk); 2551 threadgroup_unlock(tsk);
2496 goto out_unlock_threadgroup;
2497 2552
2498out_unlock_rcu: 2553 put_task_struct(tsk);
2499 rcu_read_unlock(); 2554out_unlock_cgroup:
2500out_unlock_threadgroup:
2501 percpu_up_write(&cgroup_threadgroup_rwsem);
2502 cgroup_kn_unlock(of->kn); 2555 cgroup_kn_unlock(of->kn);
2503 return ret ?: nbytes; 2556 return ret ?: nbytes;
2504} 2557}
@@ -2643,8 +2696,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2643 2696
2644 lockdep_assert_held(&cgroup_mutex); 2697 lockdep_assert_held(&cgroup_mutex);
2645 2698
2646 percpu_down_write(&cgroup_threadgroup_rwsem);
2647
2648 /* look up all csses currently attached to @cgrp's subtree */ 2699 /* look up all csses currently attached to @cgrp's subtree */
2649 down_read(&css_set_rwsem); 2700 down_read(&css_set_rwsem);
2650 css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) { 2701 css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
@@ -2700,8 +2751,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2700 goto out_finish; 2751 goto out_finish;
2701 last_task = task; 2752 last_task = task;
2702 2753
2754 threadgroup_lock(task);
2755 /* raced against de_thread() from another thread? */
2756 if (!thread_group_leader(task)) {
2757 threadgroup_unlock(task);
2758 put_task_struct(task);
2759 continue;
2760 }
2761
2703 ret = cgroup_migrate(src_cset->dfl_cgrp, task, true); 2762 ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
2704 2763
2764 threadgroup_unlock(task);
2705 put_task_struct(task); 2765 put_task_struct(task);
2706 2766
2707 if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret)) 2767 if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
@@ -2711,7 +2771,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2711 2771
2712out_finish: 2772out_finish:
2713 cgroup_migrate_finish(&preloaded_csets); 2773 cgroup_migrate_finish(&preloaded_csets);
2714 percpu_up_write(&cgroup_threadgroup_rwsem);
2715 return ret; 2774 return ret;
2716} 2775}
2717 2776
@@ -5024,7 +5083,6 @@ int __init cgroup_init(void)
5024 unsigned long key; 5083 unsigned long key;
5025 int ssid, err; 5084 int ssid, err;
5026 5085
5027 BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
5028 BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files)); 5086 BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
5029 BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files)); 5087 BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
5030 5088
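Taken together, the cgroup.c hunks reinstate a per-signal_struct rwsem:
fork and exit take it for read via cgroup_threadgroup_change_begin/end(),
while the attach paths take it for write via threadgroup_lock(). A
reduced sketch of the two sides (simplified; not the full call chains):

	/* Reader side, as fork/exit use it through threadgroup_change_begin(): */
	static void example_fork_side(struct task_struct *tsk)
	{
		cgroup_threadgroup_change_begin(tsk);	/* down_read(group_rwsem) */
		/* link the new thread into the group, or set PF_EXITING */
		cgroup_threadgroup_change_end(tsk);	/* up_read(group_rwsem) */
	}

	/* Writer side, as __cgroup_procs_write() uses it: */
	static void example_attach_side(struct task_struct *tsk)
	{
		threadgroup_lock(tsk);			/* down_write(group_rwsem) */
		/* the threadgroup is now stable for migration */
		threadgroup_unlock(tsk);		/* up_write(group_rwsem) */
	}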
diff --git a/kernel/fork.c b/kernel/fork.c
index 7d5f0f118a63..2845623fb582 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1149,6 +1149,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1149 tty_audit_fork(sig); 1149 tty_audit_fork(sig);
1150 sched_autogroup_fork(sig); 1150 sched_autogroup_fork(sig);
1151 1151
1152#ifdef CONFIG_CGROUPS
1153 init_rwsem(&sig->group_rwsem);
1154#endif
1155
1152 sig->oom_score_adj = current->signal->oom_score_adj; 1156 sig->oom_score_adj = current->signal->oom_score_adj;
1153 sig->oom_score_adj_min = current->signal->oom_score_adj_min; 1157 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1154 1158
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 272d9322bc5d..052e02672d12 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -106,10 +106,9 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
106} 106}
107EXPORT_SYMBOL_GPL(__wake_up_locked); 107EXPORT_SYMBOL_GPL(__wake_up_locked);
108 108
109void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, int nr, 109void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
110 void *key)
111{ 110{
112 __wake_up_common(q, mode, nr, 0, key); 111 __wake_up_common(q, mode, 1, 0, key);
113} 112}
114EXPORT_SYMBOL_GPL(__wake_up_locked_key); 113EXPORT_SYMBOL_GPL(__wake_up_locked_key);
115 114
@@ -284,7 +283,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
284 if (!list_empty(&wait->task_list)) 283 if (!list_empty(&wait->task_list))
285 list_del_init(&wait->task_list); 284 list_del_init(&wait->task_list);
286 else if (waitqueue_active(q)) 285 else if (waitqueue_active(q))
287 __wake_up_locked_key(q, mode, 1, key); 286 __wake_up_locked_key(q, mode, key);
288 spin_unlock_irqrestore(&q->lock, flags); 287 spin_unlock_irqrestore(&q->lock, flags);
289} 288}
290EXPORT_SYMBOL(abort_exclusive_wait); 289EXPORT_SYMBOL(abort_exclusive_wait);
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index ff19f66d3f7f..b1c93e94ca7a 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
21 21
22static inline bool need_flush(struct iommu_map_table *iommu) 22static inline bool need_flush(struct iommu_map_table *iommu)
23{ 23{
24 return (iommu->lazy_flush != NULL && 24 return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
25 (iommu->flags & IOMMU_NEED_FLUSH) != 0);
26} 25}
27 26
28static inline void set_flush(struct iommu_map_table *iommu) 27static inline void set_flush(struct iommu_map_table *iommu)
@@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
211 goto bail; 210 goto bail;
212 } 211 }
213 } 212 }
214 if (n < pool->hint || need_flush(iommu)) { 213 if (iommu->lazy_flush &&
214 (n < pool->hint || need_flush(iommu))) {
215 clear_flush(iommu); 215 clear_flush(iommu);
216 iommu->lazy_flush(iommu); 216 iommu->lazy_flush(iommu);
217 } 217 }
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index cc0c69710dcf..a54ff8949f91 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -187,10 +187,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
187 head = rht_dereference_bucket(new_tbl->buckets[new_hash], 187 head = rht_dereference_bucket(new_tbl->buckets[new_hash],
188 new_tbl, new_hash); 188 new_tbl, new_hash);
189 189
190 if (rht_is_a_nulls(head)) 190 RCU_INIT_POINTER(entry->next, head);
191 INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
192 else
193 RCU_INIT_POINTER(entry->next, head);
194 191
195 rcu_assign_pointer(new_tbl->buckets[new_hash], entry); 192 rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
196 spin_unlock(new_bucket_lock); 193 spin_unlock(new_bucket_lock);
diff --git a/mm/migrate.c b/mm/migrate.c
index c3cb566af3e2..7452a00bbb50 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1075,7 +1075,7 @@ out:
1075 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) 1075 if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
1076 put_new_page(new_hpage, private); 1076 put_new_page(new_hpage, private);
1077 else 1077 else
1078 put_page(new_hpage); 1078 putback_active_hugepage(new_hpage);
1079 1079
1080 if (result) { 1080 if (result) {
1081 if (rc) 1081 if (rc)
diff --git a/mm/mmap.c b/mm/mmap.c
index c739d6db7193..79bcc9f92e48 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1490,13 +1490,14 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1490int vma_wants_writenotify(struct vm_area_struct *vma) 1490int vma_wants_writenotify(struct vm_area_struct *vma)
1491{ 1491{
1492 vm_flags_t vm_flags = vma->vm_flags; 1492 vm_flags_t vm_flags = vma->vm_flags;
1493 const struct vm_operations_struct *vm_ops = vma->vm_ops;
1493 1494
1494 /* If it was private or non-writable, the write bit is already clear */ 1495 /* If it was private or non-writable, the write bit is already clear */
1495 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) 1496 if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1496 return 0; 1497 return 0;
1497 1498
1498 /* The backer wishes to know when pages are first written to? */ 1499 /* The backer wishes to know when pages are first written to? */
1499 if (vma->vm_ops && vma->vm_ops->page_mkwrite) 1500 if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
1500 return 1; 1501 return 1;
1501 1502
1502 /* The open routine did something to the protections that pgprot_modify 1503 /* The open routine did something to the protections that pgprot_modify
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2d978b28a410..7f63a9381f71 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -175,7 +175,7 @@ static bool sane_reclaim(struct scan_control *sc)
175 if (!memcg) 175 if (!memcg)
176 return true; 176 return true;
177#ifdef CONFIG_CGROUP_WRITEBACK 177#ifdef CONFIG_CGROUP_WRITEBACK
178 if (memcg->css.cgroup) 178 if (cgroup_on_dfl(memcg->css.cgroup))
179 return true; 179 return true;
180#endif 180#endif
181 return false; 181 return false;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 17e55dfecbe2..e07f551a863c 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -317,6 +317,9 @@ static int clip_constructor(struct neighbour *neigh)
317 317
318static int clip_encap(struct atm_vcc *vcc, int mode) 318static int clip_encap(struct atm_vcc *vcc, int mode)
319{ 319{
320 if (!CLIP_VCC(vcc))
321 return -EBADFD;
322
320 CLIP_VCC(vcc)->encap = mode; 323 CLIP_VCC(vcc)->encap = mode;
321 return 0; 324 return 0;
322} 325}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ad82324f710f..0510a577a7b5 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2311,12 +2311,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2311 if (!conn) 2311 if (!conn)
2312 return 1; 2312 return 1;
2313 2313
2314 chan = conn->smp;
2315 if (!chan) {
2316 BT_ERR("SMP security requested but not available");
2317 return 1;
2318 }
2319
2320 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) 2314 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2321 return 1; 2315 return 1;
2322 2316
@@ -2330,6 +2324,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2330 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 2324 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
2331 return 0; 2325 return 0;
2332 2326
2327 chan = conn->smp;
2328 if (!chan) {
2329 BT_ERR("SMP security requested but not available");
2330 return 1;
2331 }
2332
2333 l2cap_chan_lock(chan); 2333 l2cap_chan_lock(chan);
2334 2334
2335 /* If SMP is already in progress ignore this request */ 2335 /* If SMP is already in progress ignore this request */
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 66efdc21f548..480b3de1a0e3 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1006,7 +1006,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1006 1006
1007 ih = igmpv3_report_hdr(skb); 1007 ih = igmpv3_report_hdr(skb);
1008 num = ntohs(ih->ngrec); 1008 num = ntohs(ih->ngrec);
1009 len = sizeof(*ih); 1009 len = skb_transport_offset(skb) + sizeof(*ih);
1010 1010
1011 for (i = 0; i < num; i++) { 1011 for (i = 0; i < num; i++) {
1012 len += sizeof(*grec); 1012 len += sizeof(*grec);
@@ -1067,7 +1067,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1067 1067
1068 icmp6h = icmp6_hdr(skb); 1068 icmp6h = icmp6_hdr(skb);
1069 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); 1069 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1070 len = sizeof(*icmp6h); 1070 len = skb_transport_offset(skb) + sizeof(*icmp6h);
1071 1071
1072 for (i = 0; i < num; i++) { 1072 for (i = 0; i < num; i++) {
1073 __be16 *nsrcs, _nsrcs; 1073 __be16 *nsrcs, _nsrcs;
diff --git a/net/core/dev.c b/net/core/dev.c
index 877c84834d81..6bb6470f5b7b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4713,6 +4713,8 @@ void napi_disable(struct napi_struct *n)
4713 4713
4714 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 4714 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4715 msleep(1); 4715 msleep(1);
4716 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
4717 msleep(1);
4716 4718
4717 hrtimer_cancel(&n->timer); 4719 hrtimer_cancel(&n->timer);
4718 4720
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index bf77e3639ce0..365de66436ac 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -631,15 +631,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
631{ 631{
632 int idx = 0; 632 int idx = 0;
633 struct fib_rule *rule; 633 struct fib_rule *rule;
634 int err = 0;
634 635
635 rcu_read_lock(); 636 rcu_read_lock();
636 list_for_each_entry_rcu(rule, &ops->rules_list, list) { 637 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
637 if (idx < cb->args[1]) 638 if (idx < cb->args[1])
638 goto skip; 639 goto skip;
639 640
640 if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, 641 err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
641 cb->nlh->nlmsg_seq, RTM_NEWRULE, 642 cb->nlh->nlmsg_seq, RTM_NEWRULE,
642 NLM_F_MULTI, ops) < 0) 643 NLM_F_MULTI, ops);
644 if (err)
643 break; 645 break;
644skip: 646skip:
645 idx++; 647 idx++;
@@ -648,7 +650,7 @@ skip:
648 cb->args[1] = idx; 650 cb->args[1] = idx;
649 rules_ops_put(ops); 651 rules_ops_put(ops);
650 652
651 return skb->len; 653 return err;
652} 654}
653 655
654static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) 656static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -664,7 +666,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
664 if (ops == NULL) 666 if (ops == NULL)
665 return -EAFNOSUPPORT; 667 return -EAFNOSUPPORT;
666 668
667 return dump_rules(skb, cb, ops); 669 dump_rules(skb, cb, ops);
670
671 return skb->len;
668 } 672 }
669 673
670 rcu_read_lock(); 674 rcu_read_lock();
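The dump_rules() fix leans on the netlink dump convention: a dump
callback returns skb->len while there is more to send (the core then
calls it again) and a negative error to abort. A hedged standalone
sketch of that convention, with the fill helper invented:

	static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
	{
		int err = example_fill_one(skb, cb);	/* hypothetical */

		if (err && err != -EMSGSIZE)
			return err;	/* hard failure: abort the dump */
		return skb->len;	/* full or partial: call me again */
	}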
diff --git a/net/core/filter.c b/net/core/filter.c
index 13079f03902e..05a04ea87172 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -478,9 +478,9 @@ do_pass:
478 bpf_src = BPF_X; 478 bpf_src = BPF_X;
479 } else { 479 } else {
480 insn->dst_reg = BPF_REG_A; 480 insn->dst_reg = BPF_REG_A;
481 insn->src_reg = BPF_REG_X;
482 insn->imm = fp->k; 481 insn->imm = fp->k;
483 bpf_src = BPF_SRC(fp->code); 482 bpf_src = BPF_SRC(fp->code);
483 insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
484 } 484 }
485 485
486 /* Common case where 'jump_false' is next insn. */ 486 /* Common case where 'jump_false' is next insn. */
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b279077c3089..805a95a48107 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1481,6 +1481,15 @@ static int of_dev_node_match(struct device *dev, const void *data)
1481 return ret == 0 ? dev->of_node == data : ret; 1481 return ret == 0 ? dev->of_node == data : ret;
1482} 1482}
1483 1483
1484/*
1485 * of_find_net_device_by_node - lookup the net device for the device node
1486 * @np: OF device node
1487 *
1488 * Looks up the net_device structure corresponding with the device node.
1489 * If successful, returns a pointer to the net_device with the embedded
1490 * struct device refcount incremented by one, or NULL on failure. The
1491 * refcount must be dropped when done with the net_device.
1492 */
1484struct net_device *of_find_net_device_by_node(struct device_node *np) 1493struct net_device *of_find_net_device_by_node(struct device_node *np)
1485{ 1494{
1486 struct device *dev; 1495 struct device *dev;
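
The new comment documents a get/put contract; a caller that honors it would look roughly like this (hypothetical caller, relying only on the standard driver-core refcounting the comment describes):

/* Hypothetical caller honoring the documented refcount contract. */
static int use_netdev_of_node(struct device_node *np)
{
	struct net_device *ndev = of_find_net_device_by_node(np);

	if (!ndev)
		return -EPROBE_DEFER;	/* may simply not be bound yet */

	/* ... use ndev while we hold the struct device reference ... */

	put_device(&ndev->dev);		/* drop the ref taken by the lookup */
	return 0;
}
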
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6aa3db8dfc3b..8bdada242a7d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -142,7 +142,7 @@ static void queue_process(struct work_struct *work)
142 */ 142 */
143static int poll_one_napi(struct napi_struct *napi, int budget) 143static int poll_one_napi(struct napi_struct *napi, int budget)
144{ 144{
145 int work; 145 int work = 0;
146 146
147 /* net_rx_action's ->poll() invocations and our's are 147 /* net_rx_action's ->poll() invocations and our's are
148 * synchronized by this test which is only made while 148 * synchronized by this test which is only made while
@@ -151,7 +151,12 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
151 if (!test_bit(NAPI_STATE_SCHED, &napi->state)) 151 if (!test_bit(NAPI_STATE_SCHED, &napi->state))
152 return budget; 152 return budget;
153 153
154 set_bit(NAPI_STATE_NPSVC, &napi->state); 154 /* If we set this bit but see that it has already been set,
155 * that indicates that napi has been disabled and we need
156 * to abort this operation
157 */
158 if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
159 goto out;
155 160
156 work = napi->poll(napi, budget); 161 work = napi->poll(napi, budget);
157 WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll); 162 WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
@@ -159,6 +164,7 @@ static int poll_one_napi(struct napi_struct *napi, int budget)
159 164
160 clear_bit(NAPI_STATE_NPSVC, &napi->state); 165 clear_bit(NAPI_STATE_NPSVC, &napi->state);
161 166
167out:
162 return budget - work; 168 return budget - work;
163} 169}
164 170
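
Taken together, the net/core/dev.c hunk earlier and the poll_one_napi() change above turn NAPI_STATE_NPSVC into a mutual-exclusion handshake: napi_disable() spins until it owns the bit, and netpoll backs off if the bit is already held. A condensed sketch of the pattern (simplified, not the literal kernel code):

static void disable_side(struct napi_struct *n)
{
	/* Claim both bits; whoever holds NPSVC excludes netpoll. */
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);
	/* ... napi is now quiesced ... */
}

static int netpoll_side(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* If NPSVC was already set, napi_disable() is in progress:
	 * abort instead of calling ->poll() on a dying napi.
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		goto out;

	work = napi->poll(napi, budget);
	clear_bit(NAPI_STATE_NPSVC, &napi->state);
out:
	return budget - work;
}
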
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a466821d1441..0ec48403ed68 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3047,6 +3047,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3047 u32 portid = NETLINK_CB(cb->skb).portid; 3047 u32 portid = NETLINK_CB(cb->skb).portid;
3048 u32 seq = cb->nlh->nlmsg_seq; 3048 u32 seq = cb->nlh->nlmsg_seq;
3049 u32 filter_mask = 0; 3049 u32 filter_mask = 0;
3050 int err;
3050 3051
3051 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { 3052 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
3052 struct nlattr *extfilt; 3053 struct nlattr *extfilt;
@@ -3067,20 +3068,25 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3067 struct net_device *br_dev = netdev_master_upper_dev_get(dev); 3068 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3068 3069
3069 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { 3070 if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
3070 if (idx >= cb->args[0] && 3071 if (idx >= cb->args[0]) {
3071 br_dev->netdev_ops->ndo_bridge_getlink( 3072 err = br_dev->netdev_ops->ndo_bridge_getlink(
3072 skb, portid, seq, dev, filter_mask, 3073 skb, portid, seq, dev,
3073 NLM_F_MULTI) < 0) 3074 filter_mask, NLM_F_MULTI);
3074 break; 3075 if (err < 0 && err != -EOPNOTSUPP)
3076 break;
3077 }
3075 idx++; 3078 idx++;
3076 } 3079 }
3077 3080
3078 if (ops->ndo_bridge_getlink) { 3081 if (ops->ndo_bridge_getlink) {
3079 if (idx >= cb->args[0] && 3082 if (idx >= cb->args[0]) {
3080 ops->ndo_bridge_getlink(skb, portid, seq, dev, 3083 err = ops->ndo_bridge_getlink(skb, portid,
3081 filter_mask, 3084 seq, dev,
3082 NLM_F_MULTI) < 0) 3085 filter_mask,
3083 break; 3086 NLM_F_MULTI);
3087 if (err < 0 && err != -EOPNOTSUPP)
3088 break;
3089 }
3084 idx++; 3090 idx++;
3085 } 3091 }
3086 } 3092 }
diff --git a/net/core/sock.c b/net/core/sock.c
index ca2984afe16e..3307c02244d3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2740,10 +2740,8 @@ static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2740 return; 2740 return;
2741 kfree(rsk_prot->slab_name); 2741 kfree(rsk_prot->slab_name);
2742 rsk_prot->slab_name = NULL; 2742 rsk_prot->slab_name = NULL;
2743 if (rsk_prot->slab) { 2743 kmem_cache_destroy(rsk_prot->slab);
2744 kmem_cache_destroy(rsk_prot->slab); 2744 rsk_prot->slab = NULL;
2745 rsk_prot->slab = NULL;
2746 }
2747} 2745}
2748 2746
2749static int req_prot_init(const struct proto *prot) 2747static int req_prot_init(const struct proto *prot)
@@ -2828,10 +2826,8 @@ void proto_unregister(struct proto *prot)
2828 list_del(&prot->node); 2826 list_del(&prot->node);
2829 mutex_unlock(&proto_list_mutex); 2827 mutex_unlock(&proto_list_mutex);
2830 2828
2831 if (prot->slab != NULL) { 2829 kmem_cache_destroy(prot->slab);
2832 kmem_cache_destroy(prot->slab); 2830 prot->slab = NULL;
2833 prot->slab = NULL;
2834 }
2835 2831
2836 req_prot_cleanup(prot->rsk_prot); 2832 req_prot_cleanup(prot->rsk_prot);
2837 2833
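
The sock.c simplification (and the matching dccp hunks below) leans on the fact that kmem_cache_destroy() is defined to be a no-op for a NULL pointer, just like kfree(). That collapses every guarded cleanup into two lines:

static struct kmem_cache *my_cache;	/* hypothetical cache pointer */

static void my_cache_exit(void)
{
	kmem_cache_destroy(my_cache);	/* NULL-safe, like kfree(NULL) */
	my_cache = NULL;		/* idempotent on repeated cleanup */
}
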
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index bd9e718c2a20..3de0d0362d7f 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -398,12 +398,8 @@ out_err:
398 398
399void dccp_ackvec_exit(void) 399void dccp_ackvec_exit(void)
400{ 400{
401 if (dccp_ackvec_slab != NULL) { 401 kmem_cache_destroy(dccp_ackvec_slab);
402 kmem_cache_destroy(dccp_ackvec_slab); 402 dccp_ackvec_slab = NULL;
403 dccp_ackvec_slab = NULL; 403 kmem_cache_destroy(dccp_ackvec_record_slab);
404 } 404 dccp_ackvec_record_slab = NULL;
405 if (dccp_ackvec_record_slab != NULL) {
406 kmem_cache_destroy(dccp_ackvec_record_slab);
407 dccp_ackvec_record_slab = NULL;
408 }
409} 405}
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 83498975165f..90f77d08cc37 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -95,8 +95,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
95 95
96static void ccid_kmem_cache_destroy(struct kmem_cache *slab) 96static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
97{ 97{
98 if (slab != NULL) 98 kmem_cache_destroy(slab);
99 kmem_cache_destroy(slab);
100} 99}
101 100
102static int __init ccid_activate(struct ccid_operations *ccid_ops) 101static int __init ccid_activate(struct ccid_operations *ccid_ops)
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 30addee2dd03..838f524cf11a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
48 tw->tw_ipv6only = sk->sk_ipv6only; 48 tw->tw_ipv6only = sk->sk_ipv6only;
49 } 49 }
50#endif 50#endif
51 /* Linkage updates. */
52 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
53 51
54 /* Get the TIME_WAIT timeout firing. */ 52 /* Get the TIME_WAIT timeout firing. */
55 if (timeo < rto) 53 if (timeo < rto)
@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
60 timeo = DCCP_TIMEWAIT_LEN; 58 timeo = DCCP_TIMEWAIT_LEN;
61 59
62 inet_twsk_schedule(tw, timeo); 60 inet_twsk_schedule(tw, timeo);
61 /* Linkage updates. */
62 __inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
63 inet_twsk_put(tw); 63 inet_twsk_put(tw);
64 } else { 64 } else {
65 /* Sorry, if we're out of memory, just CLOSE this 65 /* Sorry, if we're out of memory, just CLOSE this
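
The dccp_time_wait() reorder (tcp_minisocks.c receives the identical change further down) encodes a publish-last rule: a timewait socket may only become findable via __inet_twsk_hashdance() once inet_twsk_schedule() has armed its timer, otherwise a racing inet_twsk_deschedule_put() from another CPU can act on a half-initialized timer. The required ordering, in outline:

/* Outline only; see the hunks above for the real call sites. */
inet_twsk_schedule(tw, timeo);		 /* 1. arm timer (+1 timer ref) */
__inet_twsk_hashdance(tw, sk, hashinfo); /* 2. now lookups may find tw  */
inet_twsk_put(tw);			 /* 3. drop the setup reference */
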
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 76e3800765f8..c59fa5d9c22c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -634,6 +634,10 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
634 port_index++; 634 port_index++;
635 } 635 }
636 kfree(pd->chip[i].rtable); 636 kfree(pd->chip[i].rtable);
637
638 /* Drop our reference to the MDIO bus device */
639 if (pd->chip[i].host_dev)
640 put_device(pd->chip[i].host_dev);
637 } 641 }
638 kfree(pd->chip); 642 kfree(pd->chip);
639} 643}
@@ -661,16 +665,22 @@ static int dsa_of_probe(struct device *dev)
661 return -EPROBE_DEFER; 665 return -EPROBE_DEFER;
662 666
663 ethernet = of_parse_phandle(np, "dsa,ethernet", 0); 667 ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
664 if (!ethernet) 668 if (!ethernet) {
665 return -EINVAL; 669 ret = -EINVAL;
670 goto out_put_mdio;
671 }
666 672
667 ethernet_dev = of_find_net_device_by_node(ethernet); 673 ethernet_dev = of_find_net_device_by_node(ethernet);
668 if (!ethernet_dev) 674 if (!ethernet_dev) {
669 return -EPROBE_DEFER; 675 ret = -EPROBE_DEFER;
676 goto out_put_mdio;
677 }
670 678
671 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 679 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
672 if (!pd) 680 if (!pd) {
673 return -ENOMEM; 681 ret = -ENOMEM;
682 goto out_put_ethernet;
683 }
674 684
675 dev->platform_data = pd; 685 dev->platform_data = pd;
676 pd->of_netdev = ethernet_dev; 686 pd->of_netdev = ethernet_dev;
@@ -691,7 +701,9 @@ static int dsa_of_probe(struct device *dev)
691 cd = &pd->chip[chip_index]; 701 cd = &pd->chip[chip_index];
692 702
693 cd->of_node = child; 703 cd->of_node = child;
694 cd->host_dev = &mdio_bus->dev; 704
705 /* When assigning the host device, increment its refcount */
706 cd->host_dev = get_device(&mdio_bus->dev);
695 707
696 sw_addr = of_get_property(child, "reg", NULL); 708 sw_addr = of_get_property(child, "reg", NULL);
697 if (!sw_addr) 709 if (!sw_addr)
@@ -711,6 +723,12 @@ static int dsa_of_probe(struct device *dev)
711 ret = -EPROBE_DEFER; 723 ret = -EPROBE_DEFER;
712 goto out_free_chip; 724 goto out_free_chip;
713 } 725 }
726
727 /* Drop the mdio_bus device ref, replacing the host
728 * device with the mdio_bus_switch device, keeping
729 * the refcount from of_mdio_find_bus() above.
730 */
731 put_device(cd->host_dev);
714 cd->host_dev = &mdio_bus_switch->dev; 732 cd->host_dev = &mdio_bus_switch->dev;
715 } 733 }
716 734
@@ -744,6 +762,10 @@ static int dsa_of_probe(struct device *dev)
744 } 762 }
745 } 763 }
746 764
765 /* The individual chips hold their own refcount on the mdio bus,
766 * so drop ours */
767 put_device(&mdio_bus->dev);
768
747 return 0; 769 return 0;
748 770
749out_free_chip: 771out_free_chip:
@@ -751,6 +773,10 @@ out_free_chip:
751out_free: 773out_free:
752 kfree(pd); 774 kfree(pd);
753 dev->platform_data = NULL; 775 dev->platform_data = NULL;
776out_put_ethernet:
777 put_device(&ethernet_dev->dev);
778out_put_mdio:
779 put_device(&mdio_bus->dev);
754 return ret; 780 return ret;
755} 781}
756 782
@@ -762,6 +788,7 @@ static void dsa_of_remove(struct device *dev)
762 return; 788 return;
763 789
764 dsa_of_free_platform_data(pd); 790 dsa_of_free_platform_data(pd);
791 put_device(&pd->of_netdev->dev);
765 kfree(pd); 792 kfree(pd);
766} 793}
767#else 794#else
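
The dsa.c changes are a textbook get_device()/put_device() balancing exercise: every exit path, success or error, must leave exactly the references the data structures own. A condensed sketch of the unwind shape; the helper names get_mdio_bus() and get_ethernet_dev() are invented here, standing in for of_mdio_find_bus() and of_find_net_device_by_node(), both of which return counted references:

static int dsa_probe_sketch(struct device *dev)
{
	struct mii_bus *mdio_bus = get_mdio_bus(dev);	/* hypothetical */
	struct net_device *ethernet_dev;
	struct dsa_platform_data *pd;
	int ret;

	ethernet_dev = get_ethernet_dev(dev);		/* hypothetical */
	if (!ethernet_dev) {
		ret = -EPROBE_DEFER;
		goto out_put_mdio;
	}

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd) {
		ret = -ENOMEM;
		goto out_put_ethernet;
	}
	dev->platform_data = pd;

	/* Success: each chip took its own get_device() on its bus, so
	 * the probe-level mdio reference can be dropped here.
	 */
	put_device(&mdio_bus->dev);
	return 0;

out_put_ethernet:
	put_device(&ethernet_dev->dev);
out_put_mdio:
	put_device(&mdio_bus->dev);
	return ret;
}
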
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index d25efc93d8f1..b6ca0890d018 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -78,7 +78,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
78 78
79 trailer = skb_tail_pointer(skb) - 4; 79 trailer = skb_tail_pointer(skb) - 4;
80 if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 || 80 if (trailer[0] != 0x80 || (trailer[1] & 0xf8) != 0x00 ||
81 (trailer[3] & 0xef) != 0x00 || trailer[3] != 0x00) 81 (trailer[2] & 0xef) != 0x00 || trailer[3] != 0x00)
82 goto out_drop; 82 goto out_drop;
83 83
84 source_port = trailer[1] & 7; 84 source_port = trailer[1] & 7;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 30409b75e925..f03db8b7abee 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -113,6 +113,8 @@
113#include <net/arp.h> 113#include <net/arp.h>
114#include <net/ax25.h> 114#include <net/ax25.h>
115#include <net/netrom.h> 115#include <net/netrom.h>
116#include <net/dst_metadata.h>
117#include <net/ip_tunnels.h>
116 118
117#include <linux/uaccess.h> 119#include <linux/uaccess.h>
118 120
@@ -296,7 +298,8 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
296 struct net_device *dev, __be32 src_ip, 298 struct net_device *dev, __be32 src_ip,
297 const unsigned char *dest_hw, 299 const unsigned char *dest_hw,
298 const unsigned char *src_hw, 300 const unsigned char *src_hw,
299 const unsigned char *target_hw, struct sk_buff *oskb) 301 const unsigned char *target_hw,
302 struct dst_entry *dst)
300{ 303{
301 struct sk_buff *skb; 304 struct sk_buff *skb;
302 305
@@ -309,9 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip,
309 if (!skb) 312 if (!skb)
310 return; 313 return;
311 314
312 if (oskb) 315 skb_dst_set(skb, dst);
313 skb_dst_copy(skb, oskb);
314
315 arp_xmit(skb); 316 arp_xmit(skb);
316} 317}
317 318
@@ -333,6 +334,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
333 __be32 target = *(__be32 *)neigh->primary_key; 334 __be32 target = *(__be32 *)neigh->primary_key;
334 int probes = atomic_read(&neigh->probes); 335 int probes = atomic_read(&neigh->probes);
335 struct in_device *in_dev; 336 struct in_device *in_dev;
337 struct dst_entry *dst = NULL;
336 338
337 rcu_read_lock(); 339 rcu_read_lock();
338 in_dev = __in_dev_get_rcu(dev); 340 in_dev = __in_dev_get_rcu(dev);
@@ -381,9 +383,10 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
381 } 383 }
382 } 384 }
383 385
386 if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
387 dst = dst_clone(skb_dst(skb));
384 arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 388 arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
385 dst_hw, dev->dev_addr, NULL, 389 dst_hw, dev->dev_addr, NULL, dst);
386 dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
387} 390}
388 391
389static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip) 392static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -649,6 +652,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
649 int addr_type; 652 int addr_type;
650 struct neighbour *n; 653 struct neighbour *n;
651 struct net *net = dev_net(dev); 654 struct net *net = dev_net(dev);
655 struct dst_entry *reply_dst = NULL;
652 bool is_garp = false; 656 bool is_garp = false;
653 657
654 /* arp_rcv below verifies the ARP header and verifies the device 658 /* arp_rcv below verifies the ARP header and verifies the device
@@ -749,13 +753,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
749 * cache. 753 * cache.
750 */ 754 */
751 755
756 if (arp->ar_op == htons(ARPOP_REQUEST) && skb_metadata_dst(skb))
757 reply_dst = (struct dst_entry *)
758 iptunnel_metadata_reply(skb_metadata_dst(skb),
759 GFP_ATOMIC);
760
752 /* Special case: IPv4 duplicate address detection packet (RFC2131) */ 761 /* Special case: IPv4 duplicate address detection packet (RFC2131) */
753 if (sip == 0) { 762 if (sip == 0) {
754 if (arp->ar_op == htons(ARPOP_REQUEST) && 763 if (arp->ar_op == htons(ARPOP_REQUEST) &&
755 inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL && 764 inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
756 !arp_ignore(in_dev, sip, tip)) 765 !arp_ignore(in_dev, sip, tip))
757 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 766 arp_send_dst(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip,
758 dev->dev_addr, sha); 767 sha, dev->dev_addr, sha, reply_dst);
759 goto out; 768 goto out;
760 } 769 }
761 770
@@ -774,9 +783,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
774 if (!dont_send) { 783 if (!dont_send) {
775 n = neigh_event_ns(&arp_tbl, sha, &sip, dev); 784 n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
776 if (n) { 785 if (n) {
777 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, 786 arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
778 dev, tip, sha, dev->dev_addr, 787 sip, dev, tip, sha,
779 sha); 788 dev->dev_addr, sha,
789 reply_dst);
780 neigh_release(n); 790 neigh_release(n);
781 } 791 }
782 } 792 }
@@ -794,9 +804,10 @@ static int arp_process(struct sock *sk, struct sk_buff *skb)
794 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED || 804 if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
795 skb->pkt_type == PACKET_HOST || 805 skb->pkt_type == PACKET_HOST ||
796 NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) { 806 NEIGH_VAR(in_dev->arp_parms, PROXY_DELAY) == 0) {
797 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, 807 arp_send_dst(ARPOP_REPLY, ETH_P_ARP,
798 dev, tip, sha, dev->dev_addr, 808 sip, dev, tip, sha,
799 sha); 809 dev->dev_addr, sha,
810 reply_dst);
800 } else { 811 } else {
801 pneigh_enqueue(&arp_tbl, 812 pneigh_enqueue(&arp_tbl,
802 in_dev->arp_parms, skb); 813 in_dev->arp_parms, skb);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 26d6ffb6d23c..6c2af797f2f9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1426,7 +1426,7 @@ found:
1426 nh->nh_flags & RTNH_F_LINKDOWN && 1426 nh->nh_flags & RTNH_F_LINKDOWN &&
1427 !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1427 !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
1428 continue; 1428 continue;
1429 if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) { 1429 if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
1430 if (flp->flowi4_oif && 1430 if (flp->flowi4_oif &&
1431 flp->flowi4_oif != nh->nh_oif) 1431 flp->flowi4_oif != nh->nh_oif)
1432 continue; 1432 continue;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79fe05befcae..e5eb8ac4089d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
427 fl4.flowi4_mark = mark; 427 fl4.flowi4_mark = mark;
428 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); 428 fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
429 fl4.flowi4_proto = IPPROTO_ICMP; 429 fl4.flowi4_proto = IPPROTO_ICMP;
430 fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex; 430 fl4.flowi4_oif = vrf_master_ifindex(skb->dev);
431 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 431 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
432 rt = ip_route_output_key(net, &fl4); 432 rt = ip_route_output_key(net, &fl4);
433 if (IS_ERR(rt)) 433 if (IS_ERR(rt))
@@ -461,7 +461,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
461 fl4->flowi4_proto = IPPROTO_ICMP; 461 fl4->flowi4_proto = IPPROTO_ICMP;
462 fl4->fl4_icmp_type = type; 462 fl4->fl4_icmp_type = type;
463 fl4->fl4_icmp_code = code; 463 fl4->fl4_icmp_code = code;
464 fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex; 464 fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev);
465 465
466 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 466 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
467 rt = __ip_route_output_key(net, fl4); 467 rt = __ip_route_output_key(net, fl4);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 134957159c27..7bb9c39e0a4d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -685,20 +685,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
 	req->num_timeout = 0;
 	req->sk = NULL;
 
+	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+	req->rsk_hash = hash;
+
 	/* before letting lookups find us, make sure all req fields
 	 * are committed to memory and refcnt initialized.
 	 */
 	smp_wmb();
 	atomic_set(&req->rsk_refcnt, 2);
-	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
-	req->rsk_hash = hash;
 
 	spin_lock(&queue->syn_wait_lock);
 	req->dl_next = lopt->syn_table[hash];
 	lopt->syn_table[hash] = req;
 	spin_unlock(&queue->syn_wait_lock);
-
-	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
 EXPORT_SYMBOL(reqsk_queue_hash_req);
 
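
reqsk_queue_hash_req() now follows the same publish-last discipline as the timewait changes elsewhere in this series: every field of the request sock, including its armed timer, must be in place before the smp_wmb()/refcount store and the syn_table insertion make it reachable. Schematically (helper names are placeholders, not kernel API):

fully_init(req);			/* fields *and* armed timer   */
smp_wmb();				/* order stores before refcnt */
atomic_set(&req->rsk_refcnt, 2);	/* lookups may now take refs  */
publish(req);				/* hash insert, the last step */
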
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ae22cc24fbe8..c67f9bd7699c 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -123,13 +123,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 	/*
 	 * Step 2: Hash TW into tcp ehash chain.
 	 * Notes :
-	 * - tw_refcnt is set to 3 because :
+	 * - tw_refcnt is set to 4 because :
 	 * - We have one reference from bhash chain.
 	 * - We have one reference from ehash chain.
+	 * - We have one reference from timer.
+	 * - One reference for ourself (our caller will release it).
 	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
 	 * committed into memory all tw fields.
 	 */
-	atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+	atomic_set(&tw->tw_refcnt, 4);
 	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
@@ -217,7 +219,7 @@ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 {
 	/* timeout := RTO * 3.5
 	 *
@@ -245,12 +247,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
 	 */
 
 	tw->tw_kill = timeo <= 4*HZ;
-	if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
-		atomic_inc(&tw->tw_refcnt);
+	if (!rearm) {
+		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
 		atomic_inc(&tw->tw_dr->tw_count);
+	} else {
+		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
 	}
 }
-EXPORT_SYMBOL_GPL(inet_twsk_schedule);
+EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 		     struct inet_timewait_death_row *twdr, int family)
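
With __inet_twsk_schedule() taking a rearm flag, the old entry points become thin wrappers. The matching header change is not part of this hunk; presumably it looks like the following sketch, inferred from the new callers such as inet_twsk_reschedule() in the tcp_minisocks.c hunks below:

/* Presumed include/net/inet_timewait_sock.h counterpart (not shown
 * in this diff): first-time scheduling vs. rearming an armed timer.
 */
static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
{
	__inet_twsk_schedule(tw, timeo, false);
}

static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
{
	__inet_twsk_schedule(tw, timeo, true);
}
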
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 29ed6c5a5185..84dce6a92f93 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,12 +46,13 @@
46#include <net/net_namespace.h> 46#include <net/net_namespace.h>
47#include <net/netns/generic.h> 47#include <net/netns/generic.h>
48#include <net/rtnetlink.h> 48#include <net/rtnetlink.h>
49#include <net/dst_metadata.h>
49 50
50int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, 51int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
51 __be32 src, __be32 dst, __u8 proto, 52 __be32 src, __be32 dst, __u8 proto,
52 __u8 tos, __u8 ttl, __be16 df, bool xnet) 53 __u8 tos, __u8 ttl, __be16 df, bool xnet)
53{ 54{
54 int pkt_len = skb->len; 55 int pkt_len = skb->len - skb_inner_network_offset(skb);
55 struct iphdr *iph; 56 struct iphdr *iph;
56 int err; 57 int err;
57 58
@@ -119,6 +120,33 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
119} 120}
120EXPORT_SYMBOL_GPL(iptunnel_pull_header); 121EXPORT_SYMBOL_GPL(iptunnel_pull_header);
121 122
123struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
124 gfp_t flags)
125{
126 struct metadata_dst *res;
127 struct ip_tunnel_info *dst, *src;
128
129 if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
130 return NULL;
131
132 res = metadata_dst_alloc(0, flags);
133 if (!res)
134 return NULL;
135
136 dst = &res->u.tun_info;
137 src = &md->u.tun_info;
138 dst->key.tun_id = src->key.tun_id;
139 if (src->mode & IP_TUNNEL_INFO_IPV6)
140 memcpy(&dst->key.u.ipv6.dst, &src->key.u.ipv6.src,
141 sizeof(struct in6_addr));
142 else
143 dst->key.u.ipv4.dst = src->key.u.ipv4.src;
144 dst->mode = src->mode | IP_TUNNEL_INFO_TX;
145
146 return res;
147}
148EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
149
122struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, 150struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
123 bool csum_help, 151 bool csum_help,
124 int gso_type_mask) 152 int gso_type_mask)
@@ -198,8 +226,6 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
198 [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, 226 [LWTUNNEL_IP_SRC] = { .type = NLA_U32 },
199 [LWTUNNEL_IP_TTL] = { .type = NLA_U8 }, 227 [LWTUNNEL_IP_TTL] = { .type = NLA_U8 },
200 [LWTUNNEL_IP_TOS] = { .type = NLA_U8 }, 228 [LWTUNNEL_IP_TOS] = { .type = NLA_U8 },
201 [LWTUNNEL_IP_SPORT] = { .type = NLA_U16 },
202 [LWTUNNEL_IP_DPORT] = { .type = NLA_U16 },
203 [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, 229 [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
204}; 230};
205 231
@@ -239,12 +265,6 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
239 if (tb[LWTUNNEL_IP_TOS]) 265 if (tb[LWTUNNEL_IP_TOS])
240 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); 266 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
241 267
242 if (tb[LWTUNNEL_IP_SPORT])
243 tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
244
245 if (tb[LWTUNNEL_IP_DPORT])
246 tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
247
248 if (tb[LWTUNNEL_IP_FLAGS]) 268 if (tb[LWTUNNEL_IP_FLAGS])
249 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]); 269 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
250 270
@@ -266,8 +286,6 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb,
266 nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) || 286 nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
267 nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) || 287 nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
268 nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) || 288 nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
269 nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
270 nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
271 nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags)) 289 nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
272 return -ENOMEM; 290 return -ENOMEM;
273 291
@@ -281,8 +299,6 @@ static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
281 + nla_total_size(4) /* LWTUNNEL_IP_SRC */ 299 + nla_total_size(4) /* LWTUNNEL_IP_SRC */
282 + nla_total_size(1) /* LWTUNNEL_IP_TOS */ 300 + nla_total_size(1) /* LWTUNNEL_IP_TOS */
283 + nla_total_size(1) /* LWTUNNEL_IP_TTL */ 301 + nla_total_size(1) /* LWTUNNEL_IP_TTL */
284 + nla_total_size(2) /* LWTUNNEL_IP_SPORT */
285 + nla_total_size(2) /* LWTUNNEL_IP_DPORT */
286 + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */ 302 + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */
287} 303}
288 304
@@ -305,8 +321,6 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
305 [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) }, 321 [LWTUNNEL_IP6_SRC] = { .len = sizeof(struct in6_addr) },
306 [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 }, 322 [LWTUNNEL_IP6_HOPLIMIT] = { .type = NLA_U8 },
307 [LWTUNNEL_IP6_TC] = { .type = NLA_U8 }, 323 [LWTUNNEL_IP6_TC] = { .type = NLA_U8 },
308 [LWTUNNEL_IP6_SPORT] = { .type = NLA_U16 },
309 [LWTUNNEL_IP6_DPORT] = { .type = NLA_U16 },
310 [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 }, 324 [LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 },
311}; 325};
312 326
@@ -346,12 +360,6 @@ static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
346 if (tb[LWTUNNEL_IP6_TC]) 360 if (tb[LWTUNNEL_IP6_TC])
347 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]); 361 tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
348 362
349 if (tb[LWTUNNEL_IP6_SPORT])
350 tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
351
352 if (tb[LWTUNNEL_IP6_DPORT])
353 tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
354
355 if (tb[LWTUNNEL_IP6_FLAGS]) 363 if (tb[LWTUNNEL_IP6_FLAGS])
356 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]); 364 tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
357 365
@@ -373,8 +381,6 @@ static int ip6_tun_fill_encap_info(struct sk_buff *skb,
373 nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) || 381 nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
374 nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) || 382 nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.tos) ||
375 nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) || 383 nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.ttl) ||
376 nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
377 nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
378 nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags)) 384 nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
379 return -ENOMEM; 385 return -ENOMEM;
380 386
@@ -388,8 +394,6 @@ static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
388 + nla_total_size(16) /* LWTUNNEL_IP6_SRC */ 394 + nla_total_size(16) /* LWTUNNEL_IP6_SRC */
389 + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */ 395 + nla_total_size(1) /* LWTUNNEL_IP6_HOPLIMIT */
390 + nla_total_size(1) /* LWTUNNEL_IP6_TC */ 396 + nla_total_size(1) /* LWTUNNEL_IP6_TC */
391 + nla_total_size(2) /* LWTUNNEL_IP6_SPORT */
392 + nla_total_size(2) /* LWTUNNEL_IP6_DPORT */
393 + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */ 397 + nla_total_size(2); /* LWTUNNEL_IP6_FLAGS */
394} 398}
395 399
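
iptunnel_metadata_reply() above is the helper the arp.c changes depend on: given the RX metadata of a collect_md tunnel packet it allocates a fresh metadata_dst, copies the tunnel id, swaps destination for source, and sets IP_TUNNEL_INFO_TX so the answer can be steered back through the same tunnel. A caller sketch in the spirit of the ARP path (reply_skb is a hypothetical, already-built reply):

struct metadata_dst *md = skb_metadata_dst(rx_skb);
struct dst_entry *reply_dst = NULL;

if (md)	/* only RX metadata yields a reply dst; TX inputs return NULL */
	reply_dst = (struct dst_entry *)
		    iptunnel_metadata_reply(md, GFP_ATOMIC);
if (reply_dst)
	skb_dst_set(reply_skb, reply_dst);	/* consumes the reference */
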
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5f4a5565ad8b..c6ad99ad0ffb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2045,6 +2045,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2045 struct fib_result res; 2045 struct fib_result res;
2046 struct rtable *rth; 2046 struct rtable *rth;
2047 int orig_oif; 2047 int orig_oif;
2048 int err = -ENETUNREACH;
2048 2049
2049 res.tclassid = 0; 2050 res.tclassid = 0;
2050 res.fi = NULL; 2051 res.fi = NULL;
@@ -2153,7 +2154,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2153 goto make_route; 2154 goto make_route;
2154 } 2155 }
2155 2156
2156 if (fib_lookup(net, fl4, &res, 0)) { 2157 err = fib_lookup(net, fl4, &res, 0);
2158 if (err) {
2157 res.fi = NULL; 2159 res.fi = NULL;
2158 res.table = NULL; 2160 res.table = NULL;
2159 if (fl4->flowi4_oif) { 2161 if (fl4->flowi4_oif) {
@@ -2181,7 +2183,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2181 res.type = RTN_UNICAST; 2183 res.type = RTN_UNICAST;
2182 goto make_route; 2184 goto make_route;
2183 } 2185 }
2184 rth = ERR_PTR(-ENETUNREACH); 2186 rth = ERR_PTR(err);
2185 goto out; 2187 goto out;
2186 } 2188 }
2187 2189
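
__ip_route_output_key() previously collapsed every lookup failure into -ENETUNREACH; forwarding the fib_lookup() result lets distinct errors (for instance -EACCES from a prohibit rule) reach the caller through the usual ERR_PTR idiom from linux/err.h. The caller side, in miniature:

/* ERR_PTR idiom: an errno travels inside the returned pointer. */
rt = __ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
	return PTR_ERR(rt);	/* now the real errno, not -ENETUNREACH */
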
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index c6ded6b2a79f..448c2615fece 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -154,14 +154,20 @@ static void bictcp_init(struct sock *sk)
154static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event) 154static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
155{ 155{
156 if (event == CA_EVENT_TX_START) { 156 if (event == CA_EVENT_TX_START) {
157 s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
158 struct bictcp *ca = inet_csk_ca(sk); 157 struct bictcp *ca = inet_csk_ca(sk);
158 u32 now = tcp_time_stamp;
159 s32 delta;
160
161 delta = now - tcp_sk(sk)->lsndtime;
159 162
160 /* We were application limited (idle) for a while. 163 /* We were application limited (idle) for a while.
161 * Shift epoch_start to keep cwnd growth to cubic curve. 164 * Shift epoch_start to keep cwnd growth to cubic curve.
162 */ 165 */
163 if (ca->epoch_start && delta > 0) 166 if (ca->epoch_start && delta > 0) {
164 ca->epoch_start += delta; 167 ca->epoch_start += delta;
168 if (after(ca->epoch_start, now))
169 ca->epoch_start = now;
170 }
165 return; 171 return;
166 } 172 }
167} 173}
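
The tcp_cubic guard relies on wrap-safe 32-bit time comparison: after shifting epoch_start forward it must never land beyond the current tcp_time_stamp, and "beyond" has to be decided modulo 2^32. The comparison trick, isolated as a sketch (tcp's after() applies the same signed-difference test to sequence numbers, time_after() to jiffies):

static inline bool stamp_after(u32 a, u32 b)
{
	return (s32)(b - a) < 0;	/* wrap-safe "a is later than b" */
}

/* ...so the clamp in bictcp_cwnd_event() amounts to: */
if (stamp_after(ca->epoch_start, now))
	ca->epoch_start = now;		/* never start an epoch in the future */
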
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6d8795b066ac..def765911ff8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -162,9 +162,9 @@ kill_with_rst:
162 if (tcp_death_row.sysctl_tw_recycle && 162 if (tcp_death_row.sysctl_tw_recycle &&
163 tcptw->tw_ts_recent_stamp && 163 tcptw->tw_ts_recent_stamp &&
164 tcp_tw_remember_stamp(tw)) 164 tcp_tw_remember_stamp(tw))
165 inet_twsk_schedule(tw, tw->tw_timeout); 165 inet_twsk_reschedule(tw, tw->tw_timeout);
166 else 166 else
167 inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); 167 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
168 return TCP_TW_ACK; 168 return TCP_TW_ACK;
169 } 169 }
170 170
@@ -201,7 +201,7 @@ kill:
201 return TCP_TW_SUCCESS; 201 return TCP_TW_SUCCESS;
202 } 202 }
203 } 203 }
204 inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); 204 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
205 205
206 if (tmp_opt.saw_tstamp) { 206 if (tmp_opt.saw_tstamp) {
207 tcptw->tw_ts_recent = tmp_opt.rcv_tsval; 207 tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
@@ -251,7 +251,7 @@ kill:
251 * Do not reschedule in the last case. 251 * Do not reschedule in the last case.
252 */ 252 */
253 if (paws_reject || th->ack) 253 if (paws_reject || th->ack)
254 inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN); 254 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
255 255
256 return tcp_timewait_check_oow_rate_limit( 256 return tcp_timewait_check_oow_rate_limit(
257 tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); 257 tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
@@ -322,9 +322,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
322 } while (0); 322 } while (0);
323#endif 323#endif
324 324
325 /* Linkage updates. */
326 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
327
328 /* Get the TIME_WAIT timeout firing. */ 325 /* Get the TIME_WAIT timeout firing. */
329 if (timeo < rto) 326 if (timeo < rto)
330 timeo = rto; 327 timeo = rto;
@@ -338,6 +335,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
338 } 335 }
339 336
340 inet_twsk_schedule(tw, timeo); 337 inet_twsk_schedule(tw, timeo);
338 /* Linkage updates. */
339 __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
341 inet_twsk_put(tw); 340 inet_twsk_put(tw);
342 } else { 341 } else {
343 /* Sorry, if we're out of memory, just CLOSE this 342 /* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f9a8a12b62ee..1100ffe4a722 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2897,6 +2897,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2897 skb_reserve(skb, MAX_TCP_HEADER); 2897 skb_reserve(skb, MAX_TCP_HEADER);
2898 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2898 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2899 TCPHDR_ACK | TCPHDR_RST); 2899 TCPHDR_ACK | TCPHDR_RST);
2900 skb_mstamp_get(&skb->skb_mstamp);
2900 /* Send it off. */ 2901 /* Send it off. */
2901 if (tcp_transmit_skb(sk, skb, 0, priority)) 2902 if (tcp_transmit_skb(sk, skb, 0, priority))
2902 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2903 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c0a15e7f359f..f7d1d5e19e95 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1024,7 +1024,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1024 if (netif_index_is_vrf(net, ipc.oif)) { 1024 if (netif_index_is_vrf(net, ipc.oif)) {
1025 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, 1025 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
1026 RT_SCOPE_UNIVERSE, sk->sk_protocol, 1026 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1027 (flow_flags | FLOWI_FLAG_VRFSRC), 1027 (flow_flags | FLOWI_FLAG_VRFSRC |
1028 FLOWI_FLAG_SKIP_NH_OIF),
1028 faddr, saddr, dport, 1029 faddr, saddr, dport,
1029 inet->inet_sport); 1030 inet->inet_sport);
1030 1031
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index bb919b28619f..c10a9ee68433 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -33,6 +33,8 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
33 if (saddr) 33 if (saddr)
34 fl4->saddr = saddr->a4; 34 fl4->saddr = saddr->a4;
35 35
36 fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;
37
36 rt = __ip_route_output_key(net, fl4); 38 rt = __ip_route_output_key(net, fl4);
37 if (!IS_ERR(rt)) 39 if (!IS_ERR(rt))
38 return &rt->dst; 40 return &rt->dst;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 030fefdc9aed..900113376d4e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5127,13 +5127,12 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5127 5127
5128 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, 5128 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5129 ifp->idev->dev, 0, 0); 5129 ifp->idev->dev, 0, 0);
5130 if (rt && ip6_del_rt(rt)) 5130 if (rt)
5131 dst_free(&rt->dst); 5131 ip6_del_rt(rt);
5132 } 5132 }
5133 dst_hold(&ifp->rt->dst); 5133 dst_hold(&ifp->rt->dst);
5134 5134
5135 if (ip6_del_rt(ifp->rt)) 5135 ip6_del_rt(ifp->rt);
5136 dst_free(&ifp->rt->dst);
5137 5136
5138 rt_genid_bump_ipv6(net); 5137 rt_genid_bump_ipv6(net);
5139 break; 5138 break;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 418d9823692b..7d2e0023c72d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -155,6 +155,11 @@ static void node_free(struct fib6_node *fn)
155 kmem_cache_free(fib6_node_kmem, fn); 155 kmem_cache_free(fib6_node_kmem, fn);
156} 156}
157 157
158static void rt6_rcu_free(struct rt6_info *rt)
159{
160 call_rcu(&rt->dst.rcu_head, dst_rcu_free);
161}
162
158static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) 163static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
159{ 164{
160 int cpu; 165 int cpu;
@@ -169,7 +174,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
169 ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); 174 ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
170 pcpu_rt = *ppcpu_rt; 175 pcpu_rt = *ppcpu_rt;
171 if (pcpu_rt) { 176 if (pcpu_rt) {
172 dst_free(&pcpu_rt->dst); 177 rt6_rcu_free(pcpu_rt);
173 *ppcpu_rt = NULL; 178 *ppcpu_rt = NULL;
174 } 179 }
175 } 180 }
@@ -181,7 +186,7 @@ static void rt6_release(struct rt6_info *rt)
181{ 186{
182 if (atomic_dec_and_test(&rt->rt6i_ref)) { 187 if (atomic_dec_and_test(&rt->rt6i_ref)) {
183 rt6_free_pcpu(rt); 188 rt6_free_pcpu(rt);
184 dst_free(&rt->dst); 189 rt6_rcu_free(rt);
185 } 190 }
186} 191}
187 192
@@ -846,7 +851,7 @@ add:
846 *ins = rt; 851 *ins = rt;
847 rt->rt6i_node = fn; 852 rt->rt6i_node = fn;
848 atomic_inc(&rt->rt6i_ref); 853 atomic_inc(&rt->rt6i_ref);
849 inet6_rt_notify(RTM_NEWROUTE, rt, info); 854 inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
850 info->nl_net->ipv6.rt6_stats->fib_rt_entries++; 855 info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
851 856
852 if (!(fn->fn_flags & RTN_RTINFO)) { 857 if (!(fn->fn_flags & RTN_RTINFO)) {
@@ -872,7 +877,7 @@ add:
872 rt->rt6i_node = fn; 877 rt->rt6i_node = fn;
873 rt->dst.rt6_next = iter->dst.rt6_next; 878 rt->dst.rt6_next = iter->dst.rt6_next;
874 atomic_inc(&rt->rt6i_ref); 879 atomic_inc(&rt->rt6i_ref);
875 inet6_rt_notify(RTM_NEWROUTE, rt, info); 880 inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
876 if (!(fn->fn_flags & RTN_RTINFO)) { 881 if (!(fn->fn_flags & RTN_RTINFO)) {
877 info->nl_net->ipv6.rt6_stats->fib_route_nodes++; 882 info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
878 fn->fn_flags |= RTN_RTINFO; 883 fn->fn_flags |= RTN_RTINFO;
@@ -933,6 +938,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
933 int replace_required = 0; 938 int replace_required = 0;
934 int sernum = fib6_new_sernum(info->nl_net); 939 int sernum = fib6_new_sernum(info->nl_net);
935 940
941 if (WARN_ON_ONCE((rt->dst.flags & DST_NOCACHE) &&
942 !atomic_read(&rt->dst.__refcnt)))
943 return -EINVAL;
944
936 if (info->nlh) { 945 if (info->nlh) {
937 if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) 946 if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
938 allow_create = 0; 947 allow_create = 0;
@@ -1025,6 +1034,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
1025 fib6_start_gc(info->nl_net, rt); 1034 fib6_start_gc(info->nl_net, rt);
1026 if (!(rt->rt6i_flags & RTF_CACHE)) 1035 if (!(rt->rt6i_flags & RTF_CACHE))
1027 fib6_prune_clones(info->nl_net, pn); 1036 fib6_prune_clones(info->nl_net, pn);
1037 rt->dst.flags &= ~DST_NOCACHE;
1028 } 1038 }
1029 1039
1030out: 1040out:
@@ -1049,7 +1059,8 @@ out:
1049 atomic_inc(&pn->leaf->rt6i_ref); 1059 atomic_inc(&pn->leaf->rt6i_ref);
1050 } 1060 }
1051#endif 1061#endif
1052 dst_free(&rt->dst); 1062 if (!(rt->dst.flags & DST_NOCACHE))
1063 dst_free(&rt->dst);
1053 } 1064 }
1054 return err; 1065 return err;
1055 1066
@@ -1060,7 +1071,8 @@ out:
1060st_failure: 1071st_failure:
1061 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 1072 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
1062 fib6_repair_tree(info->nl_net, fn); 1073 fib6_repair_tree(info->nl_net, fn);
1063 dst_free(&rt->dst); 1074 if (!(rt->dst.flags & DST_NOCACHE))
1075 dst_free(&rt->dst);
1064 return err; 1076 return err;
1065#endif 1077#endif
1066} 1078}
@@ -1410,7 +1422,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1410 1422
1411 fib6_purge_rt(rt, fn, net); 1423 fib6_purge_rt(rt, fn, net);
1412 1424
1413 inet6_rt_notify(RTM_DELROUTE, rt, info); 1425 inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
1414 rt6_release(rt); 1426 rt6_release(rt);
1415} 1427}
1416 1428
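
Switching rt6_release() and rt6_free_pcpu() from dst_free() to a call_rcu()-based release closes a window where a lockless reader, still inside rcu_read_lock(), could dereference a route another CPU had just freed. The general deferred-free pattern, with the reader side sketched using hypothetical names (cached_route, inspect):

/* Writer side: defer the actual free past every current RCU reader. */
static void release_route(struct rt6_info *rt)
{
	call_rcu(&rt->dst.rcu_head, dst_rcu_free);	/* as in the hunk */
}

/* Reader side (schematic): the pointer stays valid until unlock. */
rcu_read_lock();
rt = rcu_dereference(cached_route);
if (rt)
	inspect(rt);		/* safe: the free is deferred past us */
rcu_read_unlock();
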
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4038c694ec03..3c7b9310b33f 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -404,13 +404,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
404 struct ipv6_tlv_tnl_enc_lim *tel; 404 struct ipv6_tlv_tnl_enc_lim *tel;
405 __u32 mtu; 405 __u32 mtu;
406 case ICMPV6_DEST_UNREACH: 406 case ICMPV6_DEST_UNREACH:
407 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", 407 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
408 t->parms.name); 408 t->parms.name);
409 break; 409 break;
410 case ICMPV6_TIME_EXCEED: 410 case ICMPV6_TIME_EXCEED:
411 if (code == ICMPV6_EXC_HOPLIMIT) { 411 if (code == ICMPV6_EXC_HOPLIMIT) {
412 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 412 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
413 t->parms.name); 413 t->parms.name);
414 } 414 }
415 break; 415 break;
416 case ICMPV6_PARAMPROB: 416 case ICMPV6_PARAMPROB:
@@ -421,12 +421,12 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
421 if (teli && teli == be32_to_cpu(info) - 2) { 421 if (teli && teli == be32_to_cpu(info) - 2) {
422 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 422 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
423 if (tel->encap_limit == 0) { 423 if (tel->encap_limit == 0) {
424 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 424 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
425 t->parms.name); 425 t->parms.name);
426 } 426 }
427 } else { 427 } else {
428 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 428 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
429 t->parms.name); 429 t->parms.name);
430 } 430 }
431 break; 431 break;
432 case ICMPV6_PKT_TOOBIG: 432 case ICMPV6_PKT_TOOBIG:
@@ -634,20 +634,20 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
634 } 634 }
635 635
636 if (!fl6->flowi6_mark) 636 if (!fl6->flowi6_mark)
637 dst = ip6_tnl_dst_check(tunnel); 637 dst = ip6_tnl_dst_get(tunnel);
638 638
639 if (!dst) { 639 if (!dst) {
640 ndst = ip6_route_output(net, NULL, fl6); 640 dst = ip6_route_output(net, NULL, fl6);
641 641
642 if (ndst->error) 642 if (dst->error)
643 goto tx_err_link_failure; 643 goto tx_err_link_failure;
644 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); 644 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
645 if (IS_ERR(ndst)) { 645 if (IS_ERR(dst)) {
646 err = PTR_ERR(ndst); 646 err = PTR_ERR(dst);
647 ndst = NULL; 647 dst = NULL;
648 goto tx_err_link_failure; 648 goto tx_err_link_failure;
649 } 649 }
650 dst = ndst; 650 ndst = dst;
651 } 651 }
652 652
653 tdev = dst->dev; 653 tdev = dst->dev;
@@ -702,12 +702,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		skb = new_skb;
 	}
 
-	if (fl6->flowi6_mark) {
-		skb_dst_set(skb, dst);
-		ndst = NULL;
-	} else {
-		skb_dst_set_noref(skb, dst);
-	}
+	if (!fl6->flowi6_mark && ndst)
+		ip6_tnl_dst_set(tunnel, ndst);
+	skb_dst_set(skb, dst);
 
 	proto = NEXTHDR_GRE;
 	if (encap_limit >= 0) {
@@ -762,14 +759,12 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
762 skb_set_inner_protocol(skb, protocol); 759 skb_set_inner_protocol(skb, protocol);
763 760
764 ip6tunnel_xmit(NULL, skb, dev); 761 ip6tunnel_xmit(NULL, skb, dev);
765 if (ndst)
766 ip6_tnl_dst_store(tunnel, ndst);
767 return 0; 762 return 0;
768tx_err_link_failure: 763tx_err_link_failure:
769 stats->tx_carrier_errors++; 764 stats->tx_carrier_errors++;
770 dst_link_failure(skb); 765 dst_link_failure(skb);
771tx_err_dst_release: 766tx_err_dst_release:
772 dst_release(ndst); 767 dst_release(dst);
773 return err; 768 return err;
774} 769}
775 770
@@ -1223,6 +1218,9 @@ static const struct net_device_ops ip6gre_netdev_ops = {
1223 1218
1224static void ip6gre_dev_free(struct net_device *dev) 1219static void ip6gre_dev_free(struct net_device *dev)
1225{ 1220{
1221 struct ip6_tnl *t = netdev_priv(dev);
1222
1223 ip6_tnl_dst_destroy(t);
1226 free_percpu(dev->tstats); 1224 free_percpu(dev->tstats);
1227 free_netdev(dev); 1225 free_netdev(dev);
1228} 1226}
@@ -1245,9 +1243,10 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
1245 netif_keep_dst(dev); 1243 netif_keep_dst(dev);
1246} 1244}
1247 1245
1248static int ip6gre_tunnel_init(struct net_device *dev) 1246static int ip6gre_tunnel_init_common(struct net_device *dev)
1249{ 1247{
1250 struct ip6_tnl *tunnel; 1248 struct ip6_tnl *tunnel;
1249 int ret;
1251 1250
1252 tunnel = netdev_priv(dev); 1251 tunnel = netdev_priv(dev);
1253 1252
@@ -1255,16 +1254,37 @@ static int ip6gre_tunnel_init(struct net_device *dev)
1255 tunnel->net = dev_net(dev); 1254 tunnel->net = dev_net(dev);
1256 strcpy(tunnel->parms.name, dev->name); 1255 strcpy(tunnel->parms.name, dev->name);
1257 1256
1257 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1258 if (!dev->tstats)
1259 return -ENOMEM;
1260
1261 ret = ip6_tnl_dst_init(tunnel);
1262 if (ret) {
1263 free_percpu(dev->tstats);
1264 dev->tstats = NULL;
1265 return ret;
1266 }
1267
1268 return 0;
1269}
1270
1271static int ip6gre_tunnel_init(struct net_device *dev)
1272{
1273 struct ip6_tnl *tunnel;
1274 int ret;
1275
1276 ret = ip6gre_tunnel_init_common(dev);
1277 if (ret)
1278 return ret;
1279
1280 tunnel = netdev_priv(dev);
1281
1258 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); 1282 memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
1259 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); 1283 memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1260 1284
1261 if (ipv6_addr_any(&tunnel->parms.raddr)) 1285 if (ipv6_addr_any(&tunnel->parms.raddr))
1262 dev->header_ops = &ip6gre_header_ops; 1286 dev->header_ops = &ip6gre_header_ops;
1263 1287
1264 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1265 if (!dev->tstats)
1266 return -ENOMEM;
1267
1268 return 0; 1288 return 0;
1269} 1289}
1270 1290
@@ -1460,19 +1480,16 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 static int ip6gre_tap_init(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
+	int ret;
 
-	tunnel = netdev_priv(dev);
+	ret = ip6gre_tunnel_init_common(dev);
+	if (ret)
+		return ret;
 
-	tunnel->dev = dev;
-	tunnel->net = dev_net(dev);
-	strcpy(tunnel->parms.name, dev->name);
+	tunnel = netdev_priv(dev);
 
 	ip6gre_tnl_link_config(tunnel, 1);
 
-	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-	if (!dev->tstats)
-		return -ENOMEM;
-
 	return 0;
 }
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 26ea47930740..92b1aa38f121 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -586,20 +586,22 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
586 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr, 586 frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
587 &ipv6_hdr(skb)->saddr); 587 &ipv6_hdr(skb)->saddr);
588 588
589 hroom = LL_RESERVED_SPACE(rt->dst.dev);
589 if (skb_has_frag_list(skb)) { 590 if (skb_has_frag_list(skb)) {
590 int first_len = skb_pagelen(skb); 591 int first_len = skb_pagelen(skb);
591 struct sk_buff *frag2; 592 struct sk_buff *frag2;
592 593
593 if (first_len - hlen > mtu || 594 if (first_len - hlen > mtu ||
594 ((first_len - hlen) & 7) || 595 ((first_len - hlen) & 7) ||
595 skb_cloned(skb)) 596 skb_cloned(skb) ||
597 skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
596 goto slow_path; 598 goto slow_path;
597 599
598 skb_walk_frags(skb, frag) { 600 skb_walk_frags(skb, frag) {
599 /* Correct geometry. */ 601 /* Correct geometry. */
600 if (frag->len > mtu || 602 if (frag->len > mtu ||
601 ((frag->len & 7) && frag->next) || 603 ((frag->len & 7) && frag->next) ||
602 skb_headroom(frag) < hlen) 604 skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
603 goto slow_path_clean; 605 goto slow_path_clean;
604 606
605 /* Partially cloned skb? */ 607 /* Partially cloned skb? */
@@ -616,8 +618,6 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
616 618
617 err = 0; 619 err = 0;
618 offset = 0; 620 offset = 0;
619 frag = skb_shinfo(skb)->frag_list;
620 skb_frag_list_init(skb);
621 /* BUILD HEADER */ 621 /* BUILD HEADER */
622 622
623 *prevhdr = NEXTHDR_FRAGMENT; 623 *prevhdr = NEXTHDR_FRAGMENT;
@@ -625,8 +625,11 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
625 if (!tmp_hdr) { 625 if (!tmp_hdr) {
626 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 626 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
627 IPSTATS_MIB_FRAGFAILS); 627 IPSTATS_MIB_FRAGFAILS);
628 return -ENOMEM; 628 err = -ENOMEM;
629 goto fail;
629 } 630 }
631 frag = skb_shinfo(skb)->frag_list;
632 skb_frag_list_init(skb);
630 633
631 __skb_pull(skb, hlen); 634 __skb_pull(skb, hlen);
632 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); 635 fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
@@ -723,7 +726,6 @@ slow_path:
723 */ 726 */
724 727
725 *prevhdr = NEXTHDR_FRAGMENT; 728 *prevhdr = NEXTHDR_FRAGMENT;
726 hroom = LL_RESERVED_SPACE(rt->dst.dev);
727 troom = rt->dst.dev->needed_tailroom; 729 troom = rt->dst.dev->needed_tailroom;
728 730
729 /* 731 /*
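
Hoisting the LL_RESERVED_SPACE() computation lets ip6_fragment() verify headroom before committing to the fast path: that path prepends the fragment header with __skb_push() on each skb in the frag list, which is only legal if every one of them already has room in front. The new precondition, in essence:

/* Essence of the fast-path check added above: */
hroom = LL_RESERVED_SPACE(rt->dst.dev);
if (skb_headroom(skb) < hroom + sizeof(struct frag_hdr))
	goto slow_path;		/* in-place __skb_push() would overrun */
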
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b0ab420612bc..eabffbb89795 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -126,36 +126,92 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  * Locking : hash tables are protected by RCU and RTNL
  */
 
-struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
+static void ip6_tnl_per_cpu_dst_set(struct ip6_tnl_dst *idst,
+				    struct dst_entry *dst)
 {
-	struct dst_entry *dst = t->dst_cache;
+	write_seqlock_bh(&idst->lock);
+	dst_release(rcu_dereference_protected(
+			    idst->dst,
+			    lockdep_is_held(&idst->lock.lock)));
+	if (dst) {
+		dst_hold(dst);
+		idst->cookie = rt6_get_cookie((struct rt6_info *)dst);
+	} else {
+		idst->cookie = 0;
+	}
+	rcu_assign_pointer(idst->dst, dst);
+	write_sequnlock_bh(&idst->lock);
+}
+
+struct dst_entry *ip6_tnl_dst_get(struct ip6_tnl *t)
+{
+	struct ip6_tnl_dst *idst;
+	struct dst_entry *dst;
+	unsigned int seq;
+	u32 cookie;
 
-	if (dst && dst->obsolete &&
-	    !dst->ops->check(dst, t->dst_cookie)) {
-		t->dst_cache = NULL;
+	idst = raw_cpu_ptr(t->dst_cache);
+
+	rcu_read_lock();
+	do {
+		seq = read_seqbegin(&idst->lock);
+		dst = rcu_dereference(idst->dst);
+		cookie = idst->cookie;
+	} while (read_seqretry(&idst->lock, seq));
+
+	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+		dst = NULL;
+	rcu_read_unlock();
+
+	if (dst && dst->obsolete && !dst->ops->check(dst, cookie)) {
+		ip6_tnl_per_cpu_dst_set(idst, NULL);
 		dst_release(dst);
-		return NULL;
+		dst = NULL;
 	}
-
 	return dst;
 }
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_check);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_get);
 
 void ip6_tnl_dst_reset(struct ip6_tnl *t)
 {
-	dst_release(t->dst_cache);
-	t->dst_cache = NULL;
+	int i;
+
+	for_each_possible_cpu(i)
+		ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL);
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 
-void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
+void ip6_tnl_dst_set(struct ip6_tnl *t, struct dst_entry *dst)
+{
+	ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), dst);
+
+}
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_set);
+
+void ip6_tnl_dst_destroy(struct ip6_tnl *t)
 {
-	struct rt6_info *rt = (struct rt6_info *) dst;
-	t->dst_cookie = rt6_get_cookie(rt);
-	dst_release(t->dst_cache);
-	t->dst_cache = dst;
+	if (!t->dst_cache)
+		return;
+
+	ip6_tnl_dst_reset(t);
+	free_percpu(t->dst_cache);
 }
-EXPORT_SYMBOL_GPL(ip6_tnl_dst_store);
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_destroy);
+
+int ip6_tnl_dst_init(struct ip6_tnl *t)
+{
+	int i;
+
+	t->dst_cache = alloc_percpu(struct ip6_tnl_dst);
+	if (!t->dst_cache)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i)
+		seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ip6_tnl_dst_init);
 
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
@@ -271,6 +327,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
271 327
272static void ip6_dev_free(struct net_device *dev) 328static void ip6_dev_free(struct net_device *dev)
273{ 329{
330 struct ip6_tnl *t = netdev_priv(dev);
331
332 ip6_tnl_dst_destroy(t);
274 free_percpu(dev->tstats); 333 free_percpu(dev->tstats);
275 free_netdev(dev); 334 free_netdev(dev);
276} 335}
@@ -510,14 +569,14 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
510 struct ipv6_tlv_tnl_enc_lim *tel; 569 struct ipv6_tlv_tnl_enc_lim *tel;
511 __u32 mtu; 570 __u32 mtu;
512 case ICMPV6_DEST_UNREACH: 571 case ICMPV6_DEST_UNREACH:
513 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", 572 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
514 t->parms.name); 573 t->parms.name);
515 rel_msg = 1; 574 rel_msg = 1;
516 break; 575 break;
517 case ICMPV6_TIME_EXCEED: 576 case ICMPV6_TIME_EXCEED:
518 if ((*code) == ICMPV6_EXC_HOPLIMIT) { 577 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
519 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", 578 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
520 t->parms.name); 579 t->parms.name);
521 rel_msg = 1; 580 rel_msg = 1;
522 } 581 }
523 break; 582 break;
@@ -529,13 +588,13 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
529 if (teli && teli == *info - 2) { 588 if (teli && teli == *info - 2) {
530 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; 589 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
531 if (tel->encap_limit == 0) { 590 if (tel->encap_limit == 0) {
532 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", 591 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
533 t->parms.name); 592 t->parms.name);
534 rel_msg = 1; 593 rel_msg = 1;
535 } 594 }
536 } else { 595 } else {
537 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", 596 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
538 t->parms.name); 597 t->parms.name);
539 } 598 }
540 break; 599 break;
541 case ICMPV6_PKT_TOOBIG: 600 case ICMPV6_PKT_TOOBIG:
@@ -1010,23 +1069,23 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1010 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1069 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1011 neigh_release(neigh); 1070 neigh_release(neigh);
1012 } else if (!fl6->flowi6_mark) 1071 } else if (!fl6->flowi6_mark)
1013 dst = ip6_tnl_dst_check(t); 1072 dst = ip6_tnl_dst_get(t);
1014 1073
1015 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) 1074 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1016 goto tx_err_link_failure; 1075 goto tx_err_link_failure;
1017 1076
1018 if (!dst) { 1077 if (!dst) {
1019 ndst = ip6_route_output(net, NULL, fl6); 1078 dst = ip6_route_output(net, NULL, fl6);
1020 1079
1021 if (ndst->error) 1080 if (dst->error)
1022 goto tx_err_link_failure; 1081 goto tx_err_link_failure;
1023 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); 1082 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1024 if (IS_ERR(ndst)) { 1083 if (IS_ERR(dst)) {
1025 err = PTR_ERR(ndst); 1084 err = PTR_ERR(dst);
1026 ndst = NULL; 1085 dst = NULL;
1027 goto tx_err_link_failure; 1086 goto tx_err_link_failure;
1028 } 1087 }
1029 dst = ndst; 1088 ndst = dst;
1030 } 1089 }
1031 1090
1032 tdev = dst->dev; 1091 tdev = dst->dev;
@@ -1072,12 +1131,11 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1072 consume_skb(skb); 1131 consume_skb(skb);
1073 skb = new_skb; 1132 skb = new_skb;
1074 } 1133 }
1075 if (fl6->flowi6_mark) { 1134
1076 skb_dst_set(skb, dst); 1135 if (!fl6->flowi6_mark && ndst)
1077 ndst = NULL; 1136 ip6_tnl_dst_set(t, ndst);
1078 } else { 1137 skb_dst_set(skb, dst);
1079 skb_dst_set_noref(skb, dst); 1138
1080 }
1081 skb->transport_header = skb->network_header; 1139 skb->transport_header = skb->network_header;
1082 1140
1083 proto = fl6->flowi6_proto; 1141 proto = fl6->flowi6_proto;
@@ -1101,14 +1159,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1101 ipv6h->saddr = fl6->saddr; 1159 ipv6h->saddr = fl6->saddr;
1102 ipv6h->daddr = fl6->daddr; 1160 ipv6h->daddr = fl6->daddr;
1103 ip6tunnel_xmit(NULL, skb, dev); 1161 ip6tunnel_xmit(NULL, skb, dev);
1104 if (ndst)
1105 ip6_tnl_dst_store(t, ndst);
1106 return 0; 1162 return 0;
1107tx_err_link_failure: 1163tx_err_link_failure:
1108 stats->tx_carrier_errors++; 1164 stats->tx_carrier_errors++;
1109 dst_link_failure(skb); 1165 dst_link_failure(skb);
1110tx_err_dst_release: 1166tx_err_dst_release:
1111 dst_release(ndst); 1167 dst_release(dst);
1112 return err; 1168 return err;
1113} 1169}
1114 1170
@@ -1573,12 +1629,21 @@ static inline int
1573ip6_tnl_dev_init_gen(struct net_device *dev) 1629ip6_tnl_dev_init_gen(struct net_device *dev)
1574{ 1630{
1575 struct ip6_tnl *t = netdev_priv(dev); 1631 struct ip6_tnl *t = netdev_priv(dev);
1632 int ret;
1576 1633
1577 t->dev = dev; 1634 t->dev = dev;
1578 t->net = dev_net(dev); 1635 t->net = dev_net(dev);
1579 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1636 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1580 if (!dev->tstats) 1637 if (!dev->tstats)
1581 return -ENOMEM; 1638 return -ENOMEM;
1639
1640 ret = ip6_tnl_dst_init(t);
1641 if (ret) {
1642 free_percpu(dev->tstats);
1643 dev->tstats = NULL;
1644 return ret;
1645 }
1646
1582 return 0; 1647 return 0;
1583} 1648}
1584 1649
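
The ip6_tunnel hunks above replace the single cached dst with a per-cpu cache guarded by a per-CPU seqlock: ip6_tnl_dst_init() allocates and initializes the slots, ip6_tnl_dst_reset() clears every CPU's entry, and ip6_tnl_dst_destroy() resets before freeing. A minimal sketch of that per-cpu lifecycle, using a hypothetical slot type modeled on struct ip6_tnl_dst:

#include <linux/percpu.h>
#include <linux/seqlock.h>

/* Hypothetical per-CPU cache slot, modeled on struct ip6_tnl_dst. */
struct pcpu_slot {
        seqlock_t lock;
        void *obj;
};

static struct pcpu_slot __percpu *slots_init(void)
{
        struct pcpu_slot __percpu *s;
        int cpu;

        s = alloc_percpu(struct pcpu_slot);     /* returns zeroed memory */
        if (!s)
                return NULL;

        /* Each CPU's slot gets its own seqlock. */
        for_each_possible_cpu(cpu)
                seqlock_init(&per_cpu_ptr(s, cpu)->lock);

        return s;
}

static void slots_destroy(struct pcpu_slot __percpu *s)
{
        if (!s)
                return;
        /* Per-CPU contents must be dropped first, as
         * ip6_tnl_dst_destroy() does via ip6_tnl_dst_reset(). */
        free_percpu(s);
}
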
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 53617d715188..f204089e854c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1322,8 +1322,7 @@ static void ip6_link_failure(struct sk_buff *skb)
1322 if (rt) { 1322 if (rt) {
1323 if (rt->rt6i_flags & RTF_CACHE) { 1323 if (rt->rt6i_flags & RTF_CACHE) {
1324 dst_hold(&rt->dst); 1324 dst_hold(&rt->dst);
1325 if (ip6_del_rt(rt)) 1325 ip6_del_rt(rt);
1326 dst_free(&rt->dst);
1327 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) { 1326 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
1328 rt->rt6i_node->fn_sernum = -1; 1327 rt->rt6i_node->fn_sernum = -1;
1329 } 1328 }
@@ -1886,9 +1885,11 @@ int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
1886 rt->dst.input = ip6_pkt_prohibit; 1885 rt->dst.input = ip6_pkt_prohibit;
1887 break; 1886 break;
1888 case RTN_THROW: 1887 case RTN_THROW:
1888 case RTN_UNREACHABLE:
1889 default: 1889 default:
1890 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN 1890 rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
1891 : -ENETUNREACH; 1891 : (cfg->fc_type == RTN_UNREACHABLE)
1892 ? -EHOSTUNREACH : -ENETUNREACH;
1892 rt->dst.output = ip6_pkt_discard_out; 1893 rt->dst.output = ip6_pkt_discard_out;
1893 rt->dst.input = ip6_pkt_discard; 1894 rt->dst.input = ip6_pkt_discard;
1894 break; 1895 break;
@@ -2028,7 +2029,8 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
2028 struct fib6_table *table; 2029 struct fib6_table *table;
2029 struct net *net = dev_net(rt->dst.dev); 2030 struct net *net = dev_net(rt->dst.dev);
2030 2031
2031 if (rt == net->ipv6.ip6_null_entry) { 2032 if (rt == net->ipv6.ip6_null_entry ||
2033 rt->dst.flags & DST_NOCACHE) {
2032 err = -ENOENT; 2034 err = -ENOENT;
2033 goto out; 2035 goto out;
2034 } 2036 }
@@ -2515,6 +2517,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2515 rt->rt6i_dst.addr = *addr; 2517 rt->rt6i_dst.addr = *addr;
2516 rt->rt6i_dst.plen = 128; 2518 rt->rt6i_dst.plen = 128;
2517 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); 2519 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2520 rt->dst.flags |= DST_NOCACHE;
2518 2521
2519 atomic_set(&rt->dst.__refcnt, 1); 2522 atomic_set(&rt->dst.__refcnt, 1);
2520 2523
@@ -3303,7 +3306,8 @@ errout:
3303 return err; 3306 return err;
3304} 3307}
3305 3308
3306void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info) 3309void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
3310 unsigned int nlm_flags)
3307{ 3311{
3308 struct sk_buff *skb; 3312 struct sk_buff *skb;
3309 struct net *net = info->nl_net; 3313 struct net *net = info->nl_net;
@@ -3318,7 +3322,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
3318 goto errout; 3322 goto errout;
3319 3323
3320 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, 3324 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
3321 event, info->portid, seq, 0, 0, 0); 3325 event, info->portid, seq, 0, 0, nlm_flags);
3322 if (err < 0) { 3326 if (err < 0) {
3323 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ 3327 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3324 WARN_ON(err == -EMSGSIZE); 3328 WARN_ON(err == -EMSGSIZE);
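
The RTN_UNREACHABLE case added above gives "unreachable" routes a distinct -EHOSTUNREACH error instead of folding them into -ENETUNREACH. The nested ?: chain in ip6_route_info_create() is equivalent to this small mapping, shown as a plain sketch:

#include <linux/errno.h>
#include <linux/rtnetlink.h>

/* Equivalent of the nested conditional in ip6_route_info_create(). */
static int fc_type_to_dst_error(u8 fc_type)
{
        switch (fc_type) {
        case RTN_THROW:
                return -EAGAIN;
        case RTN_UNREACHABLE:
                return -EHOSTUNREACH;
        default:
                return -ENETUNREACH;
        }
}
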
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 17b1fe961c5d..7a77a1470f25 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2474,6 +2474,7 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
2474 2474
2475 bss_conf->cqm_rssi_thold = rssi_thold; 2475 bss_conf->cqm_rssi_thold = rssi_thold;
2476 bss_conf->cqm_rssi_hyst = rssi_hyst; 2476 bss_conf->cqm_rssi_hyst = rssi_hyst;
2477 sdata->u.mgd.last_cqm_event_signal = 0;
2477 2478
2478 /* tell the driver upon association, unless already associated */ 2479 /* tell the driver upon association, unless already associated */
2479 if (sdata->u.mgd.associated && 2480 if (sdata->u.mgd.associated &&
@@ -2518,15 +2519,17 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
2518 continue; 2519 continue;
2519 2520
2520 for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { 2521 for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
2521 if (~sdata->rc_rateidx_mcs_mask[i][j]) 2522 if (~sdata->rc_rateidx_mcs_mask[i][j]) {
2522 sdata->rc_has_mcs_mask[i] = true; 2523 sdata->rc_has_mcs_mask[i] = true;
2524 break;
2525 }
2526 }
2523 2527
2524 if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) 2528 for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
2529 if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
2525 sdata->rc_has_vht_mcs_mask[i] = true; 2530 sdata->rc_has_vht_mcs_mask[i] = true;
2526
2527 if (sdata->rc_has_mcs_mask[i] &&
2528 sdata->rc_has_vht_mcs_mask[i])
2529 break; 2531 break;
2532 }
2530 } 2533 }
2531 } 2534 }
2532 2535
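
The mac80211 fix matters because the old code indexed the VHT mask array with the HT loop index, whose bound (IEEE80211_HT_MCS_MASK_LEN) is not the VHT bound (NL80211_VHT_NSS_MAX). The corrected shape, one loop per array with its own bound, reduces to a helper like this standalone sketch; the lengths and element width here are stand-ins, not the real array definitions:

#include <stdbool.h>
#include <stdint.h>

#define HT_LEN  10      /* stands in for IEEE80211_HT_MCS_MASK_LEN */
#define VHT_LEN 8       /* stands in for NL80211_VHT_NSS_MAX */

/* True if any entry differs from the all-ones "no restriction" default. */
static bool mask_in_use(const uint8_t *mask, int len)
{
        int j;

        for (j = 0; j < len; j++) {
                if (mask[j] != 0xff)
                        return true;
        }
        return false;
}

/* Each array is scanned against its own bound, never the other's. */
static void classify(const uint8_t *ht, const uint8_t *vht,
                     bool *has_ht, bool *has_vht)
{
        *has_ht = mask_in_use(ht, HT_LEN);
        *has_vht = mask_in_use(vht, VHT_LEN);
}
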
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 675d12c69e32..a5d41dfa9f05 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register);
107 107
108void nf_log_unregister(struct nf_logger *logger) 108void nf_log_unregister(struct nf_logger *logger)
109{ 109{
110 const struct nf_logger *log;
110 int i; 111 int i;
111 112
112 mutex_lock(&nf_log_mutex); 113 mutex_lock(&nf_log_mutex);
113 for (i = 0; i < NFPROTO_NUMPROTO; i++) 114 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
114 RCU_INIT_POINTER(loggers[i][logger->type], NULL); 115 log = nft_log_dereference(loggers[i][logger->type]);
116 if (log == logger)
117 RCU_INIT_POINTER(loggers[i][logger->type], NULL);
118 }
115 mutex_unlock(&nf_log_mutex); 119 mutex_unlock(&nf_log_mutex);
120 synchronize_rcu();
116} 121}
117EXPORT_SYMBOL(nf_log_unregister); 122EXPORT_SYMBOL(nf_log_unregister);
118 123
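
nf_log_unregister() now clears a slot only when it still points at the logger being removed, so unregistering one logger cannot wipe out another that has since taken over the slot, and the added synchronize_rcu() guarantees no reader still holds the stale pointer when the caller frees it. The general RCU unpublish pattern, sketched against a hypothetical pointer table:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

#define NSLOTS 16

static DEFINE_MUTEX(table_mutex);
static void __rcu *table[NSLOTS];

static void unpublish(void *obj)
{
        int i;

        mutex_lock(&table_mutex);
        for (i = 0; i < NSLOTS; i++) {
                /* Only clear entries that still point at us. */
                if (rcu_dereference_protected(table[i],
                                lockdep_is_held(&table_mutex)) == obj)
                        RCU_INIT_POINTER(table[i], NULL);
        }
        mutex_unlock(&table_mutex);

        /* Wait out existing readers before the caller may free obj. */
        synchronize_rcu();
}
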
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 66def315eb56..9c8fab00164b 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -619,6 +619,13 @@ struct nft_xt {
619 619
620static struct nft_expr_type nft_match_type; 620static struct nft_expr_type nft_match_type;
621 621
622static bool nft_match_cmp(const struct xt_match *match,
623 const char *name, u32 rev, u32 family)
624{
625 return strcmp(match->name, name) == 0 && match->revision == rev &&
626 (match->family == NFPROTO_UNSPEC || match->family == family);
627}
628
622static const struct nft_expr_ops * 629static const struct nft_expr_ops *
623nft_match_select_ops(const struct nft_ctx *ctx, 630nft_match_select_ops(const struct nft_ctx *ctx,
624 const struct nlattr * const tb[]) 631 const struct nlattr * const tb[])
@@ -626,7 +633,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
626 struct nft_xt *nft_match; 633 struct nft_xt *nft_match;
627 struct xt_match *match; 634 struct xt_match *match;
628 char *mt_name; 635 char *mt_name;
629 __u32 rev, family; 636 u32 rev, family;
630 637
631 if (tb[NFTA_MATCH_NAME] == NULL || 638 if (tb[NFTA_MATCH_NAME] == NULL ||
632 tb[NFTA_MATCH_REV] == NULL || 639 tb[NFTA_MATCH_REV] == NULL ||
@@ -641,8 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
641 list_for_each_entry(nft_match, &nft_match_list, head) { 648 list_for_each_entry(nft_match, &nft_match_list, head) {
642 struct xt_match *match = nft_match->ops.data; 649 struct xt_match *match = nft_match->ops.data;
643 650
644 if (strcmp(match->name, mt_name) == 0 && 651 if (nft_match_cmp(match, mt_name, rev, family)) {
645 match->revision == rev && match->family == family) {
646 if (!try_module_get(match->me)) 652 if (!try_module_get(match->me))
647 return ERR_PTR(-ENOENT); 653 return ERR_PTR(-ENOENT);
648 654
@@ -693,6 +699,13 @@ static LIST_HEAD(nft_target_list);
693 699
694static struct nft_expr_type nft_target_type; 700static struct nft_expr_type nft_target_type;
695 701
702static bool nft_target_cmp(const struct xt_target *tg,
703 const char *name, u32 rev, u32 family)
704{
705 return strcmp(tg->name, name) == 0 && tg->revision == rev &&
706 (tg->family == NFPROTO_UNSPEC || tg->family == family);
707}
708
696static const struct nft_expr_ops * 709static const struct nft_expr_ops *
697nft_target_select_ops(const struct nft_ctx *ctx, 710nft_target_select_ops(const struct nft_ctx *ctx,
698 const struct nlattr * const tb[]) 711 const struct nlattr * const tb[])
@@ -700,7 +713,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
700 struct nft_xt *nft_target; 713 struct nft_xt *nft_target;
701 struct xt_target *target; 714 struct xt_target *target;
702 char *tg_name; 715 char *tg_name;
703 __u32 rev, family; 716 u32 rev, family;
704 717
705 if (tb[NFTA_TARGET_NAME] == NULL || 718 if (tb[NFTA_TARGET_NAME] == NULL ||
706 tb[NFTA_TARGET_REV] == NULL || 719 tb[NFTA_TARGET_REV] == NULL ||
@@ -715,8 +728,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
715 list_for_each_entry(nft_target, &nft_target_list, head) { 728 list_for_each_entry(nft_target, &nft_target_list, head) {
716 struct xt_target *target = nft_target->ops.data; 729 struct xt_target *target = nft_target->ops.data;
717 730
718 if (strcmp(target->name, tg_name) == 0 && 731 if (nft_target_cmp(target, tg_name, rev, family)) {
719 target->revision == rev && target->family == family) {
720 if (!try_module_get(target->me)) 732 if (!try_module_get(target->me))
721 return ERR_PTR(-ENOENT); 733 return ERR_PTR(-ENOENT);
722 734
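
Besides deduplicating the comparison, nft_match_cmp()/nft_target_cmp() change behavior: an extension registered for NFPROTO_UNSPEC now satisfies a lookup for any family. The wildcard comparison in isolation, with hypothetical type and constant names:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define FAMILY_UNSPEC 0         /* stands in for NFPROTO_UNSPEC */

struct ext {
        const char *name;
        uint32_t revision;
        uint32_t family;
};

static bool ext_cmp(const struct ext *e, const char *name,
                    uint32_t rev, uint32_t family)
{
        /* UNSPEC acts as a wildcard: it matches every family. */
        return strcmp(e->name, name) == 0 && e->revision == rev &&
               (e->family == FAMILY_UNSPEC || e->family == family);
}
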
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7f86d3b55060..8f060d7f9a0e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -125,6 +125,24 @@ static inline u32 netlink_group_mask(u32 group)
125 return group ? 1 << (group - 1) : 0; 125 return group ? 1 << (group - 1) : 0;
126} 126}
127 127
128static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
129 gfp_t gfp_mask)
130{
131 unsigned int len = skb_end_offset(skb);
132 struct sk_buff *new;
133
134 new = alloc_skb(len, gfp_mask);
135 if (new == NULL)
136 return NULL;
137
138 NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
139 NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
140 NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
141
142 memcpy(skb_put(new, len), skb->data, len);
143 return new;
144}
145
128int netlink_add_tap(struct netlink_tap *nt) 146int netlink_add_tap(struct netlink_tap *nt)
129{ 147{
130 if (unlikely(nt->dev->type != ARPHRD_NETLINK)) 148 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
@@ -206,7 +224,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
206 int ret = -ENOMEM; 224 int ret = -ENOMEM;
207 225
208 dev_hold(dev); 226 dev_hold(dev);
209 nskb = skb_clone(skb, GFP_ATOMIC); 227
228 if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
229 nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
230 else
231 nskb = skb_clone(skb, GFP_ATOMIC);
210 if (nskb) { 232 if (nskb) {
211 nskb->dev = dev; 233 nskb->dev = dev;
212 nskb->protocol = htons((u16) sk->sk_protocol); 234 nskb->protocol = htons((u16) sk->sk_protocol);
@@ -279,11 +301,6 @@ static void netlink_rcv_wake(struct sock *sk)
279} 301}
280 302
281#ifdef CONFIG_NETLINK_MMAP 303#ifdef CONFIG_NETLINK_MMAP
282static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
283{
284 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
285}
286
287static bool netlink_rx_is_mmaped(struct sock *sk) 304static bool netlink_rx_is_mmaped(struct sock *sk)
288{ 305{
289 return nlk_sk(sk)->rx_ring.pg_vec != NULL; 306 return nlk_sk(sk)->rx_ring.pg_vec != NULL;
@@ -846,7 +863,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
846} 863}
847 864
848#else /* CONFIG_NETLINK_MMAP */ 865#else /* CONFIG_NETLINK_MMAP */
849#define netlink_skb_is_mmaped(skb) false
850#define netlink_rx_is_mmaped(sk) false 866#define netlink_rx_is_mmaped(sk) false
851#define netlink_tx_is_mmaped(sk) false 867#define netlink_tx_is_mmaped(sk) false
852#define netlink_mmap sock_no_mmap 868#define netlink_mmap sock_no_mmap
@@ -1094,8 +1110,8 @@ static int netlink_insert(struct sock *sk, u32 portid)
1094 1110
1095 lock_sock(sk); 1111 lock_sock(sk);
1096 1112
1097 err = -EBUSY; 1113 err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
1098 if (nlk_sk(sk)->portid) 1114 if (nlk_sk(sk)->bound)
1099 goto err; 1115 goto err;
1100 1116
1101 err = -ENOMEM; 1117 err = -ENOMEM;
@@ -1115,10 +1131,14 @@ static int netlink_insert(struct sock *sk, u32 portid)
1115 err = -EOVERFLOW; 1131 err = -EOVERFLOW;
1116 if (err == -EEXIST) 1132 if (err == -EEXIST)
1117 err = -EADDRINUSE; 1133 err = -EADDRINUSE;
1118 nlk_sk(sk)->portid = 0;
1119 sock_put(sk); 1134 sock_put(sk);
1135 goto err;
1120 } 1136 }
1121 1137
1138 /* We need to ensure that the socket is hashed and visible. */
1139 smp_wmb();
1140 nlk_sk(sk)->bound = portid;
1141
1122err: 1142err:
1123 release_sock(sk); 1143 release_sock(sk);
1124 return err; 1144 return err;
@@ -1503,6 +1523,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1503 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; 1523 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1504 int err; 1524 int err;
1505 long unsigned int groups = nladdr->nl_groups; 1525 long unsigned int groups = nladdr->nl_groups;
1526 bool bound;
1506 1527
1507 if (addr_len < sizeof(struct sockaddr_nl)) 1528 if (addr_len < sizeof(struct sockaddr_nl))
1508 return -EINVAL; 1529 return -EINVAL;
@@ -1519,9 +1540,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1519 return err; 1540 return err;
1520 } 1541 }
1521 1542
1522 if (nlk->portid) 1543 bound = nlk->bound;
1544 if (bound) {
1545 /* Ensure nlk->portid is up-to-date. */
1546 smp_rmb();
1547
1523 if (nladdr->nl_pid != nlk->portid) 1548 if (nladdr->nl_pid != nlk->portid)
1524 return -EINVAL; 1549 return -EINVAL;
1550 }
1525 1551
1526 if (nlk->netlink_bind && groups) { 1552 if (nlk->netlink_bind && groups) {
1527 int group; 1553 int group;
@@ -1537,7 +1563,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1537 } 1563 }
1538 } 1564 }
1539 1565
1540 if (!nlk->portid) { 1566 /* No need for barriers here as we return to user-space without
1567 * using any of the bound attributes.
1568 */
1569 if (!bound) {
1541 err = nladdr->nl_pid ? 1570 err = nladdr->nl_pid ?
1542 netlink_insert(sk, nladdr->nl_pid) : 1571 netlink_insert(sk, nladdr->nl_pid) :
1543 netlink_autobind(sock); 1572 netlink_autobind(sock);
@@ -1585,7 +1614,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1585 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) 1614 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1586 return -EPERM; 1615 return -EPERM;
1587 1616
1588 if (!nlk->portid) 1617 /* No need for barriers here as we return to user-space without
1618 * using any of the bound attributes.
1619 */
1620 if (!nlk->bound)
1589 err = netlink_autobind(sock); 1621 err = netlink_autobind(sock);
1590 1622
1591 if (err == 0) { 1623 if (err == 0) {
@@ -2426,10 +2458,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2426 dst_group = nlk->dst_group; 2458 dst_group = nlk->dst_group;
2427 } 2459 }
2428 2460
2429 if (!nlk->portid) { 2461 if (!nlk->bound) {
2430 err = netlink_autobind(sock); 2462 err = netlink_autobind(sock);
2431 if (err) 2463 if (err)
2432 goto out; 2464 goto out;
2465 } else {
2466 /* Ensure nlk is hashed and visible. */
2467 smp_rmb();
2433 } 2468 }
2434 2469
2435 /* It's a really convoluted way for userland to ask for mmaped 2470 /* It's a really convoluted way for userland to ask for mmaped
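
The new nlk->bound flag is a publish/subscribe handshake: netlink_insert() stores portid first, issues smp_wmb(), then sets bound; any path that observes bound set must issue smp_rmb() before trusting portid, as netlink_bind() and netlink_sendmsg() now do. A compact model of the pairing, with a hypothetical two-field state modeled on nlk->portid / nlk->bound:

#include <linux/types.h>
#include <asm/barrier.h>

struct pub_state {
        u32 portid;
        bool bound;
};

/* Writer side (cf. netlink_insert()): publish data, then the flag. */
static void publish(struct pub_state *s, u32 portid)
{
        s->portid = portid;
        smp_wmb();      /* order the portid store before the bound store */
        s->bound = true;
}

/* Reader side (cf. netlink_bind()): check the flag, then read data. */
static bool read_portid(struct pub_state *s, u32 *portid)
{
        if (!s->bound)
                return false;
        smp_rmb();      /* pairs with smp_wmb() in publish() */
        *portid = s->portid;
        return true;
}
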
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 89008405d6b4..14437d9b1965 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -35,6 +35,7 @@ struct netlink_sock {
35 unsigned long state; 35 unsigned long state;
36 size_t max_recvmsg_len; 36 size_t max_recvmsg_len;
37 wait_queue_head_t wait; 37 wait_queue_head_t wait;
38 bool bound;
38 bool cb_running; 39 bool cb_running;
39 struct netlink_callback cb; 40 struct netlink_callback cb;
40 struct mutex *cb_mutex; 41 struct mutex *cb_mutex;
@@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
59 return container_of(sk, struct netlink_sock, sk); 60 return container_of(sk, struct netlink_sock, sk);
60} 61}
61 62
63static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
64{
65#ifdef CONFIG_NETLINK_MMAP
66 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
67#else
68 return false;
69#endif /* CONFIG_NETLINK_MMAP */
70}
71
62struct netlink_table { 72struct netlink_table {
63 struct rhashtable hash; 73 struct rhashtable hash;
64 struct hlist_head mc_list; 74 struct hlist_head mc_list;
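
Moving netlink_skb_is_mmaped() into af_netlink.h lets the new tap path share one definition with af_netlink.c; the #else branch returns a constant false, so non-mmap builds compile the check away. The idiom in general form, with hypothetical names:

#include <linux/types.h>

struct obj {
        unsigned int flags;
};

#define OBJ_FEATURE_FLAG 0x1    /* hypothetical */

/* In a shared header: one inline helper, config handled internally,
 * so callers need no #ifdef of their own. */
static inline bool obj_has_feature(const struct obj *o)
{
#ifdef CONFIG_MY_FEATURE
        return o->flags & OBJ_FEATURE_FLAG;
#else
        return false;   /* constant-folds away on non-feature builds */
#endif
}
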
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 2a071f470d57..d143aa9f6654 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -5,7 +5,8 @@
5config OPENVSWITCH 5config OPENVSWITCH
6 tristate "Open vSwitch" 6 tristate "Open vSwitch"
7 depends on INET 7 depends on INET
8 depends on (!NF_CONNTRACK || NF_CONNTRACK) 8 depends on !NF_CONNTRACK || \
9 (NF_CONNTRACK && (!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6))
9 select LIBCRC32C 10 select LIBCRC32C
10 select MPLS 11 select MPLS
11 select NET_MPLS_GSO 12 select NET_MPLS_GSO
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index e8e524ad8a01..002a755fa07e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -275,13 +275,15 @@ static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
275 case NFPROTO_IPV6: { 275 case NFPROTO_IPV6: {
276 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 276 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
277 __be16 frag_off; 277 __be16 frag_off;
278 int ofs;
278 279
279 protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), 280 ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
280 &nexthdr, &frag_off); 281 &frag_off);
281 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) { 282 if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
282 pr_debug("proto header not found\n"); 283 pr_debug("proto header not found\n");
283 return NF_ACCEPT; 284 return NF_ACCEPT;
284 } 285 }
286 protoff = ofs;
285 break; 287 break;
286 } 288 }
287 default: 289 default:
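
The ovs_ct_helper() change captures the return of ipv6_skip_exthdr() in a signed temporary before assigning it onward: the function reports failure with a negative value, and checking that against an unsigned destination makes the "< 0" test impossible to express. The pattern as a standalone sketch, with a hypothetical parser standing in for ipv6_skip_exthdr():

#include <errno.h>

/* Hypothetical parser: returns a payload offset, or a negative
 * errno such as -EINVAL on failure. */
static int parse_hdr(const unsigned char *pkt, int len)
{
        return len >= 4 ? 4 : -EINVAL;
}

static int get_protoff(const unsigned char *pkt, int len,
                       unsigned int *protoff)
{
        int ofs;

        /* Capture the result in a signed variable first; assigning
         * straight into the unsigned *protoff would make the error
         * check below always false. */
        ofs = parse_hdr(pkt, len);
        if (ofs < 0)
                return ofs;

        *protoff = ofs;
        return 0;
}
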
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6fbd2decb19e..b816ff871528 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -952,7 +952,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
952 if (error) 952 if (error)
953 goto err_kfree_flow; 953 goto err_kfree_flow;
954 954
955 ovs_flow_mask_key(&new_flow->key, &key, &mask); 955 ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
956 956
957 /* Extract flow identifier. */ 957 /* Extract flow identifier. */
958 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], 958 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
@@ -1080,7 +1080,7 @@ static struct sw_flow_actions *get_flow_actions(struct net *net,
1080 struct sw_flow_key masked_key; 1080 struct sw_flow_key masked_key;
1081 int error; 1081 int error;
1082 1082
1083 ovs_flow_mask_key(&masked_key, key, mask); 1083 ovs_flow_mask_key(&masked_key, key, true, mask);
1084 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log); 1084 error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1085 if (error) { 1085 if (error) {
1086 OVS_NLERR(log, 1086 OVS_NLERR(log,
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index c92d6a262bc5..5c030a4d7338 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -57,6 +57,7 @@ struct ovs_len_tbl {
57}; 57};
58 58
59#define OVS_ATTR_NESTED -1 59#define OVS_ATTR_NESTED -1
60#define OVS_ATTR_VARIABLE -2
60 61
61static void update_range(struct sw_flow_match *match, 62static void update_range(struct sw_flow_match *match,
62 size_t offset, size_t size, bool is_mask) 63 size_t offset, size_t size, bool is_mask)
@@ -304,6 +305,10 @@ size_t ovs_key_attr_size(void)
304 + nla_total_size(28); /* OVS_KEY_ATTR_ND */ 305 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
305} 306}
306 307
308static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
309 [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) },
310};
311
307static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { 312static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
308 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) }, 313 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
309 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) }, 314 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
@@ -315,8 +320,9 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1]
315 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) }, 320 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
316 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) }, 321 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
317 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 }, 322 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
318 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_NESTED }, 323 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE },
319 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED }, 324 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED,
325 .next = ovs_vxlan_ext_key_lens },
320}; 326};
321 327
322/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ 328/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
@@ -349,6 +355,13 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
349 [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, 355 [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) },
350}; 356};
351 357
358static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
359{
360 return expected_len == attr_len ||
361 expected_len == OVS_ATTR_NESTED ||
362 expected_len == OVS_ATTR_VARIABLE;
363}
364
352static bool is_all_zero(const u8 *fp, size_t size) 365static bool is_all_zero(const u8 *fp, size_t size)
353{ 366{
354 int i; 367 int i;
@@ -388,7 +401,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
388 } 401 }
389 402
390 expected_len = ovs_key_lens[type].len; 403 expected_len = ovs_key_lens[type].len;
391 if (nla_len(nla) != expected_len && expected_len != OVS_ATTR_NESTED) { 404 if (!check_attr_len(nla_len(nla), expected_len)) {
392 OVS_NLERR(log, "Key %d has unexpected len %d expected %d", 405 OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
393 type, nla_len(nla), expected_len); 406 type, nla_len(nla), expected_len);
394 return -EINVAL; 407 return -EINVAL;
@@ -473,29 +486,50 @@ static int genev_tun_opt_from_nlattr(const struct nlattr *a,
473 return 0; 486 return 0;
474} 487}
475 488
476static const struct nla_policy vxlan_opt_policy[OVS_VXLAN_EXT_MAX + 1] = { 489static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
477 [OVS_VXLAN_EXT_GBP] = { .type = NLA_U32 },
478};
479
480static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
481 struct sw_flow_match *match, bool is_mask, 490 struct sw_flow_match *match, bool is_mask,
482 bool log) 491 bool log)
483{ 492{
484 struct nlattr *tb[OVS_VXLAN_EXT_MAX+1]; 493 struct nlattr *a;
494 int rem;
485 unsigned long opt_key_offset; 495 unsigned long opt_key_offset;
486 struct vxlan_metadata opts; 496 struct vxlan_metadata opts;
487 int err;
488 497
489 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); 498 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
490 499
491 err = nla_parse_nested(tb, OVS_VXLAN_EXT_MAX, a, vxlan_opt_policy);
492 if (err < 0)
493 return err;
494
495 memset(&opts, 0, sizeof(opts)); 500 memset(&opts, 0, sizeof(opts));
501 nla_for_each_nested(a, attr, rem) {
502 int type = nla_type(a);
496 503
497 if (tb[OVS_VXLAN_EXT_GBP]) 504 if (type > OVS_VXLAN_EXT_MAX) {
498 opts.gbp = nla_get_u32(tb[OVS_VXLAN_EXT_GBP]); 505 OVS_NLERR(log, "VXLAN extension %d out of range max %d",
506 type, OVS_VXLAN_EXT_MAX);
507 return -EINVAL;
508 }
509
510 if (!check_attr_len(nla_len(a),
511 ovs_vxlan_ext_key_lens[type].len)) {
512 OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
513 type, nla_len(a),
514 ovs_vxlan_ext_key_lens[type].len);
515 return -EINVAL;
516 }
517
518 switch (type) {
519 case OVS_VXLAN_EXT_GBP:
520 opts.gbp = nla_get_u32(a);
521 break;
522 default:
523 OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
524 type);
525 return -EINVAL;
526 }
527 }
528 if (rem) {
529 OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
530 rem);
531 return -EINVAL;
532 }
499 533
500 if (!is_mask) 534 if (!is_mask)
501 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false); 535 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
@@ -528,8 +562,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
528 return -EINVAL; 562 return -EINVAL;
529 } 563 }
530 564
531 if (ovs_tunnel_key_lens[type].len != nla_len(a) && 565 if (!check_attr_len(nla_len(a),
532 ovs_tunnel_key_lens[type].len != OVS_ATTR_NESTED) { 566 ovs_tunnel_key_lens[type].len)) {
533 OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", 567 OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
534 type, nla_len(a), ovs_tunnel_key_lens[type].len); 568 type, nla_len(a), ovs_tunnel_key_lens[type].len);
535 return -EINVAL; 569 return -EINVAL;
@@ -1052,10 +1086,13 @@ static void nlattr_set(struct nlattr *attr, u8 val,
1052 1086
1053 /* The nlattr stream should already have been validated */ 1087 /* The nlattr stream should already have been validated */
1054 nla_for_each_nested(nla, attr, rem) { 1088 nla_for_each_nested(nla, attr, rem) {
1055 if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED) 1089 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
1056 nlattr_set(nla, val, tbl[nla_type(nla)].next); 1090 if (tbl[nla_type(nla)].next)
1057 else 1091 tbl = tbl[nla_type(nla)].next;
1092 nlattr_set(nla, val, tbl);
1093 } else {
1058 memset(nla_data(nla), val, nla_len(nla)); 1094 memset(nla_data(nla), val, nla_len(nla));
1095 }
1059 } 1096 }
1060} 1097}
1061 1098
@@ -1922,8 +1959,7 @@ static int validate_set(const struct nlattr *a,
1922 key_len /= 2; 1959 key_len /= 2;
1923 1960
1924 if (key_type > OVS_KEY_ATTR_MAX || 1961 if (key_type > OVS_KEY_ATTR_MAX ||
1925 (ovs_key_lens[key_type].len != key_len && 1962 !check_attr_len(key_len, ovs_key_lens[key_type].len))
1926 ovs_key_lens[key_type].len != OVS_ATTR_NESTED))
1927 return -EINVAL; 1963 return -EINVAL;
1928 1964
1929 if (masked && !validate_masked(nla_data(ovs_key), key_len)) 1965 if (masked && !validate_masked(nla_data(ovs_key), key_len))
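
Replacing nla_parse_nested() with an open-coded nla_for_each_nested() walk lets the VXLAN option parser reject unknown attribute types, bad payload lengths, and trailing bytes outright instead of silently skipping them. A simplified skeleton of that validation, assuming a single known attribute type rather than the real OVS_VXLAN_EXT_* set:

#include <linux/errno.h>
#include <net/netlink.h>

#define EXT_GBP 1       /* hypothetical attribute type */

static int parse_exts(const struct nlattr *attr, u32 *gbp)
{
        const struct nlattr *a;
        int rem;

        nla_for_each_nested(a, attr, rem) {
                switch (nla_type(a)) {
                case EXT_GBP:
                        if (nla_len(a) != sizeof(u32))
                                return -EINVAL;
                        *gbp = nla_get_u32(a);
                        break;
                default:
                        return -EINVAL; /* unknown extension */
                }
        }
        return rem ? -EINVAL : 0;       /* leftover bytes: malformed */
}
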
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index d22d8e948d0f..f2ea83ba4763 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -57,20 +57,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
57} 57}
58 58
59void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, 59void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
60 const struct sw_flow_mask *mask) 60 bool full, const struct sw_flow_mask *mask)
61{ 61{
62 const long *m = (const long *)((const u8 *)&mask->key + 62 int start = full ? 0 : mask->range.start;
63 mask->range.start); 63 int len = full ? sizeof *dst : range_n_bytes(&mask->range);
64 const long *s = (const long *)((const u8 *)src + 64 const long *m = (const long *)((const u8 *)&mask->key + start);
65 mask->range.start); 65 const long *s = (const long *)((const u8 *)src + start);
66 long *d = (long *)((u8 *)dst + mask->range.start); 66 long *d = (long *)((u8 *)dst + start);
67 int i; 67 int i;
68 68
69 /* The memory outside of the 'mask->range' are not set since 69 /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
70 * further operations on 'dst' only uses contents within 70 * if 'full' is false the memory outside of the 'mask->range' is left
71 * 'mask->range'. 71 * uninitialized. This can be used as an optimization when further
72 * operations on 'dst' only use contents within 'mask->range'.
72 */ 73 */
73 for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) 74 for (i = 0; i < len; i += sizeof(long))
74 *d++ = *s++ & *m++; 75 *d++ = *s++ & *m++;
75} 76}
76 77
@@ -475,7 +476,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
475 u32 hash; 476 u32 hash;
476 struct sw_flow_key masked_key; 477 struct sw_flow_key masked_key;
477 478
478 ovs_flow_mask_key(&masked_key, unmasked, mask); 479 ovs_flow_mask_key(&masked_key, unmasked, false, mask);
479 hash = flow_hash(&masked_key, &mask->range); 480 hash = flow_hash(&masked_key, &mask->range);
480 head = find_bucket(ti, hash); 481 head = find_bucket(ti, hash);
481 hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { 482 hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 616eda10d955..2dd9900f533d 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
86bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); 86bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
87 87
88void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, 88void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
89 const struct sw_flow_mask *mask); 89 bool full, const struct sw_flow_mask *mask);
90#endif /* flow_table.h */ 90#endif /* flow_table.h */
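
ovs_flow_mask_key() copies (src & mask) one long at a time; the new full flag simply widens the copied window from [range.start, range.end) to the whole key, so callers such as ovs_flow_cmd_new() that later read outside the range (full=true) never see uninitialized bytes, while the hot lookup path keeps the narrow copy (full=false). A standalone model of the word-wise masked copy:

#include <stddef.h>

/* Copy (src & mask) into dst, either over the whole buffer or only
 * over the [start, start+len) window. Buffers are assumed
 * long-aligned and sized in multiples of sizeof(long). */
static void mask_copy(void *dst, const void *src, const void *mask,
                      size_t size, size_t start, size_t len, int full)
{
        size_t off = full ? 0 : start;
        size_t n = full ? size : len;
        const long *s = (const long *)((const char *)src + off);
        const long *m = (const long *)((const char *)mask + off);
        long *d = (long *)((char *)dst + off);
        size_t i;

        for (i = 0; i < n; i += sizeof(long))
                *d++ = *s++ & *m++;
}
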
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7b8e39a22387..aa4b15c35884 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -230,6 +230,8 @@ struct packet_skb_cb {
230 } sa; 230 } sa;
231}; 231};
232 232
233#define vio_le() virtio_legacy_is_little_endian()
234
233#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 235#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
234 236
235#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) 237#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
@@ -2680,15 +2682,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2680 goto out_unlock; 2682 goto out_unlock;
2681 2683
2682 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 2684 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2683 (__virtio16_to_cpu(false, vnet_hdr.csum_start) + 2685 (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2684 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > 2686 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
2685 __virtio16_to_cpu(false, vnet_hdr.hdr_len))) 2687 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
2686 vnet_hdr.hdr_len = __cpu_to_virtio16(false, 2688 vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
2687 __virtio16_to_cpu(false, vnet_hdr.csum_start) + 2689 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2688 __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); 2690 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
2689 2691
2690 err = -EINVAL; 2692 err = -EINVAL;
2691 if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) 2693 if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
2692 goto out_unlock; 2694 goto out_unlock;
2693 2695
2694 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 2696 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
@@ -2731,7 +2733,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2731 hlen = LL_RESERVED_SPACE(dev); 2733 hlen = LL_RESERVED_SPACE(dev);
2732 tlen = dev->needed_tailroom; 2734 tlen = dev->needed_tailroom;
2733 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, 2735 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2734 __virtio16_to_cpu(false, vnet_hdr.hdr_len), 2736 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
2735 msg->msg_flags & MSG_DONTWAIT, &err); 2737 msg->msg_flags & MSG_DONTWAIT, &err);
2736 if (skb == NULL) 2738 if (skb == NULL)
2737 goto out_unlock; 2739 goto out_unlock;
@@ -2778,8 +2780,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2778 2780
2779 if (po->has_vnet_hdr) { 2781 if (po->has_vnet_hdr) {
2780 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 2782 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2781 u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); 2783 u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
2782 u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); 2784 u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
2783 if (!skb_partial_csum_set(skb, s, o)) { 2785 if (!skb_partial_csum_set(skb, s, o)) {
2784 err = -EINVAL; 2786 err = -EINVAL;
2785 goto out_free; 2787 goto out_free;
@@ -2787,7 +2789,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2787 } 2789 }
2788 2790
2789 skb_shinfo(skb)->gso_size = 2791 skb_shinfo(skb)->gso_size =
2790 __virtio16_to_cpu(false, vnet_hdr.gso_size); 2792 __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
2791 skb_shinfo(skb)->gso_type = gso_type; 2793 skb_shinfo(skb)->gso_type = gso_type;
2792 2794
2793 /* Header must be checked, and gso_segs computed. */ 2795 /* Header must be checked, and gso_segs computed. */
@@ -3161,9 +3163,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3161 3163
3162 /* This is a hint as to how much should be linear. */ 3164 /* This is a hint as to how much should be linear. */
3163 vnet_hdr.hdr_len = 3165 vnet_hdr.hdr_len =
3164 __cpu_to_virtio16(false, skb_headlen(skb)); 3166 __cpu_to_virtio16(vio_le(), skb_headlen(skb));
3165 vnet_hdr.gso_size = 3167 vnet_hdr.gso_size =
3166 __cpu_to_virtio16(false, sinfo->gso_size); 3168 __cpu_to_virtio16(vio_le(), sinfo->gso_size);
3167 if (sinfo->gso_type & SKB_GSO_TCPV4) 3169 if (sinfo->gso_type & SKB_GSO_TCPV4)
3168 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 3170 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
3169 else if (sinfo->gso_type & SKB_GSO_TCPV6) 3171 else if (sinfo->gso_type & SKB_GSO_TCPV6)
@@ -3181,9 +3183,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3181 3183
3182 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3184 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3183 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 3185 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
3184 vnet_hdr.csum_start = __cpu_to_virtio16(false, 3186 vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
3185 skb_checksum_start_offset(skb)); 3187 skb_checksum_start_offset(skb));
3186 vnet_hdr.csum_offset = __cpu_to_virtio16(false, 3188 vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
3187 skb->csum_offset); 3189 skb->csum_offset);
3188 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 3190 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3189 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; 3191 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
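
Every virtio-net header field in packet_snd()/packet_recvmsg() now goes through vio_le(), i.e. virtio_legacy_is_little_endian(): legacy (pre-VIRTIO_F_VERSION_1) virtio uses host-native byte order, so the helper returns true except on big-endian hosts, whereas the old hard-coded false forced a big-endian interpretation everywhere. The conversion pattern in isolation:

#include <linux/virtio_byteorder.h>

#define vio_le() virtio_legacy_is_little_endian()

/* Convert a wire-format virtio 16-bit field to CPU order and back. */
static u16 vhdr16_to_cpu(__virtio16 wire)
{
        return __virtio16_to_cpu(vio_le(), wire);
}

static __virtio16 vhdr16_to_wire(u16 cpu)
{
        return __cpu_to_virtio16(vio_le(), cpu);
}
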
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 715e01e5910a..f23a3b68bba6 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -33,7 +33,6 @@
33 33
34struct fw_head { 34struct fw_head {
35 u32 mask; 35 u32 mask;
36 bool mask_set;
37 struct fw_filter __rcu *ht[HTSIZE]; 36 struct fw_filter __rcu *ht[HTSIZE];
38 struct rcu_head rcu; 37 struct rcu_head rcu;
39}; 38};
@@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
84 } 83 }
85 } 84 }
86 } else { 85 } else {
87 /* old method */ 86 /* Old method: classify the packet using its skb mark. */
88 if (id && (TC_H_MAJ(id) == 0 || 87 if (id && (TC_H_MAJ(id) == 0 ||
89 !(TC_H_MAJ(id ^ tp->q->handle)))) { 88 !(TC_H_MAJ(id ^ tp->q->handle)))) {
90 res->classid = id; 89 res->classid = id;
@@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
114 113
115static int fw_init(struct tcf_proto *tp) 114static int fw_init(struct tcf_proto *tp)
116{ 115{
117 struct fw_head *head; 116 /* We don't allocate fw_head here, because in the old method
118 117 * we don't need it at all.
119 head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); 118 */
120 if (head == NULL)
121 return -ENOBUFS;
122
123 head->mask_set = false;
124 rcu_assign_pointer(tp->root, head);
125 return 0; 119 return 0;
126} 120}
127 121
@@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
252 int err; 246 int err;
253 247
254 if (!opt) 248 if (!opt)
255 return handle ? -EINVAL : 0; 249 return handle ? -EINVAL : 0; /* Succeed if it is old method. */
256 250
257 err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); 251 err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy);
258 if (err < 0) 252 if (err < 0)
@@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
302 if (!handle) 296 if (!handle)
303 return -EINVAL; 297 return -EINVAL;
304 298
305 if (!head->mask_set) { 299 if (!head) {
306 head->mask = 0xFFFFFFFF; 300 u32 mask = 0xFFFFFFFF;
307 if (tb[TCA_FW_MASK]) 301 if (tb[TCA_FW_MASK])
308 head->mask = nla_get_u32(tb[TCA_FW_MASK]); 302 mask = nla_get_u32(tb[TCA_FW_MASK]);
309 head->mask_set = true; 303
304 head = kzalloc(sizeof(*head), GFP_KERNEL);
305 if (!head)
306 return -ENOBUFS;
307 head->mask = mask;
308
309 rcu_assign_pointer(tp->root, head);
310 } 310 }
311 311
312 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); 312 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
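
Instead of tracking a mask_set flag, fw_change() now allocates the head lazily on the first insert: compute the mask up front, allocate and fill the head completely, and only then publish it with rcu_assign_pointer(), so a concurrent fw_classify() never observes a half-initialized head. The publish-after-init pattern, reduced to a sketch:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct head {
        u32 mask;
        /* ... filter hash table etc. ... */
};

static int lazy_init(struct head __rcu **root, u32 mask)
{
        struct head *h;

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return -ENOBUFS;
        h->mask = mask;

        /* Publish only after every field is initialized; the barrier
         * inside rcu_assign_pointer() orders the stores. */
        rcu_assign_pointer(*root, h);
        return 0;
}
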
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b7143337e4fa..3d9ea9a48289 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1186,7 +1186,7 @@ static void sctp_v4_del_protocol(void)
1186 unregister_inetaddr_notifier(&sctp_inetaddr_notifier); 1186 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1187} 1187}
1188 1188
1189static int __net_init sctp_net_init(struct net *net) 1189static int __net_init sctp_defaults_init(struct net *net)
1190{ 1190{
1191 int status; 1191 int status;
1192 1192
@@ -1279,12 +1279,6 @@ static int __net_init sctp_net_init(struct net *net)
1279 1279
1280 sctp_dbg_objcnt_init(net); 1280 sctp_dbg_objcnt_init(net);
1281 1281
1282 /* Initialize the control inode/socket for handling OOTB packets. */
1283 if ((status = sctp_ctl_sock_init(net))) {
1284 pr_err("Failed to initialize the SCTP control sock\n");
1285 goto err_ctl_sock_init;
1286 }
1287
1288 /* Initialize the local address list. */ 1282 /* Initialize the local address list. */
1289 INIT_LIST_HEAD(&net->sctp.local_addr_list); 1283 INIT_LIST_HEAD(&net->sctp.local_addr_list);
1290 spin_lock_init(&net->sctp.local_addr_lock); 1284 spin_lock_init(&net->sctp.local_addr_lock);
@@ -1300,9 +1294,6 @@ static int __net_init sctp_net_init(struct net *net)
1300 1294
1301 return 0; 1295 return 0;
1302 1296
1303err_ctl_sock_init:
1304 sctp_dbg_objcnt_exit(net);
1305 sctp_proc_exit(net);
1306err_init_proc: 1297err_init_proc:
1307 cleanup_sctp_mibs(net); 1298 cleanup_sctp_mibs(net);
1308err_init_mibs: 1299err_init_mibs:
@@ -1311,15 +1302,12 @@ err_sysctl_register:
1311 return status; 1302 return status;
1312} 1303}
1313 1304
1314static void __net_exit sctp_net_exit(struct net *net) 1305static void __net_exit sctp_defaults_exit(struct net *net)
1315{ 1306{
1316 /* Free the local address list */ 1307 /* Free the local address list */
1317 sctp_free_addr_wq(net); 1308 sctp_free_addr_wq(net);
1318 sctp_free_local_addr_list(net); 1309 sctp_free_local_addr_list(net);
1319 1310
1320 /* Free the control endpoint. */
1321 inet_ctl_sock_destroy(net->sctp.ctl_sock);
1322
1323 sctp_dbg_objcnt_exit(net); 1311 sctp_dbg_objcnt_exit(net);
1324 1312
1325 sctp_proc_exit(net); 1313 sctp_proc_exit(net);
@@ -1327,9 +1315,32 @@ static void __net_exit sctp_net_exit(struct net *net)
1327 sctp_sysctl_net_unregister(net); 1315 sctp_sysctl_net_unregister(net);
1328} 1316}
1329 1317
1330static struct pernet_operations sctp_net_ops = { 1318static struct pernet_operations sctp_defaults_ops = {
1331 .init = sctp_net_init, 1319 .init = sctp_defaults_init,
1332 .exit = sctp_net_exit, 1320 .exit = sctp_defaults_exit,
1321};
1322
1323static int __net_init sctp_ctrlsock_init(struct net *net)
1324{
1325 int status;
1326
1327 /* Initialize the control inode/socket for handling OOTB packets. */
1328 status = sctp_ctl_sock_init(net);
1329 if (status)
1330 pr_err("Failed to initialize the SCTP control sock\n");
1331
1332 return status;
1333}
1334
1335static void __net_init sctp_ctrlsock_exit(struct net *net)
1336{
1337 /* Free the control endpoint. */
1338 inet_ctl_sock_destroy(net->sctp.ctl_sock);
1339}
1340
1341static struct pernet_operations sctp_ctrlsock_ops = {
1342 .init = sctp_ctrlsock_init,
1343 .exit = sctp_ctrlsock_exit,
1333}; 1344};
1334 1345
1335/* Initialize the universe into something sensible. */ 1346/* Initialize the universe into something sensible. */
@@ -1462,8 +1473,11 @@ static __init int sctp_init(void)
1462 sctp_v4_pf_init(); 1473 sctp_v4_pf_init();
1463 sctp_v6_pf_init(); 1474 sctp_v6_pf_init();
1464 1475
1465 status = sctp_v4_protosw_init(); 1476 status = register_pernet_subsys(&sctp_defaults_ops);
1477 if (status)
1478 goto err_register_defaults;
1466 1479
1480 status = sctp_v4_protosw_init();
1467 if (status) 1481 if (status)
1468 goto err_protosw_init; 1482 goto err_protosw_init;
1469 1483
@@ -1471,9 +1485,9 @@ static __init int sctp_init(void)
1471 if (status) 1485 if (status)
1472 goto err_v6_protosw_init; 1486 goto err_v6_protosw_init;
1473 1487
1474 status = register_pernet_subsys(&sctp_net_ops); 1488 status = register_pernet_subsys(&sctp_ctrlsock_ops);
1475 if (status) 1489 if (status)
1476 goto err_register_pernet_subsys; 1490 goto err_register_ctrlsock;
1477 1491
1478 status = sctp_v4_add_protocol(); 1492 status = sctp_v4_add_protocol();
1479 if (status) 1493 if (status)
@@ -1489,12 +1503,14 @@ out:
1489err_v6_add_protocol: 1503err_v6_add_protocol:
1490 sctp_v4_del_protocol(); 1504 sctp_v4_del_protocol();
1491err_add_protocol: 1505err_add_protocol:
1492 unregister_pernet_subsys(&sctp_net_ops); 1506 unregister_pernet_subsys(&sctp_ctrlsock_ops);
1493err_register_pernet_subsys: 1507err_register_ctrlsock:
1494 sctp_v6_protosw_exit(); 1508 sctp_v6_protosw_exit();
1495err_v6_protosw_init: 1509err_v6_protosw_init:
1496 sctp_v4_protosw_exit(); 1510 sctp_v4_protosw_exit();
1497err_protosw_init: 1511err_protosw_init:
1512 unregister_pernet_subsys(&sctp_defaults_ops);
1513err_register_defaults:
1498 sctp_v4_pf_exit(); 1514 sctp_v4_pf_exit();
1499 sctp_v6_pf_exit(); 1515 sctp_v6_pf_exit();
1500 sctp_sysctl_unregister(); 1516 sctp_sysctl_unregister();
@@ -1527,12 +1543,14 @@ static __exit void sctp_exit(void)
1527 sctp_v6_del_protocol(); 1543 sctp_v6_del_protocol();
1528 sctp_v4_del_protocol(); 1544 sctp_v4_del_protocol();
1529 1545
1530 unregister_pernet_subsys(&sctp_net_ops); 1546 unregister_pernet_subsys(&sctp_ctrlsock_ops);
1531 1547
1532 /* Free protosw registrations */ 1548 /* Free protosw registrations */
1533 sctp_v6_protosw_exit(); 1549 sctp_v6_protosw_exit();
1534 sctp_v4_protosw_exit(); 1550 sctp_v4_protosw_exit();
1535 1551
1552 unregister_pernet_subsys(&sctp_defaults_ops);
1553
1536 /* Unregister with socket layer. */ 1554 /* Unregister with socket layer. */
1537 sctp_v6_pf_exit(); 1555 sctp_v6_pf_exit();
1538 sctp_v4_pf_exit(); 1556 sctp_v4_pf_exit();
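
Splitting sctp_net_ops into sctp_defaults_ops and sctp_ctrlsock_ops fixes an ordering problem: the control socket can only be created once the protosw is registered, so sctp_init() must nest the registrations as defaults, then protosw, then ctrlsock, and unwind in reverse on error. The skeleton of that ordering, with hypothetical helper names:

#include <linux/init.h>
#include <net/net_namespace.h>

static struct pernet_operations defaults_ops;   /* hypothetical */
static struct pernet_operations ctrlsock_ops;   /* hypothetical */
static int protosw_init(void);                  /* hypothetical */
static void protosw_exit(void);                 /* hypothetical */

static int __init proto_init(void)
{
        int err;

        /* Per-net defaults must exist before the protosw... */
        err = register_pernet_subsys(&defaults_ops);
        if (err)
                return err;

        err = protosw_init();
        if (err)
                goto err_protosw;

        /* ...and the control socket only after both. */
        err = register_pernet_subsys(&ctrlsock_ops);
        if (err)
                goto err_ctrlsock;
        return 0;

err_ctrlsock:
        protosw_exit();
err_protosw:
        unregister_pernet_subsys(&defaults_ops);
        return err;
}
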
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b140c092d226..f14f24ee9983 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -297,7 +297,7 @@ static int rpc_complete_task(struct rpc_task *task)
297 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); 297 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
298 ret = atomic_dec_and_test(&task->tk_count); 298 ret = atomic_dec_and_test(&task->tk_count);
299 if (waitqueue_active(wq)) 299 if (waitqueue_active(wq))
300 __wake_up_locked_key(wq, TASK_NORMAL, 1, &k); 300 __wake_up_locked_key(wq, TASK_NORMAL, &k);
301 spin_unlock_irqrestore(&wq->lock, flags); 301 spin_unlock_irqrestore(&wq->lock, flags);
302 return ret; 302 return ret;
303} 303}
@@ -1092,14 +1092,10 @@ void
1092rpc_destroy_mempool(void) 1092rpc_destroy_mempool(void)
1093{ 1093{
1094 rpciod_stop(); 1094 rpciod_stop();
1095 if (rpc_buffer_mempool) 1095 mempool_destroy(rpc_buffer_mempool);
1096 mempool_destroy(rpc_buffer_mempool); 1096 mempool_destroy(rpc_task_mempool);
1097 if (rpc_task_mempool) 1097 kmem_cache_destroy(rpc_task_slabp);
1098 mempool_destroy(rpc_task_mempool); 1098 kmem_cache_destroy(rpc_buffer_slabp);
1099 if (rpc_task_slabp)
1100 kmem_cache_destroy(rpc_task_slabp);
1101 if (rpc_buffer_slabp)
1102 kmem_cache_destroy(rpc_buffer_slabp);
1103 rpc_destroy_wait_queue(&delay_queue); 1099 rpc_destroy_wait_queue(&delay_queue);
1104} 1100}
1105 1101
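
The rpc_destroy_mempool() cleanup relies on mempool_destroy() and kmem_cache_destroy() being NULL-safe (both simply return when passed NULL), so the four guards were redundant. A NULL-tolerant destructor keeps teardown and error paths flat:

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *pool;                 /* may still be NULL on early failure */
static struct kmem_cache *cache;        /* likewise */

static void teardown(void)
{
        /* Both destructors accept NULL, so no if-checks are needed. */
        mempool_destroy(pool);
        kmem_cache_destroy(cache);
}
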
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ab5dd621ae0c..2e98f4a243e5 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -614,6 +614,7 @@ static void xprt_autoclose(struct work_struct *work)
614 clear_bit(XPRT_CLOSE_WAIT, &xprt->state); 614 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
615 xprt->ops->close(xprt); 615 xprt->ops->close(xprt);
616 xprt_release_write(xprt, NULL); 616 xprt_release_write(xprt, NULL);
617 wake_up_bit(&xprt->state, XPRT_LOCKED);
617} 618}
618 619
619/** 620/**
@@ -723,6 +724,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
723 xprt->ops->release_xprt(xprt, NULL); 724 xprt->ops->release_xprt(xprt, NULL);
724out: 725out:
725 spin_unlock_bh(&xprt->transport_lock); 726 spin_unlock_bh(&xprt->transport_lock);
727 wake_up_bit(&xprt->state, XPRT_LOCKED);
726} 728}
727 729
728/** 730/**
@@ -1394,6 +1396,10 @@ out:
1394static void xprt_destroy(struct rpc_xprt *xprt) 1396static void xprt_destroy(struct rpc_xprt *xprt)
1395{ 1397{
1396 dprintk("RPC: destroying transport %p\n", xprt); 1398 dprintk("RPC: destroying transport %p\n", xprt);
1399
1400 /* Exclude transport connect/disconnect handlers */
1401 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1402
1397 del_timer_sync(&xprt->timer); 1403 del_timer_sync(&xprt->timer);
1398 1404
1399 rpc_xprt_debugfs_unregister(xprt); 1405 rpc_xprt_debugfs_unregister(xprt);
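
xprt_destroy() now claims the XPRT_LOCKED bit with wait_on_bit_lock() before tearing the transport down, and the release paths (xprt_autoclose(), xprt_unlock_connect()) wake that waiter with wake_up_bit(), excluding the connect/disconnect handlers from racing with destruction. The bit-lock handshake in isolation, with a hypothetical bit name:

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define MY_LOCKED 0     /* hypothetical bit, cf. XPRT_LOCKED */

static unsigned long state;

/* Lock holder's release path: clear the bit, then wake waiters. */
static void owner_release(void)
{
        clear_bit(MY_LOCKED, &state);
        smp_mb__after_atomic();
        wake_up_bit(&state, MY_LOCKED);
}

/* Teardown path: sleep until the bit can be taken exclusively. */
static void destroyer(void)
{
        wait_on_bit_lock(&state, MY_LOCKED, TASK_UNINTERRUPTIBLE);
        /* connect/disconnect handlers are now excluded */
}
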
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7be90bc1a7c2..1a85e0ed0b48 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -777,7 +777,6 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
777 xs_sock_reset_connection_flags(xprt); 777 xs_sock_reset_connection_flags(xprt);
778 /* Mark transport as closed and wake up all pending tasks */ 778 /* Mark transport as closed and wake up all pending tasks */
779 xprt_disconnect_done(xprt); 779 xprt_disconnect_done(xprt);
780 xprt_force_disconnect(xprt);
781} 780}
782 781
783/** 782/**
@@ -881,8 +880,11 @@ static void xs_xprt_free(struct rpc_xprt *xprt)
881 */ 880 */
882static void xs_destroy(struct rpc_xprt *xprt) 881static void xs_destroy(struct rpc_xprt *xprt)
883{ 882{
883 struct sock_xprt *transport = container_of(xprt,
884 struct sock_xprt, xprt);
884 dprintk("RPC: xs_destroy xprt %p\n", xprt); 885 dprintk("RPC: xs_destroy xprt %p\n", xprt);
885 886
887 cancel_delayed_work_sync(&transport->connect_worker);
886 xs_close(xprt); 888 xs_close(xprt);
887 xs_xprt_free(xprt); 889 xs_xprt_free(xprt);
888 module_put(THIS_MODULE); 890 module_put(THIS_MODULE);
@@ -1435,6 +1437,7 @@ out:
1435static void xs_tcp_state_change(struct sock *sk) 1437static void xs_tcp_state_change(struct sock *sk)
1436{ 1438{
1437 struct rpc_xprt *xprt; 1439 struct rpc_xprt *xprt;
1440 struct sock_xprt *transport;
1438 1441
1439 read_lock_bh(&sk->sk_callback_lock); 1442 read_lock_bh(&sk->sk_callback_lock);
1440 if (!(xprt = xprt_from_sock(sk))) 1443 if (!(xprt = xprt_from_sock(sk)))
@@ -1446,13 +1449,12 @@ static void xs_tcp_state_change(struct sock *sk)
1446 sock_flag(sk, SOCK_ZAPPED), 1449 sock_flag(sk, SOCK_ZAPPED),
1447 sk->sk_shutdown); 1450 sk->sk_shutdown);
1448 1451
1452 transport = container_of(xprt, struct sock_xprt, xprt);
1449 trace_rpc_socket_state_change(xprt, sk->sk_socket); 1453 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1450 switch (sk->sk_state) { 1454 switch (sk->sk_state) {
1451 case TCP_ESTABLISHED: 1455 case TCP_ESTABLISHED:
1452 spin_lock(&xprt->transport_lock); 1456 spin_lock(&xprt->transport_lock);
1453 if (!xprt_test_and_set_connected(xprt)) { 1457 if (!xprt_test_and_set_connected(xprt)) {
1454 struct sock_xprt *transport = container_of(xprt,
1455 struct sock_xprt, xprt);
1456 1458
1457 /* Reset TCP record info */ 1459 /* Reset TCP record info */
1458 transport->tcp_offset = 0; 1460 transport->tcp_offset = 0;
@@ -1461,6 +1463,8 @@ static void xs_tcp_state_change(struct sock *sk)
1461 transport->tcp_flags = 1463 transport->tcp_flags =
1462 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; 1464 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1463 xprt->connect_cookie++; 1465 xprt->connect_cookie++;
1466 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1467 xprt_clear_connecting(xprt);
1464 1468
1465 xprt_wake_pending_tasks(xprt, -EAGAIN); 1469 xprt_wake_pending_tasks(xprt, -EAGAIN);
1466 } 1470 }
@@ -1496,6 +1500,9 @@ static void xs_tcp_state_change(struct sock *sk)
1496 smp_mb__after_atomic(); 1500 smp_mb__after_atomic();
1497 break; 1501 break;
1498 case TCP_CLOSE: 1502 case TCP_CLOSE:
1503 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1504 &transport->sock_state))
1505 xprt_clear_connecting(xprt);
1499 xs_sock_mark_closed(xprt); 1506 xs_sock_mark_closed(xprt);
1500 } 1507 }
1501 out: 1508 out:
@@ -2179,6 +2186,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2179 /* Tell the socket layer to start connecting... */ 2186 /* Tell the socket layer to start connecting... */
2180 xprt->stat.connect_count++; 2187 xprt->stat.connect_count++;
2181 xprt->stat.connect_start = jiffies; 2188 xprt->stat.connect_start = jiffies;
2189 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2182 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2190 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2183 switch (ret) { 2191 switch (ret) {
2184 case 0: 2192 case 0:
@@ -2240,7 +2248,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2240 case -EINPROGRESS: 2248 case -EINPROGRESS:
2241 case -EALREADY: 2249 case -EALREADY:
2242 xprt_unlock_connect(xprt, transport); 2250 xprt_unlock_connect(xprt, transport);
2243 xprt_clear_connecting(xprt);
2244 return; 2251 return;
2245 case -EINVAL: 2252 case -EINVAL:
2246 /* Happens, for instance, if the user specified a link 2253 /* Happens, for instance, if the user specified a link
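
The new XPRT_SOCK_CONNECTING bit is set just before kernel_connect() and consumed exactly once via test_and_clear_bit(), in whichever state-change callback fires first (TCP_ESTABLISHED or TCP_CLOSE), so xprt_clear_connecting() runs once per connect attempt. The once-only consumption idiom, with hypothetical names:

#include <linux/bitops.h>

#define SOCK_CONNECTING 0       /* hypothetical bit, cf. XPRT_SOCK_CONNECTING */

static unsigned long sock_state;

static void connect_done(int established)
{
        /* hypothetical completion hook, cf. xprt_clear_connecting() */
        (void)established;
}

static void on_tcp_state_change(int established)
{
        /* Atomically consume the pending-connect flag: only the
         * first callback to see it set runs the completion. */
        if (test_and_clear_bit(SOCK_CONNECTING, &sock_state))
                connect_done(established);
}
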
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 562c926a51cc..c5ac436235e0 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -539,6 +539,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
539 *err = -TIPC_ERR_NO_NAME; 539 *err = -TIPC_ERR_NO_NAME;
540 if (skb_linearize(skb)) 540 if (skb_linearize(skb))
541 return false; 541 return false;
542 msg = buf_msg(skb);
542 if (msg_reroute_cnt(msg)) 543 if (msg_reroute_cnt(msg))
543 return false; 544 return false;
544 dnode = addr_domain(net, msg_lookup_scope(msg)); 545 dnode = addr_domain(net, msg_lookup_scope(msg));
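
The one-line tipc fix refreshes msg with buf_msg(skb) after skb_linearize(), because linearizing can reallocate the skb data area and invalidate any pointer previously derived from skb->data. The general rule, re-derive data pointers after any call that may reallocate, in a minimal sketch with a hypothetical header type:

#include <linux/skbuff.h>

static int handle(struct sk_buff *skb)
{
        struct my_hdr {         /* hypothetical header layout */
                __be32 word0;
        } *hdr;

        if (skb_linearize(skb))
                return -ENOMEM;

        /* skb->head may have just been reallocated; derive any data
         * pointer only after skb_linearize(), never before. */
        hdr = (struct my_hdr *)skb->data;
        (void)hdr;
        return 0;
}
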
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a3b0bd..e0406211716b 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,6 +9,14 @@ menuconfig SND_ARM
9 Drivers that are implemented on ASoC can be found in 9 Drivers that are implemented on ASoC can be found in
10 "ALSA for SoC audio support" section. 10 "ALSA for SoC audio support" section.
11 11
12config SND_PXA2XX_LIB
13 tristate
14 select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
15 select SND_DMAENGINE_PCM
16
17config SND_PXA2XX_LIB_AC97
18 bool
19
12if SND_ARM 20if SND_ARM
13 21
14config SND_ARMAACI 22config SND_ARMAACI
@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
21 tristate 29 tristate
22 select SND_PCM 30 select SND_PCM
23 31
24config SND_PXA2XX_LIB
25 tristate
26 select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
27
28config SND_PXA2XX_LIB_AC97
29 bool
30
31config SND_PXA2XX_AC97 32config SND_PXA2XX_AC97
32 tristate "AC97 driver for the Intel PXA2xx chip" 33 tristate "AC97 driver for the Intel PXA2xx chip"
33 depends on ARCH_PXA 34 depends on ARCH_PXA
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 477742cb70a2..58c0aad37284 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -73,6 +73,7 @@ struct hda_tegra {
73 struct clk *hda2codec_2x_clk; 73 struct clk *hda2codec_2x_clk;
74 struct clk *hda2hdmi_clk; 74 struct clk *hda2hdmi_clk;
75 void __iomem *regs; 75 void __iomem *regs;
76 struct work_struct probe_work;
76}; 77};
77 78
78#ifdef CONFIG_PM 79#ifdef CONFIG_PM
@@ -294,7 +295,9 @@ static int hda_tegra_dev_disconnect(struct snd_device *device)
294static int hda_tegra_dev_free(struct snd_device *device) 295static int hda_tegra_dev_free(struct snd_device *device)
295{ 296{
296 struct azx *chip = device->device_data; 297 struct azx *chip = device->device_data;
298 struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
297 299
300 cancel_work_sync(&hda->probe_work);
298 if (azx_bus(chip)->chip_init) { 301 if (azx_bus(chip)->chip_init) {
299 azx_stop_all_streams(chip); 302 azx_stop_all_streams(chip);
300 azx_stop_chip(chip); 303 azx_stop_chip(chip);
@@ -426,6 +429,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
 /*
  * constructor
  */
+
+static void hda_tegra_probe_work(struct work_struct *work);
+
 static int hda_tegra_create(struct snd_card *card,
 			    unsigned int driver_caps,
 			    struct hda_tegra *hda)
@@ -452,6 +458,8 @@ static int hda_tegra_create(struct snd_card *card,
 	chip->single_cmd = false;
 	chip->snoop = true;
 
+	INIT_WORK(&hda->probe_work, hda_tegra_probe_work);
+
 	err = azx_bus_init(chip, NULL, &hda_tegra_io_ops);
 	if (err < 0)
 		return err;
@@ -499,6 +507,21 @@ static int hda_tegra_probe(struct platform_device *pdev)
 	card->private_data = chip;
 
 	dev_set_drvdata(&pdev->dev, card);
+	schedule_work(&hda->probe_work);
+
+	return 0;
+
+out_free:
+	snd_card_free(card);
+	return err;
+}
+
+static void hda_tegra_probe_work(struct work_struct *work)
+{
+	struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work);
+	struct azx *chip = &hda->chip;
+	struct platform_device *pdev = to_platform_device(hda->dev);
+	int err;
 
 	err = hda_tegra_first_init(chip, pdev);
 	if (err < 0)
@@ -520,11 +543,8 @@ static int hda_tegra_probe(struct platform_device *pdev)
 	chip->running = 1;
 	snd_hda_set_power_save(&chip->bus, power_save * 1000);
 
-	return 0;
-
 out_free:
-	snd_card_free(card);
-	return err;
+	return; /* no error return from async probe */
 }
 
 static int hda_tegra_remove(struct platform_device *pdev)
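
Note on the hda_tegra hunks above: this is the usual deferred-probe-by-workqueue pattern. The slow tail of probe() runs from a work item, dev_free() synchronizes against it with cancel_work_sync(), and the handlers recover their context via container_of() on the embedded work_struct. A runnable userspace sketch of just that recovery idiom (demo_dev/demo_probe_work are my names, not the driver's):

	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };

	struct demo_dev {
		int id;
		struct work_struct probe_work;
	};

	static void demo_probe_work(struct work_struct *work)
	{
		/* Map the embedded member back to its enclosing structure. */
		struct demo_dev *dev = container_of(work, struct demo_dev,
						    probe_work);

		printf("async probe for dev %d\n", dev->id);
	}

	int main(void)
	{
		struct demo_dev dev = { .id = 42 };

		/* The kernel would INIT_WORK() + schedule_work(); call directly. */
		demo_probe_work(&dev.probe_work);
		return 0;
	}
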
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a75b5611d1e4..afec6dc9f91f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4188,6 +4188,24 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec,
 	}
 }
 
+/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
+static void alc_fixup_tpt440_dock(struct hda_codec *codec,
+				  const struct hda_fixup *fix, int action)
+{
+	static const struct hda_pintbl pincfgs[] = {
+		{ 0x16, 0x21211010 }, /* dock headphone */
+		{ 0x19, 0x21a11010 }, /* dock mic */
+		{ }
+	};
+	struct alc_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+		codec->power_save_node = 0; /* avoid click noises */
+		snd_hda_apply_pincfgs(codec, pincfgs);
+	}
+}
+
 static void alc_shutup_dell_xps13(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -4562,7 +4580,6 @@ enum {
 	ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
 	ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 	ALC292_FIXUP_TPT440_DOCK,
-	ALC292_FIXUP_TPT440_DOCK2,
 	ALC283_FIXUP_BXBT2807_MIC,
 	ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
 	ALC282_FIXUP_ASPIRE_V5_PINS,
@@ -5029,17 +5046,7 @@ static const struct hda_fixup alc269_fixups[] = {
 	},
 	[ALC292_FIXUP_TPT440_DOCK] = {
 		.type = HDA_FIXUP_FUNC,
-		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
-		.chained = true,
-		.chain_id = ALC292_FIXUP_TPT440_DOCK2
-	},
-	[ALC292_FIXUP_TPT440_DOCK2] = {
-		.type = HDA_FIXUP_PINS,
-		.v.pins = (const struct hda_pintbl[]) {
-			{ 0x16, 0x21211010 }, /* dock headphone */
-			{ 0x19, 0x21a11010 }, /* dock mic */
-			{ }
-		},
+		.v.func = alc_fixup_tpt440_dock,
 		.chained = true,
 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
 	},
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index 38e853add96e..0bf9d62b91a0 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -296,7 +296,6 @@ static int au1xpsc_i2s_drvprobe(struct platform_device *pdev)
 {
 	struct resource *iores, *dmares;
 	unsigned long sel;
-	int ret;
 	struct au1xpsc_audio_data *wd;
 
 	wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data),
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 4972bf3efa91..268a28bd1df4 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -732,14 +732,14 @@ static const struct snd_kcontrol_new rt5645_mono_adc_r_mix[] = {
 static const struct snd_kcontrol_new rt5645_dac_l_mix[] = {
 	SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_ADCMIX_L_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_DAC1_L_SFT, 1, 1),
 };
 
 static const struct snd_kcontrol_new rt5645_dac_r_mix[] = {
 	SOC_DAPM_SINGLE("Stereo ADC Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_ADCMIX_R_SFT, 1, 1),
-	SOC_DAPM_SINGLE("DAC1 Switch", RT5645_AD_DA_MIXER,
+	SOC_DAPM_SINGLE_AUTODISABLE("DAC1 Switch", RT5645_AD_DA_MIXER,
 			RT5645_M_DAC1_R_SFT, 1, 1),
 };
 
@@ -1381,7 +1381,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
 			regmap_write(rt5645->regmap, RT5645_PR_BASE +
 				RT5645_MAMP_INT_REG2, 0xfc00);
 			snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
-			mdelay(5);
+			msleep(40);
 			rt5645->hp_on = true;
 		} else {
 			/* depop parameters */
@@ -2829,13 +2829,12 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 		snd_soc_dapm_sync(dapm);
 		rt5645->jack_type = SND_JACK_HEADPHONE;
 	}
-
-	snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
-	snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
-	snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001);
 	} else { /* jack out */
 		rt5645->jack_type = 0;
 
+		regmap_update_bits(rt5645->regmap, RT5645_HP_VOL,
+			RT5645_L_MUTE | RT5645_R_MUTE,
+			RT5645_L_MUTE | RT5645_R_MUTE);
 		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
 			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
 		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
@@ -2880,8 +2879,6 @@ int rt5645_set_jack_detect(struct snd_soc_codec *codec,
 		rt5645->en_button_func = true;
 		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
 				RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
-		regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
-				RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
 		regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
 				RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
 	}
@@ -3205,6 +3202,13 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
 		},
 	},
+	{
+		.ident = "Google Ultima",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Ultima"),
+		},
+	},
 	{ }
 };
 
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index f2c6ad4b8fde..581ec1502228 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -577,7 +577,6 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 	struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
 	unsigned long flags;
 	int ret;
-	const struct firmware *fw;
 	struct spi_message m;
 	struct spi_transfer t;
 	struct dfw_pllrec pll_rec;
@@ -623,14 +622,6 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 	wm0010->state = WM0010_OUT_OF_RESET;
 	spin_unlock_irqrestore(&wm0010->irq_lock, flags);
 
-	/* First the bootloader */
-	ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
-	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
-			ret);
-		goto abort;
-	}
-
 	if (!wait_for_completion_timeout(&wm0010->boot_completion,
 					 msecs_to_jiffies(20)))
 		dev_err(codec->dev, "Failed to get interrupt from DSP\n");
@@ -673,7 +664,7 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 
 	img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
 	if (!img_swap)
-		goto abort;
+		goto abort_out;
 
 	/* We need to re-order for 0010 */
 	byte_swap_64((u64 *)&pll_rec, img_swap, len);
@@ -688,16 +679,16 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 	spi_message_add_tail(&t, &m);
 
 	ret = spi_sync(spi, &m);
-	if (ret != 0) {
+	if (ret) {
 		dev_err(codec->dev, "First PLL write failed: %d\n", ret);
-		goto abort;
+		goto abort_swap;
 	}
 
 	/* Use a second send of the message to get the return status */
 	ret = spi_sync(spi, &m);
-	if (ret != 0) {
+	if (ret) {
 		dev_err(codec->dev, "Second PLL write failed: %d\n", ret);
-		goto abort;
+		goto abort_swap;
 	}
 
 	p = (u32 *)out;
@@ -730,6 +721,10 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 
 	return 0;
 
+abort_swap:
+	kfree(img_swap);
+abort_out:
+	kfree(out);
 abort:
 	/* Put the chip back into reset */
 	wm0010_halt(codec);
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index e3b7d0c57411..dbd88408861a 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -211,28 +211,38 @@ static int wm8960_put_deemph(struct snd_kcontrol *kcontrol,
 	return wm8960_set_deemph(codec);
 }
 
-static const DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 50, 0);
-static const DECLARE_TLV_DB_SCALE(dac_tlv, -12700, 50, 1);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1725, 75, 0);
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -2100, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
-static const DECLARE_TLV_DB_SCALE(boost_tlv, -1200, 300, 1);
+static const DECLARE_TLV_DB_SCALE(lineinboost_tlv, -1500, 300, 1);
+static const unsigned int micboost_tlv[] = {
+	TLV_DB_RANGE_HEAD(2),
+	0, 1, TLV_DB_SCALE_ITEM(0, 1300, 0),
+	2, 3, TLV_DB_SCALE_ITEM(2000, 900, 0),
+};
 
 static const struct snd_kcontrol_new wm8960_snd_controls[] = {
 SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL,
-		 0, 63, 0, adc_tlv),
+		 0, 63, 0, inpga_tlv),
 SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
 	6, 1, 0),
 SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
 	7, 1, 0),
 
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
-	       WM8960_INBMIX1, 4, 7, 0, boost_tlv),
+	       WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
-	       WM8960_INBMIX1, 1, 7, 0, boost_tlv),
+	       WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
-	       WM8960_INBMIX2, 4, 7, 0, boost_tlv),
+	       WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
-	       WM8960_INBMIX2, 1, 7, 0, boost_tlv),
+	       WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv),
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume",
+		WM8960_RINPATH, 4, 3, 0, micboost_tlv),
+SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT1 Volume",
+		WM8960_LINPATH, 4, 3, 0, micboost_tlv),
 
 SOC_DOUBLE_R_TLV("Playback Volume", WM8960_LDAC, WM8960_RDAC,
 		 0, 255, 0, dac_tlv),
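
A note on the wm8960 TLV numbers above: DECLARE_TLV_DB_SCALE(name, min, step, mute) works in 0.01 dB units, min being the level at register value 0 and a nonzero mute flag marking register value 0 as mute. So (-9750, 50, 1) puts register 1 at -97.00 dB in 0.5 dB steps, and (-1725, 75, 0) spans -17.25 dB to +30.00 dB across the input PGA's 0..63 range. A quick userspace check of that arithmetic (a sketch, not driver code):

	#include <stdio.h>

	/* The arithmetic the TLV macros describe: values in 0.01 dB. */
	static int tlv_db(int min_cdb, int step_cdb, int regval)
	{
		return min_cdb + regval * step_cdb;
	}

	int main(void)
	{
		/* adc_tlv: DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1) */
		printf("ADC reg 1  -> %.2f dB\n", tlv_db(-9750, 50, 1) / 100.0);
		/* inpga_tlv: DECLARE_TLV_DB_SCALE(inpga_tlv, -1725, 75, 0) */
		printf("PGA reg 63 -> %.2f dB\n", tlv_db(-1725, 75, 63) / 100.0);
		return 0;
	}
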
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index b4eb975da981..293e47a6ff59 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2944,7 +2944,8 @@ static int wm8962_mute(struct snd_soc_dai *dai, int mute)
 			    WM8962_DAC_MUTE, val);
 }
 
-#define WM8962_RATES SNDRV_PCM_RATE_8000_96000
+#define WM8962_RATES (SNDRV_PCM_RATE_8000_48000 |\
+		      SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
 
 #define WM8962_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index add6bb99661d..7d45d98a861f 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -663,7 +663,7 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
 	u8 rx_ser = 0;
 	u8 slots = mcasp->tdm_slots;
 	u8 max_active_serializers = (channels + slots - 1) / slots;
-	int active_serializers, numevt, n;
+	int active_serializers, numevt;
 	u32 reg;
 	/* Default configuration */
 	if (mcasp->version < MCASP_VERSION_3)
@@ -745,9 +745,8 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
 	 * The number of words for numevt need to be in steps of active
 	 * serializers.
 	 */
-	n = numevt % active_serializers;
-	if (n)
-		numevt += (active_serializers - n);
+	numevt = (numevt / active_serializers) * active_serializers;
+
 	while (period_words % numevt && numevt > 0)
 		numevt -= active_serializers;
 	if (numevt <= 0)
@@ -1299,6 +1298,7 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
 		.ops = &davinci_mcasp_dai_ops,
 
 		.symmetric_samplebits = 1,
+		.symmetric_rates = 1,
 	},
 	{
 		.name = "davinci-mcasp.1",
@@ -1685,7 +1685,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq_byname(pdev, "common");
 	if (irq >= 0) {
-		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common\n",
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_common",
 					  dev_name(&pdev->dev));
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						davinci_mcasp_common_irq_handler,
@@ -1702,7 +1702,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq_byname(pdev, "rx");
 	if (irq >= 0) {
-		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n",
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx",
 					  dev_name(&pdev->dev));
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						davinci_mcasp_rx_irq_handler,
@@ -1717,7 +1717,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 
 	irq = platform_get_irq_byname(pdev, "tx");
 	if (irq >= 0) {
-		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx\n",
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx",
 					  dev_name(&pdev->dev));
 		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
 						davinci_mcasp_tx_irq_handler,
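
The numevt hunk above swaps a round-up for a round-down: the old remainder logic could push numevt one serializer-group above the requested FIFO event count, while integer divide-and-multiply truncates to the nearest lower multiple of active_serializers. A standalone illustration of the two results (the numbers are made up):

	#include <stdio.h>

	int main(void)
	{
		int numevt = 62, active_serializers = 4, n;

		/* Old behaviour: round up to the next multiple (overshoots). */
		n = numevt % active_serializers;
		if (n)
			printf("round up:   %d\n",
			       numevt + (active_serializers - n));	/* 64 */

		/* New behaviour: round down to a multiple. */
		printf("round down: %d\n",
		       (numevt / active_serializers) * active_serializers); /* 60 */
		return 0;
	}
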
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 5aeb6ed4827e..96f55ae75c71 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -488,7 +488,8 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else {
 		dev_err(&pdev->dev, "unknown Device Tree compatible\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto asrc_fail;
 	}
 
 	/* Common settings for corresponding Freescale CPU DAI driver */
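
The fsl-asoc-card fix is the usual convert-a-return-into-a-goto cleanup: by this point in probe() the function has already taken references (that is what the asrc_fail label exists to drop), so an early return would leak them. The shape of the idiom, as a generic hedged sketch with invented names:

	#include <errno.h>
	#include <stdlib.h>

	static int demo_probe(int compatible_known)
	{
		int ret = 0;
		void *res = malloc(64);		/* acquired before the check */

		if (!res)
			return -ENOMEM;		/* nothing to undo yet */

		if (!compatible_known) {
			ret = -EINVAL;
			goto fail;		/* release res, don't leak it */
		}
		return 0;
	fail:
		free(res);
		return ret;
	}

	int main(void) { return demo_probe(0) ? 1 : 0; }
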
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 8ec6fb208ea0..37c5cd4d0e59 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -249,7 +249,8 @@ MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
 
 static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
 {
-	return !!(ssi_private->dai_fmt & SND_SOC_DAIFMT_AC97);
+	return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+		SND_SOC_DAIFMT_AC97;
 }
 
 static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
@@ -947,7 +948,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
 				CCSR_SSI_SCR_TCH_EN);
 	}
 
-	if (fmt & SND_SOC_DAIFMT_AC97)
+	if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97)
 		fsl_ssi_setup_ac97(ssi_private);
 
 	return 0;
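
Both fsl_ssi hunks fix the same bug class: SND_SOC_DAIFMT_AC97 is an enumerated value inside SND_SOC_DAIFMT_FORMAT_MASK, not a dedicated flag bit, so `fmt & SND_SOC_DAIFMT_AC97` is also nonzero for unrelated formats that happen to share bits with it. An enumerated field must be masked out and compared. A runnable demonstration with illustrative values (not the real header's):

	#include <stdio.h>

	#define FMT_MASK   0x000f
	#define FMT_LEFT_J 0x0003
	#define FMT_AC97   0x0006	/* shares bit 1 with FMT_LEFT_J */

	int main(void)
	{
		unsigned int fmt = FMT_LEFT_J;

		/* Buggy flag-style test: true even though fmt is not AC97. */
		printf("flag test:   %d\n", !!(fmt & FMT_AC97));	/* 1 */
		/* Correct: isolate the field, then compare values. */
		printf("masked test: %d\n", (fmt & FMT_MASK) == FMT_AC97); /* 0 */
		return 0;
	}
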
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f6efa9d4acad..b27f25f70730 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -302,6 +302,10 @@ struct sst_hsw {
 	struct sst_hsw_ipc_dx_reply dx;
 	void *dx_context;
 	dma_addr_t dx_context_paddr;
+	enum sst_hsw_device_id dx_dev;
+	enum sst_hsw_device_mclk dx_mclk;
+	enum sst_hsw_device_mode dx_mode;
+	u32 dx_clock_divider;
 
 	/* boot */
 	wait_queue_head_t boot_wait;
@@ -1400,10 +1404,10 @@ int sst_hsw_device_set_config(struct sst_hsw *hsw,
 
 	trace_ipc_request("set device config", dev);
 
-	config.ssp_interface = dev;
-	config.clock_frequency = mclk;
-	config.mode = mode;
-	config.clock_divider = clock_divider;
+	hsw->dx_dev = config.ssp_interface = dev;
+	hsw->dx_mclk = config.clock_frequency = mclk;
+	hsw->dx_mode = config.mode = mode;
+	hsw->dx_clock_divider = config.clock_divider = clock_divider;
 	if (mode == SST_HSW_DEVICE_TDM_CLOCK_MASTER)
 		config.channels = 4;
 	else
@@ -1704,10 +1708,10 @@ int sst_hsw_dsp_runtime_resume(struct sst_hsw *hsw)
 		return -EIO;
 	}
 
-	/* Set ADSP SSP port settings */
-	ret = sst_hsw_device_set_config(hsw, SST_HSW_DEVICE_SSP_0,
-					SST_HSW_DEVICE_MCLK_FREQ_24_MHZ,
-					SST_HSW_DEVICE_CLOCK_MASTER, 9);
+	/* Set ADSP SSP port settings - sadly the FW does not store SSP port
+	   settings as part of the PM context. */
+	ret = sst_hsw_device_set_config(hsw, hsw->dx_dev, hsw->dx_mclk,
+					hsw->dx_mode, hsw->dx_clock_divider);
 	if (ret < 0)
 		dev_err(dev, "error: SSP re-initialization failed\n");
 
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index d190fe017559..f5baf3c38863 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -549,6 +549,23 @@ static int mtk_afe_dais_startup(struct snd_pcm_substream *substream,
 	memif->substream = substream;
 
 	snd_soc_set_runtime_hwparams(substream, &mtk_afe_hardware);
+
+	/*
+	 * Capture cannot use ping-pong buffer since hw_ptr at IRQ may be
+	 * smaller than period_size due to AFE's internal buffer.
+	 * This easily leads to overrun when avail_min is period_size.
+	 * One more period can hold the possible unread buffer.
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+						   SNDRV_PCM_HW_PARAM_PERIODS,
+						   3,
+						   mtk_afe_hardware.periods_max);
+		if (ret < 0) {
+			dev_err(afe->dev, "hw_constraint_minmax failed\n");
+			return ret;
+		}
+	}
 	ret = snd_pcm_hw_constraint_integer(runtime,
 					    SNDRV_PCM_HW_PARAM_PERIODS);
 	if (ret < 0)
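
The mtk-afe constraint raises the minimum capture period count from 2 (ping-pong) to 3 for the reason given in the in-code comment: the AFE can hold back part of a period internally, so at the period IRQ hw_ptr may trail by some frames, and with avail_min equal to period_size that deficit becomes an overrun. A toy arithmetic illustration of the argument, with made-up frame counts:

	#include <stdio.h>

	int main(void)
	{
		unsigned int period = 1024;	/* frames per period */
		unsigned int lag = 128;		/* frames the AFE may hold back */
		/* Worst case unread data when the reader wakes one period
		 * late: two full periods plus the lagging fragment. */
		unsigned int in_flight = 2 * period + lag;

		printf("2 periods: %u > %u  -> overrun\n", in_flight, 2 * period);
		printf("3 periods: %u <= %u -> safe\n", in_flight, 3 * period);
		return 0;
	}
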
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 39cea80846c3..f2bf8661dd21 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,7 +1,6 @@
 config SND_PXA2XX_SOC
 	tristate "SoC Audio for the Intel PXA2xx chip"
 	depends on ARCH_PXA
-	select SND_ARM
 	select SND_PXA2XX_LIB
 	help
 	  Say Y or M if you want to add support for codecs attached to
@@ -25,7 +24,6 @@ config SND_PXA2XX_AC97
 config SND_PXA2XX_SOC_AC97
 	tristate
 	select AC97_BUS
-	select SND_ARM
 	select SND_PXA2XX_LIB_AC97
 	select SND_SOC_AC97_BUS
 
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 1f6054650991..9e4b04e0fbd1 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
 	.reset	= pxa2xx_ac97_cold_reset,
 };
 
-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
+static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
 	.addr		= __PREG(PCDR),
 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
 	.filter_data	= &pxa2xx_ac97_pcm_stereo_in_req,
 };
 
-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
+static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
 	.addr		= __PREG(PCDR),
 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f4bf21a5539b..ff8bda471b25 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3501,7 +3501,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
 
 	default:
 		WARN(1, "Unknown event %d\n", event);
-		return -EINVAL;
+		ret = -EINVAL;
 	}
 
 out:
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index 362c69ac1d6c..53dd085d3ee2 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -101,6 +101,15 @@ static struct snd_soc_codec_driver dummy_codec;
 			SNDRV_PCM_FMTBIT_S32_LE | \
 			SNDRV_PCM_FMTBIT_U32_LE | \
 			SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+/*
+ * The dummy CODEC is only meant to be used in situations where there is no
+ * actual hardware.
+ *
+ * If there is actual hardware even if it does not have a control bus
+ * the hardware will still have constraints like supported samplerates, etc.
+ * which should be modelled. And the data flow graph also should be modelled
+ * using DAPM.
+ */
 static struct snd_soc_dai_driver dummy_dai = {
 	.name = "snd-soc-dummy-dai",
 	.playback = {
diff --git a/sound/soc/spear/Kconfig b/sound/soc/spear/Kconfig
index 0a53053495f3..4fb91412ebec 100644
--- a/sound/soc/spear/Kconfig
+++ b/sound/soc/spear/Kconfig
@@ -1,6 +1,6 @@
 config SND_SPEAR_SOC
 	tristate
-	select SND_DMAENGINE_PCM
+	select SND_SOC_GENERIC_DMAENGINE_PCM
 
 config SND_SPEAR_SPDIF_OUT
 	tristate
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index f6eefe1b8f8f..843f037a317d 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -989,8 +989,8 @@ static int uni_player_parse_dt(struct platform_device *pdev,
 	if (!info)
 		return -ENOMEM;
 
-	of_property_read_u32(pnode, "version", &player->ver);
-	if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
+	if (of_property_read_u32(pnode, "version", &player->ver) ||
+	    player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
 		dev_err(dev, "Unknown uniperipheral version ");
 		return -EINVAL;
 	}
@@ -998,10 +998,16 @@ static int uni_player_parse_dt(struct platform_device *pdev,
 	if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
 		info->underflow_enabled = 1;
 
-	of_property_read_u32(pnode, "uniperiph-id", &info->id);
+	if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) {
+		dev_err(dev, "uniperipheral id not defined");
+		return -EINVAL;
+	}
 
 	/* Read the device mode property */
-	of_property_read_string(pnode, "mode", &mode);
+	if (of_property_read_string(pnode, "mode", &mode)) {
+		dev_err(dev, "uniperipheral mode not defined");
+		return -EINVAL;
+	}
 
 	if (strcasecmp(mode, "hdmi") == 0)
 		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index c502626f339b..f791239a3087 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -316,7 +316,11 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
 	if (!info)
 		return -ENOMEM;
 
-	of_property_read_u32(node, "version", &reader->ver);
+	if (of_property_read_u32(node, "version", &reader->ver) ||
+	    reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
+		dev_err(&pdev->dev, "Unknown uniperipheral version ");
+		return -EINVAL;
+	}
 
 	/* Save the info structure */
 	reader->info = info;
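
The uniperif changes in both files fix unchecked of_property_read_u32()/of_property_read_string() calls: these helpers return 0 on success and a negative errno when the property is missing, and they leave the output variable untouched on failure, so ignoring the return means consuming a stale or uninitialized value. A runnable stand-in showing the hazard and the checked pattern:

	#include <stdio.h>

	/* Stand-in for of_property_read_u32(): 0 on success, negative errno
	 * when the property is absent; *out is not written on failure. */
	static int read_u32(int present, unsigned int *out)
	{
		if (!present)
			return -22;		/* -EINVAL */
		*out = 5;
		return 0;
	}

	int main(void)
	{
		unsigned int ver = 0xdeadbeef;	/* stale, like uninit'd stack */

		read_u32(0, &ver);		/* unchecked: ver stays stale */
		printf("unchecked: ver = 0x%x\n", ver);

		if (read_u32(0, &ver)) {	/* checked, as the patch does */
			printf("checked: missing property, bail out\n");
			return 1;
		}
		return 0;
	}
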
diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile
index 877a50355d7f..a1a97085847d 100644
--- a/tools/testing/selftests/membarrier/Makefile
+++ b/tools/testing/selftests/membarrier/Makefile
@@ -1,11 +1,10 @@
 CFLAGS += -g -I../../../../usr/include/
 
-all:
-	$(CC) $(CFLAGS) membarrier_test.c -o membarrier_test
-
 TEST_PROGS := membarrier_test
 
+all: $(TEST_PROGS)
+
 include ../lib.mk
 
 clean:
-	$(RM) membarrier_test
+	$(RM) $(TEST_PROGS)
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c
index dde312508007..535f0fef4d0b 100644
--- a/tools/testing/selftests/membarrier/membarrier_test.c
+++ b/tools/testing/selftests/membarrier/membarrier_test.c
@@ -1,9 +1,6 @@
 #define _GNU_SOURCE
-#define __EXPORTED_HEADERS__
-
 #include <linux/membarrier.h>
-#include <asm-generic/unistd.h>
-#include <sys/syscall.h>
+#include <syscall.h>
 #include <stdio.h>
 #include <errno.h>
 #include <string.h>
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index d36fab7d8ebd..3c53cac15de1 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,6 +1,6 @@
 # Makefile for vm selftests
 
-CFLAGS = -Wall
+CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
 BINARIES = compaction_test
 BINARIES += hugepage-mmap
 BINARIES += hugepage-shm
@@ -12,8 +12,11 @@ BINARIES += userfaultfd
 all: $(BINARIES)
 %: %.c
 	$(CC) $(CFLAGS) -o $@ $^ -lrt
-userfaultfd: userfaultfd.c
-	$(CC) $(CFLAGS) -O2 -o $@ $^ -lpthread
+userfaultfd: userfaultfd.c ../../../../usr/include/linux/kernel.h
+	$(CC) $(CFLAGS) -O2 -o $@ $< -lpthread
+
+../../../../usr/include/linux/kernel.h:
+	make -C ../../../.. headers_install
 
 TEST_PROGS := run_vmtests
 TEST_FILES := $(BINARIES)
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 2c7cca6f26a4..d77ed41b2094 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -64,17 +64,9 @@
 #include <sys/syscall.h>
 #include <sys/ioctl.h>
 #include <pthread.h>
-#include "../../../../include/uapi/linux/userfaultfd.h"
+#include <linux/userfaultfd.h>
 
-#ifdef __x86_64__
-#define __NR_userfaultfd 323
-#elif defined(__i386__)
-#define __NR_userfaultfd 374
-#elif defined(__powewrpc__)
-#define __NR_userfaultfd 364
-#else
-#error "missing __NR_userfaultfd definition"
-#endif
+#ifdef __NR_userfaultfd
 
 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
 
@@ -430,7 +422,7 @@ static int userfaultfd_stress(void)
 	struct uffdio_register uffdio_register;
 	struct uffdio_api uffdio_api;
 	unsigned long cpu;
-	int uffd_flags;
+	int uffd_flags, err;
 	unsigned long userfaults[nr_cpus];
 
 	if (posix_memalign(&area, page_size, nr_pages * page_size)) {
@@ -473,6 +465,14 @@ static int userfaultfd_stress(void)
 		*area_mutex(area_src, nr) = (pthread_mutex_t)
 			PTHREAD_MUTEX_INITIALIZER;
 		count_verify[nr] = *area_count(area_src, nr) = 1;
+		/*
+		 * In the transition between 255 to 256, powerpc will
+		 * read out of order in my_bcmp and see both bytes as
+		 * zero, so leave a placeholder below always non-zero
+		 * after the count, to avoid my_bcmp to trigger false
+		 * positives.
+		 */
+		*(area_count(area_src, nr) + 1) = 1;
 	}
 
 	pipefd = malloc(sizeof(int) * nr_cpus * 2);
@@ -499,6 +499,7 @@ static int userfaultfd_stress(void)
 	pthread_attr_init(&attr);
 	pthread_attr_setstacksize(&attr, 16*1024*1024);
 
+	err = 0;
 	while (bounces--) {
 		unsigned long expected_ioctls;
 
@@ -579,20 +580,13 @@ static int userfaultfd_stress(void)
 		/* verification */
 		if (bounces & BOUNCE_VERIFY) {
 			for (nr = 0; nr < nr_pages; nr++) {
-				if (my_bcmp(area_dst,
-					    area_dst + nr * page_size,
-					    sizeof(pthread_mutex_t))) {
-					fprintf(stderr,
-						"error mutex 2 %lu\n",
-						nr);
-					bounces = 0;
-				}
 				if (*area_count(area_dst, nr) != count_verify[nr]) {
 					fprintf(stderr,
 						"error area_count %Lu %Lu %lu\n",
 						*area_count(area_src, nr),
 						count_verify[nr],
 						nr);
+					err = 1;
 					bounces = 0;
 				}
 			}
@@ -609,7 +603,7 @@ static int userfaultfd_stress(void)
 		printf("\n");
 	}
 
-	return 0;
+	return err;
 }
 
 int main(int argc, char **argv)
@@ -618,8 +612,8 @@ int main(int argc, char **argv)
 		fprintf(stderr, "Usage: <MiB> <bounces>\n"), exit(1);
 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 	page_size = sysconf(_SC_PAGE_SIZE);
-	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) >
-	    page_size)
+	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
+	    > page_size)
 		fprintf(stderr, "Impossible to run this test\n"), exit(2);
 	nr_pages_per_cpu = atol(argv[1]) * 1024*1024 / page_size /
 		nr_cpus;
@@ -637,3 +631,15 @@ int main(int argc, char **argv)
 		nr_pages, nr_pages_per_cpu);
 	return userfaultfd_stress();
 }
+
+#else /* __NR_userfaultfd */
+
+#warning "missing __NR_userfaultfd definition"
+
+int main(void)
+{
+	printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
+	return 0;
+}
+
+#endif /* __NR_userfaultfd */
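
The userfaultfd selftest rework leans on the installed uapi headers: once `make headers_install` has run, <sys/syscall.h> provides the per-arch __NR_userfaultfd, so the hand-maintained number table (including its misspelled __powewrpc__ case) can go, and a skip stub is compiled where the number is absent. The guard idiom in isolation (a sketch; on pre-4.3 kernels the call itself will simply fail with ENOSYS):

	#include <stdio.h>
	#include <sys/syscall.h>	/* may define __NR_userfaultfd */
	#include <unistd.h>

	#ifdef __NR_userfaultfd
	int main(void)
	{
		/* Raw syscall, since libc had no wrapper at the time. */
		long fd = syscall(__NR_userfaultfd, 0);

		printf("userfaultfd() returned %ld\n", fd);
		return 0;
	}
	#else
	int main(void)
	{
		printf("skip: __NR_userfaultfd not defined here\n");
		return 0;
	}
	#endif
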
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 04146a2e1d81..8db1d9361993 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -66,8 +66,8 @@
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
-/* halt polling only reduces halt latency by 5-7 us, 500us is enough */
-static unsigned int halt_poll_ns = 500000;
+/* Architectures should define their poll value according to the halt latency */
+static unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
 
 /* Default doubles per-vcpu halt_poll_ns. */