aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2016-12-05 02:11:48 -0500
committerDave Airlie <airlied@redhat.com>2016-12-05 02:11:48 -0500
commitf03ee46be9401e3434f52bb15e92d1e640f76438 (patch)
treef0a1819bd3e44902578b80e1a03d1dde1c6099b8
parent0d5320fc194128a1a584a7e91a606cb3af2ded80 (diff)
parent3e5de27e940d00d8d504dfb96625fb654f641509 (diff)
Backmerge tag 'v4.9-rc8' into drm-next
Linux 4.9-rc8 Daniel requested this so we could apply some follow on fixes cleanly to -next.
-rw-r--r--CREDITS8
-rw-r--r--Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt (renamed from Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt)4
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt2
-rw-r--r--Documentation/i2c/i2c-topology4
-rw-r--r--Documentation/networking/dsa/dsa.txt3
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt7
-rw-r--r--Documentation/virtual/kvm/api.txt11
-rw-r--r--MAINTAINERS25
-rw-r--r--Makefile18
-rw-r--r--arch/arc/include/asm/delay.h9
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arc/mm/cache.c2
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts14
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi5
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-som.dtsi4
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi7
-rw-r--r--arch/arm/boot/dts/stih407-family.dtsi16
-rw-r--r--arch/arm/boot/dts/stih410-b2260.dts2
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-evb.dts (renamed from arch/arm/boot/dts/ntc-gr8-evb.dts)2
-rw-r--r--arch/arm/boot/dts/sun5i-gr8.dtsi (renamed from arch/arm/boot/dts/ntc-gr8.dtsi)0
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi4
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/armksyms.c183
-rw-r--r--arch/arm/kernel/entry-ftrace.S3
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/smccc-call.S3
-rw-r--r--arch/arm/kernel/traps.c20
-rw-r--r--arch/arm/kernel/vmlinux-xip.lds.S5
-rw-r--r--arch/arm/lib/ashldi3.S3
-rw-r--r--arch/arm/lib/ashrdi3.S3
-rw-r--r--arch/arm/lib/backtrace.S37
-rw-r--r--arch/arm/lib/bitops.h5
-rw-r--r--arch/arm/lib/bswapsdi2.S3
-rw-r--r--arch/arm/lib/clear_user.S4
-rw-r--r--arch/arm/lib/copy_from_user.S2
-rw-r--r--arch/arm/lib/copy_page.S2
-rw-r--r--arch/arm/lib/copy_to_user.S4
-rw-r--r--arch/arm/lib/csumipv6.S3
-rw-r--r--arch/arm/lib/csumpartial.S2
-rw-r--r--arch/arm/lib/csumpartialcopy.S1
-rw-r--r--arch/arm/lib/csumpartialcopygeneric.S2
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S1
-rw-r--r--arch/arm/lib/delay.c2
-rw-r--r--arch/arm/lib/div64.S2
-rw-r--r--arch/arm/lib/findbit.S9
-rw-r--r--arch/arm/lib/getuser.S9
-rw-r--r--arch/arm/lib/io-readsb.S2
-rw-r--r--arch/arm/lib/io-readsl.S2
-rw-r--r--arch/arm/lib/io-readsw-armv3.S3
-rw-r--r--arch/arm/lib/io-readsw-armv4.S2
-rw-r--r--arch/arm/lib/io-writesb.S2
-rw-r--r--arch/arm/lib/io-writesl.S2
-rw-r--r--arch/arm/lib/io-writesw-armv3.S2
-rw-r--r--arch/arm/lib/io-writesw-armv4.S2
-rw-r--r--arch/arm/lib/lib1funcs.S9
-rw-r--r--arch/arm/lib/lshrdi3.S3
-rw-r--r--arch/arm/lib/memchr.S2
-rw-r--r--arch/arm/lib/memcpy.S3
-rw-r--r--arch/arm/lib/memmove.S2
-rw-r--r--arch/arm/lib/memset.S3
-rw-r--r--arch/arm/lib/memzero.S2
-rw-r--r--arch/arm/lib/muldi3.S3
-rw-r--r--arch/arm/lib/putuser.S5
-rw-r--r--arch/arm/lib/strchr.S2
-rw-r--r--arch/arm/lib/strrchr.S2
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c3
-rw-r--r--arch/arm/lib/ucmpdi2.S3
-rw-r--r--arch/arm/mach-imx/Makefile1
-rw-r--r--arch/arm/mach-imx/ssi-fiq-ksym.c20
-rw-r--r--arch/arm/mach-imx/ssi-fiq.S7
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/id.c16
-rw-r--r--arch/arm/mach-omap2/prm3xxx.c3
-rw-r--r--arch/arm/mach-omap2/voltage.c6
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/proc-v7m.S2
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/juno-r1.dts2
-rw-r--r--arch/arm64/boot/dts/arm/juno-r2.dts2
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi6
-rw-r--r--arch/arm64/include/asm/perf_event.h10
-rw-r--r--arch/arm64/kernel/perf_event.c10
-rw-r--r--arch/arm64/kvm/sys_regs.c10
-rw-r--r--arch/mips/include/asm/mipsregs.h6
-rw-r--r--arch/mips/include/asm/tlb.h13
-rw-r--r--arch/mips/mm/fault.c9
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/mm/tlb-r4k.c6
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/kernel/cache.c31
-rw-r--r--arch/parisc/kernel/inventory.c8
-rw-r--r--arch/parisc/kernel/pacache.S37
-rw-r--r--arch/parisc/kernel/pci-dma.c2
-rw-r--r--arch/parisc/kernel/setup.c4
-rw-r--r--arch/parisc/kernel/time.c57
-rw-r--r--arch/powerpc/boot/main.c8
-rw-r--r--arch/powerpc/boot/opal-calls.S13
-rw-r--r--arch/powerpc/boot/opal.c11
-rw-r--r--arch/powerpc/boot/ops.h1
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h12
-rw-r--r--arch/powerpc/include/asm/exception-64s.h15
-rw-r--r--arch/powerpc/include/asm/mmu.h14
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/asm/reg.h1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S8
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S11
-rw-r--r--arch/powerpc/kernel/process.c42
-rw-r--r--arch/powerpc/kernel/setup_64.c20
-rw-r--r--arch/powerpc/mm/hash_utils_64.c12
-rw-r--r--arch/powerpc/mm/pgtable-radix.c4
-rw-r--r--arch/powerpc/mm/tlb-radix.c4
-rw-r--r--arch/sparc/Kconfig23
-rw-r--r--arch/sparc/include/asm/hypervisor.h343
-rw-r--r--arch/sparc/include/asm/iommu_64.h28
-rw-r--r--arch/sparc/kernel/hvapi.c1
-rw-r--r--arch/sparc/kernel/iommu.c8
-rw-r--r--arch/sparc/kernel/iommu_common.h1
-rw-r--r--arch/sparc/kernel/pci_sun4v.c418
-rw-r--r--arch/sparc/kernel/pci_sun4v.h21
-rw-r--r--arch/sparc/kernel/pci_sun4v_asm.S68
-rw-r--r--arch/sparc/kernel/signal_32.c4
-rw-r--r--arch/sparc/mm/init_64.c71
-rw-r--r--arch/tile/include/asm/cache.h3
-rw-r--r--arch/tile/kernel/time.c4
-rw-r--r--arch/x86/boot/compressed/Makefile5
-rw-r--r--arch/x86/boot/cpu.c6
-rw-r--r--arch/x86/events/amd/core.c8
-rw-r--r--arch/x86/events/core.c10
-rw-r--r--arch/x86/events/intel/ds.c35
-rw-r--r--arch/x86/events/intel/uncore.c8
-rw-r--r--arch/x86/events/intel/uncore_snb.c44
-rw-r--r--arch/x86/events/perf_event.h2
-rw-r--r--arch/x86/include/asm/intel-mid.h1
-rw-r--r--arch/x86/kernel/cpu/amd.c6
-rw-r--r--arch/x86/kernel/cpu/common.c32
-rw-r--r--arch/x86/kernel/dumpstack.c2
-rw-r--r--arch/x86/kernel/fpu/core.c16
-rw-r--r--arch/x86/kernel/head_32.S9
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c39
-rw-r--r--arch/x86/kernel/unwind_guess.c8
-rw-r--r--arch/x86/kvm/emulate.c36
-rw-r--r--arch/x86/kvm/ioapic.c2
-rw-r--r--arch/x86/kvm/ioapic.h4
-rw-r--r--arch/x86/kvm/irq_comm.c71
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/x86.c47
-rw-r--r--arch/x86/mm/extable.c7
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c80
-rw-r--r--arch/x86/platform/intel-mid/device_libs/Makefile2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_wdt.c)34
-rw-r--r--arch/x86/platform/intel-mid/pwr.c19
-rw-r--r--arch/x86/purgatory/Makefile1
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h9
-rw-r--r--arch/xtensa/kernel/time.c14
-rw-r--r--arch/xtensa/kernel/traps.c74
-rw-r--r--crypto/algif_hash.c17
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c1
-rw-r--r--crypto/scatterwalk.c4
-rw-r--r--drivers/acpi/acpica/tbfadt.c10
-rw-r--r--drivers/acpi/sleep.c29
-rw-r--r--drivers/ata/ahci.c7
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/block/zram/zram_drv.c3
-rw-r--r--drivers/char/ipmi/bt-bmc.c4
-rw-r--r--drivers/clk/bcm/Kconfig2
-rw-r--r--drivers/clk/berlin/bg2.c2
-rw-r--r--drivers/clk/berlin/bg2q.c2
-rw-r--r--drivers/clk/clk-efm32gg.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c14
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c2
-rw-r--r--drivers/crypto/caam/caamalg.c11
-rw-r--r--drivers/dax/dax.c4
-rw-r--r--drivers/dax/pmem.c4
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/cppi41.c31
-rw-r--r--drivers/dma/edma.c1
-rw-r--r--drivers/dma/sun6i-dma.c2
-rw-r--r--drivers/gpio/Kconfig4
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-tc3589x.c2
-rw-r--r--drivers/gpio/gpiolib.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c14
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c5
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c159
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c5
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c13
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c15
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c64
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c17
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c20
-rw-r--r--drivers/hid/hid-cp2112.c115
-rw-r--r--drivers/hid/hid-lg.c14
-rw-r--r--drivers/hid/hid-magicmouse.c12
-rw-r--r--drivers/hid/hid-rmi.c10
-rw-r--r--drivers/hid/hid-sensor-hub.c1
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c64
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c2
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.c4
-rw-r--r--drivers/i2c/busses/i2c-octeon-core.h27
-rw-r--r--drivers/i2c/muxes/Kconfig1
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c22
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c4
-rw-r--r--drivers/infiniband/core/addr.c11
-rw-r--r--drivers/infiniband/core/cm.c126
-rw-r--r--drivers/infiniband/core/cma.c21
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c20
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c72
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c27
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c37
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c19
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h89
-rw-r--r--drivers/infiniband/hw/hfi1/init.c104
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c3
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c13
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c19
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c25
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h60
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c2
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c5
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c11
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c6
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c12
-rw-r--r--drivers/infiniband/sw/rdmavt/dma.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c21
-rw-r--r--drivers/input/mouse/psmouse-base.c4
-rw-r--r--drivers/iommu/dmar.c4
-rw-r--r--drivers/iommu/intel-iommu.c13
-rw-r--r--drivers/iommu/intel-svm.c28
-rw-r--r--drivers/mailbox/pcc.c13
-rw-r--r--drivers/media/dvb-frontends/gp8psk-fe.c17
-rw-r--r--drivers/media/tuners/tuner-xc2028.c37
-rw-r--r--drivers/mfd/intel-lpss-pci.c31
-rw-r--r--drivers/mfd/intel-lpss.c3
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c6
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/syscon.c4
-rw-r--r--drivers/mfd/wm8994-core.c16
-rw-r--r--drivers/mmc/host/dw_mmc.c1
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c14
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c18
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_ucan.h37
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c104
-rw-r--r--drivers/net/dsa/b53/b53_common.c16
-rw-r--r--drivers/net/dsa/bcm_sf2.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c21
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c4
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c12
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c12
-rw-r--r--drivers/net/ethernet/arc/emac_main.c7
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c17
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h64
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c37
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c105
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c153
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c118
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h24
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c28
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c3
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c7
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c8
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c8
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c8
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c7
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c13
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c17
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c25
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c15
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-phy.c1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-sgmii.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c19
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c95
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c43
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c105
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h72
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c5
-rw-r--r--drivers/net/ethernet/sun/sunbmac.h2
-rw-r--r--drivers/net/ethernet/sun/sunqe.c11
-rw-r--r--drivers/net/ethernet/sun/sunqe.h4
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c87
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c20
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
-rw-r--r--drivers/net/geneve.c14
-rw-r--r--drivers/net/ieee802154/adf7242.c1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c17
-rw-r--r--drivers/net/irda/w83977af_ir.c4
-rw-r--r--drivers/net/macvlan.c34
-rw-r--r--drivers/net/macvtap.c19
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/micrel.c8
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/realtek.c20
-rw-r--r--drivers/net/phy/vitesse.c34
-rw-r--r--drivers/net/tun.c10
-rw-r--r--drivers/net/usb/asix_devices.c6
-rw-r--r--drivers/net/usb/ax88179_178a.c17
-rw-r--r--drivers/net/usb/cdc_ether.c38
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c21
-rw-r--r--drivers/net/virtio_net.c35
-rw-r--r--drivers/net/vxlan.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c13
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_intel.c15
-rw-r--r--drivers/ntb/ntb_transport.c2
-rw-r--r--drivers/ntb/test/ntb_perf.c8
-rw-r--r--drivers/ntb/test/ntb_pingpong.c2
-rw-r--r--drivers/nvme/host/pci.c18
-rw-r--r--drivers/nvme/host/rdma.c42
-rw-r--r--drivers/nvme/target/core.c10
-rw-r--r--drivers/nvme/target/rdma.c18
-rw-r--r--drivers/of/of_mdio.c21
-rw-r--r--drivers/pci/host/pcie-designware-plat.c2
-rw-r--r--drivers/pci/pci-mid.c6
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c14
-rw-r--r--drivers/pci/probe.c28
-rw-r--r--drivers/phy/phy-twl4030-usb.c4
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/pwm/sysfs.c2
-rw-r--r--drivers/rtc/rtc-asm9260.c1
-rw-r--r--drivers/rtc/rtc-cmos.c15
-rw-r--r--drivers/rtc/rtc-omap.c38
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c2
-rw-r--r--drivers/scsi/hpsa.c16
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c20
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c21
-rw-r--r--drivers/scsi/qlogicpti.h4
-rw-r--r--drivers/thermal/intel_powerclamp.c9
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/udc.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c8
-rw-r--r--drivers/usb/musb/musb_core.c147
-rw-r--r--drivers/usb/musb/musb_core.h13
-rw-r--r--drivers/usb/musb/musb_dsps.c58
-rw-r--r--drivers/usb/musb/musb_gadget.c39
-rw-r--r--drivers/usb/musb/omap2430.c10
-rw-r--r--drivers/usb/musb/tusb6010.c6
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/storage/transport.c7
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c4
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--fs/cifs/cifsencrypt.c11
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c25
-rw-r--r--fs/crypto/fname.c53
-rw-r--r--fs/crypto/keyinfo.c16
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/super.c17
-rw-r--r--fs/fuse/dir.c5
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h1
-rw-r--r--fs/fuse/inode.c3
-rw-r--r--fs/isofs/rock.c4
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/nfs4_fs.h7
-rw-r--r--fs/nfs/nfs4proc.c38
-rw-r--r--fs/nfs/nfs4state.c1
-rw-r--r--fs/orangefs/orangefs-debugfs.c2
-rw-r--r--fs/overlayfs/super.c6
-rw-r--r--fs/splice.c3
-rw-r--r--fs/xattr.c22
-rw-r--r--include/acpi/actbl.h164
-rw-r--r--include/acpi/platform/aclinux.h3
-rw-r--r--include/asm-generic/export.h1
-rw-r--r--include/linux/bpf_verifier.h5
-rw-r--r--include/linux/compiler-gcc.h4
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/netdevice.h17
-rw-r--r--include/linux/of_mdio.h4
-rw-r--r--include/linux/pagemap.h21
-rw-r--r--include/linux/pci.h14
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/gro_cells.h3
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_tunnel.h1
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h3
-rw-r--r--include/net/netfilter/nf_tables.h10
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/uapi/linux/atm_zatm.h1
-rw-r--r--include/uapi/linux/bpqether.h2
-rw-r--r--include/uapi/linux/input-event-codes.h2
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/tc_act/Kbuild1
-rw-r--r--init/do_mounts_rd.c2
-rw-r--r--kernel/bpf/hashtab.c3
-rw-r--r--kernel/bpf/syscall.c4
-rw-r--r--kernel/bpf/verifier.c80
-rw-r--r--kernel/events/core.c13
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/irq/manage.c4
-rw-r--r--kernel/locking/lockdep_internals.h20
-rw-r--r--kernel/module.c5
-rw-r--r--kernel/printk/printk.c11
-rw-r--r--kernel/sched/auto_group.c36
-rw-r--r--kernel/taskstats.c6
-rw-r--r--kernel/trace/ftrace.c24
-rw-r--r--lib/Kconfig.debug3
-rw-r--r--lib/debugobjects.c8
-rw-r--r--lib/iov_iter.c4
-rw-r--r--lib/mpi/mpi-pow.c7
-rw-r--r--lib/test_kasan.c29
-rw-r--r--mm/huge_memory.c9
-rw-r--r--mm/kasan/kasan.c19
-rw-r--r--mm/kasan/kasan.h4
-rw-r--r--mm/kasan/report.c3
-rw-r--r--mm/khugepaged.c2
-rw-r--r--mm/mlock.c7
-rw-r--r--mm/mremap.c34
-rw-r--r--mm/truncate.c8
-rw-r--r--mm/vmscan.c2
-rw-r--r--mm/workingset.c2
-rw-r--r--net/batman-adv/hard-interface.c1
-rw-r--r--net/batman-adv/tp_meter.c1
-rw-r--r--net/bluetooth/6lowpan.c4
-rw-r--r--net/bluetooth/hci_conn.c26
-rw-r--r--net/bluetooth/l2cap_core.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/can/bcm.c50
-rw-r--r--net/core/dev.c19
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/filter.c68
-rw-r--r--net/core/flow.c6
-rw-r--r--net/core/flow_dissector.c13
-rw-r--r--net/core/net_namespace.c2
-rw-r--r--net/core/rtnetlink.c29
-rw-r--r--net/core/sock.c10
-rw-r--r--net/dccp/ipv4.c28
-rw-r--r--net/dccp/ipv6.c19
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/dsa/dsa.c13
-rw-r--r--net/dsa/dsa2.c4
-rw-r--r--net/dsa/slave.c19
-rw-r--r--net/ipv4/Kconfig1
-rw-r--r--net/ipv4/af_inet.c11
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c20
-rw-r--r--net/ipv4/fib_trie.c90
-rw-r--r--net/ipv4/icmp.c4
-rw-r--r--net/ipv4/igmp.c50
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_output.c27
-rw-r--r--net/ipv4/ip_tunnel_core.c11
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter.c5
-rw-r--r--net/ipv4/netfilter/arp_tables.c4
-rw-r--r--net/ipv4/netfilter/nft_dup_ipv4.c6
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_cong.c4
-rw-r--r--net/ipv4/tcp_dctcp.c13
-rw-r--r--net/ipv4/tcp_ipv4.c19
-rw-r--r--net/ipv4/udp.c8
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/udplite.c2
-rw-r--r--net/ipv6/addrconf.c18
-rw-r--r--net/ipv6/datagram.c4
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6_tunnel.c14
-rw-r--r--net/ipv6/ip6_udp_tunnel.c3
-rw-r--r--net/ipv6/ip6_vti.c31
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c4
-rw-r--r--net/ipv6/netfilter/nf_defrag_ipv6_hooks.c2
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c1
-rw-r--r--net/ipv6/netfilter/nft_dup_ipv6.c6
-rw-r--r--net/ipv6/output_core.c2
-rw-r--r--net/ipv6/route.c4
-rw-r--r--net/ipv6/tcp_ipv6.c14
-rw-r--r--net/ipv6/udp.c8
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/l2tp/l2tp_ip.c66
-rw-r--r--net/l2tp/l2tp_ip6.c84
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/tx.c14
-rw-r--r--net/mac80211/vht.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c7
-rw-r--r--net/netfilter/nf_conntrack_core.c49
-rw-r--r--net/netfilter/nf_conntrack_helper.c11
-rw-r--r--net/netfilter/nf_conntrack_sip.c5
-rw-r--r--net/netfilter/nf_nat_core.c49
-rw-r--r--net/netfilter/nf_tables_api.c32
-rw-r--r--net/netfilter/nft_dynset.c19
-rw-r--r--net/netfilter/nft_hash.c7
-rw-r--r--net/netfilter/nft_range.c6
-rw-r--r--net/netfilter/nft_set_hash.c19
-rw-r--r--net/netfilter/nft_set_rbtree.c2
-rw-r--r--net/netfilter/xt_connmark.c4
-rw-r--r--net/netlink/af_netlink.c27
-rw-r--r--net/netlink/af_netlink.h2
-rw-r--r--net/netlink/diag.c5
-rw-r--r--net/netlink/genetlink.c4
-rw-r--r--net/openvswitch/conntrack.c5
-rw-r--r--net/packet/af_packet.c18
-rw-r--r--net/rds/tcp.c2
-rw-r--r--net/sched/act_pedit.c24
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_basic.c4
-rw-r--r--net/sched/cls_bpf.c4
-rw-r--r--net/sched/cls_cgroup.c7
-rw-r--r--net/sched/cls_flow.c1
-rw-r--r--net/sched/cls_flower.c41
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sched/cls_rsvp.h3
-rw-r--r--net/sched/cls_tcindex.c1
-rw-r--r--net/sctp/input.c35
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/socket.c27
-rw-r--r--net/socket.c17
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/svcsock.c21
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c6
-rw-r--r--net/tipc/bearer.c11
-rw-r--r--net/tipc/bearer.h13
-rw-r--r--net/tipc/link.c40
-rw-r--r--net/tipc/monitor.c10
-rw-r--r--net/tipc/socket.c50
-rw-r--r--net/tipc/udp_media.c5
-rw-r--r--net/unix/af_unix.c20
-rw-r--r--net/wireless/core.h1
-rw-r--r--net/wireless/scan.c69
-rw-r--r--net/wireless/util.c3
-rw-r--r--net/xfrm/xfrm_policy.c10
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--samples/bpf/Makefile4
-rw-r--r--samples/bpf/bpf_helpers.h2
-rw-r--r--samples/bpf/sampleip_kern.c2
-rwxr-xr-xsamples/bpf/tc_l2_redirect.sh173
-rw-r--r--samples/bpf/tc_l2_redirect_kern.c236
-rw-r--r--samples/bpf/tc_l2_redirect_user.c73
-rw-r--r--samples/bpf/trace_event_kern.c2
-rw-r--r--scripts/Makefile.build81
-rwxr-xr-xscripts/gcc-x86_64-has-stack-protector.sh2
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--security/apparmor/domain.c6
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/hda/thinkpad_helper.c3
-rw-r--r--sound/soc/qcom/lpass-platform.c1
-rw-r--r--sound/sparc/dbri.c26
-rw-r--r--sound/usb/card.c3
-rw-r--r--tools/perf/ui/browsers/hists.c48
-rw-r--r--tools/perf/util/hist.c12
-rw-r--r--tools/power/acpi/Makefile.config23
-rw-r--r--tools/power/acpi/Makefile.rules40
-rw-r--r--tools/power/acpi/tools/acpidbg/Makefile4
-rw-r--r--tools/power/acpi/tools/acpidbg/acpidbg.c8
-rw-r--r--tools/power/acpi/tools/acpidump/Makefile12
-rw-r--r--virt/kvm/arm/pmu.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c6
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c6
-rw-r--r--virt/kvm/async_pf.c13
-rw-r--r--virt/kvm/kvm_main.c2
698 files changed, 7644 insertions, 3614 deletions
diff --git a/CREDITS b/CREDITS
index 837367624e45..d7ebdfbc4d4f 100644
--- a/CREDITS
+++ b/CREDITS
@@ -9,7 +9,7 @@
9 Linus 9 Linus
10---------- 10----------
11 11
12M: Matt Mackal 12N: Matt Mackal
13E: mpm@selenic.com 13E: mpm@selenic.com
14D: SLOB slab allocator 14D: SLOB slab allocator
15 15
@@ -1910,7 +1910,7 @@ S: Ra'annana, Israel
1910 1910
1911N: Andi Kleen 1911N: Andi Kleen
1912E: andi@firstfloor.org 1912E: andi@firstfloor.org
1913U: http://www.halobates.de 1913W: http://www.halobates.de
1914D: network, x86, NUMA, various hacks 1914D: network, x86, NUMA, various hacks
1915S: Schwalbenstr. 96 1915S: Schwalbenstr. 96
1916S: 85551 Ottobrunn 1916S: 85551 Ottobrunn
@@ -2089,8 +2089,8 @@ D: ST Microelectronics SPEAr13xx PCI host bridge driver
2089D: Synopsys Designware PCI host bridge driver 2089D: Synopsys Designware PCI host bridge driver
2090 2090
2091N: Gabor Kuti 2091N: Gabor Kuti
2092M: seasons@falcon.sch.bme.hu 2092E: seasons@falcon.sch.bme.hu
2093M: seasons@makosteszta.sote.hu 2093E: seasons@makosteszta.sote.hu
2094D: Original author of software suspend 2094D: Original author of software suspend
2095 2095
2096N: Jaroslav Kysela 2096N: Jaroslav Kysela
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
index fbbacd958240..6f28969af9dc 100644
--- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt
@@ -6,7 +6,7 @@ perform in-band IPMI communication with their host.
6 6
7Required properties: 7Required properties:
8 8
9- compatible : should be "aspeed,ast2400-bt-bmc" 9- compatible : should be "aspeed,ast2400-ibt-bmc"
10- reg: physical address and size of the registers 10- reg: physical address and size of the registers
11 11
12Optional properties: 12Optional properties:
@@ -17,7 +17,7 @@ Optional properties:
17Example: 17Example:
18 18
19 ibt@1e789140 { 19 ibt@1e789140 {
20 compatible = "aspeed,ast2400-bt-bmc"; 20 compatible = "aspeed,ast2400-ibt-bmc";
21 reg = <0x1e789140 0x18>; 21 reg = <0x1e789140 0x18>;
22 interrupts = <8>; 22 interrupts = <8>;
23 }; 23 };
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index e1d76812419c..05150957ecfd 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -9,10 +9,26 @@ The following properties are common to the Ethernet controllers:
9- max-speed: number, specifies maximum speed in Mbit/s supported by the device; 9- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
10- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than 10- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
11 the maximum frame size (there's contradiction in ePAPR). 11 the maximum frame size (there's contradiction in ePAPR).
12- phy-mode: string, operation mode of the PHY interface; supported values are 12- phy-mode: string, operation mode of the PHY interface. This is now a de-facto
13 "mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id", 13 standard property; supported values are:
14 "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a 14 * "mii"
15 de-facto standard property; 15 * "gmii"
16 * "sgmii"
17 * "qsgmii"
18 * "tbi"
19 * "rev-mii"
20 * "rmii"
21 * "rgmii" (RX and TX delays are added by the MAC when required)
22 * "rgmii-id" (RGMII with internal RX and TX delays provided by the PHY, the
23 MAC should not add the RX or TX delays in this case)
24 * "rgmii-rxid" (RGMII with internal RX delay provided by the PHY, the MAC
25 should not add an RX delay in this case)
26 * "rgmii-txid" (RGMII with internal TX delay provided by the PHY, the MAC
27 should not add an TX delay in this case)
28 * "rtbi"
29 * "smii"
30 * "xgmii"
31 * "trgmii"
16- phy-connection-type: the same as "phy-mode" property but described in ePAPR; 32- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
17- phy-handle: phandle, specifies a reference to a node representing a PHY 33- phy-handle: phandle, specifies a reference to a node representing a PHY
18 device; this property is described in ePAPR and so preferred; 34 device; this property is described in ePAPR and so preferred;
diff --git a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
index fd40c852d7c7..462b04e8209f 100644
--- a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
+++ b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt
@@ -12,7 +12,7 @@ Required properties:
12 12
13Optional properties: 13Optional properties:
14- ti,dmic: phandle for the OMAP dmic node if the machine have it connected 14- ti,dmic: phandle for the OMAP dmic node if the machine have it connected
15- ti,jack_detection: Need to be present if the board capable to detect jack 15- ti,jack-detection: Need to be present if the board capable to detect jack
16 insertion, removal. 16 insertion, removal.
17 17
18Available audio endpoints for the audio-routing table: 18Available audio endpoints for the audio-routing table:
diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology
index e0aefeece551..1a014fede0b7 100644
--- a/Documentation/i2c/i2c-topology
+++ b/Documentation/i2c/i2c-topology
@@ -326,7 +326,7 @@ Two parent-locked sibling muxes
326 326
327This is a good topology. 327This is a good topology.
328 328
329 .--------. 329 .--------.
330 .----------. .--| dev D1 | 330 .----------. .--| dev D1 |
331 | parent- |--' '--------' 331 | parent- |--' '--------'
332 .--| locked | .--------. 332 .--| locked | .--------.
@@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes
350 350
351This is a good topology. 351This is a good topology.
352 352
353 .--------. 353 .--------.
354 .----------. .--| dev D1 | 354 .----------. .--| dev D1 |
355 | mux- |--' '--------' 355 | mux- |--' '--------'
356 .--| locked | .--------. 356 .--| locked | .--------.
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 6d6c07cf1a9a..63912ef34606 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and
67Switch tagging protocols 67Switch tagging protocols
68------------------------ 68------------------------
69 69
70DSA currently supports 4 different tagging protocols, and a tag-less mode as 70DSA currently supports 5 different tagging protocols, and a tag-less mode as
71well. The different protocols are implemented in: 71well. The different protocols are implemented in:
72 72
73net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) 73net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
74net/dsa/tag_dsa.c: Marvell's original DSA tag 74net/dsa/tag_dsa.c: Marvell's original DSA tag
75net/dsa/tag_edsa.c: Marvell's enhanced DSA tag 75net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
76net/dsa/tag_brcm.c: Broadcom's 4 bytes tag 76net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
77net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
77 78
78The exact format of the tag protocol is vendor specific, but in general, they 79The exact format of the tag protocol is vendor specific, but in general, they
79all contain something which: 80all contain something which:
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 399e4e866a9c..433b6724797a 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -62,10 +62,13 @@ nf_conntrack_generic_timeout - INTEGER (seconds)
62 protocols. 62 protocols.
63 63
64nf_conntrack_helper - BOOLEAN 64nf_conntrack_helper - BOOLEAN
65 0 - disabled 65 0 - disabled (default)
66 not 0 - enabled (default) 66 not 0 - enabled
67 67
68 Enable automatic conntrack helper assignment. 68 Enable automatic conntrack helper assignment.
69 If disabled it is required to set up iptables rules to assign
70 helpers to connections. See the CT target description in the
71 iptables-extensions(8) man page for further information.
69 72
70nf_conntrack_icmp_timeout - INTEGER (seconds) 73nf_conntrack_icmp_timeout - INTEGER (seconds)
71 default 30 74 default 30
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 739db9ab16b2..6bbceb9a3a19 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In
777conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios 777conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
778such as migration. 778such as migration.
779 779
780When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the
781set of bits that KVM can return in struct kvm_clock_data's flag member.
782
783The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned
784value is the exact kvmclock value seen by all VCPUs at the instant
785when KVM_GET_CLOCK was called. If clear, the returned value is simply
786CLOCK_MONOTONIC plus a constant offset; the offset can be modified
787with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock,
788but the exact value read by each VCPU could differ, because the host
789TSC is not stable.
790
780struct kvm_clock_data { 791struct kvm_clock_data {
781 __u64 clock; /* kvmclock current value */ 792 __u64 clock; /* kvmclock current value */
782 __u32 flags; 793 __u32 flags;
diff --git a/MAINTAINERS b/MAINTAINERS
index 30c873192458..f7fd00ef1323 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -77,6 +77,7 @@ Descriptions of section entries:
77 Q: Patchwork web based patch tracking system site 77 Q: Patchwork web based patch tracking system site
78 T: SCM tree type and location. 78 T: SCM tree type and location.
79 Type is one of: git, hg, quilt, stgit, topgit 79 Type is one of: git, hg, quilt, stgit, topgit
80 B: Bug tracking system location.
80 S: Status, one of the following: 81 S: Status, one of the following:
81 Supported: Someone is actually paid to look after this. 82 Supported: Someone is actually paid to look after this.
82 Maintained: Someone actually looks after it. 83 Maintained: Someone actually looks after it.
@@ -281,6 +282,7 @@ L: linux-acpi@vger.kernel.org
281W: https://01.org/linux-acpi 282W: https://01.org/linux-acpi
282Q: https://patchwork.kernel.org/project/linux-acpi/list/ 283Q: https://patchwork.kernel.org/project/linux-acpi/list/
283T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm 284T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
285B: https://bugzilla.kernel.org
284S: Supported 286S: Supported
285F: drivers/acpi/ 287F: drivers/acpi/
286F: drivers/pnp/pnpacpi/ 288F: drivers/pnp/pnpacpi/
@@ -304,6 +306,8 @@ W: https://acpica.org/
304W: https://github.com/acpica/acpica/ 306W: https://github.com/acpica/acpica/
305Q: https://patchwork.kernel.org/project/linux-acpi/list/ 307Q: https://patchwork.kernel.org/project/linux-acpi/list/
306T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm 308T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
309B: https://bugzilla.kernel.org
310B: https://bugs.acpica.org
307S: Supported 311S: Supported
308F: drivers/acpi/acpica/ 312F: drivers/acpi/acpica/
309F: include/acpi/ 313F: include/acpi/
@@ -313,6 +317,7 @@ ACPI FAN DRIVER
313M: Zhang Rui <rui.zhang@intel.com> 317M: Zhang Rui <rui.zhang@intel.com>
314L: linux-acpi@vger.kernel.org 318L: linux-acpi@vger.kernel.org
315W: https://01.org/linux-acpi 319W: https://01.org/linux-acpi
320B: https://bugzilla.kernel.org
316S: Supported 321S: Supported
317F: drivers/acpi/fan.c 322F: drivers/acpi/fan.c
318 323
@@ -328,6 +333,7 @@ ACPI THERMAL DRIVER
328M: Zhang Rui <rui.zhang@intel.com> 333M: Zhang Rui <rui.zhang@intel.com>
329L: linux-acpi@vger.kernel.org 334L: linux-acpi@vger.kernel.org
330W: https://01.org/linux-acpi 335W: https://01.org/linux-acpi
336B: https://bugzilla.kernel.org
331S: Supported 337S: Supported
332F: drivers/acpi/*thermal* 338F: drivers/acpi/*thermal*
333 339
@@ -335,6 +341,7 @@ ACPI VIDEO DRIVER
335M: Zhang Rui <rui.zhang@intel.com> 341M: Zhang Rui <rui.zhang@intel.com>
336L: linux-acpi@vger.kernel.org 342L: linux-acpi@vger.kernel.org
337W: https://01.org/linux-acpi 343W: https://01.org/linux-acpi
344B: https://bugzilla.kernel.org
338S: Supported 345S: Supported
339F: drivers/acpi/acpi_video.c 346F: drivers/acpi/acpi_video.c
340 347
@@ -5701,6 +5708,7 @@ HIBERNATION (aka Software Suspend, aka swsusp)
5701M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 5708M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
5702M: Pavel Machek <pavel@ucw.cz> 5709M: Pavel Machek <pavel@ucw.cz>
5703L: linux-pm@vger.kernel.org 5710L: linux-pm@vger.kernel.org
5711B: https://bugzilla.kernel.org
5704S: Supported 5712S: Supported
5705F: arch/x86/power/ 5713F: arch/x86/power/
5706F: drivers/base/power/ 5714F: drivers/base/power/
@@ -7122,6 +7130,7 @@ F: drivers/scsi/53c700*
7122LED SUBSYSTEM 7130LED SUBSYSTEM
7123M: Richard Purdie <rpurdie@rpsys.net> 7131M: Richard Purdie <rpurdie@rpsys.net>
7124M: Jacek Anaszewski <j.anaszewski@samsung.com> 7132M: Jacek Anaszewski <j.anaszewski@samsung.com>
7133M: Pavel Machek <pavel@ucw.cz>
7125L: linux-leds@vger.kernel.org 7134L: linux-leds@vger.kernel.org
7126T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git 7135T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
7127S: Maintained 7136S: Maintained
@@ -8095,6 +8104,7 @@ F: drivers/infiniband/hw/mlx4/
8095F: include/linux/mlx4/ 8104F: include/linux/mlx4/
8096 8105
8097MELLANOX MLX5 core VPI driver 8106MELLANOX MLX5 core VPI driver
8107M: Saeed Mahameed <saeedm@mellanox.com>
8098M: Matan Barak <matanb@mellanox.com> 8108M: Matan Barak <matanb@mellanox.com>
8099M: Leon Romanovsky <leonro@mellanox.com> 8109M: Leon Romanovsky <leonro@mellanox.com>
8100L: netdev@vger.kernel.org 8110L: netdev@vger.kernel.org
@@ -9285,11 +9295,12 @@ S: Maintained
9285F: drivers/pci/host/*layerscape* 9295F: drivers/pci/host/*layerscape*
9286 9296
9287PCI DRIVER FOR IMX6 9297PCI DRIVER FOR IMX6
9288M: Richard Zhu <Richard.Zhu@freescale.com> 9298M: Richard Zhu <hongxing.zhu@nxp.com>
9289M: Lucas Stach <l.stach@pengutronix.de> 9299M: Lucas Stach <l.stach@pengutronix.de>
9290L: linux-pci@vger.kernel.org 9300L: linux-pci@vger.kernel.org
9291L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9301L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9292S: Maintained 9302S: Maintained
9303F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
9293F: drivers/pci/host/*imx6* 9304F: drivers/pci/host/*imx6*
9294 9305
9295PCI DRIVER FOR TI KEYSTONE 9306PCI DRIVER FOR TI KEYSTONE
@@ -9348,17 +9359,11 @@ F: drivers/pci/host/pci-exynos.c
9348 9359
9349PCI DRIVER FOR SYNOPSIS DESIGNWARE 9360PCI DRIVER FOR SYNOPSIS DESIGNWARE
9350M: Jingoo Han <jingoohan1@gmail.com> 9361M: Jingoo Han <jingoohan1@gmail.com>
9351M: Pratyush Anand <pratyush.anand@gmail.com> 9362M: Joao Pinto <Joao.Pinto@synopsys.com>
9352L: linux-pci@vger.kernel.org
9353S: Maintained
9354F: drivers/pci/host/*designware*
9355
9356PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
9357M: Jose Abreu <Jose.Abreu@synopsys.com>
9358L: linux-pci@vger.kernel.org 9363L: linux-pci@vger.kernel.org
9359S: Maintained 9364S: Maintained
9360F: Documentation/devicetree/bindings/pci/designware-pcie.txt 9365F: Documentation/devicetree/bindings/pci/designware-pcie.txt
9361F: drivers/pci/host/pcie-designware-plat.c 9366F: drivers/pci/host/*designware*
9362 9367
9363PCI DRIVER FOR GENERIC OF HOSTS 9368PCI DRIVER FOR GENERIC OF HOSTS
9364M: Will Deacon <will.deacon@arm.com> 9369M: Will Deacon <will.deacon@arm.com>
@@ -9660,6 +9665,7 @@ POWER MANAGEMENT CORE
9660M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 9665M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
9661L: linux-pm@vger.kernel.org 9666L: linux-pm@vger.kernel.org
9662T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm 9667T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
9668B: https://bugzilla.kernel.org
9663S: Supported 9669S: Supported
9664F: drivers/base/power/ 9670F: drivers/base/power/
9665F: include/linux/pm.h 9671F: include/linux/pm.h
@@ -11649,6 +11655,7 @@ M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
11649M: Len Brown <len.brown@intel.com> 11655M: Len Brown <len.brown@intel.com>
11650M: Pavel Machek <pavel@ucw.cz> 11656M: Pavel Machek <pavel@ucw.cz>
11651L: linux-pm@vger.kernel.org 11657L: linux-pm@vger.kernel.org
11658B: https://bugzilla.kernel.org
11652S: Supported 11659S: Supported
11653F: Documentation/power/ 11660F: Documentation/power/
11654F: arch/x86/kernel/acpi/ 11661F: arch/x86/kernel/acpi/
diff --git a/Makefile b/Makefile
index 247430abfc73..369099dc0fae 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 9 2PATCHLEVEL = 9
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc5 4EXTRAVERSION = -rc8
5NAME = Psychotic Stoned Sheep 5NAME = Psychotic Stoned Sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
399 -fno-strict-aliasing -fno-common \ 399 -fno-strict-aliasing -fno-common \
400 -Werror-implicit-function-declaration \ 400 -Werror-implicit-function-declaration \
401 -Wno-format-security \ 401 -Wno-format-security \
402 -std=gnu89 402 -std=gnu89 $(call cc-option,-fno-PIE)
403
403 404
404KBUILD_AFLAGS_KERNEL := 405KBUILD_AFLAGS_KERNEL :=
405KBUILD_CFLAGS_KERNEL := 406KBUILD_CFLAGS_KERNEL :=
406KBUILD_AFLAGS := -D__ASSEMBLY__ 407KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
407KBUILD_AFLAGS_MODULE := -DMODULE 408KBUILD_AFLAGS_MODULE := -DMODULE
408KBUILD_CFLAGS_MODULE := -DMODULE 409KBUILD_CFLAGS_MODULE := -DMODULE
409KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds 410KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
@@ -606,6 +607,13 @@ else
606include/config/auto.conf: ; 607include/config/auto.conf: ;
607endif # $(dot-config) 608endif # $(dot-config)
608 609
610# For the kernel to actually contain only the needed exported symbols,
611# we have to build modules as well to determine what those symbols are.
612# (this can be evaluated only once include/config/auto.conf has been included)
613ifdef CONFIG_TRIM_UNUSED_KSYMS
614 KBUILD_MODULES := 1
615endif
616
609# The all: target is the default when no target is given on the 617# The all: target is the default when no target is given on the
610# command line. 618# command line.
611# This allow a user to issue only 'make' to build a kernel including modules 619# This allow a user to issue only 'make' to build a kernel including modules
@@ -943,7 +951,7 @@ ifdef CONFIG_GDB_SCRIPTS
943endif 951endif
944ifdef CONFIG_TRIM_UNUSED_KSYMS 952ifdef CONFIG_TRIM_UNUSED_KSYMS
945 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ 953 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
946 "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq" 954 "$(MAKE) -f $(srctree)/Makefile vmlinux"
947endif 955endif
948 956
949# standalone target for easier testing 957# standalone target for easier testing
@@ -1018,8 +1026,6 @@ prepare2: prepare3 prepare-compiler-check outputmakefile asm-generic
1018prepare1: prepare2 $(version_h) include/generated/utsrelease.h \ 1026prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
1019 include/config/auto.conf 1027 include/config/auto.conf
1020 $(cmd_crmodverdir) 1028 $(cmd_crmodverdir)
1021 $(Q)test -e include/generated/autoksyms.h || \
1022 touch include/generated/autoksyms.h
1023 1029
1024archprepare: archheaders archscripts prepare1 scripts_basic 1030archprepare: archheaders archscripts prepare1 scripts_basic
1025 1031
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 08e7e2a16ac1..a36e8601114d 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -22,10 +22,11 @@
22static inline void __delay(unsigned long loops) 22static inline void __delay(unsigned long loops)
23{ 23{
24 __asm__ __volatile__( 24 __asm__ __volatile__(
25 " lp 1f \n" 25 " mov lp_count, %0 \n"
26 " nop \n" 26 " lp 1f \n"
27 "1: \n" 27 " nop \n"
28 : "+l"(loops)); 28 "1: \n"
29 : : "r"(loops));
29} 30}
30 31
31extern void __bad_udelay(void); 32extern void __bad_udelay(void);
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 89eeb3720051..e94ca72b974e 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
280 280
281#define pte_page(pte) pfn_to_page(pte_pfn(pte)) 281#define pte_page(pte) pfn_to_page(pte_pfn(pte))
282#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 282#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
283#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) 283#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
284 284
285/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ 285/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
286#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 286#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 2b96cfc3be75..50d71695cd4e 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
23 23
24static int l2_line_sz; 24static int l2_line_sz;
25static int ioc_exists; 25static int ioc_exists;
26int slc_enable = 1, ioc_enable = 1; 26int slc_enable = 1, ioc_enable = 0;
27unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ 27unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
28unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ 28unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
29 29
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index befcd2619902..c558ba75cbcc 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -745,7 +745,6 @@ dtb-$(CONFIG_MACH_SUN4I) += \
745 sun4i-a10-pcduino2.dtb \ 745 sun4i-a10-pcduino2.dtb \
746 sun4i-a10-pov-protab2-ips9.dtb 746 sun4i-a10-pov-protab2-ips9.dtb
747dtb-$(CONFIG_MACH_SUN5I) += \ 747dtb-$(CONFIG_MACH_SUN5I) += \
748 ntc-gr8-evb.dtb \
749 sun5i-a10s-auxtek-t003.dtb \ 748 sun5i-a10s-auxtek-t003.dtb \
750 sun5i-a10s-auxtek-t004.dtb \ 749 sun5i-a10s-auxtek-t004.dtb \
751 sun5i-a10s-mk802.dtb \ 750 sun5i-a10s-mk802.dtb \
@@ -761,6 +760,7 @@ dtb-$(CONFIG_MACH_SUN5I) += \
761 sun5i-a13-olinuxino-micro.dtb \ 760 sun5i-a13-olinuxino-micro.dtb \
762 sun5i-a13-q8-tablet.dtb \ 761 sun5i-a13-q8-tablet.dtb \
763 sun5i-a13-utoo-p66.dtb \ 762 sun5i-a13-utoo-p66.dtb \
763 sun5i-gr8-evb.dtb \
764 sun5i-r8-chip.dtb 764 sun5i-r8-chip.dtb
765dtb-$(CONFIG_MACH_SUN6I) += \ 765dtb-$(CONFIG_MACH_SUN6I) += \
766 sun6i-a31-app4-evb1.dtb \ 766 sun6i-a31-app4-evb1.dtb \
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index dec4b073ceb1..379939699164 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -64,8 +64,8 @@
64 }; 64 };
65 65
66 ldo3_reg: ldo3 { 66 ldo3_reg: ldo3 {
67 regulator-min-microvolt = <600000>; 67 regulator-min-microvolt = <1725000>;
68 regulator-max-microvolt = <1800000>; 68 regulator-max-microvolt = <3300000>;
69 regulator-always-on; 69 regulator-always-on;
70 }; 70 };
71 71
@@ -76,8 +76,8 @@
76 }; 76 };
77 77
78 ldo5_reg: ldo5 { 78 ldo5_reg: ldo5 {
79 regulator-min-microvolt = <1725000>; 79 regulator-min-microvolt = <1200000>;
80 regulator-max-microvolt = <3300000>; 80 regulator-max-microvolt = <3600000>;
81 regulator-always-on; 81 regulator-always-on;
82 }; 82 };
83 83
@@ -100,14 +100,14 @@
100 }; 100 };
101 101
102 ldo9_reg: ldo9 { 102 ldo9_reg: ldo9 {
103 regulator-min-microvolt = <1200000>; 103 regulator-min-microvolt = <1250000>;
104 regulator-max-microvolt = <3600000>; 104 regulator-max-microvolt = <3600000>;
105 regulator-always-on; 105 regulator-always-on;
106 }; 106 };
107 107
108 ldo10_reg: ldo10 { 108 ldo10_reg: ldo10 {
109 regulator-min-microvolt = <1250000>; 109 regulator-min-microvolt = <1200000>;
110 regulator-max-microvolt = <3650000>; 110 regulator-max-microvolt = <3600000>;
111 regulator-always-on; 111 regulator-always-on;
112 }; 112 };
113 }; 113 };
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 0ff1c2de95bf..26cce4d18405 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -13,6 +13,11 @@
13 }; 13 };
14 }; 14 };
15 15
16 memory@80000000 {
17 device_type = "memory";
18 reg = <0x80000000 0>;
19 };
20
16 wl12xx_vmmc: wl12xx_vmmc { 21 wl12xx_vmmc: wl12xx_vmmc {
17 compatible = "regulator-fixed"; 22 compatible = "regulator-fixed";
18 regulator-name = "vwl1271"; 23 regulator-name = "vwl1271";
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 731ec37aed5b..8f9a69ca818c 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -13,9 +13,9 @@
13 }; 13 };
14 }; 14 };
15 15
16 memory@0 { 16 memory@80000000 {
17 device_type = "memory"; 17 device_type = "memory";
18 reg = <0 0>; 18 reg = <0x80000000 0>;
19 }; 19 };
20 20
21 leds { 21 leds {
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 6365635fea5c..4caadb253249 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -124,6 +124,7 @@
124 compatible = "ti,abe-twl6040"; 124 compatible = "ti,abe-twl6040";
125 ti,model = "omap5-uevm"; 125 ti,model = "omap5-uevm";
126 126
127 ti,jack-detection;
127 ti,mclk-freq = <19200000>; 128 ti,mclk-freq = <19200000>;
128 129
129 ti,mcpdm = <&mcpdm>; 130 ti,mcpdm = <&mcpdm>;
@@ -415,7 +416,7 @@
415 ti,backup-battery-charge-high-current; 416 ti,backup-battery-charge-high-current;
416 }; 417 };
417 418
418 gpadc { 419 gpadc: gpadc {
419 compatible = "ti,palmas-gpadc"; 420 compatible = "ti,palmas-gpadc";
420 interrupts = <18 0 421 interrupts = <18 0
421 16 0 422 16 0
@@ -475,8 +476,8 @@
475 smps6_reg: smps6 { 476 smps6_reg: smps6 {
476 /* VDD_DDR3 - over VDD_SMPS6 */ 477 /* VDD_DDR3 - over VDD_SMPS6 */
477 regulator-name = "smps6"; 478 regulator-name = "smps6";
478 regulator-min-microvolt = <1200000>; 479 regulator-min-microvolt = <1350000>;
479 regulator-max-microvolt = <1200000>; 480 regulator-max-microvolt = <1350000>;
480 regulator-always-on; 481 regulator-always-on;
481 regulator-boot-on; 482 regulator-boot-on;
482 }; 483 };
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 91096a49efa9..8f79b4147bba 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -283,6 +283,8 @@
283 clock-frequency = <400000>; 283 clock-frequency = <400000>;
284 pinctrl-names = "default"; 284 pinctrl-names = "default";
285 pinctrl-0 = <&pinctrl_i2c0_default>; 285 pinctrl-0 = <&pinctrl_i2c0_default>;
286 #address-cells = <1>;
287 #size-cells = <0>;
286 288
287 status = "disabled"; 289 status = "disabled";
288 }; 290 };
@@ -296,6 +298,8 @@
296 clock-frequency = <400000>; 298 clock-frequency = <400000>;
297 pinctrl-names = "default"; 299 pinctrl-names = "default";
298 pinctrl-0 = <&pinctrl_i2c1_default>; 300 pinctrl-0 = <&pinctrl_i2c1_default>;
301 #address-cells = <1>;
302 #size-cells = <0>;
299 303
300 status = "disabled"; 304 status = "disabled";
301 }; 305 };
@@ -309,6 +313,8 @@
309 clock-frequency = <400000>; 313 clock-frequency = <400000>;
310 pinctrl-names = "default"; 314 pinctrl-names = "default";
311 pinctrl-0 = <&pinctrl_i2c2_default>; 315 pinctrl-0 = <&pinctrl_i2c2_default>;
316 #address-cells = <1>;
317 #size-cells = <0>;
312 318
313 status = "disabled"; 319 status = "disabled";
314 }; 320 };
@@ -322,6 +328,8 @@
322 clock-frequency = <400000>; 328 clock-frequency = <400000>;
323 pinctrl-names = "default"; 329 pinctrl-names = "default";
324 pinctrl-0 = <&pinctrl_i2c3_default>; 330 pinctrl-0 = <&pinctrl_i2c3_default>;
331 #address-cells = <1>;
332 #size-cells = <0>;
325 333
326 status = "disabled"; 334 status = "disabled";
327 }; 335 };
@@ -335,6 +343,8 @@
335 clock-frequency = <400000>; 343 clock-frequency = <400000>;
336 pinctrl-names = "default"; 344 pinctrl-names = "default";
337 pinctrl-0 = <&pinctrl_i2c4_default>; 345 pinctrl-0 = <&pinctrl_i2c4_default>;
346 #address-cells = <1>;
347 #size-cells = <0>;
338 348
339 status = "disabled"; 349 status = "disabled";
340 }; 350 };
@@ -348,6 +358,8 @@
348 clock-frequency = <400000>; 358 clock-frequency = <400000>;
349 pinctrl-names = "default"; 359 pinctrl-names = "default";
350 pinctrl-0 = <&pinctrl_i2c5_default>; 360 pinctrl-0 = <&pinctrl_i2c5_default>;
361 #address-cells = <1>;
362 #size-cells = <0>;
351 363
352 status = "disabled"; 364 status = "disabled";
353 }; 365 };
@@ -363,6 +375,8 @@
363 clock-frequency = <400000>; 375 clock-frequency = <400000>;
364 pinctrl-names = "default"; 376 pinctrl-names = "default";
365 pinctrl-0 = <&pinctrl_i2c10_default>; 377 pinctrl-0 = <&pinctrl_i2c10_default>;
378 #address-cells = <1>;
379 #size-cells = <0>;
366 380
367 status = "disabled"; 381 status = "disabled";
368 }; 382 };
@@ -376,6 +390,8 @@
376 clock-frequency = <400000>; 390 clock-frequency = <400000>;
377 pinctrl-names = "default"; 391 pinctrl-names = "default";
378 pinctrl-0 = <&pinctrl_i2c11_default>; 392 pinctrl-0 = <&pinctrl_i2c11_default>;
393 #address-cells = <1>;
394 #size-cells = <0>;
379 395
380 status = "disabled"; 396 status = "disabled";
381 }; 397 };
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts
index ef2ff2f518f6..7fb507fcba7e 100644
--- a/arch/arm/boot/dts/stih410-b2260.dts
+++ b/arch/arm/boot/dts/stih410-b2260.dts
@@ -74,7 +74,7 @@
74 /* Low speed expansion connector */ 74 /* Low speed expansion connector */
75 spi0: spi@9844000 { 75 spi0: spi@9844000 {
76 label = "LS-SPI0"; 76 label = "LS-SPI0";
77 cs-gpio = <&pio30 3 0>; 77 cs-gpios = <&pio30 3 0>;
78 status = "okay"; 78 status = "okay";
79 }; 79 };
80 80
diff --git a/arch/arm/boot/dts/ntc-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
index 4b622f3b5220..714381fd64d7 100644
--- a/arch/arm/boot/dts/ntc-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -44,7 +44,7 @@
44 */ 44 */
45 45
46/dts-v1/; 46/dts-v1/;
47#include "ntc-gr8.dtsi" 47#include "sun5i-gr8.dtsi"
48#include "sunxi-common-regulators.dtsi" 48#include "sunxi-common-regulators.dtsi"
49 49
50#include <dt-bindings/gpio/gpio.h> 50#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/ntc-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi
index ca54e03ef366..ca54e03ef366 100644
--- a/arch/arm/boot/dts/ntc-gr8.dtsi
+++ b/arch/arm/boot/dts/sun5i-gr8.dtsi
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 48fc24f36fcb..300a1bd5a6ec 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -282,11 +282,15 @@
282 uart1_pins_a: uart1@0 { 282 uart1_pins_a: uart1@0 {
283 allwinner,pins = "PG6", "PG7"; 283 allwinner,pins = "PG6", "PG7";
284 allwinner,function = "uart1"; 284 allwinner,function = "uart1";
285 allwinner,drive = <SUN4I_PINCTRL_10_MA>;
286 allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
285 }; 287 };
286 288
287 uart1_pins_cts_rts_a: uart1-cts-rts@0 { 289 uart1_pins_cts_rts_a: uart1-cts-rts@0 {
288 allwinner,pins = "PG8", "PG9"; 290 allwinner,pins = "PG8", "PG9";
289 allwinner,function = "uart1"; 291 allwinner,function = "uart1";
292 allwinner,drive = <SUN4I_PINCTRL_10_MA>;
293 allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
290 }; 294 };
291 295
292 mmc0_pins_a: mmc0@0 { 296 mmc0_pins_a: mmc0@0 {
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 0745538b26d3..55e0e3ea9cb6 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -8,7 +8,6 @@ generic-y += early_ioremap.h
8generic-y += emergency-restart.h 8generic-y += emergency-restart.h
9generic-y += errno.h 9generic-y += errno.h
10generic-y += exec.h 10generic-y += exec.h
11generic-y += export.h
12generic-y += ioctl.h 11generic-y += ioctl.h
13generic-y += ipcbuf.h 12generic-y += ipcbuf.h
14generic-y += irq_regs.h 13generic-y += irq_regs.h
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 68c2c097cffe..ad325a8c7e1e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -33,7 +33,7 @@ endif
33obj-$(CONFIG_CPU_IDLE) += cpuidle.o 33obj-$(CONFIG_CPU_IDLE) += cpuidle.o
34obj-$(CONFIG_ISA_DMA_API) += dma.o 34obj-$(CONFIG_ISA_DMA_API) += dma.o
35obj-$(CONFIG_FIQ) += fiq.o fiqasm.o 35obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
36obj-$(CONFIG_MODULES) += module.o 36obj-$(CONFIG_MODULES) += armksyms.o module.o
37obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o 37obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
38obj-$(CONFIG_ISA_DMA) += dma-isa.o 38obj-$(CONFIG_ISA_DMA) += dma-isa.o
39obj-$(CONFIG_PCI) += bios32.o isa.o 39obj-$(CONFIG_PCI) += bios32.o isa.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
new file mode 100644
index 000000000000..7e45f69a0ddc
--- /dev/null
+++ b/arch/arm/kernel/armksyms.c
@@ -0,0 +1,183 @@
1/*
2 * linux/arch/arm/kernel/armksyms.c
3 *
4 * Copyright (C) 2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/export.h>
11#include <linux/sched.h>
12#include <linux/string.h>
13#include <linux/cryptohash.h>
14#include <linux/delay.h>
15#include <linux/in6.h>
16#include <linux/syscalls.h>
17#include <linux/uaccess.h>
18#include <linux/io.h>
19#include <linux/arm-smccc.h>
20
21#include <asm/checksum.h>
22#include <asm/ftrace.h>
23
24/*
25 * libgcc functions - functions that are used internally by the
26 * compiler... (prototypes are not correct though, but that
27 * doesn't really matter since they're not versioned).
28 */
29extern void __ashldi3(void);
30extern void __ashrdi3(void);
31extern void __divsi3(void);
32extern void __lshrdi3(void);
33extern void __modsi3(void);
34extern void __muldi3(void);
35extern void __ucmpdi2(void);
36extern void __udivsi3(void);
37extern void __umodsi3(void);
38extern void __do_div64(void);
39extern void __bswapsi2(void);
40extern void __bswapdi2(void);
41
42extern void __aeabi_idiv(void);
43extern void __aeabi_idivmod(void);
44extern void __aeabi_lasr(void);
45extern void __aeabi_llsl(void);
46extern void __aeabi_llsr(void);
47extern void __aeabi_lmul(void);
48extern void __aeabi_uidiv(void);
49extern void __aeabi_uidivmod(void);
50extern void __aeabi_ulcmp(void);
51
52extern void fpundefinstr(void);
53
54void mmioset(void *, unsigned int, size_t);
55void mmiocpy(void *, const void *, size_t);
56
57 /* platform dependent support */
58EXPORT_SYMBOL(arm_delay_ops);
59
60 /* networking */
61EXPORT_SYMBOL(csum_partial);
62EXPORT_SYMBOL(csum_partial_copy_from_user);
63EXPORT_SYMBOL(csum_partial_copy_nocheck);
64EXPORT_SYMBOL(__csum_ipv6_magic);
65
66 /* io */
67#ifndef __raw_readsb
68EXPORT_SYMBOL(__raw_readsb);
69#endif
70#ifndef __raw_readsw
71EXPORT_SYMBOL(__raw_readsw);
72#endif
73#ifndef __raw_readsl
74EXPORT_SYMBOL(__raw_readsl);
75#endif
76#ifndef __raw_writesb
77EXPORT_SYMBOL(__raw_writesb);
78#endif
79#ifndef __raw_writesw
80EXPORT_SYMBOL(__raw_writesw);
81#endif
82#ifndef __raw_writesl
83EXPORT_SYMBOL(__raw_writesl);
84#endif
85
86 /* string / mem functions */
87EXPORT_SYMBOL(strchr);
88EXPORT_SYMBOL(strrchr);
89EXPORT_SYMBOL(memset);
90EXPORT_SYMBOL(memcpy);
91EXPORT_SYMBOL(memmove);
92EXPORT_SYMBOL(memchr);
93EXPORT_SYMBOL(__memzero);
94
95EXPORT_SYMBOL(mmioset);
96EXPORT_SYMBOL(mmiocpy);
97
98#ifdef CONFIG_MMU
99EXPORT_SYMBOL(copy_page);
100
101EXPORT_SYMBOL(arm_copy_from_user);
102EXPORT_SYMBOL(arm_copy_to_user);
103EXPORT_SYMBOL(arm_clear_user);
104
105EXPORT_SYMBOL(__get_user_1);
106EXPORT_SYMBOL(__get_user_2);
107EXPORT_SYMBOL(__get_user_4);
108EXPORT_SYMBOL(__get_user_8);
109
110#ifdef __ARMEB__
111EXPORT_SYMBOL(__get_user_64t_1);
112EXPORT_SYMBOL(__get_user_64t_2);
113EXPORT_SYMBOL(__get_user_64t_4);
114EXPORT_SYMBOL(__get_user_32t_8);
115#endif
116
117EXPORT_SYMBOL(__put_user_1);
118EXPORT_SYMBOL(__put_user_2);
119EXPORT_SYMBOL(__put_user_4);
120EXPORT_SYMBOL(__put_user_8);
121#endif
122
123 /* gcc lib functions */
124EXPORT_SYMBOL(__ashldi3);
125EXPORT_SYMBOL(__ashrdi3);
126EXPORT_SYMBOL(__divsi3);
127EXPORT_SYMBOL(__lshrdi3);
128EXPORT_SYMBOL(__modsi3);
129EXPORT_SYMBOL(__muldi3);
130EXPORT_SYMBOL(__ucmpdi2);
131EXPORT_SYMBOL(__udivsi3);
132EXPORT_SYMBOL(__umodsi3);
133EXPORT_SYMBOL(__do_div64);
134EXPORT_SYMBOL(__bswapsi2);
135EXPORT_SYMBOL(__bswapdi2);
136
137#ifdef CONFIG_AEABI
138EXPORT_SYMBOL(__aeabi_idiv);
139EXPORT_SYMBOL(__aeabi_idivmod);
140EXPORT_SYMBOL(__aeabi_lasr);
141EXPORT_SYMBOL(__aeabi_llsl);
142EXPORT_SYMBOL(__aeabi_llsr);
143EXPORT_SYMBOL(__aeabi_lmul);
144EXPORT_SYMBOL(__aeabi_uidiv);
145EXPORT_SYMBOL(__aeabi_uidivmod);
146EXPORT_SYMBOL(__aeabi_ulcmp);
147#endif
148
149 /* bitops */
150EXPORT_SYMBOL(_set_bit);
151EXPORT_SYMBOL(_test_and_set_bit);
152EXPORT_SYMBOL(_clear_bit);
153EXPORT_SYMBOL(_test_and_clear_bit);
154EXPORT_SYMBOL(_change_bit);
155EXPORT_SYMBOL(_test_and_change_bit);
156EXPORT_SYMBOL(_find_first_zero_bit_le);
157EXPORT_SYMBOL(_find_next_zero_bit_le);
158EXPORT_SYMBOL(_find_first_bit_le);
159EXPORT_SYMBOL(_find_next_bit_le);
160
161#ifdef __ARMEB__
162EXPORT_SYMBOL(_find_first_zero_bit_be);
163EXPORT_SYMBOL(_find_next_zero_bit_be);
164EXPORT_SYMBOL(_find_first_bit_be);
165EXPORT_SYMBOL(_find_next_bit_be);
166#endif
167
168#ifdef CONFIG_FUNCTION_TRACER
169#ifdef CONFIG_OLD_MCOUNT
170EXPORT_SYMBOL(mcount);
171#endif
172EXPORT_SYMBOL(__gnu_mcount_nc);
173#endif
174
175#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
176EXPORT_SYMBOL(__pv_phys_pfn_offset);
177EXPORT_SYMBOL(__pv_offset);
178#endif
179
180#ifdef CONFIG_HAVE_ARM_SMCCC
181EXPORT_SYMBOL(arm_smccc_smc);
182EXPORT_SYMBOL(arm_smccc_hvc);
183#endif
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index b629d3f11c3d..c73c4030ca5d 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -7,7 +7,6 @@
7#include <asm/assembler.h> 7#include <asm/assembler.h>
8#include <asm/ftrace.h> 8#include <asm/ftrace.h>
9#include <asm/unwind.h> 9#include <asm/unwind.h>
10#include <asm/export.h>
11 10
12#include "entry-header.S" 11#include "entry-header.S"
13 12
@@ -154,7 +153,6 @@ ENTRY(mcount)
154 __mcount _old 153 __mcount _old
155#endif 154#endif
156ENDPROC(mcount) 155ENDPROC(mcount)
157EXPORT_SYMBOL(mcount)
158 156
159#ifdef CONFIG_DYNAMIC_FTRACE 157#ifdef CONFIG_DYNAMIC_FTRACE
160ENTRY(ftrace_caller_old) 158ENTRY(ftrace_caller_old)
@@ -207,7 +205,6 @@ UNWIND(.fnstart)
207#endif 205#endif
208UNWIND(.fnend) 206UNWIND(.fnend)
209ENDPROC(__gnu_mcount_nc) 207ENDPROC(__gnu_mcount_nc)
210EXPORT_SYMBOL(__gnu_mcount_nc)
211 208
212#ifdef CONFIG_DYNAMIC_FTRACE 209#ifdef CONFIG_DYNAMIC_FTRACE
213ENTRY(ftrace_caller) 210ENTRY(ftrace_caller)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f41cee4c5746..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -22,7 +22,6 @@
22#include <asm/memory.h> 22#include <asm/memory.h>
23#include <asm/thread_info.h> 23#include <asm/thread_info.h>
24#include <asm/pgtable.h> 24#include <asm/pgtable.h>
25#include <asm/export.h>
26 25
27#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) 26#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
28#include CONFIG_DEBUG_LL_INCLUDE 27#include CONFIG_DEBUG_LL_INCLUDE
@@ -728,8 +727,6 @@ __pv_phys_pfn_offset:
728__pv_offset: 727__pv_offset:
729 .quad 0 728 .quad 0
730 .size __pv_offset, . -__pv_offset 729 .size __pv_offset, . -__pv_offset
731EXPORT_SYMBOL(__pv_phys_pfn_offset)
732EXPORT_SYMBOL(__pv_offset)
733#endif 730#endif
734 731
735#include "head-common.S" 732#include "head-common.S"
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 37669e7e13af..2e48b674aab1 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -16,7 +16,6 @@
16#include <asm/opcodes-sec.h> 16#include <asm/opcodes-sec.h>
17#include <asm/opcodes-virt.h> 17#include <asm/opcodes-virt.h>
18#include <asm/unwind.h> 18#include <asm/unwind.h>
19#include <asm/export.h>
20 19
21 /* 20 /*
22 * Wrap c macros in asm macros to delay expansion until after the 21 * Wrap c macros in asm macros to delay expansion until after the
@@ -52,7 +51,6 @@ UNWIND( .fnend)
52ENTRY(arm_smccc_smc) 51ENTRY(arm_smccc_smc)
53 SMCCC SMCCC_SMC 52 SMCCC SMCCC_SMC
54ENDPROC(arm_smccc_smc) 53ENDPROC(arm_smccc_smc)
55EXPORT_SYMBOL(arm_smccc_smc)
56 54
57/* 55/*
58 * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, 56 * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -62,4 +60,3 @@ EXPORT_SYMBOL(arm_smccc_smc)
62ENTRY(arm_smccc_hvc) 60ENTRY(arm_smccc_hvc)
63 SMCCC SMCCC_HVC 61 SMCCC SMCCC_HVC
64ENDPROC(arm_smccc_hvc) 62ENDPROC(arm_smccc_hvc)
65EXPORT_SYMBOL(arm_smccc_hvc)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index bc698383e822..9688ec0c6ef4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
74 dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); 74 dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
75} 75}
76 76
77void dump_backtrace_stm(u32 *stack, u32 instruction)
78{
79 char str[80], *p;
80 unsigned int x;
81 int reg;
82
83 for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
84 if (instruction & BIT(reg)) {
85 p += sprintf(p, " r%d:%08x", reg, *stack--);
86 if (++x == 6) {
87 x = 0;
88 p = str;
89 printk("%s\n", str);
90 }
91 }
92 }
93 if (p != str)
94 printk("%s\n", str);
95}
96
77#ifndef CONFIG_ARM_UNWIND 97#ifndef CONFIG_ARM_UNWIND
78/* 98/*
79 * Stack pointers should always be within the kernels view of 99 * Stack pointers should always be within the kernels view of
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 7fa487ef7e2f..37b2a11af345 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -3,6 +3,9 @@
3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> 3 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
4 */ 4 */
5 5
6/* No __ro_after_init data in the .rodata section - which will always be ro */
7#define RO_AFTER_INIT_DATA
8
6#include <asm-generic/vmlinux.lds.h> 9#include <asm-generic/vmlinux.lds.h>
7#include <asm/cache.h> 10#include <asm/cache.h>
8#include <asm/thread_info.h> 11#include <asm/thread_info.h>
@@ -223,6 +226,8 @@ SECTIONS
223 . = ALIGN(PAGE_SIZE); 226 . = ALIGN(PAGE_SIZE);
224 __init_end = .; 227 __init_end = .;
225 228
229 *(.data..ro_after_init)
230
226 NOSAVE_DATA 231 NOSAVE_DATA
227 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) 232 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
228 READ_MOSTLY_DATA(L1_CACHE_BYTES) 233 READ_MOSTLY_DATA(L1_CACHE_BYTES)
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index a7e7de89bd75..b05e95840651 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/assembler.h> 30#include <asm/assembler.h>
31#include <asm/export.h>
32 31
33#ifdef __ARMEB__ 32#ifdef __ARMEB__
34#define al r1 33#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsl)
53 52
54ENDPROC(__ashldi3) 53ENDPROC(__ashldi3)
55ENDPROC(__aeabi_llsl) 54ENDPROC(__aeabi_llsl)
56EXPORT_SYMBOL(__ashldi3)
57EXPORT_SYMBOL(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 490336e42518..275d7d2341a4 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/assembler.h> 30#include <asm/assembler.h>
31#include <asm/export.h>
32 31
33#ifdef __ARMEB__ 32#ifdef __ARMEB__
34#define al r1 33#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_lasr)
53 52
54ENDPROC(__ashrdi3) 53ENDPROC(__ashrdi3)
55ENDPROC(__aeabi_lasr) 54ENDPROC(__aeabi_lasr)
56EXPORT_SYMBOL(__ashrdi3)
57EXPORT_SYMBOL(__aeabi_lasr)
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index fab5a50503ae..7d7952e5a3b1 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -10,6 +10,7 @@
10 * 27/03/03 Ian Molton Clean up CONFIG_CPU 10 * 27/03/03 Ian Molton Clean up CONFIG_CPU
11 * 11 *
12 */ 12 */
13#include <linux/kern_levels.h>
13#include <linux/linkage.h> 14#include <linux/linkage.h>
14#include <asm/assembler.h> 15#include <asm/assembler.h>
15 .text 16 .text
@@ -83,13 +84,13 @@ for_each_frame: tst frame, mask @ Check for address exceptions
83 teq r3, r1, lsr #11 84 teq r3, r1, lsr #11
84 ldreq r0, [frame, #-8] @ get sp 85 ldreq r0, [frame, #-8] @ get sp
85 subeq r0, r0, #4 @ point at the last arg 86 subeq r0, r0, #4 @ point at the last arg
86 bleq .Ldumpstm @ dump saved registers 87 bleq dump_backtrace_stm @ dump saved registers
87 88
881004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc} 891004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
89 ldr r3, .Ldsi @ instruction exists, 90 ldr r3, .Ldsi @ instruction exists,
90 teq r3, r1, lsr #11 91 teq r3, r1, lsr #11
91 subeq r0, frame, #16 92 subeq r0, frame, #16
92 bleq .Ldumpstm @ dump saved registers 93 bleq dump_backtrace_stm @ dump saved registers
93 94
94 teq sv_fp, #0 @ zero saved fp means 95 teq sv_fp, #0 @ zero saved fp means
95 beq no_frame @ no further frames 96 beq no_frame @ no further frames
@@ -112,38 +113,6 @@ ENDPROC(c_backtrace)
112 .long 1004b, 1006b 113 .long 1004b, 1006b
113 .popsection 114 .popsection
114 115
115#define instr r4
116#define reg r5
117#define stack r6
118
119.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr}
120 mov stack, r0
121 mov instr, r1
122 mov reg, #10
123 mov r7, #0
1241: mov r3, #1
125 ARM( tst instr, r3, lsl reg )
126 THUMB( lsl r3, reg )
127 THUMB( tst instr, r3 )
128 beq 2f
129 add r7, r7, #1
130 teq r7, #6
131 moveq r7, #0
132 adr r3, .Lcr
133 addne r3, r3, #1 @ skip newline
134 ldr r2, [stack], #-4
135 mov r1, reg
136 adr r0, .Lfp
137 bl printk
1382: subs reg, reg, #1
139 bpl 1b
140 teq r7, #0
141 adrne r0, .Lcr
142 blne printk
143 ldmfd sp!, {instr, reg, stack, r7, pc}
144
145.Lfp: .asciz " r%d:%08x%s"
146.Lcr: .asciz "\n"
147.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" 116.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
148 .align 117 .align
149.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc} 118.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index df06638b327c..7d807cfd8ef5 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,6 +1,5 @@
1#include <asm/assembler.h> 1#include <asm/assembler.h>
2#include <asm/unwind.h> 2#include <asm/unwind.h>
3#include <asm/export.h>
4 3
5#if __LINUX_ARM_ARCH__ >= 6 4#if __LINUX_ARM_ARCH__ >= 6
6 .macro bitop, name, instr 5 .macro bitop, name, instr
@@ -26,7 +25,6 @@ UNWIND( .fnstart )
26 bx lr 25 bx lr
27UNWIND( .fnend ) 26UNWIND( .fnend )
28ENDPROC(\name ) 27ENDPROC(\name )
29EXPORT_SYMBOL(\name )
30 .endm 28 .endm
31 29
32 .macro testop, name, instr, store 30 .macro testop, name, instr, store
@@ -57,7 +55,6 @@ UNWIND( .fnstart )
572: bx lr 552: bx lr
58UNWIND( .fnend ) 56UNWIND( .fnend )
59ENDPROC(\name ) 57ENDPROC(\name )
60EXPORT_SYMBOL(\name )
61 .endm 58 .endm
62#else 59#else
63 .macro bitop, name, instr 60 .macro bitop, name, instr
@@ -77,7 +74,6 @@ UNWIND( .fnstart )
77 ret lr 74 ret lr
78UNWIND( .fnend ) 75UNWIND( .fnend )
79ENDPROC(\name ) 76ENDPROC(\name )
80EXPORT_SYMBOL(\name )
81 .endm 77 .endm
82 78
83/** 79/**
@@ -106,6 +102,5 @@ UNWIND( .fnstart )
106 ret lr 102 ret lr
107UNWIND( .fnend ) 103UNWIND( .fnend )
108ENDPROC(\name ) 104ENDPROC(\name )
109EXPORT_SYMBOL(\name )
110 .endm 105 .endm
111#endif 106#endif
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index f05f78247304..07cda737bb11 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,6 +1,5 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <asm/assembler.h> 2#include <asm/assembler.h>
3#include <asm/export.h>
4 3
5#if __LINUX_ARM_ARCH__ >= 6 4#if __LINUX_ARM_ARCH__ >= 6
6ENTRY(__bswapsi2) 5ENTRY(__bswapsi2)
@@ -36,5 +35,3 @@ ENTRY(__bswapdi2)
36 ret lr 35 ret lr
37ENDPROC(__bswapdi2) 36ENDPROC(__bswapdi2)
38#endif 37#endif
39EXPORT_SYMBOL(__bswapsi2)
40EXPORT_SYMBOL(__bswapdi2)
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index b566154f5cf4..e936352ccb00 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -10,7 +10,6 @@
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/unwind.h> 12#include <asm/unwind.h>
13#include <asm/export.h>
14 13
15 .text 14 .text
16 15
@@ -51,9 +50,6 @@ USER( strnebt r2, [r0])
51UNWIND(.fnend) 50UNWIND(.fnend)
52ENDPROC(arm_clear_user) 51ENDPROC(arm_clear_user)
53ENDPROC(__clear_user_std) 52ENDPROC(__clear_user_std)
54#ifndef CONFIG_UACCESS_WITH_MEMCPY
55EXPORT_SYMBOL(arm_clear_user)
56#endif
57 53
58 .pushsection .text.fixup,"ax" 54 .pushsection .text.fixup,"ax"
59 .align 0 55 .align 0
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 63e4c1ed0225..7a4b06049001 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -13,7 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h> 15#include <asm/unwind.h>
16#include <asm/export.h>
17 16
18/* 17/*
19 * Prototype: 18 * Prototype:
@@ -95,7 +94,6 @@ ENTRY(arm_copy_from_user)
95#include "copy_template.S" 94#include "copy_template.S"
96 95
97ENDPROC(arm_copy_from_user) 96ENDPROC(arm_copy_from_user)
98EXPORT_SYMBOL(arm_copy_from_user)
99 97
100 .pushsection .fixup,"ax" 98 .pushsection .fixup,"ax"
101 .align 0 99 .align 0
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index d97851d4af7a..6ee2f6706f86 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -13,7 +13,6 @@
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/cache.h> 15#include <asm/cache.h>
16#include <asm/export.h>
17 16
18#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 )) 17#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
19 18
@@ -46,4 +45,3 @@ ENTRY(copy_page)
46 PLD( beq 2b ) 45 PLD( beq 2b )
47 ldmfd sp!, {r4, pc} @ 3 46 ldmfd sp!, {r4, pc} @ 3
48ENDPROC(copy_page) 47ENDPROC(copy_page)
49EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 592c179112d1..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -13,7 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h> 15#include <asm/unwind.h>
16#include <asm/export.h>
17 16
18/* 17/*
19 * Prototype: 18 * Prototype:
@@ -100,9 +99,6 @@ WEAK(arm_copy_to_user)
100 99
101ENDPROC(arm_copy_to_user) 100ENDPROC(arm_copy_to_user)
102ENDPROC(__copy_to_user_std) 101ENDPROC(__copy_to_user_std)
103#ifndef CONFIG_UACCESS_WITH_MEMCPY
104EXPORT_SYMBOL(arm_copy_to_user)
105#endif
106 102
107 .pushsection .text.fixup,"ax" 103 .pushsection .text.fixup,"ax"
108 .align 0 104 .align 0
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 68603b5ee537..3ac6ef01bc43 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14 .text 13 .text
15 14
@@ -31,4 +30,4 @@ ENTRY(__csum_ipv6_magic)
31 adcs r0, r0, #0 30 adcs r0, r0, #0
32 ldmfd sp!, {pc} 31 ldmfd sp!, {pc}
33ENDPROC(__csum_ipv6_magic) 32ENDPROC(__csum_ipv6_magic)
34EXPORT_SYMBOL(__csum_ipv6_magic) 33
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 830b20e81c37..984e0f29d548 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14 .text 13 .text
15 14
@@ -141,4 +140,3 @@ ENTRY(csum_partial)
141 bne 4b 140 bne 4b
142 b .Lless4 141 b .Lless4
143ENDPROC(csum_partial) 142ENDPROC(csum_partial)
144EXPORT_SYMBOL(csum_partial)
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index 9c3383fed129..d03fc71fc88c 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -49,6 +49,5 @@
49 49
50#define FN_ENTRY ENTRY(csum_partial_copy_nocheck) 50#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
51#define FN_EXIT ENDPROC(csum_partial_copy_nocheck) 51#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
52#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck)
53 52
54#include "csumpartialcopygeneric.S" 53#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 8b94d20e51d1..10b45909610c 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -8,7 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <asm/assembler.h> 10#include <asm/assembler.h>
11#include <asm/export.h>
12 11
13/* 12/*
14 * unsigned int 13 * unsigned int
@@ -332,4 +331,3 @@ FN_ENTRY
332 mov r5, r4, get_byte_1 331 mov r5, r4, get_byte_1
333 b .Lexit 332 b .Lexit
334FN_EXIT 333FN_EXIT
335FN_EXPORT
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 5d495edf3d83..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -73,7 +73,6 @@
73 73
74#define FN_ENTRY ENTRY(csum_partial_copy_from_user) 74#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
75#define FN_EXIT ENDPROC(csum_partial_copy_from_user) 75#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
76#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user)
77 76
78#include "csumpartialcopygeneric.S" 77#include "csumpartialcopygeneric.S"
79 78
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 69aad80a3af4..2cef11884857 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -24,7 +24,6 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/export.h>
28#include <linux/timex.h> 27#include <linux/timex.h>
29 28
30/* 29/*
@@ -35,7 +34,6 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = {
35 .const_udelay = __loop_const_udelay, 34 .const_udelay = __loop_const_udelay,
36 .udelay = __loop_udelay, 35 .udelay = __loop_udelay,
37}; 36};
38EXPORT_SYMBOL(arm_delay_ops);
39 37
40static const struct delay_timer *delay_timer; 38static const struct delay_timer *delay_timer;
41static bool delay_calibrated; 39static bool delay_calibrated;
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index 0c9e1c18fc9e..a9eafe4981eb 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -15,7 +15,6 @@
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/assembler.h> 16#include <asm/assembler.h>
17#include <asm/unwind.h> 17#include <asm/unwind.h>
18#include <asm/export.h>
19 18
20#ifdef __ARMEB__ 19#ifdef __ARMEB__
21#define xh r0 20#define xh r0
@@ -211,4 +210,3 @@ Ldiv0_64:
211 210
212UNWIND(.fnend) 211UNWIND(.fnend)
213ENDPROC(__do_div64) 212ENDPROC(__do_div64)
214EXPORT_SYMBOL(__do_div64)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 26302b8cd38f..7848780e8834 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -15,7 +15,6 @@
15 */ 15 */
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <asm/assembler.h> 17#include <asm/assembler.h>
18#include <asm/export.h>
19 .text 18 .text
20 19
21/* 20/*
@@ -38,7 +37,6 @@ ENTRY(_find_first_zero_bit_le)
383: mov r0, r1 @ no free bits 373: mov r0, r1 @ no free bits
39 ret lr 38 ret lr
40ENDPROC(_find_first_zero_bit_le) 39ENDPROC(_find_first_zero_bit_le)
41EXPORT_SYMBOL(_find_first_zero_bit_le)
42 40
43/* 41/*
44 * Purpose : Find next 'zero' bit 42 * Purpose : Find next 'zero' bit
@@ -59,7 +57,6 @@ ENTRY(_find_next_zero_bit_le)
59 add r2, r2, #1 @ align bit pointer 57 add r2, r2, #1 @ align bit pointer
60 b 2b @ loop for next bit 58 b 2b @ loop for next bit
61ENDPROC(_find_next_zero_bit_le) 59ENDPROC(_find_next_zero_bit_le)
62EXPORT_SYMBOL(_find_next_zero_bit_le)
63 60
64/* 61/*
65 * Purpose : Find a 'one' bit 62 * Purpose : Find a 'one' bit
@@ -81,7 +78,6 @@ ENTRY(_find_first_bit_le)
813: mov r0, r1 @ no free bits 783: mov r0, r1 @ no free bits
82 ret lr 79 ret lr
83ENDPROC(_find_first_bit_le) 80ENDPROC(_find_first_bit_le)
84EXPORT_SYMBOL(_find_first_bit_le)
85 81
86/* 82/*
87 * Purpose : Find next 'one' bit 83 * Purpose : Find next 'one' bit
@@ -101,7 +97,6 @@ ENTRY(_find_next_bit_le)
101 add r2, r2, #1 @ align bit pointer 97 add r2, r2, #1 @ align bit pointer
102 b 2b @ loop for next bit 98 b 2b @ loop for next bit
103ENDPROC(_find_next_bit_le) 99ENDPROC(_find_next_bit_le)
104EXPORT_SYMBOL(_find_next_bit_le)
105 100
106#ifdef __ARMEB__ 101#ifdef __ARMEB__
107 102
@@ -121,7 +116,6 @@ ENTRY(_find_first_zero_bit_be)
1213: mov r0, r1 @ no free bits 1163: mov r0, r1 @ no free bits
122 ret lr 117 ret lr
123ENDPROC(_find_first_zero_bit_be) 118ENDPROC(_find_first_zero_bit_be)
124EXPORT_SYMBOL(_find_first_zero_bit_be)
125 119
126ENTRY(_find_next_zero_bit_be) 120ENTRY(_find_next_zero_bit_be)
127 teq r1, #0 121 teq r1, #0
@@ -139,7 +133,6 @@ ENTRY(_find_next_zero_bit_be)
139 add r2, r2, #1 @ align bit pointer 133 add r2, r2, #1 @ align bit pointer
140 b 2b @ loop for next bit 134 b 2b @ loop for next bit
141ENDPROC(_find_next_zero_bit_be) 135ENDPROC(_find_next_zero_bit_be)
142EXPORT_SYMBOL(_find_next_zero_bit_be)
143 136
144ENTRY(_find_first_bit_be) 137ENTRY(_find_first_bit_be)
145 teq r1, #0 138 teq r1, #0
@@ -157,7 +150,6 @@ ENTRY(_find_first_bit_be)
1573: mov r0, r1 @ no free bits 1503: mov r0, r1 @ no free bits
158 ret lr 151 ret lr
159ENDPROC(_find_first_bit_be) 152ENDPROC(_find_first_bit_be)
160EXPORT_SYMBOL(_find_first_bit_be)
161 153
162ENTRY(_find_next_bit_be) 154ENTRY(_find_next_bit_be)
163 teq r1, #0 155 teq r1, #0
@@ -174,7 +166,6 @@ ENTRY(_find_next_bit_be)
174 add r2, r2, #1 @ align bit pointer 166 add r2, r2, #1 @ align bit pointer
175 b 2b @ loop for next bit 167 b 2b @ loop for next bit
176ENDPROC(_find_next_bit_be) 168ENDPROC(_find_next_bit_be)
177EXPORT_SYMBOL(_find_next_bit_be)
178 169
179#endif 170#endif
180 171
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 9d09a38e73af..8ecfd15c3a02 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -31,7 +31,6 @@
31#include <asm/assembler.h> 31#include <asm/assembler.h>
32#include <asm/errno.h> 32#include <asm/errno.h>
33#include <asm/domain.h> 33#include <asm/domain.h>
34#include <asm/export.h>
35 34
36ENTRY(__get_user_1) 35ENTRY(__get_user_1)
37 check_uaccess r0, 1, r1, r2, __get_user_bad 36 check_uaccess r0, 1, r1, r2, __get_user_bad
@@ -39,7 +38,6 @@ ENTRY(__get_user_1)
39 mov r0, #0 38 mov r0, #0
40 ret lr 39 ret lr
41ENDPROC(__get_user_1) 40ENDPROC(__get_user_1)
42EXPORT_SYMBOL(__get_user_1)
43 41
44ENTRY(__get_user_2) 42ENTRY(__get_user_2)
45 check_uaccess r0, 2, r1, r2, __get_user_bad 43 check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -60,7 +58,6 @@ rb .req r0
60 mov r0, #0 58 mov r0, #0
61 ret lr 59 ret lr
62ENDPROC(__get_user_2) 60ENDPROC(__get_user_2)
63EXPORT_SYMBOL(__get_user_2)
64 61
65ENTRY(__get_user_4) 62ENTRY(__get_user_4)
66 check_uaccess r0, 4, r1, r2, __get_user_bad 63 check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -68,7 +65,6 @@ ENTRY(__get_user_4)
68 mov r0, #0 65 mov r0, #0
69 ret lr 66 ret lr
70ENDPROC(__get_user_4) 67ENDPROC(__get_user_4)
71EXPORT_SYMBOL(__get_user_4)
72 68
73ENTRY(__get_user_8) 69ENTRY(__get_user_8)
74 check_uaccess r0, 8, r1, r2, __get_user_bad 70 check_uaccess r0, 8, r1, r2, __get_user_bad
@@ -82,7 +78,6 @@ ENTRY(__get_user_8)
82 mov r0, #0 78 mov r0, #0
83 ret lr 79 ret lr
84ENDPROC(__get_user_8) 80ENDPROC(__get_user_8)
85EXPORT_SYMBOL(__get_user_8)
86 81
87#ifdef __ARMEB__ 82#ifdef __ARMEB__
88ENTRY(__get_user_32t_8) 83ENTRY(__get_user_32t_8)
@@ -96,7 +91,6 @@ ENTRY(__get_user_32t_8)
96 mov r0, #0 91 mov r0, #0
97 ret lr 92 ret lr
98ENDPROC(__get_user_32t_8) 93ENDPROC(__get_user_32t_8)
99EXPORT_SYMBOL(__get_user_32t_8)
100 94
101ENTRY(__get_user_64t_1) 95ENTRY(__get_user_64t_1)
102 check_uaccess r0, 1, r1, r2, __get_user_bad8 96 check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -104,7 +98,6 @@ ENTRY(__get_user_64t_1)
104 mov r0, #0 98 mov r0, #0
105 ret lr 99 ret lr
106ENDPROC(__get_user_64t_1) 100ENDPROC(__get_user_64t_1)
107EXPORT_SYMBOL(__get_user_64t_1)
108 101
109ENTRY(__get_user_64t_2) 102ENTRY(__get_user_64t_2)
110 check_uaccess r0, 2, r1, r2, __get_user_bad8 103 check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -121,7 +114,6 @@ rb .req r0
121 mov r0, #0 114 mov r0, #0
122 ret lr 115 ret lr
123ENDPROC(__get_user_64t_2) 116ENDPROC(__get_user_64t_2)
124EXPORT_SYMBOL(__get_user_64t_2)
125 117
126ENTRY(__get_user_64t_4) 118ENTRY(__get_user_64t_4)
127 check_uaccess r0, 4, r1, r2, __get_user_bad8 119 check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -129,7 +121,6 @@ ENTRY(__get_user_64t_4)
129 mov r0, #0 121 mov r0, #0
130 ret lr 122 ret lr
131ENDPROC(__get_user_64t_4) 123ENDPROC(__get_user_64t_4)
132EXPORT_SYMBOL(__get_user_64t_4)
133#endif 124#endif
134 125
135__get_user_bad8: 126__get_user_bad8:
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index 3dff7a3a2aef..c31b2f3153f1 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14.Linsb_align: rsb ip, ip, #4 13.Linsb_align: rsb ip, ip, #4
15 cmp ip, r2 14 cmp ip, r2
@@ -122,4 +121,3 @@ ENTRY(__raw_readsb)
122 121
123 ldmfd sp!, {r4 - r6, pc} 122 ldmfd sp!, {r4 - r6, pc}
124ENDPROC(__raw_readsb) 123ENDPROC(__raw_readsb)
125EXPORT_SYMBOL(__raw_readsb)
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index bfd39682325b..2ed86fa5465f 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14ENTRY(__raw_readsl) 13ENTRY(__raw_readsl)
15 teq r2, #0 @ do we have to check for the zero len? 14 teq r2, #0 @ do we have to check for the zero len?
@@ -78,4 +77,3 @@ ENTRY(__raw_readsl)
78 strb r3, [r1, #0] 77 strb r3, [r1, #0]
79 ret lr 78 ret lr
80ENDPROC(__raw_readsl) 79ENDPROC(__raw_readsl)
81EXPORT_SYMBOL(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index b3af3db6caac..413da9914529 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14.Linsw_bad_alignment: 13.Linsw_bad_alignment:
15 adr r0, .Linsw_bad_align_msg 14 adr r0, .Linsw_bad_align_msg
@@ -104,4 +103,4 @@ ENTRY(__raw_readsw)
104 103
105 ldmfd sp!, {r4, r5, r6, pc} 104 ldmfd sp!, {r4, r5, r6, pc}
106 105
107EXPORT_SYMBOL(__raw_readsw) 106
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index 3c7a7a40b33e..d9a45e9692ae 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14 .macro pack, rd, hw1, hw2 13 .macro pack, rd, hw1, hw2
15#ifndef __ARMEB__ 14#ifndef __ARMEB__
@@ -130,4 +129,3 @@ ENTRY(__raw_readsw)
130 strneb ip, [r1] 129 strneb ip, [r1]
131 ldmfd sp!, {r4, pc} 130 ldmfd sp!, {r4, pc}
132ENDPROC(__raw_readsw) 131ENDPROC(__raw_readsw)
133EXPORT_SYMBOL(__raw_readsw)
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index fa3633594415..a46bbc9b168b 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14 .macro outword, rd 13 .macro outword, rd
15#ifndef __ARMEB__ 14#ifndef __ARMEB__
@@ -93,4 +92,3 @@ ENTRY(__raw_writesb)
93 92
94 ldmfd sp!, {r4, r5, pc} 93 ldmfd sp!, {r4, r5, pc}
95ENDPROC(__raw_writesb) 94ENDPROC(__raw_writesb)
96EXPORT_SYMBOL(__raw_writesb)
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index 98ed6aec0b47..4ea2435988c1 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14ENTRY(__raw_writesl) 13ENTRY(__raw_writesl)
15 teq r2, #0 @ do we have to check for the zero len? 14 teq r2, #0 @ do we have to check for the zero len?
@@ -66,4 +65,3 @@ ENTRY(__raw_writesl)
66 bne 6b 65 bne 6b
67 ret lr 66 ret lr
68ENDPROC(__raw_writesl) 67ENDPROC(__raw_writesl)
69EXPORT_SYMBOL(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 577184c082bb..121789eb6802 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14.Loutsw_bad_alignment: 13.Loutsw_bad_alignment:
15 adr r0, .Loutsw_bad_align_msg 14 adr r0, .Loutsw_bad_align_msg
@@ -125,4 +124,3 @@ ENTRY(__raw_writesw)
125 strne ip, [r0] 124 strne ip, [r0]
126 125
127 ldmfd sp!, {r4, r5, r6, pc} 126 ldmfd sp!, {r4, r5, r6, pc}
128EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index e335f489d1fc..269f90c51ad2 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -9,7 +9,6 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/export.h>
13 12
14 .macro outword, rd 13 .macro outword, rd
15#ifndef __ARMEB__ 14#ifndef __ARMEB__
@@ -99,4 +98,3 @@ ENTRY(__raw_writesw)
99 strneh ip, [r0] 98 strneh ip, [r0]
100 ret lr 99 ret lr
101ENDPROC(__raw_writesw) 100ENDPROC(__raw_writesw)
102EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index f541bc013bff..9397b2e532af 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -36,7 +36,6 @@ Boston, MA 02111-1307, USA. */
36#include <linux/linkage.h> 36#include <linux/linkage.h>
37#include <asm/assembler.h> 37#include <asm/assembler.h>
38#include <asm/unwind.h> 38#include <asm/unwind.h>
39#include <asm/export.h>
40 39
41.macro ARM_DIV_BODY dividend, divisor, result, curbit 40.macro ARM_DIV_BODY dividend, divisor, result, curbit
42 41
@@ -239,8 +238,6 @@ UNWIND(.fnstart)
239UNWIND(.fnend) 238UNWIND(.fnend)
240ENDPROC(__udivsi3) 239ENDPROC(__udivsi3)
241ENDPROC(__aeabi_uidiv) 240ENDPROC(__aeabi_uidiv)
242EXPORT_SYMBOL(__udivsi3)
243EXPORT_SYMBOL(__aeabi_uidiv)
244 241
245ENTRY(__umodsi3) 242ENTRY(__umodsi3)
246UNWIND(.fnstart) 243UNWIND(.fnstart)
@@ -259,7 +256,6 @@ UNWIND(.fnstart)
259 256
260UNWIND(.fnend) 257UNWIND(.fnend)
261ENDPROC(__umodsi3) 258ENDPROC(__umodsi3)
262EXPORT_SYMBOL(__umodsi3)
263 259
264#ifdef CONFIG_ARM_PATCH_IDIV 260#ifdef CONFIG_ARM_PATCH_IDIV
265 .align 3 261 .align 3
@@ -307,8 +303,6 @@ UNWIND(.fnstart)
307UNWIND(.fnend) 303UNWIND(.fnend)
308ENDPROC(__divsi3) 304ENDPROC(__divsi3)
309ENDPROC(__aeabi_idiv) 305ENDPROC(__aeabi_idiv)
310EXPORT_SYMBOL(__divsi3)
311EXPORT_SYMBOL(__aeabi_idiv)
312 306
313ENTRY(__modsi3) 307ENTRY(__modsi3)
314UNWIND(.fnstart) 308UNWIND(.fnstart)
@@ -333,7 +327,6 @@ UNWIND(.fnstart)
333 327
334UNWIND(.fnend) 328UNWIND(.fnend)
335ENDPROC(__modsi3) 329ENDPROC(__modsi3)
336EXPORT_SYMBOL(__modsi3)
337 330
338#ifdef CONFIG_AEABI 331#ifdef CONFIG_AEABI
339 332
@@ -350,7 +343,6 @@ UNWIND(.save {r0, r1, ip, lr} )
350 343
351UNWIND(.fnend) 344UNWIND(.fnend)
352ENDPROC(__aeabi_uidivmod) 345ENDPROC(__aeabi_uidivmod)
353EXPORT_SYMBOL(__aeabi_uidivmod)
354 346
355ENTRY(__aeabi_idivmod) 347ENTRY(__aeabi_idivmod)
356UNWIND(.fnstart) 348UNWIND(.fnstart)
@@ -364,7 +356,6 @@ UNWIND(.save {r0, r1, ip, lr} )
364 356
365UNWIND(.fnend) 357UNWIND(.fnend)
366ENDPROC(__aeabi_idivmod) 358ENDPROC(__aeabi_idivmod)
367EXPORT_SYMBOL(__aeabi_idivmod)
368 359
369#endif 360#endif
370 361
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index e40833981417..922dcd88b02b 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/assembler.h> 30#include <asm/assembler.h>
31#include <asm/export.h>
32 31
33#ifdef __ARMEB__ 32#ifdef __ARMEB__
34#define al r1 33#define al r1
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsr)
53 52
54ENDPROC(__lshrdi3) 53ENDPROC(__lshrdi3)
55ENDPROC(__aeabi_llsr) 54ENDPROC(__aeabi_llsr)
56EXPORT_SYMBOL(__lshrdi3)
57EXPORT_SYMBOL(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 44182bf686a5..74a5bed6d999 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -11,7 +11,6 @@
11 */ 11 */
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/export.h>
15 14
16 .text 15 .text
17 .align 5 16 .align 5
@@ -25,4 +24,3 @@ ENTRY(memchr)
252: movne r0, #0 242: movne r0, #0
26 ret lr 25 ret lr
27ENDPROC(memchr) 26ENDPROC(memchr)
28EXPORT_SYMBOL(memchr)
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 1be5b6ddf37c..64111bd4440b 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -13,7 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h> 15#include <asm/unwind.h>
16#include <asm/export.h>
17 16
18#define LDR1W_SHIFT 0 17#define LDR1W_SHIFT 0
19#define STR1W_SHIFT 0 18#define STR1W_SHIFT 0
@@ -69,5 +68,3 @@ ENTRY(memcpy)
69 68
70ENDPROC(memcpy) 69ENDPROC(memcpy)
71ENDPROC(mmiocpy) 70ENDPROC(mmiocpy)
72EXPORT_SYMBOL(memcpy)
73EXPORT_SYMBOL(mmiocpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 71dcc5400d02..69a9d47fc5ab 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -13,7 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h> 15#include <asm/unwind.h>
16#include <asm/export.h>
17 16
18 .text 17 .text
19 18
@@ -226,4 +225,3 @@ ENTRY(memmove)
22618: backward_copy_shift push=24 pull=8 22518: backward_copy_shift push=24 pull=8
227 226
228ENDPROC(memmove) 227ENDPROC(memmove)
229EXPORT_SYMBOL(memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 7b72044cba62..3c65e3bd790f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -12,7 +12,6 @@
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/unwind.h> 14#include <asm/unwind.h>
15#include <asm/export.h>
16 15
17 .text 16 .text
18 .align 5 17 .align 5
@@ -136,5 +135,3 @@ UNWIND( .fnstart )
136UNWIND( .fnend ) 135UNWIND( .fnend )
137ENDPROC(memset) 136ENDPROC(memset)
138ENDPROC(mmioset) 137ENDPROC(mmioset)
139EXPORT_SYMBOL(memset)
140EXPORT_SYMBOL(mmioset)
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 6dec26ed5bcc..0eded952e089 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -10,7 +10,6 @@
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/unwind.h> 12#include <asm/unwind.h>
13#include <asm/export.h>
14 13
15 .text 14 .text
16 .align 5 15 .align 5
@@ -136,4 +135,3 @@ UNWIND( .fnstart )
136 ret lr @ 1 135 ret lr @ 1
137UNWIND( .fnend ) 136UNWIND( .fnend )
138ENDPROC(__memzero) 137ENDPROC(__memzero)
139EXPORT_SYMBOL(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index b8f12388ccac..204305956925 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -12,7 +12,6 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/export.h>
16 15
17#ifdef __ARMEB__ 16#ifdef __ARMEB__
18#define xh r0 17#define xh r0
@@ -47,5 +46,3 @@ ENTRY(__aeabi_lmul)
47 46
48ENDPROC(__muldi3) 47ENDPROC(__muldi3)
49ENDPROC(__aeabi_lmul) 48ENDPROC(__aeabi_lmul)
50EXPORT_SYMBOL(__muldi3)
51EXPORT_SYMBOL(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 11de126e2ed6..38d660d3705f 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -31,7 +31,6 @@
31#include <asm/assembler.h> 31#include <asm/assembler.h>
32#include <asm/errno.h> 32#include <asm/errno.h>
33#include <asm/domain.h> 33#include <asm/domain.h>
34#include <asm/export.h>
35 34
36ENTRY(__put_user_1) 35ENTRY(__put_user_1)
37 check_uaccess r0, 1, r1, ip, __put_user_bad 36 check_uaccess r0, 1, r1, ip, __put_user_bad
@@ -39,7 +38,6 @@ ENTRY(__put_user_1)
39 mov r0, #0 38 mov r0, #0
40 ret lr 39 ret lr
41ENDPROC(__put_user_1) 40ENDPROC(__put_user_1)
42EXPORT_SYMBOL(__put_user_1)
43 41
44ENTRY(__put_user_2) 42ENTRY(__put_user_2)
45 check_uaccess r0, 2, r1, ip, __put_user_bad 43 check_uaccess r0, 2, r1, ip, __put_user_bad
@@ -64,7 +62,6 @@ ENTRY(__put_user_2)
64 mov r0, #0 62 mov r0, #0
65 ret lr 63 ret lr
66ENDPROC(__put_user_2) 64ENDPROC(__put_user_2)
67EXPORT_SYMBOL(__put_user_2)
68 65
69ENTRY(__put_user_4) 66ENTRY(__put_user_4)
70 check_uaccess r0, 4, r1, ip, __put_user_bad 67 check_uaccess r0, 4, r1, ip, __put_user_bad
@@ -72,7 +69,6 @@ ENTRY(__put_user_4)
72 mov r0, #0 69 mov r0, #0
73 ret lr 70 ret lr
74ENDPROC(__put_user_4) 71ENDPROC(__put_user_4)
75EXPORT_SYMBOL(__put_user_4)
76 72
77ENTRY(__put_user_8) 73ENTRY(__put_user_8)
78 check_uaccess r0, 8, r1, ip, __put_user_bad 74 check_uaccess r0, 8, r1, ip, __put_user_bad
@@ -86,7 +82,6 @@ ENTRY(__put_user_8)
86 mov r0, #0 82 mov r0, #0
87 ret lr 83 ret lr
88ENDPROC(__put_user_8) 84ENDPROC(__put_user_8)
89EXPORT_SYMBOL(__put_user_8)
90 85
91__put_user_bad: 86__put_user_bad:
92 mov r0, #-EFAULT 87 mov r0, #-EFAULT
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 7301f6e6046c..013d64c71e8d 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -11,7 +11,6 @@
11 */ 11 */
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/export.h>
15 14
16 .text 15 .text
17 .align 5 16 .align 5
@@ -26,4 +25,3 @@ ENTRY(strchr)
26 subeq r0, r0, #1 25 subeq r0, r0, #1
27 ret lr 26 ret lr
28ENDPROC(strchr) 27ENDPROC(strchr)
29EXPORT_SYMBOL(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index aaf9fd98b754..3cec1c7482c4 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -11,7 +11,6 @@
11 */ 11 */
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/export.h>
15 14
16 .text 15 .text
17 .align 5 16 .align 5
@@ -25,4 +24,3 @@ ENTRY(strrchr)
25 mov r0, r3 24 mov r0, r3
26 ret lr 25 ret lr
27ENDPROC(strrchr) 26ENDPROC(strrchr)
28EXPORT_SYMBOL(strrchr)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 1626e3a551a1..6bd1089b07e0 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -19,7 +19,6 @@
19#include <linux/gfp.h> 19#include <linux/gfp.h>
20#include <linux/highmem.h> 20#include <linux/highmem.h>
21#include <linux/hugetlb.h> 21#include <linux/hugetlb.h>
22#include <linux/export.h>
23#include <asm/current.h> 22#include <asm/current.h>
24#include <asm/page.h> 23#include <asm/page.h>
25 24
@@ -157,7 +156,6 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
157 } 156 }
158 return n; 157 return n;
159} 158}
160EXPORT_SYMBOL(arm_copy_to_user);
161 159
162static unsigned long noinline 160static unsigned long noinline
163__clear_user_memset(void __user *addr, unsigned long n) 161__clear_user_memset(void __user *addr, unsigned long n)
@@ -215,7 +213,6 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n)
215 } 213 }
216 return n; 214 return n;
217} 215}
218EXPORT_SYMBOL(arm_clear_user);
219 216
220#if 0 217#if 0
221 218
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index 127a91af46f3..ad4a6309141a 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -12,7 +12,6 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/export.h>
16 15
17#ifdef __ARMEB__ 16#ifdef __ARMEB__
18#define xh r0 17#define xh r0
@@ -36,7 +35,6 @@ ENTRY(__ucmpdi2)
36 ret lr 35 ret lr
37 36
38ENDPROC(__ucmpdi2) 37ENDPROC(__ucmpdi2)
39EXPORT_SYMBOL(__ucmpdi2)
40 38
41#ifdef CONFIG_AEABI 39#ifdef CONFIG_AEABI
42 40
@@ -50,7 +48,6 @@ ENTRY(__aeabi_ulcmp)
50 ret lr 48 ret lr
51 49
52ENDPROC(__aeabi_ulcmp) 50ENDPROC(__aeabi_ulcmp)
53EXPORT_SYMBOL(__aeabi_ulcmp)
54 51
55#endif 52#endif
56 53
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 737450fe790c..cab128913e72 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -32,6 +32,7 @@ endif
32 32
33ifdef CONFIG_SND_IMX_SOC 33ifdef CONFIG_SND_IMX_SOC
34obj-y += ssi-fiq.o 34obj-y += ssi-fiq.o
35obj-y += ssi-fiq-ksym.o
35endif 36endif
36 37
37# i.MX21 based machines 38# i.MX21 based machines
diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c
new file mode 100644
index 000000000000..792090f9a032
--- /dev/null
+++ b/arch/arm/mach-imx/ssi-fiq-ksym.c
@@ -0,0 +1,20 @@
1/*
2 * Exported ksyms for the SSI FIQ handler
3 *
4 * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12
13#include <linux/platform_data/asoc-imx-ssi.h>
14
15EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
16EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
17EXPORT_SYMBOL(imx_ssi_fiq_start);
18EXPORT_SYMBOL(imx_ssi_fiq_end);
19EXPORT_SYMBOL(imx_ssi_fiq_base);
20
diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S
index fd7917f1c204..a8b93c5f29b5 100644
--- a/arch/arm/mach-imx/ssi-fiq.S
+++ b/arch/arm/mach-imx/ssi-fiq.S
@@ -8,7 +8,6 @@
8 8
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <asm/assembler.h> 10#include <asm/assembler.h>
11#include <asm/export.h>
12 11
13/* 12/*
14 * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size 13 * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size
@@ -145,8 +144,4 @@ imx_ssi_fiq_tx_buffer:
145 .word 0x0 144 .word 0x0
146.L_imx_ssi_fiq_end: 145.L_imx_ssi_fiq_end:
147imx_ssi_fiq_end: 146imx_ssi_fiq_end:
148EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer) 147
149EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
150EXPORT_SYMBOL(imx_ssi_fiq_start)
151EXPORT_SYMBOL(imx_ssi_fiq_end)
152EXPORT_SYMBOL(imx_ssi_fiq_base)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index a9afeebd59f2..0465338183c7 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -71,6 +71,7 @@ config SOC_AM43XX
71 select HAVE_ARM_TWD 71 select HAVE_ARM_TWD
72 select ARM_ERRATA_754322 72 select ARM_ERRATA_754322
73 select ARM_ERRATA_775420 73 select ARM_ERRATA_775420
74 select OMAP_INTERCONNECT
74 75
75config SOC_DRA7XX 76config SOC_DRA7XX
76 bool "TI DRA7XX" 77 bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 2abd53ae3e7a..cc6d9fa60924 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void)
205 205
206#define OMAP3_SHOW_FEATURE(feat) \ 206#define OMAP3_SHOW_FEATURE(feat) \
207 if (omap3_has_ ##feat()) \ 207 if (omap3_has_ ##feat()) \
208 printk(#feat" "); 208 n += scnprintf(buf + n, sizeof(buf) - n, #feat " ");
209 209
210static void __init omap3_cpuinfo(void) 210static void __init omap3_cpuinfo(void)
211{ 211{
212 const char *cpu_name; 212 const char *cpu_name;
213 char buf[64];
214 int n = 0;
215
216 memset(buf, 0, sizeof(buf));
213 217
214 /* 218 /*
215 * OMAP3430 and OMAP3530 are assumed to be same. 219 * OMAP3430 and OMAP3530 are assumed to be same.
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void)
241 cpu_name = "OMAP3503"; 245 cpu_name = "OMAP3503";
242 } 246 }
243 247
244 sprintf(soc_name, "%s", cpu_name); 248 scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name);
245 249
246 /* Print verbose information */ 250 /* Print verbose information */
247 pr_info("%s %s (", soc_name, soc_rev); 251 n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev);
248 252
249 OMAP3_SHOW_FEATURE(l2cache); 253 OMAP3_SHOW_FEATURE(l2cache);
250 OMAP3_SHOW_FEATURE(iva); 254 OMAP3_SHOW_FEATURE(iva);
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void)
252 OMAP3_SHOW_FEATURE(neon); 256 OMAP3_SHOW_FEATURE(neon);
253 OMAP3_SHOW_FEATURE(isp); 257 OMAP3_SHOW_FEATURE(isp);
254 OMAP3_SHOW_FEATURE(192mhz_clk); 258 OMAP3_SHOW_FEATURE(192mhz_clk);
255 259 if (*(buf + n - 1) == ' ')
256 printk(")\n"); 260 n--;
261 n += scnprintf(buf + n, sizeof(buf) - n, ")\n");
262 pr_info("%s", buf);
257} 263}
258 264
259#define OMAP3_CHECK_FEATURE(status,feat) \ 265#define OMAP3_CHECK_FEATURE(status,feat) \
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c
index 62680aad2126..718981bb80cd 100644
--- a/arch/arm/mach-omap2/prm3xxx.c
+++ b/arch/arm/mach-omap2/prm3xxx.c
@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva)
319 if (has_uart4) { 319 if (has_uart4) {
320 en_uart4_mask = OMAP3630_EN_UART4_MASK; 320 en_uart4_mask = OMAP3630_EN_UART4_MASK;
321 grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK; 321 grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK;
322 } else {
323 en_uart4_mask = 0;
324 grpsel_uart4_mask = 0;
322 } 325 }
323 326
324 /* Enable wakeups in PER */ 327 /* Enable wakeups in PER */
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index cba8cada8c81..cd15dbd62671 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm,
87 return -ENODATA; 87 return -ENODATA;
88 } 88 }
89 89
90 if (!voltdm->volt_data) {
91 pr_err("%s: No voltage data defined for vdd_%s\n",
92 __func__, voltdm->name);
93 return -ENODATA;
94 }
95
90 /* Adjust voltage to the exact voltage from the OPP table */ 96 /* Adjust voltage to the exact voltage from the OPP table */
91 for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { 97 for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
92 if (voltdm->volt_data[i].volt_nominal >= target_volt) { 98 if (voltdm->volt_data[i].volt_nominal >= target_volt) {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ab4f74536057..ab7710002ba6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void)
1167 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 1167 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
1168 return 0; 1168 return 0;
1169} 1169}
1170fs_initcall(dma_debug_do_init); 1170core_initcall(dma_debug_do_init);
1171 1171
1172#ifdef CONFIG_ARM_DMA_USE_IOMMU 1172#ifdef CONFIG_ARM_DMA_USE_IOMMU
1173 1173
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index f6d333f09bfe..8dea61640cc1 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin)
96 ret lr 96 ret lr
97ENDPROC(cpu_cm7_proc_fin) 97ENDPROC(cpu_cm7_proc_fin)
98 98
99 .section ".text.init", #alloc, #execinstr 99 .section ".init.text", #alloc, #execinstr
100 100
101__v7m_cm7_setup: 101__v7m_cm7_setup:
102 mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP) 102 mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP)
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 334271a25f70..7d3a2acc6a55 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -393,7 +393,7 @@
393 #address-cells = <3>; 393 #address-cells = <3>;
394 #size-cells = <2>; 394 #size-cells = <2>;
395 dma-coherent; 395 dma-coherent;
396 ranges = <0x01000000 0x00 0x5f800000 0x00 0x5f800000 0x0 0x00800000>, 396 ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>,
397 <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>, 397 <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>,
398 <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; 398 <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
399 #interrupt-cells = <1>; 399 #interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 123a58b29cbd..f0b857d6d73c 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -76,7 +76,7 @@
76 compatible = "arm,idle-state"; 76 compatible = "arm,idle-state";
77 arm,psci-suspend-param = <0x1010000>; 77 arm,psci-suspend-param = <0x1010000>;
78 local-timer-stop; 78 local-timer-stop;
79 entry-latency-us = <300>; 79 entry-latency-us = <400>;
80 exit-latency-us = <1200>; 80 exit-latency-us = <1200>;
81 min-residency-us = <2500>; 81 min-residency-us = <2500>;
82 }; 82 };
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 007be826efce..26aaa6a7670f 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -76,7 +76,7 @@
76 compatible = "arm,idle-state"; 76 compatible = "arm,idle-state";
77 arm,psci-suspend-param = <0x1010000>; 77 arm,psci-suspend-param = <0x1010000>;
78 local-timer-stop; 78 local-timer-stop;
79 entry-latency-us = <300>; 79 entry-latency-us = <400>;
80 exit-latency-us = <1200>; 80 exit-latency-us = <1200>;
81 min-residency-us = <2500>; 81 min-residency-us = <2500>;
82 }; 82 };
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index a7270eff6939..6e154d948a80 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -76,7 +76,7 @@
76 compatible = "arm,idle-state"; 76 compatible = "arm,idle-state";
77 arm,psci-suspend-param = <0x1010000>; 77 arm,psci-suspend-param = <0x1010000>;
78 local-timer-stop; 78 local-timer-stop;
79 entry-latency-us = <300>; 79 entry-latency-us = <400>;
80 exit-latency-us = <1200>; 80 exit-latency-us = <1200>;
81 min-residency-us = <2500>; 81 min-residency-us = <2500>;
82 }; 82 };
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index c4762538ec01..e9bd58793464 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -105,7 +105,7 @@
105 status = "disabled"; 105 status = "disabled";
106 }; 106 };
107 107
108 nb_perih_clk: nb-periph-clk@13000{ 108 nb_periph_clk: nb-periph-clk@13000 {
109 compatible = "marvell,armada-3700-periph-clock-nb"; 109 compatible = "marvell,armada-3700-periph-clock-nb";
110 reg = <0x13000 0x100>; 110 reg = <0x13000 0x100>;
111 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, 111 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
@@ -113,7 +113,7 @@
113 #clock-cells = <1>; 113 #clock-cells = <1>;
114 }; 114 };
115 115
116 sb_perih_clk: sb-periph-clk@18000{ 116 sb_periph_clk: sb-periph-clk@18000 {
117 compatible = "marvell,armada-3700-periph-clock-sb"; 117 compatible = "marvell,armada-3700-periph-clock-sb";
118 reg = <0x18000 0x100>; 118 reg = <0x18000 0x100>;
119 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, 119 clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 842fb333285c..6bf9e241179b 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -130,8 +130,8 @@
130 reg = <0x700600 0x50>; 130 reg = <0x700600 0x50>;
131 #address-cells = <0x1>; 131 #address-cells = <0x1>;
132 #size-cells = <0x0>; 132 #size-cells = <0x0>;
133 cell-index = <1>; 133 cell-index = <3>;
134 clocks = <&cps_syscon0 0 3>; 134 clocks = <&cps_syscon0 1 21>;
135 status = "disabled"; 135 status = "disabled";
136 }; 136 };
137 137
@@ -140,7 +140,7 @@
140 reg = <0x700680 0x50>; 140 reg = <0x700680 0x50>;
141 #address-cells = <1>; 141 #address-cells = <1>;
142 #size-cells = <0>; 142 #size-cells = <0>;
143 cell-index = <2>; 143 cell-index = <4>;
144 clocks = <&cps_syscon0 1 21>; 144 clocks = <&cps_syscon0 1 21>;
145 status = "disabled"; 145 status = "disabled";
146 }; 146 };
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 2065f46fa740..38b6a2b49d68 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -46,7 +46,15 @@
46#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ 46#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
47#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ 47#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
48 48
49#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ 49/*
50 * PMUv3 event types: required events
51 */
52#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
53#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
54#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
55#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
56#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
57#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
50 58
51/* 59/*
52 * Event filters for PMUv3 60 * Event filters for PMUv3
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index a9310a69fffd..57ae9d9ed9bb 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -31,17 +31,9 @@
31 31
32/* 32/*
33 * ARMv8 PMUv3 Performance Events handling code. 33 * ARMv8 PMUv3 Performance Events handling code.
34 * Common event types. 34 * Common event types (some are defined in asm/perf_event.h).
35 */ 35 */
36 36
37/* Required events. */
38#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
39#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
40#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
41#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
42#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
43#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
44
45/* At least one of the following is required. */ 37/* At least one of the following is required. */
46#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08 38#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
47#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B 39#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index f302fdb3a030..87e7e6608cd8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
597 597
598 idx = ARMV8_PMU_CYCLE_IDX; 598 idx = ARMV8_PMU_CYCLE_IDX;
599 } else { 599 } else {
600 BUG(); 600 return false;
601 } 601 }
602 } else if (r->CRn == 0 && r->CRm == 9) {
603 /* PMCCNTR */
604 if (pmu_access_event_counter_el0_disabled(vcpu))
605 return false;
606
607 idx = ARMV8_PMU_CYCLE_IDX;
602 } else if (r->CRn == 14 && (r->CRm & 12) == 8) { 608 } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
603 /* PMEVCNTRn_EL0 */ 609 /* PMEVCNTRn_EL0 */
604 if (pmu_access_event_counter_el0_disabled(vcpu)) 610 if (pmu_access_event_counter_el0_disabled(vcpu))
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
606 612
607 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); 613 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
608 } else { 614 } else {
609 BUG(); 615 return false;
610 } 616 }
611 617
612 if (!pmu_counter_idx_valid(vcpu, idx)) 618 if (!pmu_counter_idx_valid(vcpu, idx))
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7dd2dd47909a..df78b2ca70eb 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -215,6 +215,12 @@
215#endif 215#endif
216 216
217/* 217/*
218 * Wired register bits
219 */
220#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16)
221#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0)
222
223/*
218 * Values used for computation of new tlb entries 224 * Values used for computation of new tlb entries
219 */ 225 */
220#define PL_4K 12 226#define PL_4K 12
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index 4a2349302b55..dd179fd8acda 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -1,6 +1,9 @@
1#ifndef __ASM_TLB_H 1#ifndef __ASM_TLB_H
2#define __ASM_TLB_H 2#define __ASM_TLB_H
3 3
4#include <asm/cpu-features.h>
5#include <asm/mipsregs.h>
6
4/* 7/*
5 * MIPS doesn't need any special per-pte or per-vma handling, except 8 * MIPS doesn't need any special per-pte or per-vma handling, except
6 * we need to flush cache for area to be unmapped. 9 * we need to flush cache for area to be unmapped.
@@ -22,6 +25,16 @@
22 ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \ 25 ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
23 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0)) 26 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
24 27
28static inline unsigned int num_wired_entries(void)
29{
30 unsigned int wired = read_c0_wired();
31
32 if (cpu_has_mips_r6)
33 wired &= MIPSR6_WIRED_WIRED;
34
35 return wired;
36}
37
25#include <asm-generic/tlb.h> 38#include <asm-generic/tlb.h>
26 39
27#endif /* __ASM_TLB_H */ 40#endif /* __ASM_TLB_H */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index d56a855828c2..3bef306cdfdb 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -209,17 +209,18 @@ bad_area_nosemaphore:
209 if (show_unhandled_signals && 209 if (show_unhandled_signals &&
210 unhandled_signal(tsk, SIGSEGV) && 210 unhandled_signal(tsk, SIGSEGV) &&
211 __ratelimit(&ratelimit_state)) { 211 __ratelimit(&ratelimit_state)) {
212 pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", 212 pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
213 tsk->comm, 213 tsk->comm,
214 write ? "write access to" : "read access from", 214 write ? "write access to" : "read access from",
215 field, address); 215 field, address);
216 pr_info("epc = %0*lx in", field, 216 pr_info("epc = %0*lx in", field,
217 (unsigned long) regs->cp0_epc); 217 (unsigned long) regs->cp0_epc);
218 print_vma_addr(" ", regs->cp0_epc); 218 print_vma_addr(KERN_CONT " ", regs->cp0_epc);
219 pr_cont("\n");
219 pr_info("ra = %0*lx in", field, 220 pr_info("ra = %0*lx in", field,
220 (unsigned long) regs->regs[31]); 221 (unsigned long) regs->regs[31]);
221 print_vma_addr(" ", regs->regs[31]); 222 print_vma_addr(KERN_CONT " ", regs->regs[31]);
222 pr_info("\n"); 223 pr_cont("\n");
223 } 224 }
224 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; 225 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
225 info.si_signo = SIGSEGV; 226 info.si_signo = SIGSEGV;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3a6edecc3f38..e86ebcf5c071 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -118,7 +118,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
118 writex_c0_entrylo1(entrylo); 118 writex_c0_entrylo1(entrylo);
119 } 119 }
120#endif 120#endif
121 tlbidx = read_c0_wired(); 121 tlbidx = num_wired_entries();
122 write_c0_wired(tlbidx + 1); 122 write_c0_wired(tlbidx + 1);
123 write_c0_index(tlbidx); 123 write_c0_index(tlbidx);
124 mtc0_tlbw_hazard(); 124 mtc0_tlbw_hazard();
@@ -147,7 +147,7 @@ void kunmap_coherent(void)
147 147
148 local_irq_save(flags); 148 local_irq_save(flags);
149 old_ctx = read_c0_entryhi(); 149 old_ctx = read_c0_entryhi();
150 wired = read_c0_wired() - 1; 150 wired = num_wired_entries() - 1;
151 write_c0_wired(wired); 151 write_c0_wired(wired);
152 write_c0_index(wired); 152 write_c0_index(wired);
153 write_c0_entryhi(UNIQUE_ENTRYHI(wired)); 153 write_c0_entryhi(UNIQUE_ENTRYHI(wired));
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bba9c1484b41..0596505770db 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -65,7 +65,7 @@ void local_flush_tlb_all(void)
65 write_c0_entrylo0(0); 65 write_c0_entrylo0(0);
66 write_c0_entrylo1(0); 66 write_c0_entrylo1(0);
67 67
68 entry = read_c0_wired(); 68 entry = num_wired_entries();
69 69
70 /* 70 /*
71 * Blast 'em all away. 71 * Blast 'em all away.
@@ -385,7 +385,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
385 old_ctx = read_c0_entryhi(); 385 old_ctx = read_c0_entryhi();
386 htw_stop(); 386 htw_stop();
387 old_pagemask = read_c0_pagemask(); 387 old_pagemask = read_c0_pagemask();
388 wired = read_c0_wired(); 388 wired = num_wired_entries();
389 write_c0_wired(wired + 1); 389 write_c0_wired(wired + 1);
390 write_c0_index(wired); 390 write_c0_index(wired);
391 tlbw_use_hazard(); /* What is the hazard here? */ 391 tlbw_use_hazard(); /* What is the hazard here? */
@@ -449,7 +449,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
449 htw_stop(); 449 htw_stop();
450 old_ctx = read_c0_entryhi(); 450 old_ctx = read_c0_entryhi();
451 old_pagemask = read_c0_pagemask(); 451 old_pagemask = read_c0_pagemask();
452 wired = read_c0_wired(); 452 wired = num_wired_entries();
453 if (--temp_tlb_entry < wired) { 453 if (--temp_tlb_entry < wired) {
454 printk(KERN_WARNING 454 printk(KERN_WARNING
455 "No TLB space left for add_temporary_entry\n"); 455 "No TLB space left for add_temporary_entry\n");
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 71c4a3aa3752..a14b86587013 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -34,7 +34,9 @@ config PARISC
34 select HAVE_ARCH_HASH 34 select HAVE_ARCH_HASH
35 select HAVE_ARCH_SECCOMP_FILTER 35 select HAVE_ARCH_SECCOMP_FILTER
36 select HAVE_ARCH_TRACEHOOK 36 select HAVE_ARCH_TRACEHOOK
37 select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT) 37 select GENERIC_SCHED_CLOCK
38 select HAVE_UNSTABLE_SCHED_CLOCK if SMP
39 select GENERIC_CLOCKEVENTS
38 select ARCH_NO_COHERENT_DMA_MMAP 40 select ARCH_NO_COHERENT_DMA_MMAP
39 select CPU_NO_EFFICIENT_FFS 41 select CPU_NO_EFFICIENT_FFS
40 42
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 629eb464d5ba..c263301648f3 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void)
369{ 369{
370 unsigned long rangetime, alltime; 370 unsigned long rangetime, alltime;
371 unsigned long size, start; 371 unsigned long size, start;
372 unsigned long threshold;
372 373
373 alltime = mfctl(16); 374 alltime = mfctl(16);
374 flush_data_cache(); 375 flush_data_cache();
@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void)
382 printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", 383 printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
383 alltime, size, rangetime); 384 alltime, size, rangetime);
384 385
385 /* Racy, but if we see an intermediate value, it's ok too... */ 386 threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
386 parisc_cache_flush_threshold = size * alltime / rangetime; 387 if (threshold > cache_info.dc_size)
387 388 threshold = cache_info.dc_size;
388 parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); 389 if (threshold)
389 if (!parisc_cache_flush_threshold) 390 parisc_cache_flush_threshold = threshold;
390 parisc_cache_flush_threshold = FLUSH_THRESHOLD; 391 printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
391
392 if (parisc_cache_flush_threshold > cache_info.dc_size)
393 parisc_cache_flush_threshold = cache_info.dc_size;
394
395 printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
396 parisc_cache_flush_threshold/1024); 392 parisc_cache_flush_threshold/1024);
397 393
398 /* calculate TLB flush threshold */ 394 /* calculate TLB flush threshold */
@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void)
401 flush_tlb_all(); 397 flush_tlb_all();
402 alltime = mfctl(16) - alltime; 398 alltime = mfctl(16) - alltime;
403 399
404 size = PAGE_SIZE; 400 size = 0;
405 start = (unsigned long) _text; 401 start = (unsigned long) _text;
406 rangetime = mfctl(16); 402 rangetime = mfctl(16);
407 while (start < (unsigned long) _end) { 403 while (start < (unsigned long) _end) {
@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void)
414 printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", 410 printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
415 alltime, size, rangetime); 411 alltime, size, rangetime);
416 412
417 parisc_tlb_flush_threshold = size * alltime / rangetime; 413 threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
418 parisc_tlb_flush_threshold *= num_online_cpus(); 414 if (threshold)
419 parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); 415 parisc_tlb_flush_threshold = threshold;
420 if (!parisc_tlb_flush_threshold) 416 printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
421 parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
422
423 printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
424 parisc_tlb_flush_threshold/1024); 417 parisc_tlb_flush_threshold/1024);
425} 418}
426 419
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 545f9d2fe711..c05d1876d27c 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -58,7 +58,7 @@ void __init setup_pdc(void)
58 status = pdc_system_map_find_mods(&module_result, &module_path, 0); 58 status = pdc_system_map_find_mods(&module_result, &module_path, 0);
59 if (status == PDC_OK) { 59 if (status == PDC_OK) {
60 pdc_type = PDC_TYPE_SYSTEM_MAP; 60 pdc_type = PDC_TYPE_SYSTEM_MAP;
61 printk("System Map.\n"); 61 pr_cont("System Map.\n");
62 return; 62 return;
63 } 63 }
64 64
@@ -77,7 +77,7 @@ void __init setup_pdc(void)
77 status = pdc_pat_cell_get_number(&cell_info); 77 status = pdc_pat_cell_get_number(&cell_info);
78 if (status == PDC_OK) { 78 if (status == PDC_OK) {
79 pdc_type = PDC_TYPE_PAT; 79 pdc_type = PDC_TYPE_PAT;
80 printk("64 bit PAT.\n"); 80 pr_cont("64 bit PAT.\n");
81 return; 81 return;
82 } 82 }
83#endif 83#endif
@@ -97,12 +97,12 @@ void __init setup_pdc(void)
97 case 0xC: /* 715/64, at least */ 97 case 0xC: /* 715/64, at least */
98 98
99 pdc_type = PDC_TYPE_SNAKE; 99 pdc_type = PDC_TYPE_SNAKE;
100 printk("Snake.\n"); 100 pr_cont("Snake.\n");
101 return; 101 return;
102 102
103 default: /* Everything else */ 103 default: /* Everything else */
104 104
105 printk("Unsupported.\n"); 105 pr_cont("Unsupported.\n");
106 panic("If this is a 64-bit machine, please try a 64-bit kernel.\n"); 106 panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
107 } 107 }
108} 108}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 985e06da37f5..1b39a2acaadf 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */
96 96
97fitmanymiddle: /* Loop if LOOP >= 2 */ 97fitmanymiddle: /* Loop if LOOP >= 2 */
98 addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ 98 addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
99 pitlbe 0(%sr1, %r28) 99 pitlbe %r0(%sr1, %r28)
100 pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ 100 pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
101 addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ 101 addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */
102 copy %arg3, %r31 /* Re-init inner loop count */ 102 copy %arg3, %r31 /* Re-init inner loop count */
@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */
139 139
140fdtmanymiddle: /* Loop if LOOP >= 2 */ 140fdtmanymiddle: /* Loop if LOOP >= 2 */
141 addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ 141 addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
142 pdtlbe 0(%sr1, %r28) 142 pdtlbe %r0(%sr1, %r28)
143 pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ 143 pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
144 addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ 144 addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */
145 copy %arg3, %r31 /* Re-init inner loop count */ 145 copy %arg3, %r31 /* Re-init inner loop count */
@@ -626,12 +626,12 @@ ENTRY_CFI(copy_user_page_asm)
626 /* Purge any old translations */ 626 /* Purge any old translations */
627 627
628#ifdef CONFIG_PA20 628#ifdef CONFIG_PA20
629 pdtlb,l 0(%r28) 629 pdtlb,l %r0(%r28)
630 pdtlb,l 0(%r29) 630 pdtlb,l %r0(%r29)
631#else 631#else
632 tlb_lock %r20,%r21,%r22 632 tlb_lock %r20,%r21,%r22
633 pdtlb 0(%r28) 633 pdtlb %r0(%r28)
634 pdtlb 0(%r29) 634 pdtlb %r0(%r29)
635 tlb_unlock %r20,%r21,%r22 635 tlb_unlock %r20,%r21,%r22
636#endif 636#endif
637 637
@@ -774,10 +774,10 @@ ENTRY_CFI(clear_user_page_asm)
774 /* Purge any old translation */ 774 /* Purge any old translation */
775 775
776#ifdef CONFIG_PA20 776#ifdef CONFIG_PA20
777 pdtlb,l 0(%r28) 777 pdtlb,l %r0(%r28)
778#else 778#else
779 tlb_lock %r20,%r21,%r22 779 tlb_lock %r20,%r21,%r22
780 pdtlb 0(%r28) 780 pdtlb %r0(%r28)
781 tlb_unlock %r20,%r21,%r22 781 tlb_unlock %r20,%r21,%r22
782#endif 782#endif
783 783
@@ -858,10 +858,10 @@ ENTRY_CFI(flush_dcache_page_asm)
858 /* Purge any old translation */ 858 /* Purge any old translation */
859 859
860#ifdef CONFIG_PA20 860#ifdef CONFIG_PA20
861 pdtlb,l 0(%r28) 861 pdtlb,l %r0(%r28)
862#else 862#else
863 tlb_lock %r20,%r21,%r22 863 tlb_lock %r20,%r21,%r22
864 pdtlb 0(%r28) 864 pdtlb %r0(%r28)
865 tlb_unlock %r20,%r21,%r22 865 tlb_unlock %r20,%r21,%r22
866#endif 866#endif
867 867
@@ -898,10 +898,10 @@ ENTRY_CFI(flush_dcache_page_asm)
898 sync 898 sync
899 899
900#ifdef CONFIG_PA20 900#ifdef CONFIG_PA20
901 pdtlb,l 0(%r25) 901 pdtlb,l %r0(%r25)
902#else 902#else
903 tlb_lock %r20,%r21,%r22 903 tlb_lock %r20,%r21,%r22
904 pdtlb 0(%r25) 904 pdtlb %r0(%r25)
905 tlb_unlock %r20,%r21,%r22 905 tlb_unlock %r20,%r21,%r22
906#endif 906#endif
907 907
@@ -931,13 +931,18 @@ ENTRY_CFI(flush_icache_page_asm)
931 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ 931 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
932#endif 932#endif
933 933
934 /* Purge any old translation */ 934 /* Purge any old translation. Note that the FIC instruction
935 * may use either the instruction or data TLB. Given that we
936 * have a flat address space, it's not clear which TLB will be
937 * used. So, we purge both entries. */
935 938
936#ifdef CONFIG_PA20 939#ifdef CONFIG_PA20
940 pdtlb,l %r0(%r28)
937 pitlb,l %r0(%sr4,%r28) 941 pitlb,l %r0(%sr4,%r28)
938#else 942#else
939 tlb_lock %r20,%r21,%r22 943 tlb_lock %r20,%r21,%r22
940 pitlb (%sr4,%r28) 944 pdtlb %r0(%r28)
945 pitlb %r0(%sr4,%r28)
941 tlb_unlock %r20,%r21,%r22 946 tlb_unlock %r20,%r21,%r22
942#endif 947#endif
943 948
@@ -976,10 +981,12 @@ ENTRY_CFI(flush_icache_page_asm)
976 sync 981 sync
977 982
978#ifdef CONFIG_PA20 983#ifdef CONFIG_PA20
984 pdtlb,l %r0(%r28)
979 pitlb,l %r0(%sr4,%r25) 985 pitlb,l %r0(%sr4,%r25)
980#else 986#else
981 tlb_lock %r20,%r21,%r22 987 tlb_lock %r20,%r21,%r22
982 pitlb (%sr4,%r25) 988 pdtlb %r0(%r28)
989 pitlb %r0(%sr4,%r25)
983 tlb_unlock %r20,%r21,%r22 990 tlb_unlock %r20,%r21,%r22
984#endif 991#endif
985 992
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 02d9ed0f3949..494ff6e8c88a 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte,
95 95
96 if (!pte_none(*pte)) 96 if (!pte_none(*pte))
97 printk(KERN_ERR "map_pte_uncached: page already exists\n"); 97 printk(KERN_ERR "map_pte_uncached: page already exists\n");
98 set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
99 purge_tlb_start(flags); 98 purge_tlb_start(flags);
99 set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
100 pdtlb_kernel(orig_vaddr); 100 pdtlb_kernel(orig_vaddr);
101 purge_tlb_end(flags); 101 purge_tlb_end(flags);
102 vaddr += PAGE_SIZE; 102 vaddr += PAGE_SIZE;
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 81d6f6391944..2e66a887788e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -334,6 +334,10 @@ static int __init parisc_init(void)
334 /* tell PDC we're Linux. Nevermind failure. */ 334 /* tell PDC we're Linux. Nevermind failure. */
335 pdc_stable_write(0x40, &osid, sizeof(osid)); 335 pdc_stable_write(0x40, &osid, sizeof(osid));
336 336
337 /* start with known state */
338 flush_cache_all_local();
339 flush_tlb_all_local(NULL);
340
337 processor_init(); 341 processor_init();
338#ifdef CONFIG_SMP 342#ifdef CONFIG_SMP
339 pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", 343 pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9b63b876a13a..325f30d82b64 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/rtc.h> 15#include <linux/rtc.h>
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include <linux/sched_clock.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/param.h> 19#include <linux/param.h>
19#include <linux/string.h> 20#include <linux/string.h>
@@ -39,18 +40,6 @@
39 40
40static unsigned long clocktick __read_mostly; /* timer cycles per tick */ 41static unsigned long clocktick __read_mostly; /* timer cycles per tick */
41 42
42#ifndef CONFIG_64BIT
43/*
44 * The processor-internal cycle counter (Control Register 16) is used as time
45 * source for the sched_clock() function. This register is 64bit wide on a
46 * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always
47 * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits
48 * with a per-cpu variable which we increase every time the counter
49 * wraps-around (which happens every ~4 secounds).
50 */
51static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
52#endif
53
54/* 43/*
55 * We keep time on PA-RISC Linux by using the Interval Timer which is 44 * We keep time on PA-RISC Linux by using the Interval Timer which is
56 * a pair of registers; one is read-only and one is write-only; both 45 * a pair of registers; one is read-only and one is write-only; both
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
121 */ 110 */
122 mtctl(next_tick, 16); 111 mtctl(next_tick, 16);
123 112
124#if !defined(CONFIG_64BIT)
125 /* check for overflow on a 32bit kernel (every ~4 seconds). */
126 if (unlikely(next_tick < now))
127 this_cpu_inc(cr16_high_32_bits);
128#endif
129
130 /* Skip one clocktick on purpose if we missed next_tick. 113 /* Skip one clocktick on purpose if we missed next_tick.
131 * The new CR16 must be "later" than current CR16 otherwise 114 * The new CR16 must be "later" than current CR16 otherwise
132 * itimer would not fire until CR16 wrapped - e.g 4 seconds 115 * itimer would not fire until CR16 wrapped - e.g 4 seconds
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc);
208 191
209/* clock source code */ 192/* clock source code */
210 193
211static cycle_t read_cr16(struct clocksource *cs) 194static cycle_t notrace read_cr16(struct clocksource *cs)
212{ 195{
213 return get_cycles(); 196 return get_cycles();
214} 197}
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts)
287} 270}
288 271
289 272
290/* 273static u64 notrace read_cr16_sched_clock(void)
291 * sched_clock() framework
292 */
293
294static u32 cyc2ns_mul __read_mostly;
295static u32 cyc2ns_shift __read_mostly;
296
297u64 sched_clock(void)
298{ 274{
299 u64 now; 275 return get_cycles();
300
301 /* Get current cycle counter (Control Register 16). */
302#ifdef CONFIG_64BIT
303 now = mfctl(16);
304#else
305 now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
306#endif
307
308 /* return the value in ns (cycles_2_ns) */
309 return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
310} 276}
311 277
312 278
@@ -316,17 +282,16 @@ u64 sched_clock(void)
316 282
317void __init time_init(void) 283void __init time_init(void)
318{ 284{
319 unsigned long current_cr16_khz; 285 unsigned long cr16_hz;
320 286
321 current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */
322 clocktick = (100 * PAGE0->mem_10msec) / HZ; 287 clocktick = (100 * PAGE0->mem_10msec) / HZ;
323
324 /* calculate mult/shift values for cr16 */
325 clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
326 NSEC_PER_MSEC, 0);
327
328 start_cpu_itimer(); /* get CPU 0 started */ 288 start_cpu_itimer(); /* get CPU 0 started */
329 289
290 cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
291
330 /* register at clocksource framework */ 292 /* register at clocksource framework */
331 clocksource_register_khz(&clocksource_cr16, current_cr16_khz); 293 clocksource_register_hz(&clocksource_cr16, cr16_hz);
294
295 /* register as sched_clock source */
296 sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
332} 297}
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index 57d42d129033..78aaf4ffd7ab 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -232,8 +232,12 @@ void start(void)
232 console_ops.close(); 232 console_ops.close();
233 233
234 kentry = (kernel_entry_t) vmlinux.addr; 234 kentry = (kernel_entry_t) vmlinux.addr;
235 if (ft_addr) 235 if (ft_addr) {
236 kentry(ft_addr, 0, NULL); 236 if(platform_ops.kentry)
237 platform_ops.kentry(ft_addr, vmlinux.addr);
238 else
239 kentry(ft_addr, 0, NULL);
240 }
237 else 241 else
238 kentry((unsigned long)initrd.addr, initrd.size, 242 kentry((unsigned long)initrd.addr, initrd.size,
239 loader_info.promptr); 243 loader_info.promptr);
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
index ff2f1b97bc53..2a99fc9a3ccf 100644
--- a/arch/powerpc/boot/opal-calls.S
+++ b/arch/powerpc/boot/opal-calls.S
@@ -12,6 +12,19 @@
12 12
13 .text 13 .text
14 14
15 .globl opal_kentry
16opal_kentry:
17 /* r3 is the fdt ptr */
18 mtctr r4
19 li r4, 0
20 li r5, 0
21 li r6, 0
22 li r7, 0
23 ld r11,opal@got(r2)
24 ld r8,0(r11)
25 ld r9,8(r11)
26 bctr
27
15#define OPAL_CALL(name, token) \ 28#define OPAL_CALL(name, token) \
16 .globl name; \ 29 .globl name; \
17name: \ 30name: \
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
index 1f37e1c1d6d8..d7b4fd47eb44 100644
--- a/arch/powerpc/boot/opal.c
+++ b/arch/powerpc/boot/opal.c
@@ -23,14 +23,25 @@ struct opal {
23 23
24static u32 opal_con_id; 24static u32 opal_con_id;
25 25
26/* see opal-wrappers.S */
26int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer); 27int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer);
27int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer); 28int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer);
28int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length); 29int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length);
29int64_t opal_console_flush(uint64_t term_number); 30int64_t opal_console_flush(uint64_t term_number);
30int64_t opal_poll_events(uint64_t *outstanding_event_mask); 31int64_t opal_poll_events(uint64_t *outstanding_event_mask);
31 32
33void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr);
34
32static int opal_con_open(void) 35static int opal_con_open(void)
33{ 36{
37 /*
38 * When OPAL loads the boot kernel it stashes the OPAL base and entry
39 * address in r8 and r9 so the kernel can use the OPAL console
40 * before unflattening the devicetree. While executing the wrapper will
41 * probably trash r8 and r9 so this kentry hook restores them before
42 * entering the decompressed kernel.
43 */
44 platform_ops.kentry = opal_kentry;
34 return 0; 45 return 0;
35} 46}
36 47
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 309d1b127e96..fad1862f4b2d 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -30,6 +30,7 @@ struct platform_ops {
30 void * (*realloc)(void *ptr, unsigned long size); 30 void * (*realloc)(void *ptr, unsigned long size);
31 void (*exit)(void); 31 void (*exit)(void);
32 void * (*vmlinux_alloc)(unsigned long size); 32 void * (*vmlinux_alloc)(unsigned long size);
33 void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr);
33}; 34};
34extern struct platform_ops platform_ops; 35extern struct platform_ops platform_ops;
35 36
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index d1492736d852..e0baba1535e6 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -14,6 +14,10 @@
14 14
15#include <linux/threads.h> 15#include <linux/threads.h>
16#include <linux/kprobes.h> 16#include <linux/kprobes.h>
17#include <asm/cacheflush.h>
18#include <asm/checksum.h>
19#include <asm/uaccess.h>
20#include <asm/epapr_hcalls.h>
17 21
18#include <uapi/asm/ucontext.h> 22#include <uapi/asm/ucontext.h>
19 23
@@ -109,4 +113,12 @@ void early_setup_secondary(void);
109/* time */ 113/* time */
110void accumulate_stolen_time(void); 114void accumulate_stolen_time(void);
111 115
116/* misc runtime */
117extern u64 __bswapdi2(u64);
118extern s64 __lshrdi3(s64, int);
119extern s64 __ashldi3(s64, int);
120extern s64 __ashrdi3(s64, int);
121extern int __cmpdi2(s64, s64);
122extern int __ucmpdi2(u64, u64);
123
112#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ 124#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 84d49b197c32..9a3eee661297 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -91,7 +91,7 @@
91 */ 91 */
92#define LOAD_HANDLER(reg, label) \ 92#define LOAD_HANDLER(reg, label) \
93 ld reg,PACAKBASE(r13); /* get high part of &label */ \ 93 ld reg,PACAKBASE(r13); /* get high part of &label */ \
94 ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; 94 ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label);
95 95
96#define __LOAD_HANDLER(reg, label) \ 96#define __LOAD_HANDLER(reg, label) \
97 ld reg,PACAKBASE(r13); \ 97 ld reg,PACAKBASE(r13); \
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943) \
158 std ra,offset(r13); \ 158 std ra,offset(r13); \
159END_FTR_SECTION_NESTED(ftr,ftr,943) 159END_FTR_SECTION_NESTED(ftr,ftr,943)
160 160
161#define EXCEPTION_PROLOG_0(area) \ 161#define EXCEPTION_PROLOG_0_PACA(area) \
162 GET_PACA(r13); \
163 std r9,area+EX_R9(r13); /* save r9 */ \ 162 std r9,area+EX_R9(r13); /* save r9 */ \
164 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ 163 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
165 HMT_MEDIUM; \ 164 HMT_MEDIUM; \
166 std r10,area+EX_R10(r13); /* save r10 - r12 */ \ 165 std r10,area+EX_R10(r13); /* save r10 - r12 */ \
167 OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) 166 OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
168 167
168#define EXCEPTION_PROLOG_0(area) \
169 GET_PACA(r13); \
170 EXCEPTION_PROLOG_0_PACA(area)
171
169#define __EXCEPTION_PROLOG_1(area, extra, vec) \ 172#define __EXCEPTION_PROLOG_1(area, extra, vec) \
170 OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ 173 OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
171 OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ 174 OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
196 EXCEPTION_PROLOG_1(area, extra, vec); \ 199 EXCEPTION_PROLOG_1(area, extra, vec); \
197 EXCEPTION_PROLOG_PSERIES_1(label, h); 200 EXCEPTION_PROLOG_PSERIES_1(label, h);
198 201
202/* Have the PACA in r13 already */
203#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \
204 EXCEPTION_PROLOG_0_PACA(area); \
205 EXCEPTION_PROLOG_1(area, extra, vec); \
206 EXCEPTION_PROLOG_PSERIES_1(label, h);
207
199#define __KVMTEST(h, n) \ 208#define __KVMTEST(h, n) \
200 lbz r10,HSTATE_IN_GUEST(r13); \ 209 lbz r10,HSTATE_IN_GUEST(r13); \
201 cmpwi r10,0; \ 210 cmpwi r10,0; \
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e88368354e49..e311c25751a4 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -29,6 +29,12 @@
29 */ 29 */
30 30
31/* 31/*
32 * Kernel read only support.
33 * We added the ppp value 0b110 in ISA 2.04.
34 */
35#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000)
36
37/*
32 * We need to clear top 16bits of va (from the remaining 64 bits )in 38 * We need to clear top 16bits of va (from the remaining 64 bits )in
33 * tlbie* instructions 39 * tlbie* instructions
34 */ 40 */
@@ -103,10 +109,10 @@
103#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 109#define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2
104#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA 110#define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
105#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 111#define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
106#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 112#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
107#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 113#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
108#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 114#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
109#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE 115#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO
110#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ 116#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
111 MMU_FTR_CI_LARGE_PAGE 117 MMU_FTR_CI_LARGE_PAGE
112#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ 118#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 0132831b3081..c56ea8c84abb 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -460,5 +460,6 @@
460 460
461#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \ 461#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \
462 ((IH & 0x7) << 21)) 462 ((IH & 0x7) << 21))
463#define PPC_INVALIDATE_ERAT PPC_SLBIA(7)
463 464
464#endif /* _ASM_POWERPC_PPC_OPCODE_H */ 465#endif /* _ASM_POWERPC_PPC_OPCODE_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9cd4e8cbc78c..9e1499f98def 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -355,6 +355,7 @@
355#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */ 355#define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */
356#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */ 356#define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */
357#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */ 357#define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */
358#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */
358#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */ 359#define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */
359#define LPCR_MER_SH 11 360#define LPCR_MER_SH 11
360#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */ 361#define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 52ff3f025437..37c027ca83b2 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9)
98 li r0,0 98 li r0,0
99 mtspr SPRN_LPID,r0 99 mtspr SPRN_LPID,r0
100 mfspr r3,SPRN_LPCR 100 mfspr r3,SPRN_LPCR
101 ori r3, r3, LPCR_PECEDH 101 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
102 ori r3, r3, LPCR_HVICE 102 or r3, r3, r4
103 bl __init_LPCR 103 bl __init_LPCR
104 bl __init_HFSCR 104 bl __init_HFSCR
105 bl __init_tlb_power9 105 bl __init_tlb_power9
@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9)
118 li r0,0 118 li r0,0
119 mtspr SPRN_LPID,r0 119 mtspr SPRN_LPID,r0
120 mfspr r3,SPRN_LPCR 120 mfspr r3,SPRN_LPCR
121 ori r3, r3, LPCR_PECEDH 121 LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
122 ori r3, r3, LPCR_HVICE 122 or r3, r3, r4
123 bl __init_LPCR 123 bl __init_LPCR
124 bl __init_HFSCR 124 bl __init_HFSCR
125 bl __init_tlb_power9 125 bl __init_tlb_power9
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 08ba447a4b3d..1ba82ea90230 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100)
116 116
117EXC_REAL_BEGIN(system_reset, 0x100, 0x200) 117EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
118 SET_SCRATCH0(r13) 118 SET_SCRATCH0(r13)
119 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, 119 GET_PACA(r13)
120 clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
121 EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
120 IDLETEST, 0x100) 122 IDLETEST, 0x100)
121 123
122EXC_REAL_END(system_reset, 0x100, 0x200) 124EXC_REAL_END(system_reset, 0x100, 0x200)
@@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200)
124 126
125#ifdef CONFIG_PPC_P7_NAP 127#ifdef CONFIG_PPC_P7_NAP
126EXC_COMMON_BEGIN(system_reset_idle_common) 128EXC_COMMON_BEGIN(system_reset_idle_common)
129BEGIN_FTR_SECTION
130 GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
131END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
127 bl pnv_restore_hyp_resource 132 bl pnv_restore_hyp_resource
128 133
129 li r0,PNV_THREAD_RUNNING 134 li r0,PNV_THREAD_RUNNING
@@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
169 SET_SCRATCH0(r13) /* save r13 */ 174 SET_SCRATCH0(r13) /* save r13 */
170 /* 175 /*
171 * Running native on arch 2.06 or later, we may wakeup from winkle 176 * Running native on arch 2.06 or later, we may wakeup from winkle
172 * inside machine check. If yes, then last bit of HSPGR0 would be set 177 * inside machine check. If yes, then last bit of HSPRG0 would be set
173 * to 1. Hence clear it unconditionally. 178 * to 1. Hence clear it unconditionally.
174 */ 179 */
175 GET_PACA(r13) 180 GET_PACA(r13)
@@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
388 /* 393 /*
389 * Go back to winkle. Please note that this thread was woken up in 394 * Go back to winkle. Please note that this thread was woken up in
390 * machine check from winkle and have not restored the per-subcore 395 * machine check from winkle and have not restored the per-subcore
391 * state. Hence before going back to winkle, set last bit of HSPGR0 396 * state. Hence before going back to winkle, set last bit of HSPRG0
392 * to 1. This will make sure that if this thread gets woken up 397 * to 1. This will make sure that if this thread gets woken up
393 * again at reset vector 0x100 then it will get chance to restore 398 * again at reset vector 0x100 then it will get chance to restore
394 * the subcore state. 399 * the subcore state.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ce6dc61b15b2..49a680d5ae37 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs)
1215 int instr; 1215 int instr;
1216 1216
1217 if (!(i % 8)) 1217 if (!(i % 8))
1218 printk("\n"); 1218 pr_cont("\n");
1219 1219
1220#if !defined(CONFIG_BOOKE) 1220#if !defined(CONFIG_BOOKE)
1221 /* If executing with the IMMU off, adjust pc rather 1221 /* If executing with the IMMU off, adjust pc rather
@@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs)
1227 1227
1228 if (!__kernel_text_address(pc) || 1228 if (!__kernel_text_address(pc) ||
1229 probe_kernel_address((unsigned int __user *)pc, instr)) { 1229 probe_kernel_address((unsigned int __user *)pc, instr)) {
1230 printk(KERN_CONT "XXXXXXXX "); 1230 pr_cont("XXXXXXXX ");
1231 } else { 1231 } else {
1232 if (regs->nip == pc) 1232 if (regs->nip == pc)
1233 printk(KERN_CONT "<%08x> ", instr); 1233 pr_cont("<%08x> ", instr);
1234 else 1234 else
1235 printk(KERN_CONT "%08x ", instr); 1235 pr_cont("%08x ", instr);
1236 } 1236 }
1237 1237
1238 pc += sizeof(int); 1238 pc += sizeof(int);
1239 } 1239 }
1240 1240
1241 printk("\n"); 1241 pr_cont("\n");
1242} 1242}
1243 1243
1244struct regbit { 1244struct regbit {
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1282 1282
1283 for (; bits->bit; ++bits) 1283 for (; bits->bit; ++bits)
1284 if (val & bits->bit) { 1284 if (val & bits->bit) {
1285 printk("%s%s", s, bits->name); 1285 pr_cont("%s%s", s, bits->name);
1286 s = sep; 1286 s = sep;
1287 } 1287 }
1288} 1288}
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val)
1305 * T: Transactional (bit 34) 1305 * T: Transactional (bit 34)
1306 */ 1306 */
1307 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { 1307 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1308 printk(",TM["); 1308 pr_cont(",TM[");
1309 print_bits(val, msr_tm_bits, ""); 1309 print_bits(val, msr_tm_bits, "");
1310 printk("]"); 1310 pr_cont("]");
1311 } 1311 }
1312} 1312}
1313#else 1313#else
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {}
1316 1316
1317static void print_msr_bits(unsigned long val) 1317static void print_msr_bits(unsigned long val)
1318{ 1318{
1319 printk("<"); 1319 pr_cont("<");
1320 print_bits(val, msr_bits, ","); 1320 print_bits(val, msr_bits, ",");
1321 print_tm_bits(val); 1321 print_tm_bits(val);
1322 printk(">"); 1322 pr_cont(">");
1323} 1323}
1324 1324
1325#ifdef CONFIG_PPC64 1325#ifdef CONFIG_PPC64
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs)
1347 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 1347 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1348 trap = TRAP(regs); 1348 trap = TRAP(regs);
1349 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 1349 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1350 printk("CFAR: "REG" ", regs->orig_gpr3); 1350 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1351 if (trap == 0x200 || trap == 0x300 || trap == 0x600) 1351 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1352#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) 1352#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1353 printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); 1353 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1354#else 1354#else
1355 printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); 1355 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1356#endif 1356#endif
1357#ifdef CONFIG_PPC64 1357#ifdef CONFIG_PPC64
1358 printk("SOFTE: %ld ", regs->softe); 1358 pr_cont("SOFTE: %ld ", regs->softe);
1359#endif 1359#endif
1360#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1360#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1361 if (MSR_TM_ACTIVE(regs->msr)) 1361 if (MSR_TM_ACTIVE(regs->msr))
1362 printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); 1362 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1363#endif 1363#endif
1364 1364
1365 for (i = 0; i < 32; i++) { 1365 for (i = 0; i < 32; i++) {
1366 if ((i % REGS_PER_LINE) == 0) 1366 if ((i % REGS_PER_LINE) == 0)
1367 printk("\nGPR%02d: ", i); 1367 pr_cont("\nGPR%02d: ", i);
1368 printk(REG " ", regs->gpr[i]); 1368 pr_cont(REG " ", regs->gpr[i]);
1369 if (i == LAST_VOLATILE && !FULL_REGS(regs)) 1369 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1370 break; 1370 break;
1371 } 1371 }
1372 printk("\n"); 1372 pr_cont("\n");
1373#ifdef CONFIG_KALLSYMS 1373#ifdef CONFIG_KALLSYMS
1374 /* 1374 /*
1375 * Lookup NIP late so we have the best change of getting the 1375 * Lookup NIP late so we have the best change of getting the
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
1900 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); 1900 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1901#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1901#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1902 if ((ip == rth) && curr_frame >= 0) { 1902 if ((ip == rth) && curr_frame >= 0) {
1903 printk(" (%pS)", 1903 pr_cont(" (%pS)",
1904 (void *)current->ret_stack[curr_frame].ret); 1904 (void *)current->ret_stack[curr_frame].ret);
1905 curr_frame--; 1905 curr_frame--;
1906 } 1906 }
1907#endif 1907#endif
1908 if (firstframe) 1908 if (firstframe)
1909 printk(" (unreliable)"); 1909 pr_cont(" (unreliable)");
1910 printk("\n"); 1910 pr_cont("\n");
1911 } 1911 }
1912 firstframe = 0; 1912 firstframe = 0;
1913 1913
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7ac8e6eaab5b..8d586cff8a41 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -226,17 +226,25 @@ static void __init configure_exceptions(void)
226 if (firmware_has_feature(FW_FEATURE_OPAL)) 226 if (firmware_has_feature(FW_FEATURE_OPAL))
227 opal_configure_cores(); 227 opal_configure_cores();
228 228
229 /* Enable AIL if supported, and we are in hypervisor mode */ 229 /* AIL on native is done in cpu_ready_for_interrupts() */
230 if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
231 early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
232 unsigned long lpcr = mfspr(SPRN_LPCR);
233 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
234 }
235 } 230 }
236} 231}
237 232
238static void cpu_ready_for_interrupts(void) 233static void cpu_ready_for_interrupts(void)
239{ 234{
235 /*
236 * Enable AIL if supported, and we are in hypervisor mode. This
237 * is called once for every processor.
238 *
239 * If we are not in hypervisor mode the job is done once for
240 * the whole partition in configure_exceptions().
241 */
242 if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
243 early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
244 unsigned long lpcr = mfspr(SPRN_LPCR);
245 mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
246 }
247
240 /* Set IR and DR in PACA MSR */ 248 /* Set IR and DR in PACA MSR */
241 get_paca()->kernel_msr = MSR_KERNEL; 249 get_paca()->kernel_msr = MSR_KERNEL;
242} 250}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 44d3c3a38e3e..78dabf065ba9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -193,8 +193,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
193 /* 193 /*
194 * Kernel read only mapped with ppp bits 0b110 194 * Kernel read only mapped with ppp bits 0b110
195 */ 195 */
196 if (!(pteflags & _PAGE_WRITE)) 196 if (!(pteflags & _PAGE_WRITE)) {
197 rflags |= (HPTE_R_PP0 | 0x2); 197 if (mmu_has_feature(MMU_FTR_KERNEL_RO))
198 rflags |= (HPTE_R_PP0 | 0x2);
199 else
200 rflags |= 0x3;
201 }
198 } else { 202 } else {
199 if (pteflags & _PAGE_RWX) 203 if (pteflags & _PAGE_RWX)
200 rflags |= 0x2; 204 rflags |= 0x2;
@@ -1029,6 +1033,10 @@ void hash__early_init_mmu_secondary(void)
1029{ 1033{
1030 /* Initialize hash table for that CPU */ 1034 /* Initialize hash table for that CPU */
1031 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 1035 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
1036
1037 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
1038 update_hid_for_hash();
1039
1032 if (!cpu_has_feature(CPU_FTR_ARCH_300)) 1040 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1033 mtspr(SPRN_SDR1, _SDR1); 1041 mtspr(SPRN_SDR1, _SDR1);
1034 else 1042 else
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index ed7bddc456b7..688b54517655 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void)
388 * update partition table control register and UPRT 388 * update partition table control register and UPRT
389 */ 389 */
390 if (!firmware_has_feature(FW_FEATURE_LPAR)) { 390 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
391
392 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
393 update_hid_for_radix();
394
391 lpcr = mfspr(SPRN_LPCR); 395 lpcr = mfspr(SPRN_LPCR);
392 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); 396 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
393 397
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index bda8c43be78a..3493cf4e0452 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { 50 for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
51 __tlbiel_pid(pid, set, ric); 51 __tlbiel_pid(pid, set, ric);
52 } 52 }
53 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
54 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
53 return; 55 return;
54} 56}
55 57
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
83 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) 85 asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
84 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); 86 : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
85 asm volatile("ptesync": : :"memory"); 87 asm volatile("ptesync": : :"memory");
88 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
89 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
86} 90}
87 91
88static inline void _tlbie_va(unsigned long va, unsigned long pid, 92static inline void _tlbie_va(unsigned long va, unsigned long pid,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b23c76b42d6e..165ecdd24d22 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,6 +43,7 @@ config SPARC
43 select ARCH_HAS_SG_CHAIN 43 select ARCH_HAS_SG_CHAIN
44 select CPU_NO_EFFICIENT_FFS 44 select CPU_NO_EFFICIENT_FFS
45 select HAVE_ARCH_HARDENED_USERCOPY 45 select HAVE_ARCH_HARDENED_USERCOPY
46 select PROVE_LOCKING_SMALL if PROVE_LOCKING
46 47
47config SPARC32 48config SPARC32
48 def_bool !64BIT 49 def_bool !64BIT
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG
89config ARCH_PROC_KCORE_TEXT 90config ARCH_PROC_KCORE_TEXT
90 def_bool y 91 def_bool y
91 92
93config ARCH_ATU
94 bool
95 default y if SPARC64
96
97config ARCH_DMA_ADDR_T_64BIT
98 bool
99 default y if ARCH_ATU
100
92config IOMMU_HELPER 101config IOMMU_HELPER
93 bool 102 bool
94 default y if SPARC64 103 default y if SPARC64
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE
304config ARCH_SPARSEMEM_DEFAULT 313config ARCH_SPARSEMEM_DEFAULT
305 def_bool y if SPARC64 314 def_bool y if SPARC64
306 315
316config FORCE_MAX_ZONEORDER
317 int "Maximum zone order"
318 default "13"
319 help
320 The kernel memory allocator divides physically contiguous memory
321 blocks into "zones", where each zone is a power of two number of
322 pages. This option selects the largest power of two that the kernel
323 keeps in the memory allocator. If you need to allocate very large
324 blocks of physically contiguous memory, then you may need to
325 increase this value.
326
327 This config option is actually maximum order plus one. For example,
328 a value of 13 means that the largest free memory block is 2^12 pages.
329
307source "mm/Kconfig" 330source "mm/Kconfig"
308 331
309if SPARC64 332if SPARC64
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 666d5ba230d2..73cb8978df58 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
2335 */ 2335 */
2336#define HV_FAST_PCI_MSG_SETVALID 0xd3 2336#define HV_FAST_PCI_MSG_SETVALID 0xd3
2337 2337
2338/* PCI IOMMU v2 definitions and services
2339 *
2340 * While the PCI IO definitions above is valid IOMMU v2 adds new PCI IO
2341 * definitions and services.
2342 *
2343 * CTE Clump Table Entry. First level table entry in the ATU.
2344 *
2345 * pci_device_list
2346 * A 32-bit aligned list of pci_devices.
2347 *
2348 * pci_device_listp
2349 * real address of a pci_device_list. 32-bit aligned.
2350 *
2351 * iotte IOMMU translation table entry.
2352 *
2353 * iotte_attributes
2354 * IO Attributes for IOMMU v2 mappings. In addition to
2355 * read, write IOMMU v2 supports relax ordering
2356 *
2357 * io_page_list A 64-bit aligned list of real addresses. Each real
2358 * address in an io_page_list must be properly aligned
2359 * to the pagesize of the given IOTSB.
2360 *
2361 * io_page_list_p Real address of an io_page_list, 64-bit aligned.
2362 *
2363 * IOTSB IO Translation Storage Buffer. An aligned table of
2364 * IOTTEs. Each IOTSB has a pagesize, table size, and
2365 * virtual address associated with it that must match
2366 * a pagesize and table size supported by the un-derlying
2367 * hardware implementation. The alignment requirements
2368 * for an IOTSB depend on the pagesize used for that IOTSB.
2369 * Each IOTTE in an IOTSB maps one pagesize-sized page.
2370 * The size of the IOTSB dictates how large of a virtual
2371 * address space the IOTSB is capable of mapping.
2372 *
2373 * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus
2374 * iotsb_handle represents a binding of an IOTSB to a
2375 * PCI root complex.
2376 *
2377 * iotsb_index Zero-based IOTTE number within an IOTSB.
2378 */
2379
2380/* The index_count argument consists of two fields:
2381 * bits 63:48 #iottes and bits 47:0 iotsb_index
2382 */
2383#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
2384 (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
2385
2386/* pci_iotsb_conf()
2387 * TRAP: HV_FAST_TRAP
2388 * FUNCTION: HV_FAST_PCI_IOTSB_CONF
2389 * ARG0: devhandle
2390 * ARG1: r_addr
2391 * ARG2: size
2392 * ARG3: pagesize
2393 * ARG4: iova
2394 * RET0: status
2395 * RET1: iotsb_handle
2396 * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize
2397 * EBADALIGN r_addr is not properly aligned
2398 * ENORADDR r_addr is not a valid real address
2399 * ETOOMANY No further IOTSBs may be configured
2400 * EBUSY Duplicate devhandle, raddir, iova combination
2401 *
2402 * Create an IOTSB suitable for the PCI root complex identified by devhandle,
2403 * for the DMA virtual address defined by the argument iova.
2404 *
2405 * r_addr is the properly aligned base address of the IOTSB and size is the
2406 * IOTSB (table) size in bytes.The IOTSB is required to be zeroed prior to
2407 * being configured. If it contains any values other than zeros then the
2408 * behavior is undefined.
2409 *
2410 * pagesize is the size of each page in the IOTSB. Note that the combination of
2411 * size (table size) and pagesize must be valid.
2412 *
2413 * virt is the DMA virtual address this IOTSB will map.
2414 *
2415 * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
2416 * Once configured, privileged access to the IOTSB memory is prohibited and
2417 * creates undefined behavior. The only permitted access is indirect via these
2418 * services.
2419 */
2420#define HV_FAST_PCI_IOTSB_CONF 0x190
2421
2422/* pci_iotsb_info()
2423 * TRAP: HV_FAST_TRAP
2424 * FUNCTION: HV_FAST_PCI_IOTSB_INFO
2425 * ARG0: devhandle
2426 * ARG1: iotsb_handle
2427 * RET0: status
2428 * RET1: r_addr
2429 * RET2: size
2430 * RET3: pagesize
2431 * RET4: iova
2432 * RET5: #bound
2433 * ERRORS: EINVAL Invalid devhandle or iotsb_handle
2434 *
2435 * This service returns configuration information about an IOTSB previously
2436 * created with pci_iotsb_conf.
2437 *
2438 * iotsb_handle value 0 may be used with this service to inquire about the
2439 * legacy IOTSB that may or may not exist. If the service succeeds, the return
2440 * values describe the legacy IOTSB and I/O virtual addresses mapped by that
2441 * table. However, the table base address r_addr may contain the value -1 which
2442 * indicates a memory range that cannot be accessed or be reclaimed.
2443 *
2444 * The return value #bound contains the number of PCI devices that iotsb_handle
2445 * is currently bound to.
2446 */
2447#define HV_FAST_PCI_IOTSB_INFO 0x191
2448
2449/* pci_iotsb_unconf()
2450 * TRAP: HV_FAST_TRAP
2451 * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF
2452 * ARG0: devhandle
2453 * ARG1: iotsb_handle
2454 * RET0: status
2455 * ERRORS: EINVAL Invalid devhandle or iotsb_handle
2456 * EBUSY The IOTSB is bound and may not be unconfigured
2457 *
2458 * This service unconfigures the IOTSB identified by the devhandle and
2459 * iotsb_handle arguments, previously created with pci_iotsb_conf.
2460 * The IOTSB must not be currently bound to any device or the service will fail
2461 *
2462 * If the call succeeds, iotsb_handle is no longer valid.
2463 */
2464#define HV_FAST_PCI_IOTSB_UNCONF 0x192
2465
2466/* pci_iotsb_bind()
2467 * TRAP: HV_FAST_TRAP
2468 * FUNCTION: HV_FAST_PCI_IOTSB_BIND
2469 * ARG0: devhandle
2470 * ARG1: iotsb_handle
2471 * ARG2: pci_device
2472 * RET0: status
2473 * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
2474 * EBUSY A PCI function is already bound to an IOTSB at the same
2475 * address range as specified by devhandle, iotsb_handle.
2476 *
2477 * This service binds the PCI function specified by the argument pci_device to
2478 * the IOTSB specified by the arguments devhandle and iotsb_handle.
2479 *
2480 * The PCI device function is bound to the specified IOTSB with the IOVA range
2481 * specified when the IOTSB was configured via pci_iotsb_conf. If the function
2482 * is already bound then it is unbound first.
2483 */
2484#define HV_FAST_PCI_IOTSB_BIND 0x193
2485
2486/* pci_iotsb_unbind()
2487 * TRAP: HV_FAST_TRAP
2488 * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND
2489 * ARG0: devhandle
2490 * ARG1: iotsb_handle
2491 * ARG2: pci_device
2492 * RET0: status
2493 * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
2494 * ENOMAP The PCI function was not bound to the specified IOTSB
2495 *
2496 * This service unbinds the PCI device specified by the argument pci_device
2497 * from the IOTSB identified * by the arguments devhandle and iotsb_handle.
2498 *
2499 * If the PCI device is not bound to the specified IOTSB then this service will
2500 * fail with status ENOMAP
2501 */
2502#define HV_FAST_PCI_IOTSB_UNBIND 0x194
2503
2504/* pci_iotsb_get_binding()
2505 * TRAP: HV_FAST_TRAP
2506 * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING
2507 * ARG0: devhandle
2508 * ARG1: iotsb_handle
2509 * ARG2: iova
2510 * RET0: status
2511 * RET1: iotsb_handle
2512 * ERRORS: EINVAL Invalid devhandle, pci_device, or iova
2513 * ENOMAP The PCI function is not bound to an IOTSB at iova
2514 *
2515 * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
2516 * and DMA virtual address, iova.
2517 *
2518 * iova must be the base address of a DMA virtual address range as defined by
2519 * the iommu-address-ranges property in the root complex device node defined
2520 * by the argument devhandle.
2521 */
2522#define HV_FAST_PCI_IOTSB_GET_BINDING 0x195
2523
2524/* pci_iotsb_map()
2525 * TRAP: HV_FAST_TRAP
2526 * FUNCTION: HV_FAST_PCI_IOTSB_MAP
2527 * ARG0: devhandle
2528 * ARG1: iotsb_handle
2529 * ARG2: index_count
2530 * ARG3: iotte_attributes
2531 * ARG4: io_page_list_p
2532 * RET0: status
2533 * RET1: #mapped
2534 * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes,
2535 * iotsb_index or iotte_attributes
2536 * EBADALIGN Improperly aligned io_page_list_p or I/O page
2537 * address in the I/O page list.
2538 * ENORADDR Invalid io_page_list_p or I/O page address in
2539 * the I/O page list.
2540 *
2541 * This service creates and flushes mappings in the IOTSB defined by the
2542 * arguments devhandle, iotsb.
2543 *
2544 * The index_count argument consists of two fields. Bits 63:48 contain #iotte
2545 * and bits 47:0 contain iotsb_index
2546 *
2547 * The first mapping is created in the IOTSB index specified by iotsb_index.
2548 * Subsequent mappings are created at iotsb_index+1 and so on.
2549 *
2550 * The attributes of each mapping are defined by the argument iotte_attributes.
2551 *
2552 * The io_page_list_p specifies the real address of the 64-bit-aligned list of
2553 * #iottes I/O page addresses. Each page address must be a properly aligned
2554 * real address of a page to be mapped in the IOTSB. The first entry in the I/O
2555 * page list contains the real address of the first page, the 2nd entry for the
2556 * 2nd page, and so on.
2557 *
2558 * #iottes must be greater than zero.
2559 *
2560 * The return value #mapped is the actual number of mappings created, which may
2561 * be less than or equal to the argument #iottes. If the function returns
2562 * successfully with a #mapped value less than the requested #iottes then the
2563 * caller should continue to invoke the service with updated iotsb_index,
2564 * #iottes, and io_page_list_p arguments until all pages are mapped.
2565 *
2566 * This service must not be used to demap a mapping. In other words, all
2567 * mappings must be valid and have one or both of the RW attribute bits set.
2568 *
2569 * Note:
2570 * It is implementation-defined whether I/O page real address validity checking
2571 * is done at time mappings are established or deferred until they are
2572 * accessed.
2573 */
2574#define HV_FAST_PCI_IOTSB_MAP 0x196
2575
2576/* pci_iotsb_map_one()
2577 * TRAP: HV_FAST_TRAP
2578 * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE
2579 * ARG0: devhandle
2580 * ARG1: iotsb_handle
2581 * ARG2: iotsb_index
2582 * ARG3: iotte_attributes
2583 * ARG4: r_addr
2584 * RET0: status
2585 * ERRORS: EINVAL Invalid devhandle,iotsb_handle, iotsb_index
2586 * or iotte_attributes
2587 * EBADALIGN Improperly aligned r_addr
2588 * ENORADDR Invalid r_addr
2589 *
2590 * This service creates and flushes a single mapping in the IOTSB defined by the
2591 * arguments devhandle, iotsb.
2592 *
2593 * The mapping for the page at r_addr is created at the IOTSB index specified by
2594 * iotsb_index with the attributes iotte_attributes.
2595 *
2596 * This service must not be used to demap a mapping. In other words, the mapping
2597 * must be valid and have one or both of the RW attribute bits set.
2598 *
2599 * Note:
2600 * It is implementation-defined whether I/O page real address validity checking
2601 * is done at time mappings are established or deferred until they are
2602 * accessed.
2603 */
2604#define HV_FAST_PCI_IOTSB_MAP_ONE 0x197
2605
2606/* pci_iotsb_demap()
2607 * TRAP: HV_FAST_TRAP
2608 * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP
2609 * ARG0: devhandle
2610 * ARG1: iotsb_handle
2611 * ARG2: iotsb_index
2612 * ARG3: #iottes
2613 * RET0: status
2614 * RET1: #unmapped
2615 * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes
2616 *
2617 * This service unmaps and flushes up to #iottes mappings starting at index
2618 * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb.
2619 *
2620 * #iottes must be greater than zero.
2621 *
2622 * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
2623 * than or equal to the requested number of IOTTEs, #iottes.
2624 *
2625 * If #unmapped is less than #iottes, the caller should continue to invoke this
2626 * service with updated iotsb_index and #iottes arguments until all pages are
2627 * demapped.
2628 */
2629#define HV_FAST_PCI_IOTSB_DEMAP 0x198
2630
2631/* pci_iotsb_getmap()
2632 * TRAP: HV_FAST_TRAP
2633 * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP
2634 * ARG0: devhandle
2635 * ARG1: iotsb_handle
2636 * ARG2: iotsb_index
2637 * RET0: status
2638 * RET1: r_addr
2639 * RET2: iotte_attributes
2640 * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index
2641 * ENOMAP No mapping was found
2642 *
2643 * This service returns the mapping specified by index iotsb_index from the
2644 * IOTSB defined by the arguments devhandle, iotsb.
2645 *
2646 * Upon success, the real address of the mapping shall be returned in
2647 * r_addr and thethe IOTTE mapping attributes shall be returned in
2648 * iotte_attributes.
2649 *
2650 * The return value iotte_attributes may not include optional features used in
2651 * the call to create the mapping.
2652 */
2653#define HV_FAST_PCI_IOTSB_GETMAP 0x199
2654
2655/* pci_iotsb_sync_mappings()
2656 * TRAP: HV_FAST_TRAP
2657 * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
2658 * ARG0: devhandle
2659 * ARG1: iotsb_handle
2660 * ARG2: iotsb_index
2661 * ARG3: #iottes
2662 * RET0: status
2663 * RET1: #synced
2664 * ERROS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
2665 *
2666 * This service synchronizes #iottes mappings starting at index iotsb_index in
2667 * the IOTSB defined by the arguments devhandle, iotsb.
2668 *
2669 * #iottes must be greater than zero.
2670 *
2671 * The actual number of IOTTEs synchronized is returned in #synced, which may
2672 * be less than or equal to the requested number, #iottes.
2673 *
2674 * Upon a successful return, #synced is less than #iottes, the caller should
2675 * continue to invoke this service with updated iotsb_index and #iottes
2676 * arguments until all pages are synchronized.
2677 */
2678#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a
2679
2338/* Logical Domain Channel services. */ 2680/* Logical Domain Channel services. */
2339 2681
2340#define LDC_CHANNEL_DOWN 0 2682#define LDC_CHANNEL_DOWN 0
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
2993#define HV_GRP_SDIO 0x0108 3335#define HV_GRP_SDIO 0x0108
2994#define HV_GRP_SDIO_ERR 0x0109 3336#define HV_GRP_SDIO_ERR 0x0109
2995#define HV_GRP_REBOOT_DATA 0x0110 3337#define HV_GRP_REBOOT_DATA 0x0110
3338#define HV_GRP_ATU 0x0111
2996#define HV_GRP_M7_PERF 0x0114 3339#define HV_GRP_M7_PERF 0x0114
2997#define HV_GRP_NIAG_PERF 0x0200 3340#define HV_GRP_NIAG_PERF 0x0200
2998#define HV_GRP_FIRE_PERF 0x0201 3341#define HV_GRP_FIRE_PERF 0x0201
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h
index cd0d69fa7592..f24f356f2503 100644
--- a/arch/sparc/include/asm/iommu_64.h
+++ b/arch/sparc/include/asm/iommu_64.h
@@ -24,8 +24,36 @@ struct iommu_arena {
24 unsigned int limit; 24 unsigned int limit;
25}; 25};
26 26
27#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
28
29/* Data structures for SPARC ATU architecture */
30struct atu_iotsb {
31 void *table; /* IOTSB table base virtual addr*/
32 u64 ra; /* IOTSB table real addr */
33 u64 dvma_size; /* ranges[3].size or OS slected 32G size */
34 u64 dvma_base; /* ranges[3].base */
35 u64 table_size; /* IOTSB table size */
36 u64 page_size; /* IO PAGE size for IOTSB */
37 u32 iotsb_num; /* tsbnum is same as iotsb_handle */
38};
39
40struct atu_ranges {
41 u64 base;
42 u64 size;
43};
44
45struct atu {
46 struct atu_ranges *ranges;
47 struct atu_iotsb *iotsb;
48 struct iommu_map_table tbl;
49 u64 base;
50 u64 size;
51 u64 dma_addr_mask;
52};
53
27struct iommu { 54struct iommu {
28 struct iommu_map_table tbl; 55 struct iommu_map_table tbl;
56 struct atu *atu;
29 spinlock_t lock; 57 spinlock_t lock;
30 u32 dma_addr_mask; 58 u32 dma_addr_mask;
31 iopte_t *page_table; 59 iopte_t *page_table;
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c
index 662500fa555f..267731234ce8 100644
--- a/arch/sparc/kernel/hvapi.c
+++ b/arch/sparc/kernel/hvapi.c
@@ -39,6 +39,7 @@ static struct api_info api_table[] = {
39 { .group = HV_GRP_SDIO, }, 39 { .group = HV_GRP_SDIO, },
40 { .group = HV_GRP_SDIO_ERR, }, 40 { .group = HV_GRP_SDIO_ERR, },
41 { .group = HV_GRP_REBOOT_DATA, }, 41 { .group = HV_GRP_REBOOT_DATA, },
42 { .group = HV_GRP_ATU, .flags = FLAG_PRE_API },
42 { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, 43 { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
43 { .group = HV_GRP_FIRE_PERF, }, 44 { .group = HV_GRP_FIRE_PERF, },
44 { .group = HV_GRP_N2_CPU, }, 45 { .group = HV_GRP_N2_CPU, },
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 5c615abff030..852a3291db96 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask)
760 struct iommu *iommu = dev->archdata.iommu; 760 struct iommu *iommu = dev->archdata.iommu;
761 u64 dma_addr_mask = iommu->dma_addr_mask; 761 u64 dma_addr_mask = iommu->dma_addr_mask;
762 762
763 if (device_mask >= (1UL << 32UL)) 763 if (device_mask > DMA_BIT_MASK(32)) {
764 return 0; 764 if (iommu->atu)
765 dma_addr_mask = iommu->atu->dma_addr_mask;
766 else
767 return 0;
768 }
765 769
766 if ((device_mask & dma_addr_mask) == dma_addr_mask) 770 if ((device_mask & dma_addr_mask) == dma_addr_mask)
767 return 1; 771 return 1;
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index b40cec252905..828493329f68 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -13,7 +13,6 @@
13#include <linux/scatterlist.h> 13#include <linux/scatterlist.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/iommu-helper.h> 15#include <linux/iommu-helper.h>
16#include <linux/scatterlist.h>
17 16
18#include <asm/iommu.h> 17#include <asm/iommu.h>
19 18
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index db57d8acdc01..06981cc716b6 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = {
44 { .major = 1, .minor = 1 }, 44 { .major = 1, .minor = 1 },
45}; 45};
46 46
47static unsigned long vatu_major = 1;
48static unsigned long vatu_minor = 1;
49
47#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) 50#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
48 51
49struct iommu_batch { 52struct iommu_batch {
@@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
69} 72}
70 73
71/* Interrupts must be disabled. */ 74/* Interrupts must be disabled. */
72static long iommu_batch_flush(struct iommu_batch *p) 75static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
73{ 76{
74 struct pci_pbm_info *pbm = p->dev->archdata.host_controller; 77 struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
78 u64 *pglist = p->pglist;
79 u64 index_count;
75 unsigned long devhandle = pbm->devhandle; 80 unsigned long devhandle = pbm->devhandle;
76 unsigned long prot = p->prot; 81 unsigned long prot = p->prot;
77 unsigned long entry = p->entry; 82 unsigned long entry = p->entry;
78 u64 *pglist = p->pglist;
79 unsigned long npages = p->npages; 83 unsigned long npages = p->npages;
84 unsigned long iotsb_num;
85 unsigned long ret;
86 long num;
80 87
81 /* VPCI maj=1, min=[0,1] only supports read and write */ 88 /* VPCI maj=1, min=[0,1] only supports read and write */
82 if (vpci_major < 2) 89 if (vpci_major < 2)
83 prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); 90 prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
84 91
85 while (npages != 0) { 92 while (npages != 0) {
86 long num; 93 if (mask <= DMA_BIT_MASK(32)) {
87 94 num = pci_sun4v_iommu_map(devhandle,
88 num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), 95 HV_PCI_TSBID(0, entry),
89 npages, prot, __pa(pglist)); 96 npages,
90 if (unlikely(num < 0)) { 97 prot,
91 if (printk_ratelimit()) 98 __pa(pglist));
92 printk("iommu_batch_flush: IOMMU map of " 99 if (unlikely(num < 0)) {
93 "[%08lx:%08llx:%lx:%lx:%lx] failed with " 100 pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
94 "status %ld\n", 101 __func__,
95 devhandle, HV_PCI_TSBID(0, entry), 102 devhandle,
96 npages, prot, __pa(pglist), num); 103 HV_PCI_TSBID(0, entry),
97 return -1; 104 npages, prot, __pa(pglist),
105 num);
106 return -1;
107 }
108 } else {
109 index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
110 iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
111 ret = pci_sun4v_iotsb_map(devhandle,
112 iotsb_num,
113 index_count,
114 prot,
115 __pa(pglist),
116 &num);
117 if (unlikely(ret != HV_EOK)) {
118 pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
119 __func__,
120 devhandle, iotsb_num,
121 index_count, prot,
122 __pa(pglist), ret);
123 return -1;
124 }
98 } 125 }
99
100 entry += num; 126 entry += num;
101 npages -= num; 127 npages -= num;
102 pglist += num; 128 pglist += num;
@@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p)
108 return 0; 134 return 0;
109} 135}
110 136
111static inline void iommu_batch_new_entry(unsigned long entry) 137static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
112{ 138{
113 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 139 struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
114 140
115 if (p->entry + p->npages == entry) 141 if (p->entry + p->npages == entry)
116 return; 142 return;
117 if (p->entry != ~0UL) 143 if (p->entry != ~0UL)
118 iommu_batch_flush(p); 144 iommu_batch_flush(p, mask);
119 p->entry = entry; 145 p->entry = entry;
120} 146}
121 147
122/* Interrupts must be disabled. */ 148/* Interrupts must be disabled. */
123static inline long iommu_batch_add(u64 phys_page) 149static inline long iommu_batch_add(u64 phys_page, u64 mask)
124{ 150{
125 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 151 struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
126 152
@@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page)
128 154
129 p->pglist[p->npages++] = phys_page; 155 p->pglist[p->npages++] = phys_page;
130 if (p->npages == PGLIST_NENTS) 156 if (p->npages == PGLIST_NENTS)
131 return iommu_batch_flush(p); 157 return iommu_batch_flush(p, mask);
132 158
133 return 0; 159 return 0;
134} 160}
135 161
136/* Interrupts must be disabled. */ 162/* Interrupts must be disabled. */
137static inline long iommu_batch_end(void) 163static inline long iommu_batch_end(u64 mask)
138{ 164{
139 struct iommu_batch *p = this_cpu_ptr(&iommu_batch); 165 struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
140 166
141 BUG_ON(p->npages >= PGLIST_NENTS); 167 BUG_ON(p->npages >= PGLIST_NENTS);
142 168
143 return iommu_batch_flush(p); 169 return iommu_batch_flush(p, mask);
144} 170}
145 171
146static void *dma_4v_alloc_coherent(struct device *dev, size_t size, 172static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
147 dma_addr_t *dma_addrp, gfp_t gfp, 173 dma_addr_t *dma_addrp, gfp_t gfp,
148 unsigned long attrs) 174 unsigned long attrs)
149{ 175{
176 u64 mask;
150 unsigned long flags, order, first_page, npages, n; 177 unsigned long flags, order, first_page, npages, n;
151 unsigned long prot = 0; 178 unsigned long prot = 0;
152 struct iommu *iommu; 179 struct iommu *iommu;
180 struct atu *atu;
181 struct iommu_map_table *tbl;
153 struct page *page; 182 struct page *page;
154 void *ret; 183 void *ret;
155 long entry; 184 long entry;
@@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
174 memset((char *)first_page, 0, PAGE_SIZE << order); 203 memset((char *)first_page, 0, PAGE_SIZE << order);
175 204
176 iommu = dev->archdata.iommu; 205 iommu = dev->archdata.iommu;
206 atu = iommu->atu;
207
208 mask = dev->coherent_dma_mask;
209 if (mask <= DMA_BIT_MASK(32))
210 tbl = &iommu->tbl;
211 else
212 tbl = &atu->tbl;
177 213
178 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 214 entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
179 (unsigned long)(-1), 0); 215 (unsigned long)(-1), 0);
180 216
181 if (unlikely(entry == IOMMU_ERROR_CODE)) 217 if (unlikely(entry == IOMMU_ERROR_CODE))
182 goto range_alloc_fail; 218 goto range_alloc_fail;
183 219
184 *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 220 *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
185 ret = (void *) first_page; 221 ret = (void *) first_page;
186 first_page = __pa(first_page); 222 first_page = __pa(first_page);
187 223
@@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
193 entry); 229 entry);
194 230
195 for (n = 0; n < npages; n++) { 231 for (n = 0; n < npages; n++) {
196 long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); 232 long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
197 if (unlikely(err < 0L)) 233 if (unlikely(err < 0L))
198 goto iommu_map_fail; 234 goto iommu_map_fail;
199 } 235 }
200 236
201 if (unlikely(iommu_batch_end() < 0L)) 237 if (unlikely(iommu_batch_end(mask) < 0L))
202 goto iommu_map_fail; 238 goto iommu_map_fail;
203 239
204 local_irq_restore(flags); 240 local_irq_restore(flags);
@@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
206 return ret; 242 return ret;
207 243
208iommu_map_fail: 244iommu_map_fail:
209 iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); 245 iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
210 246
211range_alloc_fail: 247range_alloc_fail:
212 free_pages(first_page, order); 248 free_pages(first_page, order);
213 return NULL; 249 return NULL;
214} 250}
215 251
216static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, 252unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
217 unsigned long npages) 253 unsigned long iotsb_num,
254 struct pci_bus *bus_dev)
255{
256 struct pci_dev *pdev;
257 unsigned long err;
258 unsigned int bus;
259 unsigned int device;
260 unsigned int fun;
261
262 list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
263 if (pdev->subordinate) {
264 /* No need to bind pci bridge */
265 dma_4v_iotsb_bind(devhandle, iotsb_num,
266 pdev->subordinate);
267 } else {
268 bus = bus_dev->number;
269 device = PCI_SLOT(pdev->devfn);
270 fun = PCI_FUNC(pdev->devfn);
271 err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
272 HV_PCI_DEVICE_BUILD(bus,
273 device,
274 fun));
275
276 /* If bind fails for one device it is going to fail
277 * for rest of the devices because we are sharing
278 * IOTSB. So in case of failure simply return with
279 * error.
280 */
281 if (err)
282 return err;
283 }
284 }
285
286 return 0;
287}
288
289static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
290 dma_addr_t dvma, unsigned long iotsb_num,
291 unsigned long entry, unsigned long npages)
218{ 292{
219 u32 devhandle = *(u32 *)demap_arg;
220 unsigned long num, flags; 293 unsigned long num, flags;
294 unsigned long ret;
221 295
222 local_irq_save(flags); 296 local_irq_save(flags);
223 do { 297 do {
224 num = pci_sun4v_iommu_demap(devhandle, 298 if (dvma <= DMA_BIT_MASK(32)) {
225 HV_PCI_TSBID(0, entry), 299 num = pci_sun4v_iommu_demap(devhandle,
226 npages); 300 HV_PCI_TSBID(0, entry),
227 301 npages);
302 } else {
303 ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
304 entry, npages, &num);
305 if (unlikely(ret != HV_EOK)) {
306 pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
307 ret);
308 }
309 }
228 entry += num; 310 entry += num;
229 npages -= num; 311 npages -= num;
230 } while (npages != 0); 312 } while (npages != 0);
@@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
236{ 318{
237 struct pci_pbm_info *pbm; 319 struct pci_pbm_info *pbm;
238 struct iommu *iommu; 320 struct iommu *iommu;
321 struct atu *atu;
322 struct iommu_map_table *tbl;
239 unsigned long order, npages, entry; 323 unsigned long order, npages, entry;
324 unsigned long iotsb_num;
240 u32 devhandle; 325 u32 devhandle;
241 326
242 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; 327 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
243 iommu = dev->archdata.iommu; 328 iommu = dev->archdata.iommu;
244 pbm = dev->archdata.host_controller; 329 pbm = dev->archdata.host_controller;
330 atu = iommu->atu;
245 devhandle = pbm->devhandle; 331 devhandle = pbm->devhandle;
246 entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); 332
247 dma_4v_iommu_demap(&devhandle, entry, npages); 333 if (dvma <= DMA_BIT_MASK(32)) {
248 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); 334 tbl = &iommu->tbl;
335 iotsb_num = 0; /* we don't care for legacy iommu */
336 } else {
337 tbl = &atu->tbl;
338 iotsb_num = atu->iotsb->iotsb_num;
339 }
340 entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
341 dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
342 iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
249 order = get_order(size); 343 order = get_order(size);
250 if (order < 10) 344 if (order < 10)
251 free_pages((unsigned long)cpu, order); 345 free_pages((unsigned long)cpu, order);
@@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
257 unsigned long attrs) 351 unsigned long attrs)
258{ 352{
259 struct iommu *iommu; 353 struct iommu *iommu;
354 struct atu *atu;
355 struct iommu_map_table *tbl;
356 u64 mask;
260 unsigned long flags, npages, oaddr; 357 unsigned long flags, npages, oaddr;
261 unsigned long i, base_paddr; 358 unsigned long i, base_paddr;
262 u32 bus_addr, ret;
263 unsigned long prot; 359 unsigned long prot;
360 dma_addr_t bus_addr, ret;
264 long entry; 361 long entry;
265 362
266 iommu = dev->archdata.iommu; 363 iommu = dev->archdata.iommu;
364 atu = iommu->atu;
267 365
268 if (unlikely(direction == DMA_NONE)) 366 if (unlikely(direction == DMA_NONE))
269 goto bad; 367 goto bad;
@@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
272 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); 370 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
273 npages >>= IO_PAGE_SHIFT; 371 npages >>= IO_PAGE_SHIFT;
274 372
275 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, 373 mask = *dev->dma_mask;
374 if (mask <= DMA_BIT_MASK(32))
375 tbl = &iommu->tbl;
376 else
377 tbl = &atu->tbl;
378
379 entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
276 (unsigned long)(-1), 0); 380 (unsigned long)(-1), 0);
277 381
278 if (unlikely(entry == IOMMU_ERROR_CODE)) 382 if (unlikely(entry == IOMMU_ERROR_CODE))
279 goto bad; 383 goto bad;
280 384
281 bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); 385 bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
282 ret = bus_addr | (oaddr & ~IO_PAGE_MASK); 386 ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
283 base_paddr = __pa(oaddr & IO_PAGE_MASK); 387 base_paddr = __pa(oaddr & IO_PAGE_MASK);
284 prot = HV_PCI_MAP_ATTR_READ; 388 prot = HV_PCI_MAP_ATTR_READ;
@@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
293 iommu_batch_start(dev, prot, entry); 397 iommu_batch_start(dev, prot, entry);
294 398
295 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { 399 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
296 long err = iommu_batch_add(base_paddr); 400 long err = iommu_batch_add(base_paddr, mask);
297 if (unlikely(err < 0L)) 401 if (unlikely(err < 0L))
298 goto iommu_map_fail; 402 goto iommu_map_fail;
299 } 403 }
300 if (unlikely(iommu_batch_end() < 0L)) 404 if (unlikely(iommu_batch_end(mask) < 0L))
301 goto iommu_map_fail; 405 goto iommu_map_fail;
302 406
303 local_irq_restore(flags); 407 local_irq_restore(flags);
@@ -310,7 +414,7 @@ bad:
310 return DMA_ERROR_CODE; 414 return DMA_ERROR_CODE;
311 415
312iommu_map_fail: 416iommu_map_fail:
313 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); 417 iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
314 return DMA_ERROR_CODE; 418 return DMA_ERROR_CODE;
315} 419}
316 420
@@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
320{ 424{
321 struct pci_pbm_info *pbm; 425 struct pci_pbm_info *pbm;
322 struct iommu *iommu; 426 struct iommu *iommu;
427 struct atu *atu;
428 struct iommu_map_table *tbl;
323 unsigned long npages; 429 unsigned long npages;
430 unsigned long iotsb_num;
324 long entry; 431 long entry;
325 u32 devhandle; 432 u32 devhandle;
326 433
@@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
332 439
333 iommu = dev->archdata.iommu; 440 iommu = dev->archdata.iommu;
334 pbm = dev->archdata.host_controller; 441 pbm = dev->archdata.host_controller;
442 atu = iommu->atu;
335 devhandle = pbm->devhandle; 443 devhandle = pbm->devhandle;
336 444
337 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); 445 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
338 npages >>= IO_PAGE_SHIFT; 446 npages >>= IO_PAGE_SHIFT;
339 bus_addr &= IO_PAGE_MASK; 447 bus_addr &= IO_PAGE_MASK;
340 entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; 448
341 dma_4v_iommu_demap(&devhandle, entry, npages); 449 if (bus_addr <= DMA_BIT_MASK(32)) {
342 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); 450 iotsb_num = 0; /* we don't care for legacy iommu */
451 tbl = &iommu->tbl;
452 } else {
453 iotsb_num = atu->iotsb->iotsb_num;
454 tbl = &atu->tbl;
455 }
456 entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
457 dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
458 iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
343} 459}
344 460
345static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, 461static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
353 unsigned long seg_boundary_size; 469 unsigned long seg_boundary_size;
354 int outcount, incount, i; 470 int outcount, incount, i;
355 struct iommu *iommu; 471 struct iommu *iommu;
472 struct atu *atu;
473 struct iommu_map_table *tbl;
474 u64 mask;
356 unsigned long base_shift; 475 unsigned long base_shift;
357 long err; 476 long err;
358 477
359 BUG_ON(direction == DMA_NONE); 478 BUG_ON(direction == DMA_NONE);
360 479
361 iommu = dev->archdata.iommu; 480 iommu = dev->archdata.iommu;
481 atu = iommu->atu;
482
362 if (nelems == 0 || !iommu) 483 if (nelems == 0 || !iommu)
363 return 0; 484 return 0;
364 485
@@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
384 max_seg_size = dma_get_max_seg_size(dev); 505 max_seg_size = dma_get_max_seg_size(dev);
385 seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 506 seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
386 IO_PAGE_SIZE) >> IO_PAGE_SHIFT; 507 IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
387 base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; 508
509 mask = *dev->dma_mask;
510 if (mask <= DMA_BIT_MASK(32))
511 tbl = &iommu->tbl;
512 else
513 tbl = &atu->tbl;
514
515 base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
516
388 for_each_sg(sglist, s, nelems, i) { 517 for_each_sg(sglist, s, nelems, i) {
389 unsigned long paddr, npages, entry, out_entry = 0, slen; 518 unsigned long paddr, npages, entry, out_entry = 0, slen;
390 519
@@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
397 /* Allocate iommu entries for that segment */ 526 /* Allocate iommu entries for that segment */
398 paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); 527 paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
399 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); 528 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
400 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, 529 entry = iommu_tbl_range_alloc(dev, tbl, npages,
401 &handle, (unsigned long)(-1), 0); 530 &handle, (unsigned long)(-1), 0);
402 531
403 /* Handle failure */ 532 /* Handle failure */
404 if (unlikely(entry == IOMMU_ERROR_CODE)) { 533 if (unlikely(entry == IOMMU_ERROR_CODE)) {
405 if (printk_ratelimit()) 534 pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
406 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" 535 tbl, paddr, npages);
407 " npages %lx\n", iommu, paddr, npages);
408 goto iommu_map_failed; 536 goto iommu_map_failed;
409 } 537 }
410 538
411 iommu_batch_new_entry(entry); 539 iommu_batch_new_entry(entry, mask);
412 540
413 /* Convert entry to a dma_addr_t */ 541 /* Convert entry to a dma_addr_t */
414 dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); 542 dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
415 dma_addr |= (s->offset & ~IO_PAGE_MASK); 543 dma_addr |= (s->offset & ~IO_PAGE_MASK);
416 544
417 /* Insert into HW table */ 545 /* Insert into HW table */
418 paddr &= IO_PAGE_MASK; 546 paddr &= IO_PAGE_MASK;
419 while (npages--) { 547 while (npages--) {
420 err = iommu_batch_add(paddr); 548 err = iommu_batch_add(paddr, mask);
421 if (unlikely(err < 0L)) 549 if (unlikely(err < 0L))
422 goto iommu_map_failed; 550 goto iommu_map_failed;
423 paddr += IO_PAGE_SIZE; 551 paddr += IO_PAGE_SIZE;
@@ -452,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
452 dma_next = dma_addr + slen; 580 dma_next = dma_addr + slen;
453 } 581 }
454 582
455 err = iommu_batch_end(); 583 err = iommu_batch_end(mask);
456 584
457 if (unlikely(err < 0L)) 585 if (unlikely(err < 0L))
458 goto iommu_map_failed; 586 goto iommu_map_failed;
@@ -475,7 +603,7 @@ iommu_map_failed:
475 vaddr = s->dma_address & IO_PAGE_MASK; 603 vaddr = s->dma_address & IO_PAGE_MASK;
476 npages = iommu_num_pages(s->dma_address, s->dma_length, 604 npages = iommu_num_pages(s->dma_address, s->dma_length,
477 IO_PAGE_SIZE); 605 IO_PAGE_SIZE);
478 iommu_tbl_range_free(&iommu->tbl, vaddr, npages, 606 iommu_tbl_range_free(tbl, vaddr, npages,
479 IOMMU_ERROR_CODE); 607 IOMMU_ERROR_CODE);
480 /* XXX demap? XXX */ 608 /* XXX demap? XXX */
481 s->dma_address = DMA_ERROR_CODE; 609 s->dma_address = DMA_ERROR_CODE;
@@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
496 struct pci_pbm_info *pbm; 624 struct pci_pbm_info *pbm;
497 struct scatterlist *sg; 625 struct scatterlist *sg;
498 struct iommu *iommu; 626 struct iommu *iommu;
627 struct atu *atu;
499 unsigned long flags, entry; 628 unsigned long flags, entry;
629 unsigned long iotsb_num;
500 u32 devhandle; 630 u32 devhandle;
501 631
502 BUG_ON(direction == DMA_NONE); 632 BUG_ON(direction == DMA_NONE);
503 633
504 iommu = dev->archdata.iommu; 634 iommu = dev->archdata.iommu;
505 pbm = dev->archdata.host_controller; 635 pbm = dev->archdata.host_controller;
636 atu = iommu->atu;
506 devhandle = pbm->devhandle; 637 devhandle = pbm->devhandle;
507 638
508 local_irq_save(flags); 639 local_irq_save(flags);
@@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
512 dma_addr_t dma_handle = sg->dma_address; 643 dma_addr_t dma_handle = sg->dma_address;
513 unsigned int len = sg->dma_length; 644 unsigned int len = sg->dma_length;
514 unsigned long npages; 645 unsigned long npages;
515 struct iommu_map_table *tbl = &iommu->tbl; 646 struct iommu_map_table *tbl;
516 unsigned long shift = IO_PAGE_SHIFT; 647 unsigned long shift = IO_PAGE_SHIFT;
517 648
518 if (!len) 649 if (!len)
519 break; 650 break;
520 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); 651 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
652
653 if (dma_handle <= DMA_BIT_MASK(32)) {
654 iotsb_num = 0; /* we don't care for legacy iommu */
655 tbl = &iommu->tbl;
656 } else {
657 iotsb_num = atu->iotsb->iotsb_num;
658 tbl = &atu->tbl;
659 }
521 entry = ((dma_handle - tbl->table_map_base) >> shift); 660 entry = ((dma_handle - tbl->table_map_base) >> shift);
522 dma_4v_iommu_demap(&devhandle, entry, npages); 661 dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
523 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, 662 entry, npages);
663 iommu_tbl_range_free(tbl, dma_handle, npages,
524 IOMMU_ERROR_CODE); 664 IOMMU_ERROR_CODE);
525 sg = sg_next(sg); 665 sg = sg_next(sg);
526 } 666 }
@@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
581 return cnt; 721 return cnt;
582} 722}
583 723
724static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
725{
726 struct atu *atu = pbm->iommu->atu;
727 struct atu_iotsb *iotsb;
728 void *table;
729 u64 table_size;
730 u64 iotsb_num;
731 unsigned long order;
732 unsigned long err;
733
734 iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
735 if (!iotsb) {
736 err = -ENOMEM;
737 goto out_err;
738 }
739 atu->iotsb = iotsb;
740
741 /* calculate size of IOTSB */
742 table_size = (atu->size / IO_PAGE_SIZE) * 8;
743 order = get_order(table_size);
744 table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
745 if (!table) {
746 err = -ENOMEM;
747 goto table_failed;
748 }
749 iotsb->table = table;
750 iotsb->ra = __pa(table);
751 iotsb->dvma_size = atu->size;
752 iotsb->dvma_base = atu->base;
753 iotsb->table_size = table_size;
754 iotsb->page_size = IO_PAGE_SIZE;
755
756 /* configure and register IOTSB with HV */
757 err = pci_sun4v_iotsb_conf(pbm->devhandle,
758 iotsb->ra,
759 iotsb->table_size,
760 iotsb->page_size,
761 iotsb->dvma_base,
762 &iotsb_num);
763 if (err) {
764 pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
765 goto iotsb_conf_failed;
766 }
767 iotsb->iotsb_num = iotsb_num;
768
769 err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
770 if (err) {
771 pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
772 goto iotsb_conf_failed;
773 }
774
775 return 0;
776
777iotsb_conf_failed:
778 free_pages((unsigned long)table, order);
779table_failed:
780 kfree(iotsb);
781out_err:
782 return err;
783}
784
785static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
786{
787 struct atu *atu = pbm->iommu->atu;
788 unsigned long err;
789 const u64 *ranges;
790 u64 map_size, num_iotte;
791 u64 dma_mask;
792 const u32 *page_size;
793 int len;
794
795 ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
796 &len);
797 if (!ranges) {
798 pr_err(PFX "No iommu-address-ranges\n");
799 return -EINVAL;
800 }
801
802 page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
803 NULL);
804 if (!page_size) {
805 pr_err(PFX "No iommu-pagesizes\n");
806 return -EINVAL;
807 }
808
809 /* There are 4 iommu-address-ranges supported. Each range is pair of
810 * {base, size}. The ranges[0] and ranges[1] are 32bit address space
811 * while ranges[2] and ranges[3] are 64bit space. We want to use 64bit
812 * address ranges to support 64bit addressing. Because 'size' for
813 * address ranges[2] and ranges[3] are same we can select either of
814 * ranges[2] or ranges[3] for mapping. However due to 'size' is too
815 * large for OS to allocate IOTSB we are using fix size 32G
816 * (ATU_64_SPACE_SIZE) which is more than enough for all PCIe devices
817 * to share.
818 */
819 atu->ranges = (struct atu_ranges *)ranges;
820 atu->base = atu->ranges[3].base;
821 atu->size = ATU_64_SPACE_SIZE;
822
823 /* Create IOTSB */
824 err = pci_sun4v_atu_alloc_iotsb(pbm);
825 if (err) {
826 pr_err(PFX "Error creating ATU IOTSB\n");
827 return err;
828 }
829
830 /* Create ATU iommu map.
831 * One bit represents one iotte in IOTSB table.
832 */
833 dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
834 num_iotte = atu->size / IO_PAGE_SIZE;
835 map_size = num_iotte / 8;
836 atu->tbl.table_map_base = atu->base;
837 atu->dma_addr_mask = dma_mask;
838 atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
839 if (!atu->tbl.map)
840 return -ENOMEM;
841
842 iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
843 NULL, false /* no large_pool */,
844 0 /* default npools */,
845 false /* want span boundary checking */);
846
847 return 0;
848}
849
584static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) 850static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
585{ 851{
586 static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; 852 static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
@@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
918 1184
919 pci_sun4v_scan_bus(pbm, &op->dev); 1185 pci_sun4v_scan_bus(pbm, &op->dev);
920 1186
1187 /* if atu_init fails its not complete failure.
1188 * we can still continue using legacy iommu.
1189 */
1190 if (pbm->iommu->atu) {
1191 err = pci_sun4v_atu_init(pbm);
1192 if (err) {
1193 kfree(pbm->iommu->atu);
1194 pbm->iommu->atu = NULL;
1195 pr_err(PFX "ATU init failed, err=%d\n", err);
1196 }
1197 }
1198
921 pbm->next = pci_pbm_root; 1199 pbm->next = pci_pbm_root;
922 pci_pbm_root = pbm; 1200 pci_pbm_root = pbm;
923 1201
@@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op)
931 struct pci_pbm_info *pbm; 1209 struct pci_pbm_info *pbm;
932 struct device_node *dp; 1210 struct device_node *dp;
933 struct iommu *iommu; 1211 struct iommu *iommu;
1212 struct atu *atu;
934 u32 devhandle; 1213 u32 devhandle;
935 int i, err = -ENODEV; 1214 int i, err = -ENODEV;
1215 static bool hv_atu = true;
936 1216
937 dp = op->dev.of_node; 1217 dp = op->dev.of_node;
938 1218
@@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op)
954 pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", 1234 pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
955 vpci_major, vpci_minor); 1235 vpci_major, vpci_minor);
956 1236
1237 err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
1238 if (err) {
1239 /* don't return an error if we fail to register the
1240 * ATU group, but ATU hcalls won't be available.
1241 */
1242 hv_atu = false;
1243 pr_err(PFX "Could not register hvapi ATU err=%d\n",
1244 err);
1245 } else {
1246 pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
1247 vatu_major, vatu_minor);
1248 }
1249
957 dma_ops = &sun4v_dma_ops; 1250 dma_ops = &sun4v_dma_ops;
958 } 1251 }
959 1252
@@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op)
991 } 1284 }
992 1285
993 pbm->iommu = iommu; 1286 pbm->iommu = iommu;
1287 iommu->atu = NULL;
1288 if (hv_atu) {
1289 atu = kzalloc(sizeof(*atu), GFP_KERNEL);
1290 if (!atu)
1291 pr_err(PFX "Could not allocate atu\n");
1292 else
1293 iommu->atu = atu;
1294 }
994 1295
995 err = pci_sun4v_pbm_init(pbm, op, devhandle); 1296 err = pci_sun4v_pbm_init(pbm, op, devhandle);
996 if (err) 1297 if (err)
@@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op)
1001 return 0; 1302 return 0;
1002 1303
1003out_free_iommu: 1304out_free_iommu:
1305 kfree(iommu->atu);
1004 kfree(pbm->iommu); 1306 kfree(pbm->iommu);
1005 1307
1006out_free_controller: 1308out_free_controller:
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h
index 5642212390b2..22603a4e48bf 100644
--- a/arch/sparc/kernel/pci_sun4v.h
+++ b/arch/sparc/kernel/pci_sun4v.h
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
89 unsigned long msinum, 89 unsigned long msinum,
90 unsigned long valid); 90 unsigned long valid);
91 91
92/* Sun4v HV IOMMU v2 APIs */
93unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
94 unsigned long ra,
95 unsigned long table_size,
96 unsigned long page_size,
97 unsigned long dvma_base,
98 u64 *iotsb_num);
99unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
100 unsigned long iotsb_num,
101 unsigned int pci_device);
102unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
103 unsigned long iotsb_num,
104 unsigned long iotsb_index_iottes,
105 unsigned long io_attributes,
106 unsigned long io_page_list_pa,
107 long *mapped);
108unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
109 unsigned long iotsb_num,
110 unsigned long iotsb_index,
111 unsigned long iottes,
112 unsigned long *demapped);
92#endif /* !(_PCI_SUN4V_H) */ 113#endif /* !(_PCI_SUN4V_H) */
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S
index e606d46c6815..578f09657916 100644
--- a/arch/sparc/kernel/pci_sun4v_asm.S
+++ b/arch/sparc/kernel/pci_sun4v_asm.S
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid)
360 mov %o0, %o0 360 mov %o0, %o0
361ENDPROC(pci_sun4v_msg_setvalid) 361ENDPROC(pci_sun4v_msg_setvalid)
362 362
363 /*
364 * %o0: devhandle
365 * %o1: r_addr
366 * %o2: size
367 * %o3: pagesize
368 * %o4: virt
369 * %o5: &iotsb_num/&iotsb_handle
370 *
371 * returns %o0: status
372 * %o1: iotsb_num/iotsb_handle
373 */
374ENTRY(pci_sun4v_iotsb_conf)
375 mov %o5, %g1
376 mov HV_FAST_PCI_IOTSB_CONF, %o5
377 ta HV_FAST_TRAP
378 retl
379 stx %o1, [%g1]
380ENDPROC(pci_sun4v_iotsb_conf)
381
382 /*
383 * %o0: devhandle
384 * %o1: iotsb_num/iotsb_handle
385 * %o2: pci_device
386 *
387 * returns %o0: status
388 */
389ENTRY(pci_sun4v_iotsb_bind)
390 mov HV_FAST_PCI_IOTSB_BIND, %o5
391 ta HV_FAST_TRAP
392 retl
393 nop
394ENDPROC(pci_sun4v_iotsb_bind)
395
396 /*
397 * %o0: devhandle
398 * %o1: iotsb_num/iotsb_handle
399 * %o2: index_count
400 * %o3: iotte_attributes
401 * %o4: io_page_list_p
402 * %o5: &mapped
403 *
404 * returns %o0: status
405 * %o1: #mapped
406 */
407ENTRY(pci_sun4v_iotsb_map)
408 mov %o5, %g1
409 mov HV_FAST_PCI_IOTSB_MAP, %o5
410 ta HV_FAST_TRAP
411 retl
412 stx %o1, [%g1]
413ENDPROC(pci_sun4v_iotsb_map)
414
415 /*
416 * %o0: devhandle
417 * %o1: iotsb_num/iotsb_handle
418 * %o2: iotsb_index
419 * %o3: #iottes
420 * %o4: &demapped
421 *
422 * returns %o0: status
423 * %o1: #demapped
424 */
425ENTRY(pci_sun4v_iotsb_demap)
426 mov HV_FAST_PCI_IOTSB_DEMAP, %o5
427 ta HV_FAST_TRAP
428 retl
429 stx %o1, [%o4]
430ENDPROC(pci_sun4v_iotsb_demap)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index c3c12efe0bc0..9c0c8fd0b292 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
89 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; 89 sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
90 90
91 /* 1. Make sure we are not getting garbage from the user */ 91 /* 1. Make sure we are not getting garbage from the user */
92 if (!invalid_frame_pointer(sf, sizeof(*sf))) 92 if (invalid_frame_pointer(sf, sizeof(*sf)))
93 goto segv_and_exit; 93 goto segv_and_exit;
94 94
95 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) 95 if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
150 150
151 synchronize_user_stack(); 151 synchronize_user_stack();
152 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; 152 sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
153 if (!invalid_frame_pointer(sf, sizeof(*sf))) 153 if (invalid_frame_pointer(sf, sizeof(*sf)))
154 goto segv; 154 goto segv;
155 155
156 if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) 156 if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 439784b7b7ac..37aa537b3ad8 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -802,8 +802,10 @@ struct mdesc_mblock {
802}; 802};
803static struct mdesc_mblock *mblocks; 803static struct mdesc_mblock *mblocks;
804static int num_mblocks; 804static int num_mblocks;
805static int find_numa_node_for_addr(unsigned long pa,
806 struct node_mem_mask *pnode_mask);
805 807
806static unsigned long ra_to_pa(unsigned long addr) 808static unsigned long __init ra_to_pa(unsigned long addr)
807{ 809{
808 int i; 810 int i;
809 811
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr)
819 return addr; 821 return addr;
820} 822}
821 823
822static int find_node(unsigned long addr) 824static int __init find_node(unsigned long addr)
823{ 825{
826 static bool search_mdesc = true;
827 static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
828 static int last_index;
824 int i; 829 int i;
825 830
826 addr = ra_to_pa(addr); 831 addr = ra_to_pa(addr);
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr)
830 if ((addr & p->mask) == p->val) 835 if ((addr & p->mask) == p->val)
831 return i; 836 return i;
832 } 837 }
833 /* The following condition has been observed on LDOM guests.*/ 838 /* The following condition has been observed on LDOM guests because
834 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" 839 * node_masks only contains the best latency mask and value.
835 " rule. Some physical memory will be owned by node 0."); 840 * LDOM guest's mdesc can contain a single latency group to
836 return 0; 841 * cover multiple address range. Print warning message only if the
842 * address cannot be found in node_masks nor mdesc.
843 */
844 if ((search_mdesc) &&
845 ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
846 /* find the available node in the mdesc */
847 last_index = find_numa_node_for_addr(addr, &last_mem_mask);
848 numadbg("find_node: latency group for address 0x%lx is %d\n",
849 addr, last_index);
850 if ((last_index < 0) || (last_index >= num_node_masks)) {
851 /* WARN_ONCE() and use default group 0 */
852 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
853 search_mdesc = false;
854 last_index = 0;
855 }
856 }
857
858 return last_index;
837} 859}
838 860
839static u64 memblock_nid_range(u64 start, u64 end, int *nid) 861static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
840{ 862{
841 *nid = find_node(start); 863 *nid = find_node(start);
842 start += PAGE_SIZE; 864 start += PAGE_SIZE;
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
1160 return numa_latency[from][to]; 1182 return numa_latency[from][to];
1161} 1183}
1162 1184
1185static int find_numa_node_for_addr(unsigned long pa,
1186 struct node_mem_mask *pnode_mask)
1187{
1188 struct mdesc_handle *md = mdesc_grab();
1189 u64 node, arc;
1190 int i = 0;
1191
1192 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1193 if (node == MDESC_NODE_NULL)
1194 goto out;
1195
1196 mdesc_for_each_node_by_name(md, node, "group") {
1197 mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
1198 u64 target = mdesc_arc_target(md, arc);
1199 struct mdesc_mlgroup *m = find_mlgroup(target);
1200
1201 if (!m)
1202 continue;
1203 if ((pa & m->mask) == m->match) {
1204 if (pnode_mask) {
1205 pnode_mask->mask = m->mask;
1206 pnode_mask->val = m->match;
1207 }
1208 mdesc_release(md);
1209 return i;
1210 }
1211 }
1212 i++;
1213 }
1214
1215out:
1216 mdesc_release(md);
1217 return -1;
1218}
1219
1163static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) 1220static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1164{ 1221{
1165 int i; 1222 int i;
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 6160761d5f61..4810e48dbbbf 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -61,4 +61,7 @@
61 */ 61 */
62#define __write_once __read_mostly 62#define __write_once __read_mostly
63 63
64/* __ro_after_init is the generic name for the tile arch __write_once. */
65#define __ro_after_init __read_mostly
66
64#endif /* _ASM_TILE_CACHE_H */ 67#endif /* _ASM_TILE_CACHE_H */
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 178989e6d3e3..ea960d660917 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
218 */ 218 */
219unsigned long long sched_clock(void) 219unsigned long long sched_clock(void)
220{ 220{
221 return clocksource_cyc2ns(get_cycles(), 221 return mult_frac(get_cycles(),
222 sched_clock_mult, SCHED_CLOCK_SHIFT); 222 sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
223} 223}
224 224
225int setup_profiling_timer(unsigned int multiplier) 225int setup_profiling_timer(unsigned int multiplier)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 536ccfcc01c6..34d9e15857c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -40,8 +40,8 @@ GCOV_PROFILE := n
40UBSAN_SANITIZE :=n 40UBSAN_SANITIZE :=n
41 41
42LDFLAGS := -m elf_$(UTS_MACHINE) 42LDFLAGS := -m elf_$(UTS_MACHINE)
43ifeq ($(CONFIG_RELOCATABLE),y) 43# Compressed kernel should be built as PIE since it may be loaded at any
44# If kernel is relocatable, build compressed kernel as PIE. 44# address by the bootloader.
45ifeq ($(CONFIG_X86_32),y) 45ifeq ($(CONFIG_X86_32),y)
46LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) 46LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
47else 47else
@@ -51,7 +51,6 @@ else
51LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ 51LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
52 && echo "-z noreloc-overflow -pie --no-dynamic-linker") 52 && echo "-z noreloc-overflow -pie --no-dynamic-linker")
53endif 53endif
54endif
55LDFLAGS_vmlinux := -T 54LDFLAGS_vmlinux := -T
56 55
57hostprogs-y := mkpiggy 56hostprogs-y := mkpiggy
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c
index 26240dde081e..4224ede43b4e 100644
--- a/arch/x86/boot/cpu.c
+++ b/arch/x86/boot/cpu.c
@@ -87,6 +87,12 @@ int validate_cpu(void)
87 return -1; 87 return -1;
88 } 88 }
89 89
90 if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
91 !has_eflag(X86_EFLAGS_ID)) {
92 printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
93 return -1;
94 }
95
90 if (err_flags) { 96 if (err_flags) {
91 puts("This kernel requires the following features " 97 puts("This kernel requires the following features "
92 "not present on the CPU:\n"); 98 "not present on the CPU:\n");
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index f5f4b3fbbbc2..afb222b63cae 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
662 pr_cont("Fam15h "); 662 pr_cont("Fam15h ");
663 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; 663 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
664 break; 664 break;
665 665 case 0x17:
666 pr_cont("Fam17h ");
667 /*
668 * In family 17h, there are no event constraints in the PMC hardware.
669 * We fallback to using default amd_get_event_constraints.
670 */
671 break;
666 default: 672 default:
667 pr_err("core perfctr but no constraints; unknown hardware!\n"); 673 pr_err("core perfctr but no constraints; unknown hardware!\n");
668 return -ENODEV; 674 return -ENODEV;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d31735f37ed7..9d4bf3ab049e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
2352 frame.next_frame = 0; 2352 frame.next_frame = 0;
2353 frame.return_address = 0; 2353 frame.return_address = 0;
2354 2354
2355 if (!access_ok(VERIFY_READ, fp, 8)) 2355 if (!valid_user_frame(fp, sizeof(frame)))
2356 break; 2356 break;
2357 2357
2358 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); 2358 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
2362 if (bytes != 0) 2362 if (bytes != 0)
2363 break; 2363 break;
2364 2364
2365 if (!valid_user_frame(fp, sizeof(frame)))
2366 break;
2367
2368 perf_callchain_store(entry, cs_base + frame.return_address); 2365 perf_callchain_store(entry, cs_base + frame.return_address);
2369 fp = compat_ptr(ss_base + frame.next_frame); 2366 fp = compat_ptr(ss_base + frame.next_frame);
2370 } 2367 }
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
2413 frame.next_frame = NULL; 2410 frame.next_frame = NULL;
2414 frame.return_address = 0; 2411 frame.return_address = 0;
2415 2412
2416 if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2)) 2413 if (!valid_user_frame(fp, sizeof(frame)))
2417 break; 2414 break;
2418 2415
2419 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); 2416 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
2423 if (bytes != 0) 2420 if (bytes != 0)
2424 break; 2421 break;
2425 2422
2426 if (!valid_user_frame(fp, sizeof(frame)))
2427 break;
2428
2429 perf_callchain_store(entry, frame.return_address); 2423 perf_callchain_store(entry, frame.return_address);
2430 fp = (void __user *)frame.next_frame; 2424 fp = (void __user *)frame.next_frame;
2431 } 2425 }
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 0319311dbdbb..be202390bbd3 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
1108 } 1108 }
1109 1109
1110 /* 1110 /*
1111 * We use the interrupt regs as a base because the PEBS record 1111 * We use the interrupt regs as a base because the PEBS record does not
1112 * does not contain a full regs set, specifically it seems to 1112 * contain a full regs set, specifically it seems to lack segment
1113 * lack segment descriptors, which get used by things like 1113 * descriptors, which get used by things like user_mode().
1114 * user_mode().
1115 * 1114 *
1116 * In the simple case fix up only the IP and BP,SP regs, for 1115 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
1117 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. 1116 *
1118 * A possible PERF_SAMPLE_REGS will have to transfer all regs. 1117 * We must however always use BP,SP from iregs for the unwinder to stay
1118 * sane; the record BP,SP can point into thin air when the record is
1119 * from a previous PMI context or an (I)RET happend between the record
1120 * and PMI.
1119 */ 1121 */
1120 *regs = *iregs; 1122 *regs = *iregs;
1121 regs->flags = pebs->flags; 1123 regs->flags = pebs->flags;
1122 set_linear_ip(regs, pebs->ip); 1124 set_linear_ip(regs, pebs->ip);
1123 regs->bp = pebs->bp;
1124 regs->sp = pebs->sp;
1125 1125
1126 if (sample_type & PERF_SAMPLE_REGS_INTR) { 1126 if (sample_type & PERF_SAMPLE_REGS_INTR) {
1127 regs->ax = pebs->ax; 1127 regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
1130 regs->dx = pebs->dx; 1130 regs->dx = pebs->dx;
1131 regs->si = pebs->si; 1131 regs->si = pebs->si;
1132 regs->di = pebs->di; 1132 regs->di = pebs->di;
1133 regs->bp = pebs->bp;
1134 regs->sp = pebs->sp;
1135 1133
1136 regs->flags = pebs->flags; 1134 /*
1135 * Per the above; only set BP,SP if we don't need callchains.
1136 *
1137 * XXX: does this make sense?
1138 */
1139 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
1140 regs->bp = pebs->bp;
1141 regs->sp = pebs->sp;
1142 }
1143
1144 /*
1145 * Preserve PERF_EFLAGS_VM from set_linear_ip().
1146 */
1147 regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
1137#ifndef CONFIG_X86_32 1148#ifndef CONFIG_X86_32
1138 regs->r8 = pebs->r8; 1149 regs->r8 = pebs->r8;
1139 regs->r9 = pebs->r9; 1150 regs->r9 = pebs->r9;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index efca2685d876..dbaaf7dc8373 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
319 */ 319 */
320static int uncore_pmu_event_init(struct perf_event *event); 320static int uncore_pmu_event_init(struct perf_event *event);
321 321
322static bool is_uncore_event(struct perf_event *event) 322static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
323{ 323{
324 return event->pmu->event_init == uncore_pmu_event_init; 324 return &box->pmu->pmu == event->pmu;
325} 325}
326 326
327static int 327static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
340 340
341 n = box->n_events; 341 n = box->n_events;
342 342
343 if (is_uncore_event(leader)) { 343 if (is_box_event(box, leader)) {
344 box->event_list[n] = leader; 344 box->event_list[n] = leader;
345 n++; 345 n++;
346 } 346 }
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
349 return n; 349 return n;
350 350
351 list_for_each_entry(event, &leader->sibling_list, group_entry) { 351 list_for_each_entry(event, &leader->sibling_list, group_entry) {
352 if (!is_uncore_event(event) || 352 if (!is_box_event(box, event) ||
353 event->state <= PERF_EVENT_STATE_OFF) 353 event->state <= PERF_EVENT_STATE_OFF)
354 continue; 354 continue;
355 355
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 5f845eef9a4d..a3dcc12bef4a 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -8,8 +8,12 @@
8#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 8#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
9#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 9#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
10#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 10#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
11#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f 11#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
12#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c 12#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
13#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
14#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
15#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
16#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
13 17
14/* SNB event control */ 18/* SNB event control */
15#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff 19#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -486,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
486 490
487 snb_uncore_imc_event_start(event, 0); 491 snb_uncore_imc_event_start(event, 0);
488 492
489 box->n_events++;
490
491 return 0; 493 return 0;
492} 494}
493 495
494static void snb_uncore_imc_event_del(struct perf_event *event, int flags) 496static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
495{ 497{
496 struct intel_uncore_box *box = uncore_event_to_box(event);
497 int i;
498
499 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); 498 snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
500
501 for (i = 0; i < box->n_events; i++) {
502 if (event == box->event_list[i]) {
503 --box->n_events;
504 break;
505 }
506 }
507} 499}
508 500
509int snb_pci2phy_map_init(int devid) 501int snb_pci2phy_map_init(int devid)
@@ -616,13 +608,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
616 608
617static const struct pci_device_id skl_uncore_pci_ids[] = { 609static const struct pci_device_id skl_uncore_pci_ids[] = {
618 { /* IMC */ 610 { /* IMC */
619 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), 611 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
620 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 612 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
621 }, 613 },
622 { /* IMC */ 614 { /* IMC */
623 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), 615 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
624 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 616 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
625 }, 617 },
618 { /* IMC */
619 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
620 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
621 },
622 { /* IMC */
623 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
624 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
625 },
626 { /* IMC */
627 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
628 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
629 },
630 { /* IMC */
631 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
632 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
633 },
626 634
627 { /* end: all zeroes */ }, 635 { /* end: all zeroes */ },
628}; 636};
@@ -666,8 +674,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
666 IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ 674 IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
667 IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ 675 IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
668 IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ 676 IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
669 IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ 677 IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */
670 IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ 678 IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
679 IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
680 IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
681 IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
682 IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
671 { /* end marker */ } 683 { /* end marker */ }
672}; 684};
673 685
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 5874d8de1f8d..a77ee026643d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -113,7 +113,7 @@ struct debug_store {
113 * Per register state. 113 * Per register state.
114 */ 114 */
115struct er_account { 115struct er_account {
116 raw_spinlock_t lock; /* per-core: protect structure */ 116 raw_spinlock_t lock; /* per-core: protect structure */
117 u64 config; /* extra MSR config */ 117 u64 config; /* extra MSR config */
118 u64 reg; /* extra MSR number */ 118 u64 reg; /* extra MSR number */
119 atomic_t ref; /* reference count */ 119 atomic_t ref; /* reference count */
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 5b6753d1f7f4..49da9f497b90 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -17,6 +17,7 @@
17 17
18extern int intel_mid_pci_init(void); 18extern int intel_mid_pci_init(void);
19extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); 19extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
20extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
20 21
21extern void intel_mid_pwr_power_off(void); 22extern void intel_mid_pwr_power_off(void);
22 23
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b81fe2d63e15..1e81a37c034e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
347#ifdef CONFIG_SMP 347#ifdef CONFIG_SMP
348 unsigned bits; 348 unsigned bits;
349 int cpu = smp_processor_id(); 349 int cpu = smp_processor_id();
350 unsigned int socket_id, core_complex_id;
351 350
352 bits = c->x86_coreid_bits; 351 bits = c->x86_coreid_bits;
353 /* Low order bits define the core id (index of core in socket) */ 352 /* Low order bits define the core id (index of core in socket) */
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
365 if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) 364 if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
366 return; 365 return;
367 366
368 socket_id = (c->apicid >> bits) - 1; 367 per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
369 core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
370
371 per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
372#endif 368#endif
373} 369}
374 370
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9bd910a7dd0a..cc9e980c68ec 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
979} 979}
980 980
981/* 981/*
982 * The physical to logical package id mapping is initialized from the
983 * acpi/mptables information. Make sure that CPUID actually agrees with
984 * that.
985 */
986static void sanitize_package_id(struct cpuinfo_x86 *c)
987{
988#ifdef CONFIG_SMP
989 unsigned int pkg, apicid, cpu = smp_processor_id();
990
991 apicid = apic->cpu_present_to_apicid(cpu);
992 pkg = apicid >> boot_cpu_data.x86_coreid_bits;
993
994 if (apicid != c->initial_apicid) {
995 pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
996 cpu, apicid, c->initial_apicid);
997 c->initial_apicid = apicid;
998 }
999 if (pkg != c->phys_proc_id) {
1000 pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
1001 cpu, pkg, c->phys_proc_id);
1002 c->phys_proc_id = pkg;
1003 }
1004 c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
1005#else
1006 c->logical_proc_id = 0;
1007#endif
1008}
1009
1010/*
982 * This does the hard work of actually picking apart the CPU stuff... 1011 * This does the hard work of actually picking apart the CPU stuff...
983 */ 1012 */
984static void identify_cpu(struct cpuinfo_x86 *c) 1013static void identify_cpu(struct cpuinfo_x86 *c)
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
1103#ifdef CONFIG_NUMA 1132#ifdef CONFIG_NUMA
1104 numa_add_cpu(smp_processor_id()); 1133 numa_add_cpu(smp_processor_id());
1105#endif 1134#endif
1106 /* The boot/hotplug time assigment got cleared, restore it */ 1135 sanitize_package_id(c);
1107 c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
1108} 1136}
1109 1137
1110/* 1138/*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9b7cf5c28f5f..85f854b98a9d 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
112 for (; stack < stack_info.end; stack++) { 112 for (; stack < stack_info.end; stack++) {
113 unsigned long real_addr; 113 unsigned long real_addr;
114 int reliable = 0; 114 int reliable = 0;
115 unsigned long addr = *stack; 115 unsigned long addr = READ_ONCE_NOCHECK(*stack);
116 unsigned long *ret_addr_p = 116 unsigned long *ret_addr_p =
117 unwind_get_return_address_ptr(&state); 117 unwind_get_return_address_ptr(&state);
118 118
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 47004010ad5d..ebb4e95fbd74 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu)
521{ 521{
522 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ 522 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
523 523
524 if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) { 524 fpu__drop(fpu);
525 /* FPU state will be reallocated lazily at the first use. */ 525
526 fpu__drop(fpu); 526 /*
527 } else { 527 * Make sure fpstate is cleared and initialized.
528 if (!fpu->fpstate_active) { 528 */
529 fpu__activate_curr(fpu); 529 if (static_cpu_has(X86_FEATURE_FPU)) {
530 user_fpu_begin(); 530 fpu__activate_curr(fpu);
531 } 531 user_fpu_begin();
532 copy_init_fpstate_to_fpregs(); 532 copy_init_fpstate_to_fpregs();
533 } 533 }
534} 534}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b6b2f0264af3..2dabea46f039 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS
665initial_pg_pmd: 665initial_pg_pmd:
666 .fill 1024*KPMDS,4,0 666 .fill 1024*KPMDS,4,0
667#else 667#else
668ENTRY(initial_page_table) 668.globl initial_page_table
669initial_page_table:
669 .fill 1024,4,0 670 .fill 1024,4,0
670#endif 671#endif
671initial_pg_fixmap: 672initial_pg_fixmap:
672 .fill 1024,4,0 673 .fill 1024,4,0
673ENTRY(empty_zero_page) 674.globl empty_zero_page
675empty_zero_page:
674 .fill 4096,1,0 676 .fill 4096,1,0
675ENTRY(swapper_pg_dir) 677.globl swapper_pg_dir
678swapper_pg_dir:
676 .fill 1024,4,0 679 .fill 1024,4,0
677EXPORT_SYMBOL(empty_zero_page) 680EXPORT_SYMBOL(empty_zero_page)
678 681
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
index 764a29f84de7..85195d447a92 100644
--- a/arch/x86/kernel/sysfb_simplefb.c
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
66{ 66{
67 struct platform_device *pd; 67 struct platform_device *pd;
68 struct resource res; 68 struct resource res;
69 unsigned long len; 69 u64 base, size;
70 u32 length;
70 71
71 /* don't use lfb_size as it may contain the whole VMEM instead of only 72 /*
72 * the part that is occupied by the framebuffer */ 73 * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
73 len = mode->height * mode->stride; 74 * upper half of the base address. Assemble the address, then make sure
74 len = PAGE_ALIGN(len); 75 * it is valid and we can actually access it.
75 if (len > (u64)si->lfb_size << 16) { 76 */
77 base = si->lfb_base;
78 if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
79 base |= (u64)si->ext_lfb_base << 32;
80 if (!base || (u64)(resource_size_t)base != base) {
81 printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
82 return -EINVAL;
83 }
84
85 /*
86 * Don't use lfb_size as IORESOURCE size, since it may contain the
87 * entire VMEM, and thus require huge mappings. Use just the part we
88 * need, that is, the part where the framebuffer is located. But verify
89 * that it does not exceed the advertised VMEM.
90 * Note that in case of VBE, the lfb_size is shifted by 16 bits for
91 * historical reasons.
92 */
93 size = si->lfb_size;
94 if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
95 size <<= 16;
96 length = mode->height * mode->stride;
97 length = PAGE_ALIGN(length);
98 if (length > size) {
76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); 99 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
77 return -EINVAL; 100 return -EINVAL;
78 } 101 }
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
81 memset(&res, 0, sizeof(res)); 104 memset(&res, 0, sizeof(res));
82 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; 105 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
83 res.name = simplefb_resname; 106 res.name = simplefb_resname;
84 res.start = si->lfb_base; 107 res.start = base;
85 res.end = si->lfb_base + len - 1; 108 res.end = res.start + length - 1;
86 if (res.end <= res.start) 109 if (res.end <= res.start)
87 return -EINVAL; 110 return -EINVAL;
88 111
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 2d721e533cf4..b80e8bf43cc6 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -7,11 +7,13 @@
7 7
8unsigned long unwind_get_return_address(struct unwind_state *state) 8unsigned long unwind_get_return_address(struct unwind_state *state)
9{ 9{
10 unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
11
10 if (unwind_done(state)) 12 if (unwind_done(state))
11 return 0; 13 return 0;
12 14
13 return ftrace_graph_ret_addr(state->task, &state->graph_idx, 15 return ftrace_graph_ret_addr(state->task, &state->graph_idx,
14 *state->sp, state->sp); 16 addr, state->sp);
15} 17}
16EXPORT_SYMBOL_GPL(unwind_get_return_address); 18EXPORT_SYMBOL_GPL(unwind_get_return_address);
17 19
@@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state)
23 return false; 25 return false;
24 26
25 do { 27 do {
28 unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
29
26 for (state->sp++; state->sp < info->end; state->sp++) 30 for (state->sp++; state->sp < info->end; state->sp++)
27 if (__kernel_text_address(*state->sp)) 31 if (__kernel_text_address(addr))
28 return true; 32 return true;
29 33
30 state->sp = info->next_sp; 34 state->sp = info->next_sp;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index cbd7b92585bb..a3ce9d260d68 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
2105static int em_jmp_far(struct x86_emulate_ctxt *ctxt) 2105static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2106{ 2106{
2107 int rc; 2107 int rc;
2108 unsigned short sel, old_sel; 2108 unsigned short sel;
2109 struct desc_struct old_desc, new_desc; 2109 struct desc_struct new_desc;
2110 const struct x86_emulate_ops *ops = ctxt->ops;
2111 u8 cpl = ctxt->ops->cpl(ctxt); 2110 u8 cpl = ctxt->ops->cpl(ctxt);
2112 2111
2113 /* Assignment of RIP may only fail in 64-bit mode */
2114 if (ctxt->mode == X86EMUL_MODE_PROT64)
2115 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2116 VCPU_SREG_CS);
2117
2118 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2112 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2119 2113
2120 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, 2114 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2124 return rc; 2118 return rc;
2125 2119
2126 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); 2120 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
2127 if (rc != X86EMUL_CONTINUE) { 2121 /* Error handling is not implemented. */
2128 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); 2122 if (rc != X86EMUL_CONTINUE)
2129 /* assigning eip failed; restore the old cs */ 2123 return X86EMUL_UNHANDLEABLE;
2130 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); 2124
2131 return rc;
2132 }
2133 return rc; 2125 return rc;
2134} 2126}
2135 2127
@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2189{ 2181{
2190 int rc; 2182 int rc;
2191 unsigned long eip, cs; 2183 unsigned long eip, cs;
2192 u16 old_cs;
2193 int cpl = ctxt->ops->cpl(ctxt); 2184 int cpl = ctxt->ops->cpl(ctxt);
2194 struct desc_struct old_desc, new_desc; 2185 struct desc_struct new_desc;
2195 const struct x86_emulate_ops *ops = ctxt->ops;
2196
2197 if (ctxt->mode == X86EMUL_MODE_PROT64)
2198 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2199 VCPU_SREG_CS);
2200 2186
2201 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); 2187 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2202 if (rc != X86EMUL_CONTINUE) 2188 if (rc != X86EMUL_CONTINUE)
@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2213 if (rc != X86EMUL_CONTINUE) 2199 if (rc != X86EMUL_CONTINUE)
2214 return rc; 2200 return rc;
2215 rc = assign_eip_far(ctxt, eip, &new_desc); 2201 rc = assign_eip_far(ctxt, eip, &new_desc);
2216 if (rc != X86EMUL_CONTINUE) { 2202 /* Error handling is not implemented. */
2217 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); 2203 if (rc != X86EMUL_CONTINUE)
2218 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); 2204 return X86EMUL_UNHANDLEABLE;
2219 } 2205
2220 return rc; 2206 return rc;
2221} 2207}
2222 2208
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 1a22de70f7f7..6e219e5c07d2 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
94static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) 94static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
95{ 95{
96 ioapic->rtc_status.pending_eoi = 0; 96 ioapic->rtc_status.pending_eoi = 0;
97 bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); 97 bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
98} 98}
99 99
100static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); 100static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 7d2692a49657..1cc6e54436db 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -42,13 +42,13 @@ struct kvm_vcpu;
42 42
43struct dest_map { 43struct dest_map {
44 /* vcpu bitmap where IRQ has been sent */ 44 /* vcpu bitmap where IRQ has been sent */
45 DECLARE_BITMAP(map, KVM_MAX_VCPUS); 45 DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
46 46
47 /* 47 /*
48 * Vector sent to a given vcpu, only valid when 48 * Vector sent to a given vcpu, only valid when
49 * the vcpu's bit in map is set 49 * the vcpu's bit in map is set
50 */ 50 */
51 u8 vectors[KVM_MAX_VCPUS]; 51 u8 vectors[KVM_MAX_VCPU_ID];
52}; 52};
53 53
54 54
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 25810b144b58..6c0191615f23 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
41 bool line_status) 41 bool line_status)
42{ 42{
43 struct kvm_pic *pic = pic_irqchip(kvm); 43 struct kvm_pic *pic = pic_irqchip(kvm);
44
45 /*
46 * XXX: rejecting pic routes when pic isn't in use would be better,
47 * but the default routing table is installed while kvm->arch.vpic is
48 * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
49 */
50 if (!pic)
51 return -1;
52
44 return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); 53 return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
45} 54}
46 55
@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
49 bool line_status) 58 bool line_status)
50{ 59{
51 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 60 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
61
62 if (!ioapic)
63 return -1;
64
52 return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, 65 return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
53 line_status); 66 line_status);
54} 67}
@@ -156,6 +169,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
156} 169}
157 170
158 171
172static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
173 struct kvm *kvm, int irq_source_id, int level,
174 bool line_status)
175{
176 if (!level)
177 return -1;
178
179 return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
180}
181
159int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, 182int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
160 struct kvm *kvm, int irq_source_id, int level, 183 struct kvm *kvm, int irq_source_id, int level,
161 bool line_status) 184 bool line_status)
@@ -163,18 +186,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
163 struct kvm_lapic_irq irq; 186 struct kvm_lapic_irq irq;
164 int r; 187 int r;
165 188
166 if (unlikely(e->type != KVM_IRQ_ROUTING_MSI)) 189 switch (e->type) {
167 return -EWOULDBLOCK; 190 case KVM_IRQ_ROUTING_HV_SINT:
191 return kvm_hv_set_sint(e, kvm, irq_source_id, level,
192 line_status);
168 193
169 if (kvm_msi_route_invalid(kvm, e)) 194 case KVM_IRQ_ROUTING_MSI:
170 return -EINVAL; 195 if (kvm_msi_route_invalid(kvm, e))
196 return -EINVAL;
171 197
172 kvm_set_msi_irq(kvm, e, &irq); 198 kvm_set_msi_irq(kvm, e, &irq);
173 199
174 if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) 200 if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
175 return r; 201 return r;
176 else 202 break;
177 return -EWOULDBLOCK; 203
204 default:
205 break;
206 }
207
208 return -EWOULDBLOCK;
178} 209}
179 210
180int kvm_request_irq_source_id(struct kvm *kvm) 211int kvm_request_irq_source_id(struct kvm *kvm)
@@ -254,16 +285,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
254 srcu_read_unlock(&kvm->irq_srcu, idx); 285 srcu_read_unlock(&kvm->irq_srcu, idx);
255} 286}
256 287
257static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
258 struct kvm *kvm, int irq_source_id, int level,
259 bool line_status)
260{
261 if (!level)
262 return -1;
263
264 return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
265}
266
267int kvm_set_routing_entry(struct kvm *kvm, 288int kvm_set_routing_entry(struct kvm *kvm,
268 struct kvm_kernel_irq_routing_entry *e, 289 struct kvm_kernel_irq_routing_entry *e,
269 const struct kvm_irq_routing_entry *ue) 290 const struct kvm_irq_routing_entry *ue)
@@ -423,18 +444,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
423 srcu_read_unlock(&kvm->irq_srcu, idx); 444 srcu_read_unlock(&kvm->irq_srcu, idx);
424} 445}
425 446
426int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
427 int irq_source_id, int level, bool line_status)
428{
429 switch (irq->type) {
430 case KVM_IRQ_ROUTING_HV_SINT:
431 return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
432 line_status);
433 default:
434 return -EWOULDBLOCK;
435 }
436}
437
438void kvm_arch_irq_routing_update(struct kvm *kvm) 447void kvm_arch_irq_routing_update(struct kvm *kvm)
439{ 448{
440 kvm_hv_irq_routing_update(kvm); 449 kvm_hv_irq_routing_update(kvm);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 23b99f305382..6f69340f9fa3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
138 *mask = dest_id & 0xff; 138 *mask = dest_id & 0xff;
139 return true; 139 return true;
140 case KVM_APIC_MODE_XAPIC_CLUSTER: 140 case KVM_APIC_MODE_XAPIC_CLUSTER:
141 *cluster = map->xapic_cluster_map[dest_id >> 4]; 141 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
142 *mask = dest_id & 0xf; 142 *mask = dest_id & 0xf;
143 return true; 143 return true;
144 default: 144 default:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7e30c720d0c5..073eaeabc2a7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
210 struct kvm_shared_msrs *locals 210 struct kvm_shared_msrs *locals
211 = container_of(urn, struct kvm_shared_msrs, urn); 211 = container_of(urn, struct kvm_shared_msrs, urn);
212 struct kvm_shared_msr_values *values; 212 struct kvm_shared_msr_values *values;
213 unsigned long flags;
213 214
215 /*
216 * Disabling irqs at this point since the following code could be
217 * interrupted and executed through kvm_arch_hardware_disable()
218 */
219 local_irq_save(flags);
220 if (locals->registered) {
221 locals->registered = false;
222 user_return_notifier_unregister(urn);
223 }
224 local_irq_restore(flags);
214 for (slot = 0; slot < shared_msrs_global.nr; ++slot) { 225 for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
215 values = &locals->values[slot]; 226 values = &locals->values[slot];
216 if (values->host != values->curr) { 227 if (values->host != values->curr) {
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
218 values->curr = values->host; 229 values->curr = values->host;
219 } 230 }
220 } 231 }
221 locals->registered = false;
222 user_return_notifier_unregister(urn);
223} 232}
224 233
225static void shared_msr_update(unsigned slot, u32 msr) 234static void shared_msr_update(unsigned slot, u32 msr)
@@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
1724 1733
1725static u64 __get_kvmclock_ns(struct kvm *kvm) 1734static u64 __get_kvmclock_ns(struct kvm *kvm)
1726{ 1735{
1727 struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
1728 struct kvm_arch *ka = &kvm->arch; 1736 struct kvm_arch *ka = &kvm->arch;
1729 s64 ns; 1737 struct pvclock_vcpu_time_info hv_clock;
1730 1738
1731 if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) { 1739 spin_lock(&ka->pvclock_gtod_sync_lock);
1732 u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc()); 1740 if (!ka->use_master_clock) {
1733 ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc); 1741 spin_unlock(&ka->pvclock_gtod_sync_lock);
1734 } else { 1742 return ktime_get_boot_ns() + ka->kvmclock_offset;
1735 ns = ktime_get_boot_ns() + ka->kvmclock_offset;
1736 } 1743 }
1737 1744
1738 return ns; 1745 hv_clock.tsc_timestamp = ka->master_cycle_now;
1746 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
1747 spin_unlock(&ka->pvclock_gtod_sync_lock);
1748
1749 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
1750 &hv_clock.tsc_shift,
1751 &hv_clock.tsc_to_system_mul);
1752 return __pvclock_read_cycles(&hv_clock, rdtsc());
1739} 1753}
1740 1754
1741u64 get_kvmclock_ns(struct kvm *kvm) 1755u64 get_kvmclock_ns(struct kvm *kvm)
@@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2596 case KVM_CAP_PIT_STATE2: 2610 case KVM_CAP_PIT_STATE2:
2597 case KVM_CAP_SET_IDENTITY_MAP_ADDR: 2611 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2598 case KVM_CAP_XEN_HVM: 2612 case KVM_CAP_XEN_HVM:
2599 case KVM_CAP_ADJUST_CLOCK:
2600 case KVM_CAP_VCPU_EVENTS: 2613 case KVM_CAP_VCPU_EVENTS:
2601 case KVM_CAP_HYPERV: 2614 case KVM_CAP_HYPERV:
2602 case KVM_CAP_HYPERV_VAPIC: 2615 case KVM_CAP_HYPERV_VAPIC:
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
2623#endif 2636#endif
2624 r = 1; 2637 r = 1;
2625 break; 2638 break;
2639 case KVM_CAP_ADJUST_CLOCK:
2640 r = KVM_CLOCK_TSC_STABLE;
2641 break;
2626 case KVM_CAP_X86_SMM: 2642 case KVM_CAP_X86_SMM:
2627 /* SMBASE is usually relocated above 1M on modern chipsets, 2643 /* SMBASE is usually relocated above 1M on modern chipsets,
2628 * and SMM handlers might indeed rely on 4G segment limits, 2644 * and SMM handlers might indeed rely on 4G segment limits,
@@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3415 }; 3431 };
3416 case KVM_SET_VAPIC_ADDR: { 3432 case KVM_SET_VAPIC_ADDR: {
3417 struct kvm_vapic_addr va; 3433 struct kvm_vapic_addr va;
3434 int idx;
3418 3435
3419 r = -EINVAL; 3436 r = -EINVAL;
3420 if (!lapic_in_kernel(vcpu)) 3437 if (!lapic_in_kernel(vcpu))
@@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
3422 r = -EFAULT; 3439 r = -EFAULT;
3423 if (copy_from_user(&va, argp, sizeof va)) 3440 if (copy_from_user(&va, argp, sizeof va))
3424 goto out; 3441 goto out;
3442 idx = srcu_read_lock(&vcpu->kvm->srcu);
3425 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 3443 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
3444 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3426 break; 3445 break;
3427 } 3446 }
3428 case KVM_X86_SETUP_MCE: { 3447 case KVM_X86_SETUP_MCE: {
@@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
4103 struct kvm_clock_data user_ns; 4122 struct kvm_clock_data user_ns;
4104 u64 now_ns; 4123 u64 now_ns;
4105 4124
4106 now_ns = get_kvmclock_ns(kvm); 4125 local_irq_disable();
4126 now_ns = __get_kvmclock_ns(kvm);
4107 user_ns.clock = now_ns; 4127 user_ns.clock = now_ns;
4108 user_ns.flags = 0; 4128 user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
4129 local_irq_enable();
4109 memset(&user_ns.pad, 0, sizeof(user_ns.pad)); 4130 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
4110 4131
4111 r = -EFAULT; 4132 r = -EFAULT;
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 79ae939970d3..fcd06f7526de 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
135 if (early_recursion_flag > 2) 135 if (early_recursion_flag > 2)
136 goto halt_loop; 136 goto halt_loop;
137 137
138 if (regs->cs != __KERNEL_CS) 138 /*
139 * Old CPUs leave the high bits of CS on the stack
140 * undefined. I'm not sure which CPUs do this, but at least
141 * the 486 DX works this way.
142 */
143 if ((regs->cs & 0xFFFF) != __KERNEL_CS)
139 goto fail; 144 goto fail;
140 145
141 /* 146 /*
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index bf99aa7005eb..936a488d6cf6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
861 int count = 0, pg_shift = 0; 861 int count = 0, pg_shift = 0;
862 void *new_memmap = NULL; 862 void *new_memmap = NULL;
863 efi_status_t status; 863 efi_status_t status;
864 phys_addr_t pa; 864 unsigned long pa;
865 865
866 efi.systab = NULL; 866 efi.systab = NULL;
867 867
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 58b0f801f66f..319148bd4b05 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -31,6 +31,7 @@
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/reboot.h> 32#include <linux/reboot.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/ucs2_string.h>
34 35
35#include <asm/setup.h> 36#include <asm/setup.h>
36#include <asm/page.h> 37#include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
211 memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); 212 memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
212} 213}
213 214
215/*
216 * Wrapper for slow_virt_to_phys() that handles NULL addresses.
217 */
218static inline phys_addr_t
219virt_to_phys_or_null_size(void *va, unsigned long size)
220{
221 bool bad_size;
222
223 if (!va)
224 return 0;
225
226 if (virt_addr_valid(va))
227 return virt_to_phys(va);
228
229 /*
230 * A fully aligned variable on the stack is guaranteed not to
231 * cross a page bounary. Try to catch strings on the stack by
232 * checking that 'size' is a power of two.
233 */
234 bad_size = size > PAGE_SIZE || !is_power_of_2(size);
235
236 WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
237
238 return slow_virt_to_phys(va);
239}
240
241#define virt_to_phys_or_null(addr) \
242 virt_to_phys_or_null_size((addr), sizeof(*(addr)))
243
214int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) 244int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
215{ 245{
216 unsigned long pfn, text; 246 unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
494 524
495 spin_lock(&rtc_lock); 525 spin_lock(&rtc_lock);
496 526
497 phys_tm = virt_to_phys(tm); 527 phys_tm = virt_to_phys_or_null(tm);
498 phys_tc = virt_to_phys(tc); 528 phys_tc = virt_to_phys_or_null(tc);
499 529
500 status = efi_thunk(get_time, phys_tm, phys_tc); 530 status = efi_thunk(get_time, phys_tm, phys_tc);
501 531
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
511 541
512 spin_lock(&rtc_lock); 542 spin_lock(&rtc_lock);
513 543
514 phys_tm = virt_to_phys(tm); 544 phys_tm = virt_to_phys_or_null(tm);
515 545
516 status = efi_thunk(set_time, phys_tm); 546 status = efi_thunk(set_time, phys_tm);
517 547
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
529 559
530 spin_lock(&rtc_lock); 560 spin_lock(&rtc_lock);
531 561
532 phys_enabled = virt_to_phys(enabled); 562 phys_enabled = virt_to_phys_or_null(enabled);
533 phys_pending = virt_to_phys(pending); 563 phys_pending = virt_to_phys_or_null(pending);
534 phys_tm = virt_to_phys(tm); 564 phys_tm = virt_to_phys_or_null(tm);
535 565
536 status = efi_thunk(get_wakeup_time, phys_enabled, 566 status = efi_thunk(get_wakeup_time, phys_enabled,
537 phys_pending, phys_tm); 567 phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
549 579
550 spin_lock(&rtc_lock); 580 spin_lock(&rtc_lock);
551 581
552 phys_tm = virt_to_phys(tm); 582 phys_tm = virt_to_phys_or_null(tm);
553 583
554 status = efi_thunk(set_wakeup_time, enabled, phys_tm); 584 status = efi_thunk(set_wakeup_time, enabled, phys_tm);
555 585
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
558 return status; 588 return status;
559} 589}
560 590
591static unsigned long efi_name_size(efi_char16_t *name)
592{
593 return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
594}
561 595
562static efi_status_t 596static efi_status_t
563efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, 597efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
567 u32 phys_name, phys_vendor, phys_attr; 601 u32 phys_name, phys_vendor, phys_attr;
568 u32 phys_data_size, phys_data; 602 u32 phys_data_size, phys_data;
569 603
570 phys_data_size = virt_to_phys(data_size); 604 phys_data_size = virt_to_phys_or_null(data_size);
571 phys_vendor = virt_to_phys(vendor); 605 phys_vendor = virt_to_phys_or_null(vendor);
572 phys_name = virt_to_phys(name); 606 phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
573 phys_attr = virt_to_phys(attr); 607 phys_attr = virt_to_phys_or_null(attr);
574 phys_data = virt_to_phys(data); 608 phys_data = virt_to_phys_or_null_size(data, *data_size);
575 609
576 status = efi_thunk(get_variable, phys_name, phys_vendor, 610 status = efi_thunk(get_variable, phys_name, phys_vendor,
577 phys_attr, phys_data_size, phys_data); 611 phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
586 u32 phys_name, phys_vendor, phys_data; 620 u32 phys_name, phys_vendor, phys_data;
587 efi_status_t status; 621 efi_status_t status;
588 622
589 phys_name = virt_to_phys(name); 623 phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
590 phys_vendor = virt_to_phys(vendor); 624 phys_vendor = virt_to_phys_or_null(vendor);
591 phys_data = virt_to_phys(data); 625 phys_data = virt_to_phys_or_null_size(data, data_size);
592 626
593 /* If data_size is > sizeof(u32) we've got problems */ 627 /* If data_size is > sizeof(u32) we've got problems */
594 status = efi_thunk(set_variable, phys_name, phys_vendor, 628 status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
605 efi_status_t status; 639 efi_status_t status;
606 u32 phys_name_size, phys_name, phys_vendor; 640 u32 phys_name_size, phys_name, phys_vendor;
607 641
608 phys_name_size = virt_to_phys(name_size); 642 phys_name_size = virt_to_phys_or_null(name_size);
609 phys_vendor = virt_to_phys(vendor); 643 phys_vendor = virt_to_phys_or_null(vendor);
610 phys_name = virt_to_phys(name); 644 phys_name = virt_to_phys_or_null_size(name, *name_size);
611 645
612 status = efi_thunk(get_next_variable, phys_name_size, 646 status = efi_thunk(get_next_variable, phys_name_size,
613 phys_name, phys_vendor); 647 phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
621 efi_status_t status; 655 efi_status_t status;
622 u32 phys_count; 656 u32 phys_count;
623 657
624 phys_count = virt_to_phys(count); 658 phys_count = virt_to_phys_or_null(count);
625 status = efi_thunk(get_next_high_mono_count, phys_count); 659 status = efi_thunk(get_next_high_mono_count, phys_count);
626 660
627 return status; 661 return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
633{ 667{
634 u32 phys_data; 668 u32 phys_data;
635 669
636 phys_data = virt_to_phys(data); 670 phys_data = virt_to_phys_or_null_size(data, data_size);
637 671
638 efi_thunk(reset_system, reset_type, status, data_size, phys_data); 672 efi_thunk(reset_system, reset_type, status, data_size, phys_data);
639} 673}
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
661 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 695 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
662 return EFI_UNSUPPORTED; 696 return EFI_UNSUPPORTED;
663 697
664 phys_storage = virt_to_phys(storage_space); 698 phys_storage = virt_to_phys_or_null(storage_space);
665 phys_remaining = virt_to_phys(remaining_space); 699 phys_remaining = virt_to_phys_or_null(remaining_space);
666 phys_max = virt_to_phys(max_variable_size); 700 phys_max = virt_to_phys_or_null(max_variable_size);
667 701
668 status = efi_thunk(query_variable_info, attr, phys_storage, 702 status = efi_thunk(query_variable_info, attr, phys_storage,
669 phys_remaining, phys_max); 703 phys_remaining, phys_max);
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 429d08be7848..dd6cfa4ad3ac 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
28obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o 28obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
29# MISC Devices 29# MISC Devices
30obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o 30obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
31obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o 31obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index de734134bc8d..3f1f1c77d090 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * platform_wdt.c: Watchdog platform library file 2 * Intel Merrifield watchdog platform device library file
3 * 3 *
4 * (C) Copyright 2014 Intel Corporation 4 * (C) Copyright 2014 Intel Corporation
5 * Author: David Cohen <david.a.cohen@linux.intel.com> 5 * Author: David Cohen <david.a.cohen@linux.intel.com>
@@ -14,7 +14,9 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/platform_data/intel-mid_wdt.h> 16#include <linux/platform_data/intel-mid_wdt.h>
17
17#include <asm/intel-mid.h> 18#include <asm/intel-mid.h>
19#include <asm/intel_scu_ipc.h>
18#include <asm/io_apic.h> 20#include <asm/io_apic.h>
19 21
20#define TANGIER_EXT_TIMER0_MSI 15 22#define TANGIER_EXT_TIMER0_MSI 15
@@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
50 .probe = tangier_probe, 52 .probe = tangier_probe,
51}; 53};
52 54
53static int __init register_mid_wdt(void) 55static int wdt_scu_status_change(struct notifier_block *nb,
56 unsigned long code, void *data)
54{ 57{
55 if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) { 58 if (code == SCU_DOWN) {
56 wdt_dev.dev.platform_data = &tangier_pdata; 59 platform_device_unregister(&wdt_dev);
57 return platform_device_register(&wdt_dev); 60 return 0;
58 } 61 }
59 62
60 return -ENODEV; 63 return platform_device_register(&wdt_dev);
61} 64}
62 65
66static struct notifier_block wdt_scu_notifier = {
67 .notifier_call = wdt_scu_status_change,
68};
69
70static int __init register_mid_wdt(void)
71{
72 if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
73 return -ENODEV;
74
75 wdt_dev.dev.platform_data = &tangier_pdata;
76
77 /*
78 * We need to be sure that the SCU IPC is ready before watchdog device
79 * can be registered:
80 */
81 intel_scu_notifier_add(&wdt_scu_notifier);
82
83 return 0;
84}
63rootfs_initcall(register_mid_wdt); 85rootfs_initcall(register_mid_wdt);
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index 5d3b45ad1c03..67375dda451c 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
272} 272}
273EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); 273EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
274 274
275pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
276{
277 struct mid_pwr *pwr = midpwr;
278 int id, reg, bit;
279 u32 power;
280
281 if (!pwr || !pwr->available)
282 return PCI_UNKNOWN;
283
284 id = intel_mid_pwr_get_lss_id(pdev);
285 if (id < 0)
286 return PCI_UNKNOWN;
287
288 reg = (id * LSS_PWS_BITS) / 32;
289 bit = (id * LSS_PWS_BITS) % 32;
290 power = mid_pwr_get_state(pwr, reg);
291 return (__force pci_power_t)((power >> bit) & 3);
292}
293
275void intel_mid_pwr_power_off(void) 294void intel_mid_pwr_power_off(void)
276{ 295{
277 struct mid_pwr *pwr = midpwr; 296 struct mid_pwr *pwr = midpwr;
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index ac58c1616408..555b9fa0ad43 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
16 16
17KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large 17KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
18KBUILD_CFLAGS += -m$(BITS) 18KBUILD_CFLAGS += -m$(BITS)
19KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
19 20
20$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE 21$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
21 $(call if_changed,ld) 22 $(call if_changed,ld)
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index de9b14b2d348..cd400af4a6b2 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6)
767#define __NR_pwritev2 347 767#define __NR_pwritev2 347
768__SYSCALL(347, sys_pwritev2, 6) 768__SYSCALL(347, sys_pwritev2, 6)
769 769
770#define __NR_syscall_count 348 770#define __NR_pkey_mprotect 348
771__SYSCALL(348, sys_pkey_mprotect, 4)
772#define __NR_pkey_alloc 349
773__SYSCALL(349, sys_pkey_alloc, 2)
774#define __NR_pkey_free 350
775__SYSCALL(350, sys_pkey_free, 1)
776
777#define __NR_syscall_count 351
771 778
772/* 779/*
773 * sysxtensa syscall handler 780 * sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 9a5bcd0381a7..be81e69b25bc 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -172,10 +172,11 @@ void __init time_init(void)
172{ 172{
173 of_clk_init(NULL); 173 of_clk_init(NULL);
174#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT 174#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
175 printk("Calibrating CPU frequency "); 175 pr_info("Calibrating CPU frequency ");
176 calibrate_ccount(); 176 calibrate_ccount();
177 printk("%d.%02d MHz\n", (int)ccount_freq/1000000, 177 pr_cont("%d.%02d MHz\n",
178 (int)(ccount_freq/10000)%100); 178 (int)ccount_freq / 1000000,
179 (int)(ccount_freq / 10000) % 100);
179#else 180#else
180 ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; 181 ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
181#endif 182#endif
@@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
210void calibrate_delay(void) 211void calibrate_delay(void)
211{ 212{
212 loops_per_jiffy = ccount_freq / HZ; 213 loops_per_jiffy = ccount_freq / HZ;
213 printk("Calibrating delay loop (skipped)... " 214 pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n",
214 "%lu.%02lu BogoMIPS preset\n", 215 loops_per_jiffy / (1000000 / HZ),
215 loops_per_jiffy/(1000000/HZ), 216 (loops_per_jiffy / (10000 / HZ)) % 100);
216 (loops_per_jiffy/(10000/HZ)) % 100);
217} 217}
218#endif 218#endif
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index d02fc304b31c..ce37d5b899fe 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs)
465 465
466 for (i = 0; i < 16; i++) { 466 for (i = 0; i < 16; i++) {
467 if ((i % 8) == 0) 467 if ((i % 8) == 0)
468 printk(KERN_INFO "a%02d:", i); 468 pr_info("a%02d:", i);
469 printk(KERN_CONT " %08lx", regs->areg[i]); 469 pr_cont(" %08lx", regs->areg[i]);
470 } 470 }
471 printk(KERN_CONT "\n"); 471 pr_cont("\n");
472 472 pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
473 printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", 473 regs->pc, regs->ps, regs->depc, regs->excvaddr);
474 regs->pc, regs->ps, regs->depc, regs->excvaddr); 474 pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
475 printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", 475 regs->lbeg, regs->lend, regs->lcount, regs->sar);
476 regs->lbeg, regs->lend, regs->lcount, regs->sar);
477 if (user_mode(regs)) 476 if (user_mode(regs))
478 printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", 477 pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
479 regs->windowbase, regs->windowstart, regs->wmask, 478 regs->windowbase, regs->windowstart, regs->wmask,
480 regs->syscall); 479 regs->syscall);
481} 480}
482 481
483static int show_trace_cb(struct stackframe *frame, void *data) 482static int show_trace_cb(struct stackframe *frame, void *data)
484{ 483{
485 if (kernel_text_address(frame->pc)) { 484 if (kernel_text_address(frame->pc)) {
486 printk(" [<%08lx>] ", frame->pc); 485 pr_cont(" [<%08lx>]", frame->pc);
487 print_symbol("%s\n", frame->pc); 486 print_symbol(" %s\n", frame->pc);
488 } 487 }
489 return 0; 488 return 0;
490} 489}
@@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp)
494 if (!sp) 493 if (!sp)
495 sp = stack_pointer(task); 494 sp = stack_pointer(task);
496 495
497 printk("Call Trace:"); 496 pr_info("Call Trace:\n");
498#ifdef CONFIG_KALLSYMS
499 printk("\n");
500#endif
501 walk_stackframe(sp, show_trace_cb, NULL); 497 walk_stackframe(sp, show_trace_cb, NULL);
502 printk("\n"); 498#ifndef CONFIG_KALLSYMS
499 pr_cont("\n");
500#endif
503} 501}
504 502
505/*
506 * This routine abuses get_user()/put_user() to reference pointers
507 * with at least a bit of error checking ...
508 */
509
510static int kstack_depth_to_print = 24; 503static int kstack_depth_to_print = 24;
511 504
512void show_stack(struct task_struct *task, unsigned long *sp) 505void show_stack(struct task_struct *task, unsigned long *sp)
@@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp)
518 sp = stack_pointer(task); 511 sp = stack_pointer(task);
519 stack = sp; 512 stack = sp;
520 513
521 printk("\nStack: "); 514 pr_info("Stack:\n");
522 515
523 for (i = 0; i < kstack_depth_to_print; i++) { 516 for (i = 0; i < kstack_depth_to_print; i++) {
524 if (kstack_end(sp)) 517 if (kstack_end(sp))
525 break; 518 break;
526 if (i && ((i % 8) == 0)) 519 pr_cont(" %08lx", *sp++);
527 printk("\n "); 520 if (i % 8 == 7)
528 printk("%08lx ", *sp++); 521 pr_cont("\n");
529 } 522 }
530 printk("\n");
531 show_trace(task, stack); 523 show_trace(task, stack);
532} 524}
533 525
534void show_code(unsigned int *pc)
535{
536 long i;
537
538 printk("\nCode:");
539
540 for(i = -3 ; i < 6 ; i++) {
541 unsigned long insn;
542 if (__get_user(insn, pc + i)) {
543 printk(" (Bad address in pc)\n");
544 break;
545 }
546 printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
547 }
548}
549
550DEFINE_SPINLOCK(die_lock); 526DEFINE_SPINLOCK(die_lock);
551 527
552void die(const char * str, struct pt_regs * regs, long err) 528void die(const char * str, struct pt_regs * regs, long err)
553{ 529{
554 static int die_counter; 530 static int die_counter;
555 int nl = 0;
556 531
557 console_verbose(); 532 console_verbose();
558 spin_lock_irq(&die_lock); 533 spin_lock_irq(&die_lock);
559 534
560 printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter); 535 pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter,
561#ifdef CONFIG_PREEMPT 536 IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "");
562 printk("PREEMPT ");
563 nl = 1;
564#endif
565 if (nl)
566 printk("\n");
567 show_regs(regs); 537 show_regs(regs);
568 if (!user_mode(regs)) 538 if (!user_mode(regs))
569 show_stack(NULL, (unsigned long*)regs->areg[1]); 539 show_stack(NULL, (unsigned long*)regs->areg[1]);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 2d8466f9e49b..d19b09cdf284 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -214,23 +214,26 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
214 214
215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); 215 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
216 216
217 if (ctx->more) { 217 if (!result && !ctx->more) {
218 err = af_alg_wait_for_completion(
219 crypto_ahash_init(&ctx->req),
220 &ctx->completion);
221 if (err)
222 goto unlock;
223 }
224
225 if (!result || ctx->more) {
218 ctx->more = 0; 226 ctx->more = 0;
219 err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), 227 err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
220 &ctx->completion); 228 &ctx->completion);
221 if (err) 229 if (err)
222 goto unlock; 230 goto unlock;
223 } else if (!result) {
224 err = af_alg_wait_for_completion(
225 crypto_ahash_digest(&ctx->req),
226 &ctx->completion);
227 } 231 }
228 232
229 err = memcpy_to_msg(msg, ctx->result, len); 233 err = memcpy_to_msg(msg, ctx->result, len);
230 234
231 hash_free_result(sk, ctx);
232
233unlock: 235unlock:
236 hash_free_result(sk, ctx);
234 release_sock(sk); 237 release_sock(sk);
235 238
236 return err ?: len; 239 return err ?: len;
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 865f46ea724f..c80765b211cf 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
133 return cert; 133 return cert;
134 134
135error_decode: 135error_decode:
136 kfree(cert->pub->key);
137 kfree(ctx); 136 kfree(ctx);
138error_no_ctx: 137error_no_ctx:
139 x509_free_certificate(cert); 138 x509_free_certificate(cert);
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 52ce17a3dd63..c16c94f88733 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
68 68
69 sg = scatterwalk_ffwd(tmp, sg, start); 69 sg = scatterwalk_ffwd(tmp, sg, start);
70 70
71 if (sg_page(sg) == virt_to_page(buf) &&
72 sg->offset == offset_in_page(buf))
73 return;
74
75 scatterwalk_start(&walk, sg); 71 scatterwalk_start(&walk, sg);
76 scatterwalk_copychunks(buf, &walk, nbytes, out); 72 scatterwalk_copychunks(buf, &walk, nbytes, out);
77 scatterwalk_done(&walk, out, 0); 73 scatterwalk_done(&walk, out, 0);
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 046c4d0394ee..5fb838e592dc 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void)
480 u32 i; 480 u32 i;
481 481
482 /* 482 /*
483 * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which 483 * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
484 * should be zero are indeed zero. This will workaround BIOSs that 484 * should be zero are indeed zero. This will workaround BIOSs that
485 * inadvertently place values in these fields. 485 * inadvertently place values in these fields.
486 * 486 *
487 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located 487 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located
488 * at offset 45, 55, 95, and the word located at offset 109, 110. 488 * at offset 45, 55, 95, and the word located at offset 109, 110.
489 * 489 *
490 * Note: The FADT revision value is unreliable because of BIOS errors. 490 * Note: The FADT revision value is unreliable. Only the length can be
491 * The table length is instead used as the final word on the version. 491 * trusted.
492 *
493 * Note: FADT revision 3 is the ACPI 2.0 version of the FADT.
494 */ 492 */
495 if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) { 493 if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
496 acpi_gbl_FADT.preferred_profile = 0; 494 acpi_gbl_FADT.preferred_profile = 0;
497 acpi_gbl_FADT.pstate_control = 0; 495 acpi_gbl_FADT.pstate_control = 0;
498 acpi_gbl_FADT.cst_control = 0; 496 acpi_gbl_FADT.cst_control = 0;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index deb0ff78eba8..54abb26b7366 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -47,32 +47,15 @@ static void acpi_sleep_tts_switch(u32 acpi_state)
47 } 47 }
48} 48}
49 49
50static void acpi_sleep_pts_switch(u32 acpi_state) 50static int tts_notify_reboot(struct notifier_block *this,
51{
52 acpi_status status;
53
54 status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state);
55 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
56 /*
57 * OS can't evaluate the _PTS object correctly. Some warning
58 * message will be printed. But it won't break anything.
59 */
60 printk(KERN_NOTICE "Failure in evaluating _PTS object\n");
61 }
62}
63
64static int sleep_notify_reboot(struct notifier_block *this,
65 unsigned long code, void *x) 51 unsigned long code, void *x)
66{ 52{
67 acpi_sleep_tts_switch(ACPI_STATE_S5); 53 acpi_sleep_tts_switch(ACPI_STATE_S5);
68
69 acpi_sleep_pts_switch(ACPI_STATE_S5);
70
71 return NOTIFY_DONE; 54 return NOTIFY_DONE;
72} 55}
73 56
74static struct notifier_block sleep_notifier = { 57static struct notifier_block tts_notifier = {
75 .notifier_call = sleep_notify_reboot, 58 .notifier_call = tts_notify_reboot,
76 .next = NULL, 59 .next = NULL,
77 .priority = 0, 60 .priority = 0,
78}; 61};
@@ -916,9 +899,9 @@ int __init acpi_sleep_init(void)
916 pr_info(PREFIX "(supports%s)\n", supported); 899 pr_info(PREFIX "(supports%s)\n", supported);
917 900
918 /* 901 /*
919 * Register the sleep_notifier to reboot notifier list so that the _TTS 902 * Register the tts_notifier to reboot notifier list so that the _TTS
920 * and _PTS object can also be evaluated when the system enters S5. 903 * object can also be evaluated when the system enters S5.
921 */ 904 */
922 register_reboot_notifier(&sleep_notifier); 905 register_reboot_notifier(&tts_notifier);
923 return 0; 906 return 0;
924} 907}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9669fc7c19df..74f4c662f776 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1436,13 +1436,6 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
1436 "ahci: MRSM is on, fallback to single MSI\n"); 1436 "ahci: MRSM is on, fallback to single MSI\n");
1437 pci_free_irq_vectors(pdev); 1437 pci_free_irq_vectors(pdev);
1438 } 1438 }
1439
1440 /*
1441 * -ENOSPC indicated we don't have enough vectors. Don't bother
1442 * trying a single vectors for any other error:
1443 */
1444 if (nvec < 0 && nvec != -ENOSPC)
1445 return nvec;
1446 } 1439 }
1447 1440
1448 /* 1441 /*
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9cceb4a875a5..c4eb4ae9c3aa 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1088,7 +1088,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
1088 desc[1] = tf->command; /* status */ 1088 desc[1] = tf->command; /* status */
1089 desc[2] = tf->device; 1089 desc[2] = tf->device;
1090 desc[3] = tf->nsect; 1090 desc[3] = tf->nsect;
1091 desc[0] = 0; 1091 desc[7] = 0;
1092 if (tf->flags & ATA_TFLAG_LBA48) { 1092 if (tf->flags & ATA_TFLAG_LBA48) {
1093 desc[8] |= 0x80; 1093 desc[8] |= 0x80;
1094 if (tf->hob_nsect) 1094 if (tf->hob_nsect)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 04365b17ee67..5163c8f918cb 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1403,7 +1403,8 @@ static ssize_t hot_remove_store(struct class *class,
1403 zram = idr_find(&zram_index_idr, dev_id); 1403 zram = idr_find(&zram_index_idr, dev_id);
1404 if (zram) { 1404 if (zram) {
1405 ret = zram_remove(zram); 1405 ret = zram_remove(zram);
1406 idr_remove(&zram_index_idr, dev_id); 1406 if (!ret)
1407 idr_remove(&zram_index_idr, dev_id);
1407 } else { 1408 } else {
1408 ret = -ENODEV; 1409 ret = -ENODEV;
1409 } 1410 }
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index b49e61320952..fc9e8891eae3 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -484,7 +484,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
484} 484}
485 485
486static const struct of_device_id bt_bmc_match[] = { 486static const struct of_device_id bt_bmc_match[] = {
487 { .compatible = "aspeed,ast2400-bt-bmc" }, 487 { .compatible = "aspeed,ast2400-ibt-bmc" },
488 { }, 488 { },
489}; 489};
490 490
@@ -502,4 +502,4 @@ module_platform_driver(bt_bmc_driver);
502MODULE_DEVICE_TABLE(of, bt_bmc_match); 502MODULE_DEVICE_TABLE(of, bt_bmc_match);
503MODULE_LICENSE("GPL"); 503MODULE_LICENSE("GPL");
504MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>"); 504MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
505MODULE_DESCRIPTION("Linux device interface to the BT interface"); 505MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface");
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index f21e9b7afd1a..e3eed5a78404 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -20,7 +20,7 @@ config CLK_BCM_KONA
20 20
21config COMMON_CLK_IPROC 21config COMMON_CLK_IPROC
22 bool "Broadcom iProc clock support" 22 bool "Broadcom iProc clock support"
23 depends on ARCH_BCM_IPROC || COMPILE_TEST 23 depends on ARCH_BCM_IPROC || ARCH_BCM_63XX || COMPILE_TEST
24 depends on COMMON_CLK 24 depends on COMMON_CLK
25 default ARCH_BCM_IPROC 25 default ARCH_BCM_IPROC
26 help 26 help
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index edf3b96b3b73..1d99292e2039 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
685 } 685 }
686 686
687 /* register clk-provider */ 687 /* register clk-provider */
688 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 688 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
689 689
690 return; 690 return;
691 691
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 0718e831475f..3b784b593afd 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
382 } 382 }
383 383
384 /* register clk-provider */ 384 /* register clk-provider */
385 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 385 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
386 386
387 return; 387 return;
388 388
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 8802a2dd56ac..f674778fb3ac 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np)
82 hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0", 82 hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
83 "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); 83 "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
84 84
85 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); 85 of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
86} 86}
87CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); 87CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 79596463e0d9..fc75a335a7ce 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -143,7 +143,7 @@ static SUNXI_CCU_NKM_WITH_MUX_GATE_LOCK(pll_mipi_clk, "pll-mipi",
143 4, 2, /* K */ 143 4, 2, /* K */
144 0, 4, /* M */ 144 0, 4, /* M */
145 21, 0, /* mux */ 145 21, 0, /* mux */
146 BIT(31), /* gate */ 146 BIT(31) | BIT(23) | BIT(22), /* gate */
147 BIT(28), /* lock */ 147 BIT(28), /* lock */
148 CLK_SET_RATE_UNGATE); 148 CLK_SET_RATE_UNGATE);
149 149
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = {
191static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu", 191static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
192 0x050, 0, 3, axi_div_table, 0); 192 0x050, 0, 3, axi_div_table, 0);
193 193
194#define SUN6I_A31_AHB1_REG 0x054
195
194static const char * const ahb1_parents[] = { "osc32k", "osc24M", 196static const char * const ahb1_parents[] = { "osc32k", "osc24M",
195 "axi", "pll-periph" }; 197 "axi", "pll-periph" };
196 198
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
1230 val &= BIT(16); 1232 val &= BIT(16);
1231 writel(val, reg + SUN6I_A31_PLL_MIPI_REG); 1233 writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
1232 1234
1235 /* Force AHB1 to PLL6 / 3 */
1236 val = readl(reg + SUN6I_A31_AHB1_REG);
1237 /* set PLL6 pre-div = 3 */
1238 val &= ~GENMASK(7, 6);
1239 val |= 0x2 << 6;
1240 /* select PLL6 / pre-div */
1241 val &= ~GENMASK(13, 12);
1242 val |= 0x3 << 12;
1243 writel(val, reg + SUN6I_A31_AHB1_REG);
1244
1233 sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); 1245 sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
1234 1246
1235 ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, 1247 ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 96b40ca57697..9bd1f78a0547 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -131,7 +131,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
131 8, 4, /* N */ 131 8, 4, /* N */
132 4, 2, /* K */ 132 4, 2, /* K */
133 0, 4, /* M */ 133 0, 4, /* M */
134 BIT(31), /* gate */ 134 BIT(31) | BIT(23) | BIT(22), /* gate */
135 BIT(28), /* lock */ 135 BIT(28), /* lock */
136 CLK_SET_RATE_UNGATE); 136 CLK_SET_RATE_UNGATE);
137 137
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 838b22aa8b67..f2c9274b8bd5 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req)
373 else 373 else
374 calcp = 3; 374 calcp = 3;
375 375
376 calcm = (req->parent_rate >> calcp) - 1; 376 calcm = (div >> calcp) - 1;
377 377
378 req->rate = (req->parent_rate >> calcp) / (calcm + 1); 378 req->rate = (req->parent_rate >> calcp) / (calcm + 1);
379 req->m = calcm; 379 req->m = calcm;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 156aad167cd6..954a64c7757b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -137,7 +137,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str,
137 } 137 }
138 138
139 buf = it_page + it->offset; 139 buf = it_page + it->offset;
140 len = min(tlen, it->length); 140 len = min_t(size_t, tlen, it->length);
141 print_hex_dump(level, prefix_str, prefix_type, rowsize, 141 print_hex_dump(level, prefix_str, prefix_type, rowsize,
142 groupsize, buf, len, ascii); 142 groupsize, buf, len, ascii);
143 tlen -= len; 143 tlen -= len;
@@ -4583,6 +4583,15 @@ static int __init caam_algapi_init(void)
4583 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) 4583 if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
4584 continue; 4584 continue;
4585 4585
4586 /*
4587 * Check support for AES modes not available
4588 * on LP devices.
4589 */
4590 if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
4591 if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
4592 OP_ALG_AAI_XTS)
4593 continue;
4594
4586 t_alg = caam_alg_alloc(alg); 4595 t_alg = caam_alg_alloc(alg);
4587 if (IS_ERR(t_alg)) { 4596 if (IS_ERR(t_alg)) {
4588 err = PTR_ERR(t_alg); 4597 err = PTR_ERR(t_alg);
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 0e499bfca41c..3d94ff20fdca 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -270,8 +270,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
270 if (!dax_dev->alive) 270 if (!dax_dev->alive)
271 return -ENXIO; 271 return -ENXIO;
272 272
273 /* prevent private / writable mappings from being established */ 273 /* prevent private mappings from being established */
274 if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) { 274 if ((vma->vm_flags & VM_SHARED) != VM_SHARED) {
275 dev_info(dev, "%s: %s: fail, attempted private mapping\n", 275 dev_info(dev, "%s: %s: fail, attempted private mapping\n",
276 current->comm, func); 276 current->comm, func);
277 return -EINVAL; 277 return -EINVAL;
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 4a15fa5df98b..73c6ce93a0d9 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -78,7 +78,9 @@ static int dax_pmem_probe(struct device *dev)
78 nsio = to_nd_namespace_io(&ndns->dev); 78 nsio = to_nd_namespace_io(&ndns->dev);
79 79
80 /* parse the 'pfn' info block via ->rw_bytes */ 80 /* parse the 'pfn' info block via ->rw_bytes */
81 devm_nsio_enable(dev, nsio); 81 rc = devm_nsio_enable(dev, nsio);
82 if (rc)
83 return rc;
82 altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap); 84 altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
83 if (IS_ERR(altmap)) 85 if (IS_ERR(altmap))
84 return PTR_ERR(altmap); 86 return PTR_ERR(altmap);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index af63a6bcf564..141aefbe37ec 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -306,6 +306,7 @@ config MMP_TDMA
306 depends on ARCH_MMP || COMPILE_TEST 306 depends on ARCH_MMP || COMPILE_TEST
307 select DMA_ENGINE 307 select DMA_ENGINE
308 select MMP_SRAM if ARCH_MMP 308 select MMP_SRAM if ARCH_MMP
309 select GENERIC_ALLOCATOR
309 help 310 help
310 Support the MMP Two-Channel DMA engine. 311 Support the MMP Two-Channel DMA engine.
311 This engine used for MMP Audio DMA and pxa910 SQU. 312 This engine used for MMP Audio DMA and pxa910 SQU.
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index bac5f023013b..d5ba43a87a68 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
317 317
318 while (val) { 318 while (val) {
319 u32 desc, len; 319 u32 desc, len;
320 int error;
321
322 error = pm_runtime_get(cdd->ddev.dev);
323 if (error < 0)
324 dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
325 __func__, error);
320 326
321 q_num = __fls(val); 327 q_num = __fls(val);
322 val &= ~(1 << q_num); 328 val &= ~(1 << q_num);
@@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
338 dma_cookie_complete(&c->txd); 344 dma_cookie_complete(&c->txd);
339 dmaengine_desc_get_callback_invoke(&c->txd, NULL); 345 dmaengine_desc_get_callback_invoke(&c->txd, NULL);
340 346
341 /* Paired with cppi41_dma_issue_pending */
342 pm_runtime_mark_last_busy(cdd->ddev.dev); 347 pm_runtime_mark_last_busy(cdd->ddev.dev);
343 pm_runtime_put_autosuspend(cdd->ddev.dev); 348 pm_runtime_put_autosuspend(cdd->ddev.dev);
344 } 349 }
@@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
362 int error; 367 int error;
363 368
364 error = pm_runtime_get_sync(cdd->ddev.dev); 369 error = pm_runtime_get_sync(cdd->ddev.dev);
365 if (error < 0) 370 if (error < 0) {
371 dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
372 __func__, error);
373 pm_runtime_put_noidle(cdd->ddev.dev);
374
366 return error; 375 return error;
376 }
367 377
368 dma_cookie_init(chan); 378 dma_cookie_init(chan);
369 dma_async_tx_descriptor_init(&c->txd, chan); 379 dma_async_tx_descriptor_init(&c->txd, chan);
@@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
385 int error; 395 int error;
386 396
387 error = pm_runtime_get_sync(cdd->ddev.dev); 397 error = pm_runtime_get_sync(cdd->ddev.dev);
388 if (error < 0) 398 if (error < 0) {
399 pm_runtime_put_noidle(cdd->ddev.dev);
400
389 return; 401 return;
402 }
390 403
391 WARN_ON(!list_empty(&cdd->pending)); 404 WARN_ON(!list_empty(&cdd->pending));
392 405
@@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
460 struct cppi41_dd *cdd = c->cdd; 473 struct cppi41_dd *cdd = c->cdd;
461 int error; 474 int error;
462 475
463 /* PM runtime paired with dmaengine_desc_get_callback_invoke */
464 error = pm_runtime_get(cdd->ddev.dev); 476 error = pm_runtime_get(cdd->ddev.dev);
465 if ((error != -EINPROGRESS) && error < 0) { 477 if ((error != -EINPROGRESS) && error < 0) {
478 pm_runtime_put_noidle(cdd->ddev.dev);
466 dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", 479 dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
467 error); 480 error);
468 481
@@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
473 push_desc_queue(c); 486 push_desc_queue(c);
474 else 487 else
475 pending_desc(c); 488 pending_desc(c);
489
490 pm_runtime_mark_last_busy(cdd->ddev.dev);
491 pm_runtime_put_autosuspend(cdd->ddev.dev);
476} 492}
477 493
478static u32 get_host_pd0(u32 length) 494static u32 get_host_pd0(u32 length)
@@ -1059,8 +1075,8 @@ err_chans:
1059 deinit_cppi41(dev, cdd); 1075 deinit_cppi41(dev, cdd);
1060err_init_cppi: 1076err_init_cppi:
1061 pm_runtime_dont_use_autosuspend(dev); 1077 pm_runtime_dont_use_autosuspend(dev);
1062 pm_runtime_put_sync(dev);
1063err_get_sync: 1078err_get_sync:
1079 pm_runtime_put_sync(dev);
1064 pm_runtime_disable(dev); 1080 pm_runtime_disable(dev);
1065 iounmap(cdd->usbss_mem); 1081 iounmap(cdd->usbss_mem);
1066 iounmap(cdd->ctrl_mem); 1082 iounmap(cdd->ctrl_mem);
@@ -1072,7 +1088,12 @@ err_get_sync:
1072static int cppi41_dma_remove(struct platform_device *pdev) 1088static int cppi41_dma_remove(struct platform_device *pdev)
1073{ 1089{
1074 struct cppi41_dd *cdd = platform_get_drvdata(pdev); 1090 struct cppi41_dd *cdd = platform_get_drvdata(pdev);
1091 int error;
1075 1092
1093 error = pm_runtime_get_sync(&pdev->dev);
1094 if (error < 0)
1095 dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
1096 __func__, error);
1076 of_dma_controller_free(pdev->dev.of_node); 1097 of_dma_controller_free(pdev->dev.of_node);
1077 dma_async_device_unregister(&cdd->ddev); 1098 dma_async_device_unregister(&cdd->ddev);
1078 1099
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e18a58068bca..77242b37ef87 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
1628 if (echan->slot[0] < 0) { 1628 if (echan->slot[0] < 0) {
1629 dev_err(dev, "Entry slot allocation failed for channel %u\n", 1629 dev_err(dev, "Entry slot allocation failed for channel %u\n",
1630 EDMA_CHAN_SLOT(echan->ch_num)); 1630 EDMA_CHAN_SLOT(echan->ch_num));
1631 ret = echan->slot[0];
1631 goto err_slot; 1632 goto err_slot;
1632 } 1633 }
1633 1634
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 83461994e418..a2358780ab2c 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
578 578
579 burst = convert_burst(8); 579 burst = convert_burst(8);
580 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); 580 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
581 v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 581 v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
582 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 582 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
583 DMA_CHAN_CFG_DST_LINEAR_MODE | 583 DMA_CHAN_CFG_DST_LINEAR_MODE |
584 DMA_CHAN_CFG_SRC_LINEAR_MODE | 584 DMA_CHAN_CFG_SRC_LINEAR_MODE |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d011cb89d25e..ed37e5908b91 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -22,10 +22,6 @@ menuconfig GPIOLIB
22 22
23if GPIOLIB 23if GPIOLIB
24 24
25config GPIO_DEVRES
26 def_bool y
27 depends on HAS_IOMEM
28
29config OF_GPIO 25config OF_GPIO
30 def_bool y 26 def_bool y
31 depends on OF 27 depends on OF
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index ab28a2daeacc..d074c2299393 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,7 @@
2 2
3ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG 3ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
4 4
5obj-$(CONFIG_GPIO_DEVRES) += devres.o 5obj-$(CONFIG_GPIOLIB) += devres.o
6obj-$(CONFIG_GPIOLIB) += gpiolib.o 6obj-$(CONFIG_GPIOLIB) += gpiolib.o
7obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o 7obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
8obj-$(CONFIG_OF_GPIO) += gpiolib-of.o 8obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index e422568e14ad..fe731f094257 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc,
372 372
373 bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); 373 bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
374 374
375 memcpy(reg_val, chip->reg_output, NBANK(chip));
376 mutex_lock(&chip->i2c_lock); 375 mutex_lock(&chip->i2c_lock);
376 memcpy(reg_val, chip->reg_output, NBANK(chip));
377 for (bank = 0; bank < NBANK(chip); bank++) { 377 for (bank = 0; bank < NBANK(chip); bank++) {
378 bank_mask = mask[bank / sizeof(*mask)] >> 378 bank_mask = mask[bank / sizeof(*mask)] >>
379 ((bank % sizeof(*mask)) * 8); 379 ((bank % sizeof(*mask)) * 8);
380 if (bank_mask) { 380 if (bank_mask) {
381 bank_val = bits[bank / sizeof(*bits)] >> 381 bank_val = bits[bank / sizeof(*bits)] >>
382 ((bank % sizeof(*bits)) * 8); 382 ((bank % sizeof(*bits)) * 8);
383 bank_val &= bank_mask;
383 reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val; 384 reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val;
384 } 385 }
385 } 386 }
@@ -607,7 +608,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
607 608
608 if (client->irq && irq_base != -1 609 if (client->irq && irq_base != -1
609 && (chip->driver_data & PCA_INT)) { 610 && (chip->driver_data & PCA_INT)) {
610
611 ret = pca953x_read_regs(chip, 611 ret = pca953x_read_regs(chip,
612 chip->regs->input, chip->irq_stat); 612 chip->regs->input, chip->irq_stat);
613 if (ret) 613 if (ret)
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 5a5a6cb00eea..d6e21f1a70a9 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -97,7 +97,7 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip,
97 if (ret < 0) 97 if (ret < 0)
98 return ret; 98 return ret;
99 99
100 return !!(ret & BIT(pos)); 100 return !(ret & BIT(pos));
101} 101}
102 102
103static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip, 103static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 93ed0e00c578..868128a676ba 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2737,8 +2737,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
2737 if (IS_ERR(desc)) 2737 if (IS_ERR(desc))
2738 return PTR_ERR(desc); 2738 return PTR_ERR(desc);
2739 2739
2740 /* Flush direction if something changed behind our back */ 2740 /*
2741 if (chip->get_direction) { 2741 * If it's fast: flush the direction setting if something changed
2742 * behind our back
2743 */
2744 if (!chip->can_sleep && chip->get_direction) {
2742 int dir = chip->get_direction(chip, offset); 2745 int dir = chip->get_direction(chip, offset);
2743 2746
2744 if (dir) 2747 if (dir)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c2b8496cdf63..121a034fe27d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -391,6 +391,7 @@ struct amdgpu_bo {
391 u64 metadata_flags; 391 u64 metadata_flags;
392 void *metadata; 392 void *metadata;
393 u32 metadata_size; 393 u32 metadata_size;
394 unsigned prime_shared_count;
394 /* list of all virtual address to which this bo 395 /* list of all virtual address to which this bo
395 * is associated to 396 * is associated to
396 */ 397 */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index dae35a96a694..6c343a933182 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -34,6 +34,7 @@ struct amdgpu_atpx {
34 34
35static struct amdgpu_atpx_priv { 35static struct amdgpu_atpx_priv {
36 bool atpx_detected; 36 bool atpx_detected;
37 bool bridge_pm_usable;
37 /* handle for device - and atpx */ 38 /* handle for device - and atpx */
38 acpi_handle dhandle; 39 acpi_handle dhandle;
39 acpi_handle other_handle; 40 acpi_handle other_handle;
@@ -205,7 +206,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
205 atpx->is_hybrid = false; 206 atpx->is_hybrid = false;
206 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { 207 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
207 printk("ATPX Hybrid Graphics\n"); 208 printk("ATPX Hybrid Graphics\n");
208 atpx->functions.power_cntl = false; 209 /*
210 * Disable legacy PM methods only when pcie port PM is usable,
211 * otherwise the device might fail to power off or power on.
212 */
213 atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
209 atpx->is_hybrid = true; 214 atpx->is_hybrid = true;
210 } 215 }
211 216
@@ -555,17 +560,25 @@ static bool amdgpu_atpx_detect(void)
555 struct pci_dev *pdev = NULL; 560 struct pci_dev *pdev = NULL;
556 bool has_atpx = false; 561 bool has_atpx = false;
557 int vga_count = 0; 562 int vga_count = 0;
563 bool d3_supported = false;
564 struct pci_dev *parent_pdev;
558 565
559 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 566 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
560 vga_count++; 567 vga_count++;
561 568
562 has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); 569 has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
570
571 parent_pdev = pci_upstream_bridge(pdev);
572 d3_supported |= parent_pdev && parent_pdev->bridge_d3;
563 } 573 }
564 574
565 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { 575 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
566 vga_count++; 576 vga_count++;
567 577
568 has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); 578 has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
579
580 parent_pdev = pci_upstream_bridge(pdev);
581 d3_supported |= parent_pdev && parent_pdev->bridge_d3;
569 } 582 }
570 583
571 if (has_atpx && vga_count == 2) { 584 if (has_atpx && vga_count == 2) {
@@ -573,6 +586,7 @@ static bool amdgpu_atpx_detect(void)
573 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", 586 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
574 acpi_method_name); 587 acpi_method_name);
575 amdgpu_atpx_priv.atpx_detected = true; 588 amdgpu_atpx_priv.atpx_detected = true;
589 amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
576 amdgpu_atpx_init(); 590 amdgpu_atpx_init();
577 return true; 591 return true;
578 } 592 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 651115dcce12..c02db01f6583 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
132 entry->priority = min(info[i].bo_priority, 132 entry->priority = min(info[i].bo_priority,
133 AMDGPU_BO_LIST_MAX_PRIORITY); 133 AMDGPU_BO_LIST_MAX_PRIORITY);
134 entry->tv.bo = &entry->robj->tbo; 134 entry->tv.bo = &entry->robj->tbo;
135 entry->tv.shared = true; 135 entry->tv.shared = !entry->robj->prime_shared_count;
136 136
137 if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) 137 if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
138 gds_obj = entry->robj; 138 gds_obj = entry->robj;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9e16e975f31a..deee2db36fce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -636,12 +636,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
636 return false; 636 return false;
637 637
638 if (amdgpu_passthrough(adev)) { 638 if (amdgpu_passthrough(adev)) {
639 /* for FIJI: In whole GPU pass-through virtualization case 639 /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
640 * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) 640 * some old smc fw still need driver do vPost otherwise gpu hang, while
641 * so amdgpu_card_posted return false and driver will incorrectly skip vPost. 641 * those smc fw version above 22.15 doesn't have this flaw, so we force
642 * but if we force vPost do in pass-through case, the driver reload will hang. 642 * vpost executed for smc version below 22.15
643 * whether doing vPost depends on amdgpu_card_posted if smc version is above
644 * 00160e00 for FIJI.
645 */ 643 */
646 if (adev->asic_type == CHIP_FIJI) { 644 if (adev->asic_type == CHIP_FIJI) {
647 int err; 645 int err;
@@ -652,22 +650,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
652 return true; 650 return true;
653 651
654 fw_ver = *((uint32_t *)adev->pm.fw->data + 69); 652 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
655 if (fw_ver >= 0x00160e00) 653 if (fw_ver < 0x00160e00)
656 return !amdgpu_card_posted(adev); 654 return true;
657 } 655 }
658 } else {
659 /* in bare-metal case, amdgpu_card_posted return false
660 * after system reboot/boot, and return true if driver
661 * reloaded.
662 * we shouldn't do vPost after driver reload otherwise GPU
663 * could hang.
664 */
665 if (amdgpu_card_posted(adev))
666 return false;
667 } 656 }
668 657 return !amdgpu_card_posted(adev);
669 /* we assume vPost is neede for all other cases */
670 return true;
671} 658}
672 659
673/** 660/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 7700dc22f243..3826d5aea0a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
74 if (ret) 74 if (ret)
75 return ERR_PTR(ret); 75 return ERR_PTR(ret);
76 76
77 bo->prime_shared_count = 1;
77 return &bo->gem_base; 78 return &bo->gem_base;
78} 79}
79 80
80int amdgpu_gem_prime_pin(struct drm_gem_object *obj) 81int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
81{ 82{
82 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 83 struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
83 int ret = 0; 84 long ret = 0;
84 85
85 ret = amdgpu_bo_reserve(bo, false); 86 ret = amdgpu_bo_reserve(bo, false);
86 if (unlikely(ret != 0)) 87 if (unlikely(ret != 0))
87 return ret; 88 return ret;
88 89
90 /*
91 * Wait for all shared fences to complete before we switch to future
92 * use of exclusive fence on this prime shared bo.
93 */
94 ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
95 MAX_SCHEDULE_TIMEOUT);
96 if (unlikely(ret < 0)) {
97 DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
98 amdgpu_bo_unreserve(bo);
99 return ret;
100 }
101
89 /* pin buffer into GTT */ 102 /* pin buffer into GTT */
90 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); 103 ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
104 if (likely(ret == 0))
105 bo->prime_shared_count++;
106
91 amdgpu_bo_unreserve(bo); 107 amdgpu_bo_unreserve(bo);
92 return ret; 108 return ret;
93} 109}
@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
102 return; 118 return;
103 119
104 amdgpu_bo_unpin(bo); 120 amdgpu_bo_unpin(bo);
121 if (bo->prime_shared_count)
122 bo->prime_shared_count--;
105 amdgpu_bo_unreserve(bo); 123 amdgpu_bo_unreserve(bo);
106} 124}
107 125
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 28e748d688e2..85621a77335d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1483,8 +1483,6 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1483 table_info->vddgfx_lookup_table, vv_id, &sclk)) { 1483 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1484 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 1484 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1485 PHM_PlatformCaps_ClockStretcher)) { 1485 PHM_PlatformCaps_ClockStretcher)) {
1486 if (table_info == NULL)
1487 return -EINVAL;
1488 sclk_table = table_info->vdd_dep_on_sclk; 1486 sclk_table = table_info->vdd_dep_on_sclk;
1489 1487
1490 for (j = 1; j < sclk_table->count; j++) { 1488 for (j = 1; j < sclk_table->count; j++) {
@@ -3000,19 +2998,19 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3000 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) 2998 if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3001 data->highest_mclk = memory_clock; 2999 data->highest_mclk = memory_clock;
3002 3000
3003 performance_level = &(ps->performance_levels
3004 [ps->performance_level_count++]);
3005
3006 PP_ASSERT_WITH_CODE( 3001 PP_ASSERT_WITH_CODE(
3007 (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), 3002 (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
3008 "Performance levels exceeds SMC limit!", 3003 "Performance levels exceeds SMC limit!",
3009 return -EINVAL); 3004 return -EINVAL);
3010 3005
3011 PP_ASSERT_WITH_CODE( 3006 PP_ASSERT_WITH_CODE(
3012 (ps->performance_level_count <= 3007 (ps->performance_level_count <
3013 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), 3008 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3014 "Performance levels exceeds Driver limit!", 3009 "Performance levels exceeds Driver limit, Skip!",
3015 return -EINVAL); 3010 return 0);
3011
3012 performance_level = &(ps->performance_levels
3013 [ps->performance_level_count++]);
3016 3014
3017 /* Performance levels are arranged from low to high. */ 3015 /* Performance levels are arranged from low to high. */
3018 performance_level->memory_clock = memory_clock; 3016 performance_level->memory_clock = memory_clock;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 006b22071685..8db8e209d915 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2214,6 +2214,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
2214int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) 2214int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
2215{ 2215{
2216 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); 2216 struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
2217 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2217 uint32_t tmp; 2218 uint32_t tmp;
2218 int result; 2219 int result;
2219 bool error = false; 2220 bool error = false;
@@ -2233,8 +2234,10 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
2233 offsetof(SMU74_Firmware_Header, SoftRegisters), 2234 offsetof(SMU74_Firmware_Header, SoftRegisters),
2234 &tmp, SMC_RAM_END); 2235 &tmp, SMC_RAM_END);
2235 2236
2236 if (!result) 2237 if (!result) {
2238 data->soft_regs_start = tmp;
2237 smu_data->smu7_data.soft_regs_start = tmp; 2239 smu_data->smu7_data.soft_regs_start = tmp;
2240 }
2238 2241
2239 error |= (0 != result); 2242 error |= (0 != result);
2240 2243
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index b7a8b2ac4055..b69c66b4897e 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -14,170 +14,45 @@
14 * 14 *
15 */ 15 */
16 16
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc.h>
18#include <drm/drm_encoder_slave.h> 18#include <drm/drm_encoder_slave.h>
19#include <drm/drm_atomic_helper.h>
20 19
21#include "arcpgu.h" 20#include "arcpgu.h"
22 21
23struct arcpgu_drm_connector {
24 struct drm_connector connector;
25 struct drm_encoder_slave *encoder_slave;
26};
27
28static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
29{
30 const struct drm_encoder_slave_funcs *sfuncs;
31 struct drm_encoder_slave *slave;
32 struct arcpgu_drm_connector *con =
33 container_of(connector, struct arcpgu_drm_connector, connector);
34
35 slave = con->encoder_slave;
36 if (slave == NULL) {
37 dev_err(connector->dev->dev,
38 "connector_get_modes: cannot find slave encoder for connector\n");
39 return 0;
40 }
41
42 sfuncs = slave->slave_funcs;
43 if (sfuncs->get_modes == NULL)
44 return 0;
45
46 return sfuncs->get_modes(&slave->base, connector);
47}
48
49static enum drm_connector_status
50arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
51{
52 enum drm_connector_status status = connector_status_unknown;
53 const struct drm_encoder_slave_funcs *sfuncs;
54 struct drm_encoder_slave *slave;
55
56 struct arcpgu_drm_connector *con =
57 container_of(connector, struct arcpgu_drm_connector, connector);
58
59 slave = con->encoder_slave;
60 if (slave == NULL) {
61 dev_err(connector->dev->dev,
62 "connector_detect: cannot find slave encoder for connector\n");
63 return status;
64 }
65
66 sfuncs = slave->slave_funcs;
67 if (sfuncs && sfuncs->detect)
68 return sfuncs->detect(&slave->base, connector);
69
70 dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
71 return status;
72}
73
74static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
75{
76 drm_connector_unregister(connector);
77 drm_connector_cleanup(connector);
78}
79
80static const struct drm_connector_helper_funcs
81arcpgu_drm_connector_helper_funcs = {
82 .get_modes = arcpgu_drm_connector_get_modes,
83};
84
85static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
86 .dpms = drm_helper_connector_dpms,
87 .reset = drm_atomic_helper_connector_reset,
88 .detect = arcpgu_drm_connector_detect,
89 .fill_modes = drm_helper_probe_single_connector_modes,
90 .destroy = arcpgu_drm_connector_destroy,
91 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
92 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
93};
94
95static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
96 .dpms = drm_i2c_encoder_dpms,
97 .mode_fixup = drm_i2c_encoder_mode_fixup,
98 .mode_set = drm_i2c_encoder_mode_set,
99 .prepare = drm_i2c_encoder_prepare,
100 .commit = drm_i2c_encoder_commit,
101 .detect = drm_i2c_encoder_detect,
102};
103
104static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { 22static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
105 .destroy = drm_encoder_cleanup, 23 .destroy = drm_encoder_cleanup,
106}; 24};
107 25
108int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) 26int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
109{ 27{
110 struct arcpgu_drm_connector *arcpgu_connector; 28 struct drm_encoder *encoder;
111 struct drm_i2c_encoder_driver *driver; 29 struct drm_bridge *bridge;
112 struct drm_encoder_slave *encoder; 30
113 struct drm_connector *connector; 31 int ret = 0;
114 struct i2c_client *i2c_slave;
115 int ret;
116 32
117 encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); 33 encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
118 if (encoder == NULL) 34 if (encoder == NULL)
119 return -ENOMEM; 35 return -ENOMEM;
120 36
121 i2c_slave = of_find_i2c_device_by_node(np); 37 /* Locate drm bridge from the hdmi encoder DT node */
122 if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { 38 bridge = of_drm_find_bridge(np);
123 dev_err(drm->dev, "failed to find i2c slave encoder\n"); 39 if (!bridge)
124 return -EPROBE_DEFER;
125 }
126
127 if (i2c_slave->dev.driver == NULL) {
128 dev_err(drm->dev, "failed to find i2c slave driver\n");
129 return -EPROBE_DEFER; 40 return -EPROBE_DEFER;
130 }
131 41
132 driver = 42 encoder->possible_crtcs = 1;
133 to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver)); 43 encoder->possible_clones = 0;
134 ret = driver->encoder_init(i2c_slave, drm, encoder); 44 ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
135 if (ret) {
136 dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
137 return ret;
138 }
139
140 encoder->base.possible_crtcs = 1;
141 encoder->base.possible_clones = 0;
142 ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
143 DRM_MODE_ENCODER_TMDS, NULL); 45 DRM_MODE_ENCODER_TMDS, NULL);
144 if (ret) 46 if (ret)
145 return ret; 47 return ret;
146 48
147 drm_encoder_helper_add(&encoder->base, 49 /* Link drm_bridge to encoder */
148 &arcpgu_drm_encoder_helper_funcs); 50 bridge->encoder = encoder;
149 51 encoder->bridge = bridge;
150 arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
151 GFP_KERNEL);
152 if (!arcpgu_connector) {
153 ret = -ENOMEM;
154 goto error_encoder_cleanup;
155 }
156
157 connector = &arcpgu_connector->connector;
158 drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
159 ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
160 DRM_MODE_CONNECTOR_HDMIA);
161 if (ret < 0) {
162 dev_err(drm->dev, "failed to initialize drm connector\n");
163 goto error_encoder_cleanup;
164 }
165 52
166 ret = drm_mode_connector_attach_encoder(connector, &encoder->base); 53 ret = drm_bridge_attach(drm, bridge);
167 if (ret < 0) { 54 if (ret)
168 dev_err(drm->dev, "could not attach connector to encoder\n"); 55 drm_encoder_cleanup(encoder);
169 drm_connector_unregister(connector);
170 goto error_connector_cleanup;
171 }
172
173 arcpgu_connector->encoder_slave = encoder;
174
175 return 0;
176
177error_connector_cleanup:
178 drm_connector_cleanup(connector);
179 56
180error_encoder_cleanup:
181 drm_encoder_cleanup(&encoder->base);
182 return ret; 57 return ret;
183} 58}
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index bbaa55add2d2..7d4e5aa77195 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -150,15 +150,14 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
150 clk_prepare_enable(hdlcd->clk); 150 clk_prepare_enable(hdlcd->clk);
151 hdlcd_crtc_mode_set_nofb(crtc); 151 hdlcd_crtc_mode_set_nofb(crtc);
152 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); 152 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
153 drm_crtc_vblank_on(crtc);
153} 154}
154 155
155static void hdlcd_crtc_disable(struct drm_crtc *crtc) 156static void hdlcd_crtc_disable(struct drm_crtc *crtc)
156{ 157{
157 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); 158 struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
158 159
159 if (!crtc->state->active) 160 drm_crtc_vblank_off(crtc);
160 return;
161
162 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); 161 hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
163 clk_disable_unprepare(hdlcd->clk); 162 clk_disable_unprepare(hdlcd->clk);
164} 163}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index faab7f9bd3b7..e5f4f4a6546d 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -378,7 +378,6 @@ err_register:
378 } 378 }
379err_fbdev: 379err_fbdev:
380 drm_kms_helper_poll_fini(drm); 380 drm_kms_helper_poll_fini(drm);
381 drm_mode_config_cleanup(drm);
382 drm_vblank_cleanup(drm); 381 drm_vblank_cleanup(drm);
383err_vblank: 382err_vblank:
384 pm_runtime_disable(drm->dev); 383 pm_runtime_disable(drm->dev);
@@ -388,6 +387,7 @@ err_unload:
388 drm_irq_uninstall(drm); 387 drm_irq_uninstall(drm);
389 of_reserved_mem_device_release(drm->dev); 388 of_reserved_mem_device_release(drm->dev);
390err_free: 389err_free:
390 drm_mode_config_cleanup(drm);
391 dev_set_drvdata(dev, NULL); 391 dev_set_drvdata(dev, NULL);
392 drm_dev_unref(drm); 392 drm_dev_unref(drm);
393 393
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0ad2c47f808f..71c3473476c7 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -254,10 +254,12 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
254 req->value = dev->mode_config.async_page_flip; 254 req->value = dev->mode_config.async_page_flip;
255 break; 255 break;
256 case DRM_CAP_PAGE_FLIP_TARGET: 256 case DRM_CAP_PAGE_FLIP_TARGET:
257 req->value = 1; 257 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
258 drm_for_each_crtc(crtc, dev) { 258 req->value = 1;
259 if (!crtc->funcs->page_flip_target) 259 drm_for_each_crtc(crtc, dev) {
260 req->value = 0; 260 if (!crtc->funcs->page_flip_target)
261 req->value = 0;
262 }
261 } 263 }
262 break; 264 break;
263 case DRM_CAP_CURSOR_WIDTH: 265 case DRM_CAP_CURSOR_WIDTH:
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e8fb6ef947ee..38eaa63afb31 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1907,6 +1907,8 @@ err_disable_pm_runtime:
1907err_hdmiphy: 1907err_hdmiphy:
1908 if (hdata->hdmiphy_port) 1908 if (hdata->hdmiphy_port)
1909 put_device(&hdata->hdmiphy_port->dev); 1909 put_device(&hdata->hdmiphy_port->dev);
1910 if (hdata->regs_hdmiphy)
1911 iounmap(hdata->regs_hdmiphy);
1910err_ddc: 1912err_ddc:
1911 put_device(&hdata->ddc_adpt->dev); 1913 put_device(&hdata->ddc_adpt->dev);
1912 1914
@@ -1929,6 +1931,9 @@ static int hdmi_remove(struct platform_device *pdev)
1929 if (hdata->hdmiphy_port) 1931 if (hdata->hdmiphy_port)
1930 put_device(&hdata->hdmiphy_port->dev); 1932 put_device(&hdata->hdmiphy_port->dev);
1931 1933
1934 if (hdata->regs_hdmiphy)
1935 iounmap(hdata->regs_hdmiphy);
1936
1932 put_device(&hdata->ddc_adpt->dev); 1937 put_device(&hdata->ddc_adpt->dev);
1933 1938
1934 return 0; 1939 return 0;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index b2d5e188b1b8..deb57435cc89 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -25,8 +25,13 @@
25static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, 25static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
26 struct drm_crtc_state *old_crtc_state) 26 struct drm_crtc_state *old_crtc_state)
27{ 27{
28 struct drm_device *dev = crtc->dev;
29 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
28 struct drm_pending_vblank_event *event = crtc->state->event; 30 struct drm_pending_vblank_event *event = crtc->state->event;
29 31
32 regmap_write(fsl_dev->regmap,
33 DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
34
30 if (event) { 35 if (event) {
31 crtc->state->event = NULL; 36 crtc->state->event = NULL;
32 37
@@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
39 } 44 }
40} 45}
41 46
42static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) 47static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
48 struct drm_crtc_state *old_crtc_state)
43{ 49{
44 struct drm_device *dev = crtc->dev; 50 struct drm_device *dev = crtc->dev;
45 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 51 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
46 52
53 /* always disable planes on the CRTC */
54 drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
55
47 drm_crtc_vblank_off(crtc); 56 drm_crtc_vblank_off(crtc);
48 57
49 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, 58 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
@@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
122} 131}
123 132
124static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { 133static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
134 .atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
125 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, 135 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
126 .disable = fsl_dcu_drm_disable_crtc,
127 .enable = fsl_dcu_drm_crtc_enable, 136 .enable = fsl_dcu_drm_crtc_enable,
128 .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, 137 .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
129}; 138};
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0b0d1cb11641..320e4728c9b9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev)
59 59
60 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); 60 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0);
61 regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); 61 regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0);
62 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
63 DCU_UPDATE_MODE_READREG);
64 62
65 return ret; 63 return ret;
66} 64}
@@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
139 drm_handle_vblank(dev, 0); 137 drm_handle_vblank(dev, 0);
140 138
141 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); 139 regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status);
142 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
143 DCU_UPDATE_MODE_READREG);
144 140
145 return IRQ_HANDLED; 141 return IRQ_HANDLED;
146} 142}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 9e6f7d8112b3..a99f48847420 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
160 DCU_LAYER_POST_SKIP(0) | 160 DCU_LAYER_POST_SKIP(0) |
161 DCU_LAYER_PRE_SKIP(0)); 161 DCU_LAYER_PRE_SKIP(0));
162 } 162 }
163 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
164 DCU_MODE_DCU_MODE_MASK,
165 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
166 regmap_write(fsl_dev->regmap,
167 DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG);
168 163
169 return; 164 return;
170} 165}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b7a7ed82c325..962aae631f13 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12213,7 +12213,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
12213 intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error); 12213 intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
12214 if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) { 12214 if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
12215 ret = -EIO; 12215 ret = -EIO;
12216 goto cleanup; 12216 goto unlock;
12217 } 12217 }
12218 12218
12219 atomic_inc(&intel_crtc->unpin_work_count); 12219 atomic_inc(&intel_crtc->unpin_work_count);
@@ -12301,6 +12301,7 @@ cleanup_unpin:
12301 intel_unpin_fb_obj(fb, crtc->primary->state->rotation); 12301 intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
12302cleanup_pending: 12302cleanup_pending:
12303 atomic_dec(&intel_crtc->unpin_work_count); 12303 atomic_dec(&intel_crtc->unpin_work_count);
12304unlock:
12304 mutex_unlock(&dev->struct_mutex); 12305 mutex_unlock(&dev->struct_mutex);
12305cleanup: 12306cleanup:
12306 crtc->primary->fb = old_fb; 12307 crtc->primary->fb = old_fb;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 019b7ca392d7..c70310206ac5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
80 ddp_comp); 80 ddp_comp);
81 81
82 priv->crtc = crtc; 82 priv->crtc = crtc;
83 writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
83 writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); 84 writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
84} 85}
85 86
@@ -250,13 +251,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
250 if (irq < 0) 251 if (irq < 0)
251 return irq; 252 return irq;
252 253
253 ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
254 IRQF_TRIGGER_NONE, dev_name(dev), priv);
255 if (ret < 0) {
256 dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
257 return ret;
258 }
259
260 comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); 254 comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
261 if (comp_id < 0) { 255 if (comp_id < 0) {
262 dev_err(dev, "Failed to identify by alias: %d\n", comp_id); 256 dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
@@ -272,6 +266,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
272 266
273 platform_set_drvdata(pdev, priv); 267 platform_set_drvdata(pdev, priv);
274 268
269 ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
270 IRQF_TRIGGER_NONE, dev_name(dev), priv);
271 if (ret < 0) {
272 dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
273 return ret;
274 }
275
275 ret = component_add(dev, &mtk_disp_ovl_component_ops); 276 ret = component_add(dev, &mtk_disp_ovl_component_ops);
276 if (ret) 277 if (ret)
277 dev_err(dev, "Failed to add component: %d\n", ret); 278 dev_err(dev, "Failed to add component: %d\n", ret);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 0186e500d2a5..90fb831ef031 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
432 unsigned long pll_rate; 432 unsigned long pll_rate;
433 unsigned int factor; 433 unsigned int factor;
434 434
435 /* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */
435 pix_rate = 1000UL * mode->clock; 436 pix_rate = 1000UL * mode->clock;
436 if (mode->clock <= 74000) 437 if (mode->clock <= 27000)
438 factor = 16 * 3;
439 else if (mode->clock <= 84000)
437 factor = 8 * 3; 440 factor = 8 * 3;
438 else 441 else if (mode->clock <= 167000)
439 factor = 4 * 3; 442 factor = 4 * 3;
443 else
444 factor = 2 * 3;
440 pll_rate = pix_rate * factor; 445 pll_rate = pix_rate * factor;
441 446
442 dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n", 447 dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index df33b3ca6ffd..48cc01fd20c7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -123,7 +123,7 @@ static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
123 unsigned int bpc) 123 unsigned int bpc)
124{ 124{
125 writel(w << 16 | h, comp->regs + DISP_OD_SIZE); 125 writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
126 writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE); 126 writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
127 mtk_dither_set(comp, bpc, DISP_OD_CFG); 127 mtk_dither_set(comp, bpc, DISP_OD_CFG);
128} 128}
129 129
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 28b2044ed9f2..eaa5a2240c0c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -86,7 +86,7 @@
86 86
87#define DSI_PHY_TIMECON0 0x110 87#define DSI_PHY_TIMECON0 0x110
88#define LPX (0xff << 0) 88#define LPX (0xff << 0)
89#define HS_PRPR (0xff << 8) 89#define HS_PREP (0xff << 8)
90#define HS_ZERO (0xff << 16) 90#define HS_ZERO (0xff << 16)
91#define HS_TRAIL (0xff << 24) 91#define HS_TRAIL (0xff << 24)
92 92
@@ -102,10 +102,16 @@
102#define CLK_TRAIL (0xff << 24) 102#define CLK_TRAIL (0xff << 24)
103 103
104#define DSI_PHY_TIMECON3 0x11c 104#define DSI_PHY_TIMECON3 0x11c
105#define CLK_HS_PRPR (0xff << 0) 105#define CLK_HS_PREP (0xff << 0)
106#define CLK_HS_POST (0xff << 8) 106#define CLK_HS_POST (0xff << 8)
107#define CLK_HS_EXIT (0xff << 16) 107#define CLK_HS_EXIT (0xff << 16)
108 108
109#define T_LPX 5
110#define T_HS_PREP 6
111#define T_HS_TRAIL 8
112#define T_HS_EXIT 7
113#define T_HS_ZERO 10
114
109#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) 115#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
110 116
111struct phy; 117struct phy;
@@ -161,20 +167,18 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
161static void dsi_phy_timconfig(struct mtk_dsi *dsi) 167static void dsi_phy_timconfig(struct mtk_dsi *dsi)
162{ 168{
163 u32 timcon0, timcon1, timcon2, timcon3; 169 u32 timcon0, timcon1, timcon2, timcon3;
164 unsigned int ui, cycle_time; 170 u32 ui, cycle_time;
165 unsigned int lpx;
166 171
167 ui = 1000 / dsi->data_rate + 0x01; 172 ui = 1000 / dsi->data_rate + 0x01;
168 cycle_time = 8000 / dsi->data_rate + 0x01; 173 cycle_time = 8000 / dsi->data_rate + 0x01;
169 lpx = 5;
170 174
171 timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx; 175 timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
172 timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 | 176 timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
173 (4 * lpx); 177 T_HS_EXIT << 24;
174 timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | 178 timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
175 (NS_TO_CYCLE(0x150, cycle_time) << 16); 179 (NS_TO_CYCLE(0x150, cycle_time) << 16);
176 timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 | 180 timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
177 NS_TO_CYCLE(0x40, cycle_time); 181 NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
178 182
179 writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); 183 writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
180 writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); 184 writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
@@ -202,19 +206,47 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi)
202{ 206{
203 struct device *dev = dsi->dev; 207 struct device *dev = dsi->dev;
204 int ret; 208 int ret;
209 u64 pixel_clock, total_bits;
210 u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
205 211
206 if (++dsi->refcount != 1) 212 if (++dsi->refcount != 1)
207 return 0; 213 return 0;
208 214
215 switch (dsi->format) {
216 case MIPI_DSI_FMT_RGB565:
217 bit_per_pixel = 16;
218 break;
219 case MIPI_DSI_FMT_RGB666_PACKED:
220 bit_per_pixel = 18;
221 break;
222 case MIPI_DSI_FMT_RGB666:
223 case MIPI_DSI_FMT_RGB888:
224 default:
225 bit_per_pixel = 24;
226 break;
227 }
228
209 /** 229 /**
210 * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio; 230 * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
211 * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000. 231 * htotal_time = htotal * byte_per_pixel / num_lanes
212 * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi. 232 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
213 * we set mipi_ratio is 1.05. 233 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
234 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
214 */ 235 */
215 dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10); 236 pixel_clock = dsi->vm.pixelclock * 1000;
237 htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
238 dsi->vm.hsync_len;
239 htotal_bits = htotal * bit_per_pixel;
240
241 overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
242 T_HS_EXIT;
243 overhead_bits = overhead_cycles * dsi->lanes * 8;
244 total_bits = htotal_bits + overhead_bits;
245
246 dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
247 htotal * dsi->lanes);
216 248
217 ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000); 249 ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
218 if (ret < 0) { 250 if (ret < 0) {
219 dev_err(dev, "Failed to set data rate: %d\n", ret); 251 dev_err(dev, "Failed to set data rate: %d\n", ret);
220 goto err_refcount; 252 goto err_refcount;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 71227deef21b..0e8c4d9af340 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
1133 phy_power_on(hdmi->phy); 1133 phy_power_on(hdmi->phy);
1134 mtk_hdmi_aud_output_config(hdmi, mode); 1134 mtk_hdmi_aud_output_config(hdmi, mode);
1135 1135
1136 mtk_hdmi_setup_audio_infoframe(hdmi);
1137 mtk_hdmi_setup_avi_infoframe(hdmi, mode);
1138 mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
1139 if (mode->flags & DRM_MODE_FLAG_3D_MASK)
1140 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
1141
1142 mtk_hdmi_hw_vid_black(hdmi, false); 1136 mtk_hdmi_hw_vid_black(hdmi, false);
1143 mtk_hdmi_hw_aud_unmute(hdmi); 1137 mtk_hdmi_hw_aud_unmute(hdmi);
1144 mtk_hdmi_hw_send_av_unmute(hdmi); 1138 mtk_hdmi_hw_send_av_unmute(hdmi);
@@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
1401 hdmi->powered = true; 1395 hdmi->powered = true;
1402} 1396}
1403 1397
1398static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi,
1399 struct drm_display_mode *mode)
1400{
1401 mtk_hdmi_setup_audio_infoframe(hdmi);
1402 mtk_hdmi_setup_avi_infoframe(hdmi, mode);
1403 mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
1404 if (mode->flags & DRM_MODE_FLAG_3D_MASK)
1405 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
1406}
1407
1404static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) 1408static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
1405{ 1409{
1406 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); 1410 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
@@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
1409 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); 1413 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
1410 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); 1414 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
1411 phy_power_on(hdmi->phy); 1415 phy_power_on(hdmi->phy);
1416 mtk_hdmi_send_infoframe(hdmi, &hdmi->mode);
1412 1417
1413 hdmi->enabled = true; 1418 hdmi->enabled = true;
1414} 1419}
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index 8a24754b440f..51cb9cfb6646 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
265 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); 265 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
266 unsigned int pre_div; 266 unsigned int pre_div;
267 unsigned int div; 267 unsigned int div;
268 unsigned int pre_ibias;
269 unsigned int hdmi_ibias;
270 unsigned int imp_en;
268 271
269 dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, 272 dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
270 rate, parent_rate); 273 rate, parent_rate);
@@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
298 (0x1 << PLL_BR_SHIFT), 301 (0x1 << PLL_BR_SHIFT),
299 RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | 302 RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
300 RG_HDMITX_PLL_BR); 303 RG_HDMITX_PLL_BR);
301 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN); 304 if (rate < 165000000) {
305 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
306 RG_HDMITX_PRD_IMP_EN);
307 pre_ibias = 0x3;
308 imp_en = 0x0;
309 hdmi_ibias = hdmi_phy->ibias;
310 } else {
311 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
312 RG_HDMITX_PRD_IMP_EN);
313 pre_ibias = 0x6;
314 imp_en = 0xf;
315 hdmi_ibias = hdmi_phy->ibias_up;
316 }
302 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 317 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
303 (0x3 << PRD_IBIAS_CLK_SHIFT) | 318 (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
304 (0x3 << PRD_IBIAS_D2_SHIFT) | 319 (pre_ibias << PRD_IBIAS_D2_SHIFT) |
305 (0x3 << PRD_IBIAS_D1_SHIFT) | 320 (pre_ibias << PRD_IBIAS_D1_SHIFT) |
306 (0x3 << PRD_IBIAS_D0_SHIFT), 321 (pre_ibias << PRD_IBIAS_D0_SHIFT),
307 RG_HDMITX_PRD_IBIAS_CLK | 322 RG_HDMITX_PRD_IBIAS_CLK |
308 RG_HDMITX_PRD_IBIAS_D2 | 323 RG_HDMITX_PRD_IBIAS_D2 |
309 RG_HDMITX_PRD_IBIAS_D1 | 324 RG_HDMITX_PRD_IBIAS_D1 |
310 RG_HDMITX_PRD_IBIAS_D0); 325 RG_HDMITX_PRD_IBIAS_D0);
311 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, 326 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
312 (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN); 327 (imp_en << DRV_IMP_EN_SHIFT),
328 RG_HDMITX_DRV_IMP_EN);
313 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, 329 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
314 (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | 330 (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
315 (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | 331 (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
@@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
318 RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | 334 RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
319 RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); 335 RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
320 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, 336 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
321 (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) | 337 (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
322 (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) | 338 (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
323 (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) | 339 (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
324 (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT), 340 (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
325 RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 | 341 RG_HDMITX_DRV_IBIAS_CLK |
326 RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0); 342 RG_HDMITX_DRV_IBIAS_D2 |
343 RG_HDMITX_DRV_IBIAS_D1 |
344 RG_HDMITX_DRV_IBIAS_D0);
327 return 0; 345 return 0;
328} 346}
329 347
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2fdcd04bc93f..0ae13cd2adda 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -34,6 +34,7 @@ struct radeon_atpx {
34 34
35static struct radeon_atpx_priv { 35static struct radeon_atpx_priv {
36 bool atpx_detected; 36 bool atpx_detected;
37 bool bridge_pm_usable;
37 /* handle for device - and atpx */ 38 /* handle for device - and atpx */
38 acpi_handle dhandle; 39 acpi_handle dhandle;
39 struct radeon_atpx atpx; 40 struct radeon_atpx atpx;
@@ -203,7 +204,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
203 atpx->is_hybrid = false; 204 atpx->is_hybrid = false;
204 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { 205 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
205 printk("ATPX Hybrid Graphics\n"); 206 printk("ATPX Hybrid Graphics\n");
206 atpx->functions.power_cntl = false; 207 /*
208 * Disable legacy PM methods only when pcie port PM is usable,
209 * otherwise the device might fail to power off or power on.
210 */
211 atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
207 atpx->is_hybrid = true; 212 atpx->is_hybrid = true;
208 } 213 }
209 214
@@ -548,11 +553,16 @@ static bool radeon_atpx_detect(void)
548 struct pci_dev *pdev = NULL; 553 struct pci_dev *pdev = NULL;
549 bool has_atpx = false; 554 bool has_atpx = false;
550 int vga_count = 0; 555 int vga_count = 0;
556 bool d3_supported = false;
557 struct pci_dev *parent_pdev;
551 558
552 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 559 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
553 vga_count++; 560 vga_count++;
554 561
555 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); 562 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
563
564 parent_pdev = pci_upstream_bridge(pdev);
565 d3_supported |= parent_pdev && parent_pdev->bridge_d3;
556 } 566 }
557 567
558 /* some newer PX laptops mark the dGPU as a non-VGA display device */ 568 /* some newer PX laptops mark the dGPU as a non-VGA display device */
@@ -560,6 +570,9 @@ static bool radeon_atpx_detect(void)
560 vga_count++; 570 vga_count++;
561 571
562 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); 572 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
573
574 parent_pdev = pci_upstream_bridge(pdev);
575 d3_supported |= parent_pdev && parent_pdev->bridge_d3;
563 } 576 }
564 577
565 if (has_atpx && vga_count == 2) { 578 if (has_atpx && vga_count == 2) {
@@ -567,6 +580,7 @@ static bool radeon_atpx_detect(void)
567 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", 580 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
568 acpi_method_name); 581 acpi_method_name);
569 radeon_atpx_priv.atpx_detected = true; 582 radeon_atpx_priv.atpx_detected = true;
583 radeon_atpx_priv.bridge_pm_usable = d3_supported;
570 radeon_atpx_init(); 584 radeon_atpx_init();
571 return true; 585 return true;
572 } 586 }
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 2891aa914cfa..4ce665349f6b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -141,9 +141,9 @@ static int sun4i_drv_bind(struct device *dev)
141 141
142 /* Create our layers */ 142 /* Create our layers */
143 drv->layers = sun4i_layers_init(drm); 143 drv->layers = sun4i_layers_init(drm);
144 if (!drv->layers) { 144 if (IS_ERR(drv->layers)) {
145 dev_err(drm->dev, "Couldn't create the planes\n"); 145 dev_err(drm->dev, "Couldn't create the planes\n");
146 ret = -EINVAL; 146 ret = PTR_ERR(drv->layers);
147 goto free_drm; 147 goto free_drm;
148 } 148 }
149 149
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index c3ff10f559cc..d198ad7e5323 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -152,15 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
152 152
153 DRM_DEBUG_DRIVER("Enabling RGB output\n"); 153 DRM_DEBUG_DRIVER("Enabling RGB output\n");
154 154
155 if (!IS_ERR(tcon->panel)) { 155 if (!IS_ERR(tcon->panel))
156 drm_panel_prepare(tcon->panel); 156 drm_panel_prepare(tcon->panel);
157 drm_panel_enable(tcon->panel);
158 }
159
160 /* encoder->bridge can be NULL; drm_bridge_enable checks for it */
161 drm_bridge_enable(encoder->bridge);
162 157
163 sun4i_tcon_channel_enable(tcon, 0); 158 sun4i_tcon_channel_enable(tcon, 0);
159
160 if (!IS_ERR(tcon->panel))
161 drm_panel_enable(tcon->panel);
164} 162}
165 163
166static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) 164static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
@@ -171,15 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
171 169
172 DRM_DEBUG_DRIVER("Disabling RGB output\n"); 170 DRM_DEBUG_DRIVER("Disabling RGB output\n");
173 171
174 sun4i_tcon_channel_disable(tcon, 0); 172 if (!IS_ERR(tcon->panel))
173 drm_panel_disable(tcon->panel);
175 174
176 /* encoder->bridge can be NULL; drm_bridge_disable checks for it */ 175 sun4i_tcon_channel_disable(tcon, 0);
177 drm_bridge_disable(encoder->bridge);
178 176
179 if (!IS_ERR(tcon->panel)) { 177 if (!IS_ERR(tcon->panel))
180 drm_panel_disable(tcon->panel);
181 drm_panel_unprepare(tcon->panel); 178 drm_panel_unprepare(tcon->panel);
182 }
183} 179}
184 180
185static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, 181static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 086d8a507157..60d30203a5fa 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -32,6 +32,11 @@
32#include <linux/usb/ch9.h> 32#include <linux/usb/ch9.h>
33#include "hid-ids.h" 33#include "hid-ids.h"
34 34
35#define CP2112_REPORT_MAX_LENGTH 64
36#define CP2112_GPIO_CONFIG_LENGTH 5
37#define CP2112_GPIO_GET_LENGTH 2
38#define CP2112_GPIO_SET_LENGTH 3
39
35enum { 40enum {
36 CP2112_GPIO_CONFIG = 0x02, 41 CP2112_GPIO_CONFIG = 0x02,
37 CP2112_GPIO_GET = 0x03, 42 CP2112_GPIO_GET = 0x03,
@@ -161,6 +166,8 @@ struct cp2112_device {
161 atomic_t read_avail; 166 atomic_t read_avail;
162 atomic_t xfer_avail; 167 atomic_t xfer_avail;
163 struct gpio_chip gc; 168 struct gpio_chip gc;
169 u8 *in_out_buffer;
170 spinlock_t lock;
164}; 171};
165 172
166static int gpio_push_pull = 0xFF; 173static int gpio_push_pull = 0xFF;
@@ -171,62 +178,86 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
171{ 178{
172 struct cp2112_device *dev = gpiochip_get_data(chip); 179 struct cp2112_device *dev = gpiochip_get_data(chip);
173 struct hid_device *hdev = dev->hdev; 180 struct hid_device *hdev = dev->hdev;
174 u8 buf[5]; 181 u8 *buf = dev->in_out_buffer;
182 unsigned long flags;
175 int ret; 183 int ret;
176 184
185 spin_lock_irqsave(&dev->lock, flags);
186
177 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 187 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
178 sizeof(buf), HID_FEATURE_REPORT, 188 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
179 HID_REQ_GET_REPORT); 189 HID_REQ_GET_REPORT);
180 if (ret != sizeof(buf)) { 190 if (ret != CP2112_GPIO_CONFIG_LENGTH) {
181 hid_err(hdev, "error requesting GPIO config: %d\n", ret); 191 hid_err(hdev, "error requesting GPIO config: %d\n", ret);
182 return ret; 192 goto exit;
183 } 193 }
184 194
185 buf[1] &= ~(1 << offset); 195 buf[1] &= ~(1 << offset);
186 buf[2] = gpio_push_pull; 196 buf[2] = gpio_push_pull;
187 197
188 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), 198 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
189 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 199 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
200 HID_REQ_SET_REPORT);
190 if (ret < 0) { 201 if (ret < 0) {
191 hid_err(hdev, "error setting GPIO config: %d\n", ret); 202 hid_err(hdev, "error setting GPIO config: %d\n", ret);
192 return ret; 203 goto exit;
193 } 204 }
194 205
195 return 0; 206 ret = 0;
207
208exit:
209 spin_unlock_irqrestore(&dev->lock, flags);
210 return ret <= 0 ? ret : -EIO;
196} 211}
197 212
198static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) 213static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
199{ 214{
200 struct cp2112_device *dev = gpiochip_get_data(chip); 215 struct cp2112_device *dev = gpiochip_get_data(chip);
201 struct hid_device *hdev = dev->hdev; 216 struct hid_device *hdev = dev->hdev;
202 u8 buf[3]; 217 u8 *buf = dev->in_out_buffer;
218 unsigned long flags;
203 int ret; 219 int ret;
204 220
221 spin_lock_irqsave(&dev->lock, flags);
222
205 buf[0] = CP2112_GPIO_SET; 223 buf[0] = CP2112_GPIO_SET;
206 buf[1] = value ? 0xff : 0; 224 buf[1] = value ? 0xff : 0;
207 buf[2] = 1 << offset; 225 buf[2] = 1 << offset;
208 226
209 ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf), 227 ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
210 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 228 CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
229 HID_REQ_SET_REPORT);
211 if (ret < 0) 230 if (ret < 0)
212 hid_err(hdev, "error setting GPIO values: %d\n", ret); 231 hid_err(hdev, "error setting GPIO values: %d\n", ret);
232
233 spin_unlock_irqrestore(&dev->lock, flags);
213} 234}
214 235
215static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset) 236static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
216{ 237{
217 struct cp2112_device *dev = gpiochip_get_data(chip); 238 struct cp2112_device *dev = gpiochip_get_data(chip);
218 struct hid_device *hdev = dev->hdev; 239 struct hid_device *hdev = dev->hdev;
219 u8 buf[2]; 240 u8 *buf = dev->in_out_buffer;
241 unsigned long flags;
220 int ret; 242 int ret;
221 243
222 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf), 244 spin_lock_irqsave(&dev->lock, flags);
223 HID_FEATURE_REPORT, HID_REQ_GET_REPORT); 245
224 if (ret != sizeof(buf)) { 246 ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
247 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
248 HID_REQ_GET_REPORT);
249 if (ret != CP2112_GPIO_GET_LENGTH) {
225 hid_err(hdev, "error requesting GPIO values: %d\n", ret); 250 hid_err(hdev, "error requesting GPIO values: %d\n", ret);
226 return ret; 251 ret = ret < 0 ? ret : -EIO;
252 goto exit;
227 } 253 }
228 254
229 return (buf[1] >> offset) & 1; 255 ret = (buf[1] >> offset) & 1;
256
257exit:
258 spin_unlock_irqrestore(&dev->lock, flags);
259
260 return ret;
230} 261}
231 262
232static int cp2112_gpio_direction_output(struct gpio_chip *chip, 263static int cp2112_gpio_direction_output(struct gpio_chip *chip,
@@ -234,27 +265,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
234{ 265{
235 struct cp2112_device *dev = gpiochip_get_data(chip); 266 struct cp2112_device *dev = gpiochip_get_data(chip);
236 struct hid_device *hdev = dev->hdev; 267 struct hid_device *hdev = dev->hdev;
237 u8 buf[5]; 268 u8 *buf = dev->in_out_buffer;
269 unsigned long flags;
238 int ret; 270 int ret;
239 271
272 spin_lock_irqsave(&dev->lock, flags);
273
240 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, 274 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
241 sizeof(buf), HID_FEATURE_REPORT, 275 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
242 HID_REQ_GET_REPORT); 276 HID_REQ_GET_REPORT);
243 if (ret != sizeof(buf)) { 277 if (ret != CP2112_GPIO_CONFIG_LENGTH) {
244 hid_err(hdev, "error requesting GPIO config: %d\n", ret); 278 hid_err(hdev, "error requesting GPIO config: %d\n", ret);
245 return ret; 279 goto fail;
246 } 280 }
247 281
248 buf[1] |= 1 << offset; 282 buf[1] |= 1 << offset;
249 buf[2] = gpio_push_pull; 283 buf[2] = gpio_push_pull;
250 284
251 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), 285 ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
252 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 286 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
287 HID_REQ_SET_REPORT);
253 if (ret < 0) { 288 if (ret < 0) {
254 hid_err(hdev, "error setting GPIO config: %d\n", ret); 289 hid_err(hdev, "error setting GPIO config: %d\n", ret);
255 return ret; 290 goto fail;
256 } 291 }
257 292
293 spin_unlock_irqrestore(&dev->lock, flags);
294
258 /* 295 /*
259 * Set gpio value when output direction is already set, 296 * Set gpio value when output direction is already set,
260 * as specified in AN495, Rev. 0.2, cpt. 4.4 297 * as specified in AN495, Rev. 0.2, cpt. 4.4
@@ -262,6 +299,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
262 cp2112_gpio_set(chip, offset, value); 299 cp2112_gpio_set(chip, offset, value);
263 300
264 return 0; 301 return 0;
302
303fail:
304 spin_unlock_irqrestore(&dev->lock, flags);
305 return ret < 0 ? ret : -EIO;
265} 306}
266 307
267static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, 308static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -1007,6 +1048,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1007 struct cp2112_smbus_config_report config; 1048 struct cp2112_smbus_config_report config;
1008 int ret; 1049 int ret;
1009 1050
1051 dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
1052 if (!dev)
1053 return -ENOMEM;
1054
1055 dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
1056 GFP_KERNEL);
1057 if (!dev->in_out_buffer)
1058 return -ENOMEM;
1059
1060 spin_lock_init(&dev->lock);
1061
1010 ret = hid_parse(hdev); 1062 ret = hid_parse(hdev);
1011 if (ret) { 1063 if (ret) {
1012 hid_err(hdev, "parse failed\n"); 1064 hid_err(hdev, "parse failed\n");
@@ -1063,12 +1115,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1063 goto err_power_normal; 1115 goto err_power_normal;
1064 } 1116 }
1065 1117
1066 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1067 if (!dev) {
1068 ret = -ENOMEM;
1069 goto err_power_normal;
1070 }
1071
1072 hid_set_drvdata(hdev, (void *)dev); 1118 hid_set_drvdata(hdev, (void *)dev);
1073 dev->hdev = hdev; 1119 dev->hdev = hdev;
1074 dev->adap.owner = THIS_MODULE; 1120 dev->adap.owner = THIS_MODULE;
@@ -1087,7 +1133,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
1087 1133
1088 if (ret) { 1134 if (ret) {
1089 hid_err(hdev, "error registering i2c adapter\n"); 1135 hid_err(hdev, "error registering i2c adapter\n");
1090 goto err_free_dev; 1136 goto err_power_normal;
1091 } 1137 }
1092 1138
1093 hid_dbg(hdev, "adapter registered\n"); 1139 hid_dbg(hdev, "adapter registered\n");
@@ -1123,8 +1169,6 @@ err_gpiochip_remove:
1123 gpiochip_remove(&dev->gc); 1169 gpiochip_remove(&dev->gc);
1124err_free_i2c: 1170err_free_i2c:
1125 i2c_del_adapter(&dev->adap); 1171 i2c_del_adapter(&dev->adap);
1126err_free_dev:
1127 kfree(dev);
1128err_power_normal: 1172err_power_normal:
1129 hid_hw_power(hdev, PM_HINT_NORMAL); 1173 hid_hw_power(hdev, PM_HINT_NORMAL);
1130err_hid_close: 1174err_hid_close:
@@ -1149,7 +1193,6 @@ static void cp2112_remove(struct hid_device *hdev)
1149 */ 1193 */
1150 hid_hw_close(hdev); 1194 hid_hw_close(hdev);
1151 hid_hw_stop(hdev); 1195 hid_hw_stop(hdev);
1152 kfree(dev);
1153} 1196}
1154 1197
1155static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, 1198static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 76f644deb0a7..c5c5fbe9d605 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
756 756
757 /* Setup wireless link with Logitech Wii wheel */ 757 /* Setup wireless link with Logitech Wii wheel */
758 if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { 758 if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
759 unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 759 const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
760 u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
760 761
761 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), 762 if (!buf) {
762 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 763 ret = -ENOMEM;
764 goto err_free;
765 }
763 766
767 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
768 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
764 if (ret >= 0) { 769 if (ret >= 0) {
765 /* insert a little delay of 10 jiffies ~ 40ms */ 770 /* insert a little delay of 10 jiffies ~ 40ms */
766 wait_queue_head_t wait; 771 wait_queue_head_t wait;
@@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
772 buf[1] = 0xB2; 777 buf[1] = 0xB2;
773 get_random_bytes(&buf[2], 2); 778 get_random_bytes(&buf[2], 2);
774 779
775 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), 780 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
776 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 781 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
777 } 782 }
783 kfree(buf);
778 } 784 }
779 785
780 if (drv_data->quirks & LG_FF) 786 if (drv_data->quirks & LG_FF)
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index d6fa496d0ca2..20b40ad26325 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev,
493static int magicmouse_probe(struct hid_device *hdev, 493static int magicmouse_probe(struct hid_device *hdev,
494 const struct hid_device_id *id) 494 const struct hid_device_id *id)
495{ 495{
496 __u8 feature[] = { 0xd7, 0x01 }; 496 const u8 feature[] = { 0xd7, 0x01 };
497 u8 *buf;
497 struct magicmouse_sc *msc; 498 struct magicmouse_sc *msc;
498 struct hid_report *report; 499 struct hid_report *report;
499 int ret; 500 int ret;
@@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev,
544 } 545 }
545 report->size = 6; 546 report->size = 6;
546 547
548 buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
549 if (!buf) {
550 ret = -ENOMEM;
551 goto err_stop_hw;
552 }
553
547 /* 554 /*
548 * Some devices repond with 'invalid report id' when feature 555 * Some devices repond with 'invalid report id' when feature
549 * report switching it into multitouch mode is sent to it. 556 * report switching it into multitouch mode is sent to it.
@@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev,
552 * but there seems to be no other way of switching the mode. 559 * but there seems to be no other way of switching the mode.
553 * Thus the super-ugly hacky success check below. 560 * Thus the super-ugly hacky success check below.
554 */ 561 */
555 ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature), 562 ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
556 HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 563 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
564 kfree(buf);
557 if (ret != -EIO && ret != sizeof(feature)) { 565 if (ret != -EIO && ret != sizeof(feature)) {
558 hid_err(hdev, "unable to request touch data (%d)\n", ret); 566 hid_err(hdev, "unable to request touch data (%d)\n", ret);
559 goto err_stop_hw; 567 goto err_stop_hw;
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 9cd2ca34a6be..be89bcbf6a71 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page)
188static int rmi_set_mode(struct hid_device *hdev, u8 mode) 188static int rmi_set_mode(struct hid_device *hdev, u8 mode)
189{ 189{
190 int ret; 190 int ret;
191 u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; 191 const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
192 u8 *buf;
192 193
193 ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf, 194 buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL);
195 if (!buf)
196 return -ENOMEM;
197
198 ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf,
194 sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); 199 sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
200 kfree(buf);
195 if (ret < 0) { 201 if (ret < 0) {
196 dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, 202 dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
197 ret); 203 ret);
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index c5c3d6111729..60875625cbdf 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
212 __s32 value; 212 __s32 value;
213 int ret = 0; 213 int ret = 0;
214 214
215 memset(buffer, 0, buffer_size);
215 mutex_lock(&data->mutex); 216 mutex_lock(&data->mutex);
216 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); 217 report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
217 if (!report || (field_index >= report->maxfield)) { 218 if (!report || (field_index >= report->maxfield)) {
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index d223650a97e4..11edabf425ae 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -59,7 +59,6 @@ config I2C_CHARDEV
59 59
60config I2C_MUX 60config I2C_MUX
61 tristate "I2C bus multiplexing support" 61 tristate "I2C bus multiplexing support"
62 depends on HAS_IOMEM
63 help 62 help
64 Say Y here if you want the I2C core to support the ability to 63 Say Y here if you want the I2C core to support the ability to
65 handle multiplexed I2C bus topologies, by presenting each 64 handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 11e866d05368..b403fa5ecf49 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -91,9 +91,7 @@
91 DW_IC_INTR_TX_ABRT | \ 91 DW_IC_INTR_TX_ABRT | \
92 DW_IC_INTR_STOP_DET) 92 DW_IC_INTR_STOP_DET)
93 93
94#define DW_IC_STATUS_ACTIVITY 0x1 94#define DW_IC_STATUS_ACTIVITY 0x1
95#define DW_IC_STATUS_TFE BIT(2)
96#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
97 95
98#define DW_IC_SDA_HOLD_RX_SHIFT 16 96#define DW_IC_SDA_HOLD_RX_SHIFT 16
99#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) 97#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
@@ -478,25 +476,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
478{ 476{
479 struct i2c_msg *msgs = dev->msgs; 477 struct i2c_msg *msgs = dev->msgs;
480 u32 ic_tar = 0; 478 u32 ic_tar = 0;
481 bool enabled;
482 479
483 enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1; 480 /* Disable the adapter */
484 481 __i2c_dw_enable_and_wait(dev, false);
485 if (enabled) {
486 u32 ic_status;
487
488 /*
489 * Only disable adapter if ic_tar and ic_con can't be
490 * dynamically updated
491 */
492 ic_status = dw_readl(dev, DW_IC_STATUS);
493 if (!dev->dynamic_tar_update_enabled ||
494 (ic_status & DW_IC_STATUS_MST_ACTIVITY) ||
495 !(ic_status & DW_IC_STATUS_TFE)) {
496 __i2c_dw_enable_and_wait(dev, false);
497 enabled = false;
498 }
499 }
500 482
501 /* if the slave address is ten bit address, enable 10BITADDR */ 483 /* if the slave address is ten bit address, enable 10BITADDR */
502 if (dev->dynamic_tar_update_enabled) { 484 if (dev->dynamic_tar_update_enabled) {
@@ -526,8 +508,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
526 /* enforce disabled interrupts (due to HW issues) */ 508 /* enforce disabled interrupts (due to HW issues) */
527 i2c_dw_disable_int(dev); 509 i2c_dw_disable_int(dev);
528 510
529 if (!enabled) 511 /* Enable the adapter */
530 __i2c_dw_enable(dev, true); 512 __i2c_dw_enable(dev, true);
531 513
532 /* Clear and enable interrupts */ 514 /* Clear and enable interrupts */
533 dw_readl(dev, DW_IC_CLR_INTR); 515 dw_readl(dev, DW_IC_CLR_INTR);
@@ -611,7 +593,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
611 if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { 593 if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
612 594
613 /* avoid rx buffer overrun */ 595 /* avoid rx buffer overrun */
614 if (rx_limit - dev->rx_outstanding <= 0) 596 if (dev->rx_outstanding >= dev->rx_fifo_depth)
615 break; 597 break;
616 598
617 dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); 599 dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
@@ -708,8 +690,7 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
708} 690}
709 691
710/* 692/*
711 * Prepare controller for a transaction and start transfer by calling 693 * Prepare controller for a transaction and call i2c_dw_xfer_msg
712 * i2c_dw_xfer_init()
713 */ 694 */
714static int 695static int
715i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) 696i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -752,13 +733,23 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
752 goto done; 733 goto done;
753 } 734 }
754 735
736 /*
737 * We must disable the adapter before returning and signaling the end
738 * of the current transfer. Otherwise the hardware might continue
739 * generating interrupts which in turn causes a race condition with
740 * the following transfer. Needs some more investigation if the
741 * additional interrupts are a hardware bug or this driver doesn't
742 * handle them correctly yet.
743 */
744 __i2c_dw_enable(dev, false);
745
755 if (dev->msg_err) { 746 if (dev->msg_err) {
756 ret = dev->msg_err; 747 ret = dev->msg_err;
757 goto done; 748 goto done;
758 } 749 }
759 750
760 /* no error */ 751 /* no error */
761 if (likely(!dev->cmd_err)) { 752 if (likely(!dev->cmd_err && !dev->status)) {
762 ret = num; 753 ret = num;
763 goto done; 754 goto done;
764 } 755 }
@@ -768,6 +759,11 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
768 ret = i2c_dw_handle_tx_abort(dev); 759 ret = i2c_dw_handle_tx_abort(dev);
769 goto done; 760 goto done;
770 } 761 }
762
763 if (dev->status)
764 dev_err(dev->dev,
765 "transfer terminated early - interrupt latency too high?\n");
766
771 ret = -EIO; 767 ret = -EIO;
772 768
773done: 769done:
@@ -888,19 +884,9 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
888 */ 884 */
889 885
890tx_aborted: 886tx_aborted:
891 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) 887 if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
892 || dev->msg_err) {
893 /*
894 * We must disable interruts before returning and signaling
895 * the end of the current transfer. Otherwise the hardware
896 * might continue generating interrupts for non-existent
897 * transfers.
898 */
899 i2c_dw_disable_int(dev);
900 dw_readl(dev, DW_IC_CLR_INTR);
901
902 complete(&dev->cmd_complete); 888 complete(&dev->cmd_complete);
903 } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { 889 else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
904 /* workaround to trigger pending interrupt */ 890 /* workaround to trigger pending interrupt */
905 stat = dw_readl(dev, DW_IC_INTR_MASK); 891 stat = dw_readl(dev, DW_IC_INTR_MASK);
906 i2c_dw_disable_int(dev); 892 i2c_dw_disable_int(dev);
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 49f2084f7bb5..50813a24c541 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -347,7 +347,7 @@ static int dc_i2c_probe(struct platform_device *pdev)
347 347
348 ret = i2c_add_adapter(&i2c->adap); 348 ret = i2c_add_adapter(&i2c->adap);
349 if (ret < 0) { 349 if (ret < 0) {
350 clk_unprepare(i2c->clk); 350 clk_disable_unprepare(i2c->clk);
351 return ret; 351 return ret;
352 } 352 }
353 353
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 419b54bfc7c7..5e63b17f935d 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -381,9 +381,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
381 if (result) 381 if (result)
382 return result; 382 return result;
383 383
384 data[i] = octeon_i2c_data_read(i2c, &result); 384 data[i] = octeon_i2c_data_read(i2c);
385 if (result)
386 return result;
387 if (recv_len && i == 0) { 385 if (recv_len && i == 0) {
388 if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) 386 if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
389 return -EPROTO; 387 return -EPROTO;
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 1db7c835a454..87151ea74acd 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -5,7 +5,6 @@
5#include <linux/i2c.h> 5#include <linux/i2c.h>
6#include <linux/i2c-smbus.h> 6#include <linux/i2c-smbus.h>
7#include <linux/io.h> 7#include <linux/io.h>
8#include <linux/iopoll.h>
9#include <linux/kernel.h> 8#include <linux/kernel.h>
10#include <linux/pci.h> 9#include <linux/pci.h>
11 10
@@ -145,9 +144,9 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
145 u64 tmp; 144 u64 tmp;
146 145
147 __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c)); 146 __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
148 147 do {
149 readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, tmp & SW_TWSI_V, 148 tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
150 I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout); 149 } while ((tmp & SW_TWSI_V) != 0);
151} 150}
152 151
153#define octeon_i2c_ctl_write(i2c, val) \ 152#define octeon_i2c_ctl_write(i2c, val) \
@@ -164,28 +163,24 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8
164 * 163 *
165 * The I2C core registers are accessed indirectly via the SW_TWSI CSR. 164 * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
166 */ 165 */
167static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg, 166static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
168 int *error)
169{ 167{
170 u64 tmp; 168 u64 tmp;
171 int ret;
172 169
173 __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c)); 170 __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
171 do {
172 tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
173 } while ((tmp & SW_TWSI_V) != 0);
174 174
175 ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp,
176 tmp & SW_TWSI_V, I2C_OCTEON_EVENT_WAIT,
177 i2c->adap.timeout);
178 if (error)
179 *error = ret;
180 return tmp & 0xFF; 175 return tmp & 0xFF;
181} 176}
182 177
183#define octeon_i2c_ctl_read(i2c) \ 178#define octeon_i2c_ctl_read(i2c) \
184 octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL) 179 octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
185#define octeon_i2c_data_read(i2c, error) \ 180#define octeon_i2c_data_read(i2c) \
186 octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error) 181 octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
187#define octeon_i2c_stat_read(i2c) \ 182#define octeon_i2c_stat_read(i2c) \
188 octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL) 183 octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
189 184
190/** 185/**
191 * octeon_i2c_read_int - read the TWSI_INT register 186 * octeon_i2c_read_int - read the TWSI_INT register
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index e280c8ecc0b5..96de9ce5669b 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL
63 63
64config I2C_MUX_REG 64config I2C_MUX_REG
65 tristate "Register-based I2C multiplexer" 65 tristate "Register-based I2C multiplexer"
66 depends on HAS_IOMEM
66 help 67 help
67 If you say yes to this option, support will be included for a 68 If you say yes to this option, support will be included for a
68 register based I2C multiplexer. This driver provides access to 69 register based I2C multiplexer. This driver provides access to
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index b3893f6282ba..3e6fe1760d82 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -69,10 +69,28 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
69 goto err_with_revert; 69 goto err_with_revert;
70 } 70 }
71 71
72 p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); 72 /*
73 * Check if there are pinctrl states at all. Note: we cant' use
74 * devm_pinctrl_get_select() because we need to distinguish between
75 * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state().
76 */
77 p = devm_pinctrl_get(adap->dev.parent);
73 if (IS_ERR(p)) { 78 if (IS_ERR(p)) {
74 ret = PTR_ERR(p); 79 ret = PTR_ERR(p);
75 goto err_with_put; 80 /* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */
81 if (ret != -ENODEV)
82 goto err_with_put;
83 } else {
84 /* there are states. check and use them */
85 struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name);
86
87 if (IS_ERR(s)) {
88 ret = PTR_ERR(s);
89 goto err_with_put;
90 }
91 ret = pinctrl_select_state(p, s);
92 if (ret < 0)
93 goto err_with_put;
76 } 94 }
77 95
78 priv->chan[new_chan].parent_adap = adap; 96 priv->chan[new_chan].parent_adap = adap;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 1091346f2480..8bc3d36d2837 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -268,9 +268,9 @@ static int pca954x_probe(struct i2c_client *client,
268 /* discard unconfigured channels */ 268 /* discard unconfigured channels */
269 break; 269 break;
270 idle_disconnect_pd = pdata->modes[num].deselect_on_exit; 270 idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
271 data->deselect |= (idle_disconnect_pd
272 || idle_disconnect_dt) << num;
273 } 271 }
272 data->deselect |= (idle_disconnect_pd ||
273 idle_disconnect_dt) << num;
274 274
275 ret = i2c_mux_add_adapter(muxc, force, num, class); 275 ret = i2c_mux_add_adapter(muxc, force, num, class);
276 276
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index b136d3acc5bd..0f58f46dbad7 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
699struct resolve_cb_context { 699struct resolve_cb_context {
700 struct rdma_dev_addr *addr; 700 struct rdma_dev_addr *addr;
701 struct completion comp; 701 struct completion comp;
702 int status;
702}; 703};
703 704
704static void resolve_cb(int status, struct sockaddr *src_addr, 705static void resolve_cb(int status, struct sockaddr *src_addr,
705 struct rdma_dev_addr *addr, void *context) 706 struct rdma_dev_addr *addr, void *context)
706{ 707{
707 memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct 708 if (!status)
708 rdma_dev_addr)); 709 memcpy(((struct resolve_cb_context *)context)->addr,
710 addr, sizeof(struct rdma_dev_addr));
711 ((struct resolve_cb_context *)context)->status = status;
709 complete(&((struct resolve_cb_context *)context)->comp); 712 complete(&((struct resolve_cb_context *)context)->comp);
710} 713}
711 714
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
743 746
744 wait_for_completion(&ctx.comp); 747 wait_for_completion(&ctx.comp);
745 748
749 ret = ctx.status;
750 if (ret)
751 return ret;
752
746 memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); 753 memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
747 dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); 754 dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
748 if (!dev) 755 if (!dev)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c99525512b34..71c7c4c328ef 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -80,6 +80,8 @@ static struct ib_cm {
80 __be32 random_id_operand; 80 __be32 random_id_operand;
81 struct list_head timewait_list; 81 struct list_head timewait_list;
82 struct workqueue_struct *wq; 82 struct workqueue_struct *wq;
83 /* Sync on cm change port state */
84 spinlock_t state_lock;
83} cm; 85} cm;
84 86
85/* Counter indexes ordered by attribute ID */ 87/* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
161 struct ib_mad_agent *mad_agent; 163 struct ib_mad_agent *mad_agent;
162 struct kobject port_obj; 164 struct kobject port_obj;
163 u8 port_num; 165 u8 port_num;
166 struct list_head cm_priv_prim_list;
167 struct list_head cm_priv_altr_list;
164 struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; 168 struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
165}; 169};
166 170
@@ -241,6 +245,12 @@ struct cm_id_private {
241 u8 service_timeout; 245 u8 service_timeout;
242 u8 target_ack_delay; 246 u8 target_ack_delay;
243 247
248 struct list_head prim_list;
249 struct list_head altr_list;
250 /* Indicates that the send port mad is registered and av is set */
251 int prim_send_port_not_ready;
252 int altr_send_port_not_ready;
253
244 struct list_head work_list; 254 struct list_head work_list;
245 atomic_t work_count; 255 atomic_t work_count;
246}; 256};
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
259 struct ib_mad_agent *mad_agent; 269 struct ib_mad_agent *mad_agent;
260 struct ib_mad_send_buf *m; 270 struct ib_mad_send_buf *m;
261 struct ib_ah *ah; 271 struct ib_ah *ah;
272 struct cm_av *av;
273 unsigned long flags, flags2;
274 int ret = 0;
262 275
276 /* don't let the port to be released till the agent is down */
277 spin_lock_irqsave(&cm.state_lock, flags2);
278 spin_lock_irqsave(&cm.lock, flags);
279 if (!cm_id_priv->prim_send_port_not_ready)
280 av = &cm_id_priv->av;
281 else if (!cm_id_priv->altr_send_port_not_ready &&
282 (cm_id_priv->alt_av.port))
283 av = &cm_id_priv->alt_av;
284 else {
285 pr_info("%s: not valid CM id\n", __func__);
286 ret = -ENODEV;
287 spin_unlock_irqrestore(&cm.lock, flags);
288 goto out;
289 }
290 spin_unlock_irqrestore(&cm.lock, flags);
291 /* Make sure the port haven't released the mad yet */
263 mad_agent = cm_id_priv->av.port->mad_agent; 292 mad_agent = cm_id_priv->av.port->mad_agent;
264 ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); 293 if (!mad_agent) {
265 if (IS_ERR(ah)) 294 pr_info("%s: not a valid MAD agent\n", __func__);
266 return PTR_ERR(ah); 295 ret = -ENODEV;
296 goto out;
297 }
298 ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
299 if (IS_ERR(ah)) {
300 ret = PTR_ERR(ah);
301 goto out;
302 }
267 303
268 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 304 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
269 cm_id_priv->av.pkey_index, 305 av->pkey_index,
270 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 306 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
271 GFP_ATOMIC, 307 GFP_ATOMIC,
272 IB_MGMT_BASE_VERSION); 308 IB_MGMT_BASE_VERSION);
273 if (IS_ERR(m)) { 309 if (IS_ERR(m)) {
274 ib_destroy_ah(ah); 310 ib_destroy_ah(ah);
275 return PTR_ERR(m); 311 ret = PTR_ERR(m);
312 goto out;
276 } 313 }
277 314
278 /* Timeout set by caller if response is expected. */ 315 /* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
282 atomic_inc(&cm_id_priv->refcount); 319 atomic_inc(&cm_id_priv->refcount);
283 m->context[0] = cm_id_priv; 320 m->context[0] = cm_id_priv;
284 *msg = m; 321 *msg = m;
285 return 0; 322
323out:
324 spin_unlock_irqrestore(&cm.state_lock, flags2);
325 return ret;
286} 326}
287 327
288static int cm_alloc_response_msg(struct cm_port *port, 328static int cm_alloc_response_msg(struct cm_port *port,
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
352 grh, &av->ah_attr); 392 grh, &av->ah_attr);
353} 393}
354 394
355static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) 395static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
396 struct cm_id_private *cm_id_priv)
356{ 397{
357 struct cm_device *cm_dev; 398 struct cm_device *cm_dev;
358 struct cm_port *port = NULL; 399 struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
387 &av->ah_attr); 428 &av->ah_attr);
388 av->timeout = path->packet_life_time + 1; 429 av->timeout = path->packet_life_time + 1;
389 430
390 return 0; 431 spin_lock_irqsave(&cm.lock, flags);
432 if (&cm_id_priv->av == av)
433 list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
434 else if (&cm_id_priv->alt_av == av)
435 list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
436 else
437 ret = -EINVAL;
438
439 spin_unlock_irqrestore(&cm.lock, flags);
440
441 return ret;
391} 442}
392 443
393static int cm_alloc_id(struct cm_id_private *cm_id_priv) 444static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
677 spin_lock_init(&cm_id_priv->lock); 728 spin_lock_init(&cm_id_priv->lock);
678 init_completion(&cm_id_priv->comp); 729 init_completion(&cm_id_priv->comp);
679 INIT_LIST_HEAD(&cm_id_priv->work_list); 730 INIT_LIST_HEAD(&cm_id_priv->work_list);
731 INIT_LIST_HEAD(&cm_id_priv->prim_list);
732 INIT_LIST_HEAD(&cm_id_priv->altr_list);
680 atomic_set(&cm_id_priv->work_count, -1); 733 atomic_set(&cm_id_priv->work_count, -1);
681 atomic_set(&cm_id_priv->refcount, 1); 734 atomic_set(&cm_id_priv->refcount, 1);
682 return &cm_id_priv->id; 735 return &cm_id_priv->id;
@@ -892,6 +945,15 @@ retest:
892 break; 945 break;
893 } 946 }
894 947
948 spin_lock_irq(&cm.lock);
949 if (!list_empty(&cm_id_priv->altr_list) &&
950 (!cm_id_priv->altr_send_port_not_ready))
951 list_del(&cm_id_priv->altr_list);
952 if (!list_empty(&cm_id_priv->prim_list) &&
953 (!cm_id_priv->prim_send_port_not_ready))
954 list_del(&cm_id_priv->prim_list);
955 spin_unlock_irq(&cm.lock);
956
895 cm_free_id(cm_id->local_id); 957 cm_free_id(cm_id->local_id);
896 cm_deref_id(cm_id_priv); 958 cm_deref_id(cm_id_priv);
897 wait_for_completion(&cm_id_priv->comp); 959 wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
1192 goto out; 1254 goto out;
1193 } 1255 }
1194 1256
1195 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); 1257 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
1258 cm_id_priv);
1196 if (ret) 1259 if (ret)
1197 goto error1; 1260 goto error1;
1198 if (param->alternate_path) { 1261 if (param->alternate_path) {
1199 ret = cm_init_av_by_path(param->alternate_path, 1262 ret = cm_init_av_by_path(param->alternate_path,
1200 &cm_id_priv->alt_av); 1263 &cm_id_priv->alt_av, cm_id_priv);
1201 if (ret) 1264 if (ret)
1202 goto error1; 1265 goto error1;
1203 } 1266 }
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
1653 dev_put(gid_attr.ndev); 1716 dev_put(gid_attr.ndev);
1654 } 1717 }
1655 work->path[0].gid_type = gid_attr.gid_type; 1718 work->path[0].gid_type = gid_attr.gid_type;
1656 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); 1719 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
1720 cm_id_priv);
1657 } 1721 }
1658 if (ret) { 1722 if (ret) {
1659 int err = ib_get_cached_gid(work->port->cm_dev->ib_device, 1723 int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
1672 goto rejected; 1736 goto rejected;
1673 } 1737 }
1674 if (req_msg->alt_local_lid) { 1738 if (req_msg->alt_local_lid) {
1675 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); 1739 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
1740 cm_id_priv);
1676 if (ret) { 1741 if (ret) {
1677 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, 1742 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1678 &work->path[0].sgid, 1743 &work->path[0].sgid,
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
2727 goto out; 2792 goto out;
2728 } 2793 }
2729 2794
2730 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); 2795 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
2796 cm_id_priv);
2731 if (ret) 2797 if (ret)
2732 goto out; 2798 goto out;
2733 cm_id_priv->alt_av.timeout = 2799 cm_id_priv->alt_av.timeout =
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
2839 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, 2905 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2840 work->mad_recv_wc->recv_buf.grh, 2906 work->mad_recv_wc->recv_buf.grh,
2841 &cm_id_priv->av); 2907 &cm_id_priv->av);
2842 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); 2908 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
2909 cm_id_priv);
2843 ret = atomic_inc_and_test(&cm_id_priv->work_count); 2910 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2844 if (!ret) 2911 if (!ret)
2845 list_add_tail(&work->list, &cm_id_priv->work_list); 2912 list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3031 return -EINVAL; 3098 return -EINVAL;
3032 3099
3033 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3100 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3034 ret = cm_init_av_by_path(param->path, &cm_id_priv->av); 3101 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
3035 if (ret) 3102 if (ret)
3036 goto out; 3103 goto out;
3037 3104
@@ -3468,7 +3535,9 @@ out:
3468static int cm_migrate(struct ib_cm_id *cm_id) 3535static int cm_migrate(struct ib_cm_id *cm_id)
3469{ 3536{
3470 struct cm_id_private *cm_id_priv; 3537 struct cm_id_private *cm_id_priv;
3538 struct cm_av tmp_av;
3471 unsigned long flags; 3539 unsigned long flags;
3540 int tmp_send_port_not_ready;
3472 int ret = 0; 3541 int ret = 0;
3473 3542
3474 cm_id_priv = container_of(cm_id, struct cm_id_private, id); 3543 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
3477 (cm_id->lap_state == IB_CM_LAP_UNINIT || 3546 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3478 cm_id->lap_state == IB_CM_LAP_IDLE)) { 3547 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3479 cm_id->lap_state = IB_CM_LAP_IDLE; 3548 cm_id->lap_state = IB_CM_LAP_IDLE;
3549 /* Swap address vector */
3550 tmp_av = cm_id_priv->av;
3480 cm_id_priv->av = cm_id_priv->alt_av; 3551 cm_id_priv->av = cm_id_priv->alt_av;
3552 cm_id_priv->alt_av = tmp_av;
3553 /* Swap port send ready state */
3554 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3555 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3556 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3481 } else 3557 } else
3482 ret = -EINVAL; 3558 ret = -EINVAL;
3483 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 3559 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
3888 port->cm_dev = cm_dev; 3964 port->cm_dev = cm_dev;
3889 port->port_num = i; 3965 port->port_num = i;
3890 3966
3967 INIT_LIST_HEAD(&port->cm_priv_prim_list);
3968 INIT_LIST_HEAD(&port->cm_priv_altr_list);
3969
3891 ret = cm_create_port_fs(port); 3970 ret = cm_create_port_fs(port);
3892 if (ret) 3971 if (ret)
3893 goto error1; 3972 goto error1;
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
3945{ 4024{
3946 struct cm_device *cm_dev = client_data; 4025 struct cm_device *cm_dev = client_data;
3947 struct cm_port *port; 4026 struct cm_port *port;
4027 struct cm_id_private *cm_id_priv;
4028 struct ib_mad_agent *cur_mad_agent;
3948 struct ib_port_modify port_modify = { 4029 struct ib_port_modify port_modify = {
3949 .clr_port_cap_mask = IB_PORT_CM_SUP 4030 .clr_port_cap_mask = IB_PORT_CM_SUP
3950 }; 4031 };
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
3968 4049
3969 port = cm_dev->port[i-1]; 4050 port = cm_dev->port[i-1];
3970 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 4051 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4052 /* Mark all the cm_id's as not valid */
4053 spin_lock_irq(&cm.lock);
4054 list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
4055 cm_id_priv->altr_send_port_not_ready = 1;
4056 list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
4057 cm_id_priv->prim_send_port_not_ready = 1;
4058 spin_unlock_irq(&cm.lock);
3971 /* 4059 /*
3972 * We flush the queue here after the going_down set, this 4060 * We flush the queue here after the going_down set, this
3973 * verify that no new works will be queued in the recv handler, 4061 * verify that no new works will be queued in the recv handler,
3974 * after that we can call the unregister_mad_agent 4062 * after that we can call the unregister_mad_agent
3975 */ 4063 */
3976 flush_workqueue(cm.wq); 4064 flush_workqueue(cm.wq);
3977 ib_unregister_mad_agent(port->mad_agent); 4065 spin_lock_irq(&cm.state_lock);
4066 cur_mad_agent = port->mad_agent;
4067 port->mad_agent = NULL;
4068 spin_unlock_irq(&cm.state_lock);
4069 ib_unregister_mad_agent(cur_mad_agent);
3978 cm_remove_port_fs(port); 4070 cm_remove_port_fs(port);
3979 } 4071 }
4072
3980 device_unregister(cm_dev->device); 4073 device_unregister(cm_dev->device);
3981 kfree(cm_dev); 4074 kfree(cm_dev);
3982} 4075}
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
3989 INIT_LIST_HEAD(&cm.device_list); 4082 INIT_LIST_HEAD(&cm.device_list);
3990 rwlock_init(&cm.device_lock); 4083 rwlock_init(&cm.device_lock);
3991 spin_lock_init(&cm.lock); 4084 spin_lock_init(&cm.lock);
4085 spin_lock_init(&cm.state_lock);
3992 cm.listen_service_table = RB_ROOT; 4086 cm.listen_service_table = RB_ROOT;
3993 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); 4087 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3994 cm.remote_id_table = RB_ROOT; 4088 cm.remote_id_table = RB_ROOT;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 89a6b0546804..2a6fc47a1dfb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2438,6 +2438,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
2438 return 0; 2438 return 0;
2439} 2439}
2440 2440
2441static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
2442 unsigned long supported_gids,
2443 enum ib_gid_type default_gid)
2444{
2445 if ((network_type == RDMA_NETWORK_IPV4 ||
2446 network_type == RDMA_NETWORK_IPV6) &&
2447 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
2448 return IB_GID_TYPE_ROCE_UDP_ENCAP;
2449
2450 return default_gid;
2451}
2452
2441static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) 2453static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
2442{ 2454{
2443 struct rdma_route *route = &id_priv->id.route; 2455 struct rdma_route *route = &id_priv->id.route;
@@ -2463,6 +2475,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
2463 route->num_paths = 1; 2475 route->num_paths = 1;
2464 2476
2465 if (addr->dev_addr.bound_dev_if) { 2477 if (addr->dev_addr.bound_dev_if) {
2478 unsigned long supported_gids;
2479
2466 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); 2480 ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
2467 if (!ndev) { 2481 if (!ndev) {
2468 ret = -ENODEV; 2482 ret = -ENODEV;
@@ -2486,7 +2500,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
2486 2500
2487 route->path_rec->net = &init_net; 2501 route->path_rec->net = &init_net;
2488 route->path_rec->ifindex = ndev->ifindex; 2502 route->path_rec->ifindex = ndev->ifindex;
2489 route->path_rec->gid_type = id_priv->gid_type; 2503 supported_gids = roce_gid_type_mask_support(id_priv->id.device,
2504 id_priv->id.port_num);
2505 route->path_rec->gid_type =
2506 cma_route_gid_type(addr->dev_addr.network,
2507 supported_gids,
2508 id_priv->gid_type);
2490 } 2509 }
2491 if (!ndev) { 2510 if (!ndev) {
2492 ret = -ENODEV; 2511 ret = -ENODEV;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 224ad274ea0b..84b4eff90395 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
175 175
176 cur_base = addr & PAGE_MASK; 176 cur_base = addr & PAGE_MASK;
177 177
178 if (npages == 0) { 178 if (npages == 0 || npages > UINT_MAX) {
179 ret = -EINVAL; 179 ret = -EINVAL;
180 goto out; 180 goto out;
181 } 181 }
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 0012fa58c105..44b1104eb168 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
262 container_of(uobj, struct ib_uqp_object, uevent.uobject); 262 container_of(uobj, struct ib_uqp_object, uevent.uobject);
263 263
264 idr_remove_uobj(&ib_uverbs_qp_idr, uobj); 264 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
265 if (qp != qp->real_qp) { 265 if (qp == qp->real_qp)
266 ib_close_qp(qp);
267 } else {
268 ib_uverbs_detach_umcast(qp, uqp); 266 ib_uverbs_detach_umcast(qp, uqp);
269 ib_destroy_qp(qp); 267 ib_destroy_qp(qp);
270 }
271 ib_uverbs_release_uevent(file, &uqp->uevent); 268 ib_uverbs_release_uevent(file, &uqp->uevent);
272 kfree(uqp); 269 kfree(uqp);
273 } 270 }
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 867b8cf82be8..19c6477af19f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -666,18 +666,6 @@ skip_cqe:
666 return ret; 666 return ret;
667} 667}
668 668
669static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
670{
671 struct c4iw_mr *mhp;
672 unsigned long flags;
673
674 spin_lock_irqsave(&rhp->lock, flags);
675 mhp = get_mhp(rhp, rkey >> 8);
676 if (mhp)
677 mhp->attr.state = 0;
678 spin_unlock_irqrestore(&rhp->lock, flags);
679}
680
681/* 669/*
682 * Get one cq entry from c4iw and map it to openib. 670 * Get one cq entry from c4iw and map it to openib.
683 * 671 *
@@ -733,7 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
733 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { 721 CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
734 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); 722 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
735 wc->wc_flags |= IB_WC_WITH_INVALIDATE; 723 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
736 invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); 724 c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
737 } 725 }
738 } else { 726 } else {
739 switch (CQE_OPCODE(&cqe)) { 727 switch (CQE_OPCODE(&cqe)) {
@@ -762,7 +750,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
762 750
763 /* Invalidate the MR if the fastreg failed */ 751 /* Invalidate the MR if the fastreg failed */
764 if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) 752 if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
765 invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); 753 c4iw_invalidate_mr(qhp->rhp,
754 CQE_WRID_FR_STAG(&cqe));
766 break; 755 break;
767 default: 756 default:
768 printk(KERN_ERR MOD "Unexpected opcode %d " 757 printk(KERN_ERR MOD "Unexpected opcode %d "
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7e7f79e55006..4788e1a46fde 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -999,6 +999,6 @@ extern int db_coalescing_threshold;
999extern int use_dsgl; 999extern int use_dsgl;
1000void c4iw_drain_rq(struct ib_qp *qp); 1000void c4iw_drain_rq(struct ib_qp *qp);
1001void c4iw_drain_sq(struct ib_qp *qp); 1001void c4iw_drain_sq(struct ib_qp *qp);
1002 1002void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
1003 1003
1004#endif 1004#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 80e27749420a..410408f886c1 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
770 kfree(mhp); 770 kfree(mhp);
771 return 0; 771 return 0;
772} 772}
773
774void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
775{
776 struct c4iw_mr *mhp;
777 unsigned long flags;
778
779 spin_lock_irqsave(&rhp->lock, flags);
780 mhp = get_mhp(rhp, rkey >> 8);
781 if (mhp)
782 mhp->attr.state = 0;
783 spin_unlock_irqrestore(&rhp->lock, flags);
784}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index f57deba6717c..b7ac97b27c88 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -706,12 +706,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
706 return 0; 706 return 0;
707} 707}
708 708
709static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe, 709static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
710 struct ib_send_wr *wr, u8 *len16)
711{ 710{
712 struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8);
713
714 mhp->attr.state = 0;
715 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); 711 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
716 wqe->inv.r2 = 0; 712 wqe->inv.r2 = 0;
717 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); 713 *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
@@ -797,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
797 spin_lock_irqsave(&qhp->lock, flag); 793 spin_lock_irqsave(&qhp->lock, flag);
798 if (t4_wq_in_error(&qhp->wq)) { 794 if (t4_wq_in_error(&qhp->wq)) {
799 spin_unlock_irqrestore(&qhp->lock, flag); 795 spin_unlock_irqrestore(&qhp->lock, flag);
796 *bad_wr = wr;
800 return -EINVAL; 797 return -EINVAL;
801 } 798 }
802 num_wrs = t4_sq_avail(&qhp->wq); 799 num_wrs = t4_sq_avail(&qhp->wq);
803 if (num_wrs == 0) { 800 if (num_wrs == 0) {
804 spin_unlock_irqrestore(&qhp->lock, flag); 801 spin_unlock_irqrestore(&qhp->lock, flag);
802 *bad_wr = wr;
805 return -ENOMEM; 803 return -ENOMEM;
806 } 804 }
807 while (wr) { 805 while (wr) {
@@ -840,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
840 case IB_WR_RDMA_READ_WITH_INV: 838 case IB_WR_RDMA_READ_WITH_INV:
841 fw_opcode = FW_RI_RDMA_READ_WR; 839 fw_opcode = FW_RI_RDMA_READ_WR;
842 swsqe->opcode = FW_RI_READ_REQ; 840 swsqe->opcode = FW_RI_READ_REQ;
843 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) 841 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
842 c4iw_invalidate_mr(qhp->rhp,
843 wr->sg_list[0].lkey);
844 fw_flags = FW_RI_RDMA_READ_INVALIDATE; 844 fw_flags = FW_RI_RDMA_READ_INVALIDATE;
845 else 845 } else {
846 fw_flags = 0; 846 fw_flags = 0;
847 }
847 err = build_rdma_read(wqe, wr, &len16); 848 err = build_rdma_read(wqe, wr, &len16);
848 if (err) 849 if (err)
849 break; 850 break;
@@ -876,7 +877,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
876 fw_flags |= FW_RI_LOCAL_FENCE_FLAG; 877 fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
877 fw_opcode = FW_RI_INV_LSTAG_WR; 878 fw_opcode = FW_RI_INV_LSTAG_WR;
878 swsqe->opcode = FW_RI_LOCAL_INV; 879 swsqe->opcode = FW_RI_LOCAL_INV;
879 err = build_inv_stag(qhp->rhp, wqe, wr, &len16); 880 err = build_inv_stag(wqe, wr, &len16);
881 c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
880 break; 882 break;
881 default: 883 default:
882 PDBG("%s post of type=%d TBD!\n", __func__, 884 PDBG("%s post of type=%d TBD!\n", __func__,
@@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
934 spin_lock_irqsave(&qhp->lock, flag); 936 spin_lock_irqsave(&qhp->lock, flag);
935 if (t4_wq_in_error(&qhp->wq)) { 937 if (t4_wq_in_error(&qhp->wq)) {
936 spin_unlock_irqrestore(&qhp->lock, flag); 938 spin_unlock_irqrestore(&qhp->lock, flag);
939 *bad_wr = wr;
937 return -EINVAL; 940 return -EINVAL;
938 } 941 }
939 num_wrs = t4_rq_avail(&qhp->wq); 942 num_wrs = t4_rq_avail(&qhp->wq);
940 if (num_wrs == 0) { 943 if (num_wrs == 0) {
941 spin_unlock_irqrestore(&qhp->lock, flag); 944 spin_unlock_irqrestore(&qhp->lock, flag);
945 *bad_wr = wr;
942 return -ENOMEM; 946 return -ENOMEM;
943 } 947 }
944 while (wr) { 948 while (wr) {
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index a26a9a0bfc41..67ea85a56945 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -775,75 +775,3 @@ void hfi1_put_proc_affinity(int cpu)
775 } 775 }
776 mutex_unlock(&affinity->lock); 776 mutex_unlock(&affinity->lock);
777} 777}
778
779int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
780 size_t count)
781{
782 struct hfi1_affinity_node *entry;
783 cpumask_var_t mask;
784 int ret, i;
785
786 mutex_lock(&node_affinity.lock);
787 entry = node_affinity_lookup(dd->node);
788
789 if (!entry) {
790 ret = -EINVAL;
791 goto unlock;
792 }
793
794 ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
795 if (!ret) {
796 ret = -ENOMEM;
797 goto unlock;
798 }
799
800 ret = cpulist_parse(buf, mask);
801 if (ret)
802 goto out;
803
804 if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
805 dd_dev_warn(dd, "Invalid CPU mask\n");
806 ret = -EINVAL;
807 goto out;
808 }
809
810 /* reset the SDMA interrupt affinity details */
811 init_cpu_mask_set(&entry->def_intr);
812 cpumask_copy(&entry->def_intr.mask, mask);
813
814 /* Reassign the affinity for each SDMA interrupt. */
815 for (i = 0; i < dd->num_msix_entries; i++) {
816 struct hfi1_msix_entry *msix;
817
818 msix = &dd->msix_entries[i];
819 if (msix->type != IRQ_SDMA)
820 continue;
821
822 ret = get_irq_affinity(dd, msix);
823
824 if (ret)
825 break;
826 }
827out:
828 free_cpumask_var(mask);
829unlock:
830 mutex_unlock(&node_affinity.lock);
831 return ret ? ret : strnlen(buf, PAGE_SIZE);
832}
833
834int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
835{
836 struct hfi1_affinity_node *entry;
837
838 mutex_lock(&node_affinity.lock);
839 entry = node_affinity_lookup(dd->node);
840
841 if (!entry) {
842 mutex_unlock(&node_affinity.lock);
843 return -EINVAL;
844 }
845
846 cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
847 mutex_unlock(&node_affinity.lock);
848 return strnlen(buf, PAGE_SIZE);
849}
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index b89ea3c0ee1a..42e63316afd1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -102,10 +102,6 @@ int hfi1_get_proc_affinity(int);
102/* Release a CPU used by a user process. */ 102/* Release a CPU used by a user process. */
103void hfi1_put_proc_affinity(int); 103void hfi1_put_proc_affinity(int);
104 104
105int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf);
106int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
107 size_t count);
108
109struct hfi1_affinity_node { 105struct hfi1_affinity_node {
110 int node; 106 int node;
111 struct cpu_mask_set def_intr; 107 struct cpu_mask_set def_intr;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9bf5f23544d4..24d0820873cf 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6301 /* leave shared count at zero for both global and VL15 */ 6301 /* leave shared count at zero for both global and VL15 */
6302 write_global_credit(dd, vau, vl15buf, 0); 6302 write_global_credit(dd, vau, vl15buf, 0);
6303 6303
6304 /* We may need some credits for another VL when sending packets 6304 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6305 * with the snoop interface. Dividing it down the middle for VL15 6305 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6306 * and VL0 should suffice.
6307 */
6308 if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6309 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6310 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6311 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6312 << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6313 } else {
6314 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6315 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6316 }
6317} 6306}
6318 6307
6319/* 6308/*
@@ -9915,9 +9904,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd)
9915 u32 mask = ~((1U << ppd->lmc) - 1); 9904 u32 mask = ~((1U << ppd->lmc) - 1);
9916 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); 9905 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9917 9906
9918 if (dd->hfi1_snoop.mode_flag)
9919 dd_dev_info(dd, "Set lid/lmc while snooping");
9920
9921 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK 9907 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9922 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); 9908 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9923 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) 9909 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
@@ -12112,7 +12098,7 @@ static void update_synth_timer(unsigned long opaque)
12112 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); 12098 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12113} 12099}
12114 12100
12115#define C_MAX_NAME 13 /* 12 chars + one for /0 */ 12101#define C_MAX_NAME 16 /* 15 chars + one for /0 */
12116static int init_cntrs(struct hfi1_devdata *dd) 12102static int init_cntrs(struct hfi1_devdata *dd)
12117{ 12103{
12118 int i, rcv_ctxts, j; 12104 int i, rcv_ctxts, j;
@@ -14463,7 +14449,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14463 * Any error printing is already done by the init code. 14449 * Any error printing is already done by the init code.
14464 * On return, we have the chip mapped. 14450 * On return, we have the chip mapped.
14465 */ 14451 */
14466 ret = hfi1_pcie_ddinit(dd, pdev, ent); 14452 ret = hfi1_pcie_ddinit(dd, pdev);
14467 if (ret < 0) 14453 if (ret < 0)
14468 goto bail_free; 14454 goto bail_free;
14469 14455
@@ -14691,6 +14677,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14691 if (ret) 14677 if (ret)
14692 goto bail_free_cntrs; 14678 goto bail_free_cntrs;
14693 14679
14680 init_completion(&dd->user_comp);
14681
14682 /* The user refcount starts with one to inidicate an active device */
14683 atomic_set(&dd->user_refcount, 1);
14684
14694 goto bail; 14685 goto bail;
14695 14686
14696bail_free_rcverr: 14687bail_free_rcverr:
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 92345259a8f4..043fd21dc5f3 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -320,6 +320,9 @@
320/* DC_DC8051_CFG_MODE.GENERAL bits */ 320/* DC_DC8051_CFG_MODE.GENERAL bits */
321#define DISABLE_SELF_GUID_CHECK 0x2 321#define DISABLE_SELF_GUID_CHECK 0x2
322 322
323/* Bad L2 frame error code */
324#define BAD_L2_ERR 0x6
325
323/* 326/*
324 * Eager buffer minimum and maximum sizes supported by the hardware. 327 * Eager buffer minimum and maximum sizes supported by the hardware.
325 * All power-of-two sizes in between are supported as well. 328 * All power-of-two sizes in between are supported as well.
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 6563e4d38b80..c5efff29c147 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
599 dd->rhf_offset; 599 dd->rhf_offset;
600 struct rvt_qp *qp; 600 struct rvt_qp *qp;
601 struct ib_header *hdr; 601 struct ib_header *hdr;
602 struct ib_other_headers *ohdr;
603 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; 602 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
604 u64 rhf = rhf_to_cpu(rhf_addr); 603 u64 rhf = rhf_to_cpu(rhf_addr);
605 u32 etype = rhf_rcv_type(rhf), qpn, bth1; 604 u32 etype = rhf_rcv_type(rhf), qpn, bth1;
@@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet)
615 if (etype != RHF_RCV_TYPE_IB) 614 if (etype != RHF_RCV_TYPE_IB)
616 goto next; 615 goto next;
617 616
618 hdr = hfi1_get_msgheader(dd, rhf_addr); 617 packet->hdr = hfi1_get_msgheader(dd, rhf_addr);
618 hdr = packet->hdr;
619 619
620 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 620 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
621 621
622 if (lnh == HFI1_LRH_BTH) 622 if (lnh == HFI1_LRH_BTH) {
623 ohdr = &hdr->u.oth; 623 packet->ohdr = &hdr->u.oth;
624 else if (lnh == HFI1_LRH_GRH) 624 } else if (lnh == HFI1_LRH_GRH) {
625 ohdr = &hdr->u.l.oth; 625 packet->ohdr = &hdr->u.l.oth;
626 else 626 packet->rcv_flags |= HFI1_HAS_GRH;
627 } else {
627 goto next; /* just in case */ 628 goto next; /* just in case */
629 }
628 630
629 bth1 = be32_to_cpu(ohdr->bth[1]); 631 bth1 = be32_to_cpu(packet->ohdr->bth[1]);
630 is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); 632 is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK));
631 633
632 if (!is_ecn) 634 if (!is_ecn)
@@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
646 648
647 /* turn off BECN, FECN */ 649 /* turn off BECN, FECN */
648 bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); 650 bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK);
649 ohdr->bth[1] = cpu_to_be32(bth1); 651 packet->ohdr->bth[1] = cpu_to_be32(bth1);
650next: 652next:
651 update_ps_mdata(&mdata, rcd); 653 update_ps_mdata(&mdata, rcd);
652 } 654 }
@@ -1360,12 +1362,25 @@ int process_receive_ib(struct hfi1_packet *packet)
1360 1362
1361int process_receive_bypass(struct hfi1_packet *packet) 1363int process_receive_bypass(struct hfi1_packet *packet)
1362{ 1364{
1365 struct hfi1_devdata *dd = packet->rcd->dd;
1366
1363 if (unlikely(rhf_err_flags(packet->rhf))) 1367 if (unlikely(rhf_err_flags(packet->rhf)))
1364 handle_eflags(packet); 1368 handle_eflags(packet);
1365 1369
1366 dd_dev_err(packet->rcd->dd, 1370 dd_dev_err(dd,
1367 "Bypass packets are not supported in normal operation. Dropping\n"); 1371 "Bypass packets are not supported in normal operation. Dropping\n");
1368 incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors); 1372 incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
1373 if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) {
1374 u64 *flits = packet->ebuf;
1375
1376 if (flits && !(packet->rhf & RHF_LEN_ERR)) {
1377 dd->err_info_rcvport.packet_flit1 = flits[0];
1378 dd->err_info_rcvport.packet_flit2 =
1379 packet->tlen > sizeof(flits[0]) ? flits[1] : 0;
1380 }
1381 dd->err_info_rcvport.status_and_code |=
1382 (OPA_EI_STATUS_SMASK | BAD_L2_ERR);
1383 }
1369 return RHF_RCV_CONTINUE; 1384 return RHF_RCV_CONTINUE;
1370} 1385}
1371 1386
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 677efa0e8cd6..bd786b7bd30b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
172 struct hfi1_devdata, 172 struct hfi1_devdata,
173 user_cdev); 173 user_cdev);
174 174
175 if (!atomic_inc_not_zero(&dd->user_refcount))
176 return -ENXIO;
177
175 /* Just take a ref now. Not all opens result in a context assign */ 178 /* Just take a ref now. Not all opens result in a context assign */
176 kobject_get(&dd->kobj); 179 kobject_get(&dd->kobj);
177 180
@@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
183 fd->rec_cpu_num = -1; /* no cpu affinity by default */ 186 fd->rec_cpu_num = -1; /* no cpu affinity by default */
184 fd->mm = current->mm; 187 fd->mm = current->mm;
185 atomic_inc(&fd->mm->mm_count); 188 atomic_inc(&fd->mm->mm_count);
186 } 189 fp->private_data = fd;
190 } else {
191 fp->private_data = NULL;
192
193 if (atomic_dec_and_test(&dd->user_refcount))
194 complete(&dd->user_comp);
187 195
188 fp->private_data = fd; 196 return -ENOMEM;
197 }
189 198
190 return fd ? 0 : -ENOMEM; 199 return 0;
191} 200}
192 201
193static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, 202static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
798done: 807done:
799 mmdrop(fdata->mm); 808 mmdrop(fdata->mm);
800 kobject_put(&dd->kobj); 809 kobject_put(&dd->kobj);
810
811 if (atomic_dec_and_test(&dd->user_refcount))
812 complete(&dd->user_comp);
813
801 kfree(fdata); 814 kfree(fdata);
802 return 0; 815 return 0;
803} 816}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7eef11b316ff..cc87fd4e534b 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -367,26 +367,6 @@ struct hfi1_packet {
367 u8 etype; 367 u8 etype;
368}; 368};
369 369
370/*
371 * Private data for snoop/capture support.
372 */
373struct hfi1_snoop_data {
374 int mode_flag;
375 struct cdev cdev;
376 struct device *class_dev;
377 /* protect snoop data */
378 spinlock_t snoop_lock;
379 struct list_head queue;
380 wait_queue_head_t waitq;
381 void *filter_value;
382 int (*filter_callback)(void *hdr, void *data, void *value);
383 u64 dcc_cfg; /* saved value of DCC Cfg register */
384};
385
386/* snoop mode_flag values */
387#define HFI1_PORT_SNOOP_MODE 1U
388#define HFI1_PORT_CAPTURE_MODE 2U
389
390struct rvt_sge_state; 370struct rvt_sge_state;
391 371
392/* 372/*
@@ -613,8 +593,6 @@ struct hfi1_pportdata {
613 struct mutex hls_lock; 593 struct mutex hls_lock;
614 u32 host_link_state; 594 u32 host_link_state;
615 595
616 spinlock_t sdma_alllock ____cacheline_aligned_in_smp;
617
618 u32 lstate; /* logical link state */ 596 u32 lstate; /* logical link state */
619 597
620 /* these are the "32 bit" regs */ 598 /* these are the "32 bit" regs */
@@ -1104,8 +1082,6 @@ struct hfi1_devdata {
1104 char *portcntrnames; 1082 char *portcntrnames;
1105 size_t portcntrnameslen; 1083 size_t portcntrnameslen;
1106 1084
1107 struct hfi1_snoop_data hfi1_snoop;
1108
1109 struct err_info_rcvport err_info_rcvport; 1085 struct err_info_rcvport err_info_rcvport;
1110 struct err_info_constraint err_info_rcv_constraint; 1086 struct err_info_constraint err_info_rcv_constraint;
1111 struct err_info_constraint err_info_xmit_constraint; 1087 struct err_info_constraint err_info_xmit_constraint;
@@ -1141,8 +1117,8 @@ struct hfi1_devdata {
1141 rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; 1117 rhf_rcv_function_ptr normal_rhf_rcv_functions[8];
1142 1118
1143 /* 1119 /*
1144 * Handlers for outgoing data so that snoop/capture does not 1120 * Capability to have different send engines simply by changing a
1145 * have to have its hooks in the send path 1121 * pointer value.
1146 */ 1122 */
1147 send_routine process_pio_send; 1123 send_routine process_pio_send;
1148 send_routine process_dma_send; 1124 send_routine process_dma_send;
@@ -1174,6 +1150,10 @@ struct hfi1_devdata {
1174 spinlock_t aspm_lock; 1150 spinlock_t aspm_lock;
1175 /* Number of verbs contexts which have disabled ASPM */ 1151 /* Number of verbs contexts which have disabled ASPM */
1176 atomic_t aspm_disabled_cnt; 1152 atomic_t aspm_disabled_cnt;
1153 /* Keeps track of user space clients */
1154 atomic_t user_refcount;
1155 /* Used to wait for outstanding user space clients before dev removal */
1156 struct completion user_comp;
1177 1157
1178 struct hfi1_affinity *affinity; 1158 struct hfi1_affinity *affinity;
1179 struct rhashtable sdma_rht; 1159 struct rhashtable sdma_rht;
@@ -1221,8 +1201,6 @@ struct hfi1_devdata *hfi1_lookup(int unit);
1221extern u32 hfi1_cpulist_count; 1201extern u32 hfi1_cpulist_count;
1222extern unsigned long *hfi1_cpulist; 1202extern unsigned long *hfi1_cpulist;
1223 1203
1224extern unsigned int snoop_drop_send;
1225extern unsigned int snoop_force_capture;
1226int hfi1_init(struct hfi1_devdata *, int); 1204int hfi1_init(struct hfi1_devdata *, int);
1227int hfi1_count_units(int *npresentp, int *nupp); 1205int hfi1_count_units(int *npresentp, int *nupp);
1228int hfi1_count_active_units(void); 1206int hfi1_count_active_units(void);
@@ -1557,13 +1535,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
1557void reset_link_credits(struct hfi1_devdata *dd); 1535void reset_link_credits(struct hfi1_devdata *dd);
1558void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); 1536void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
1559 1537
1560int snoop_recv_handler(struct hfi1_packet *packet);
1561int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1562 u64 pbc);
1563int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
1564 u64 pbc);
1565void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
1566 u64 pbc, const void *from, size_t count);
1567int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); 1538int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
1568 1539
1569static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) 1540static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
@@ -1763,8 +1734,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
1763 1734
1764int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); 1735int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *);
1765void hfi1_pcie_cleanup(struct pci_dev *); 1736void hfi1_pcie_cleanup(struct pci_dev *);
1766int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *, 1737int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *);
1767 const struct pci_device_id *);
1768void hfi1_pcie_ddcleanup(struct hfi1_devdata *); 1738void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
1769void hfi1_pcie_flr(struct hfi1_devdata *); 1739void hfi1_pcie_flr(struct hfi1_devdata *);
1770int pcie_speeds(struct hfi1_devdata *); 1740int pcie_speeds(struct hfi1_devdata *);
@@ -1799,8 +1769,6 @@ int kdeth_process_expected(struct hfi1_packet *packet);
1799int kdeth_process_eager(struct hfi1_packet *packet); 1769int kdeth_process_eager(struct hfi1_packet *packet);
1800int process_receive_invalid(struct hfi1_packet *packet); 1770int process_receive_invalid(struct hfi1_packet *packet);
1801 1771
1802extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8];
1803
1804void update_sge(struct rvt_sge_state *ss, u32 length); 1772void update_sge(struct rvt_sge_state *ss, u32 length);
1805 1773
1806/* global module parameter variables */ 1774/* global module parameter variables */
@@ -1827,9 +1795,6 @@ extern struct mutex hfi1_mutex;
1827#define DRIVER_NAME "hfi1" 1795#define DRIVER_NAME "hfi1"
1828#define HFI1_USER_MINOR_BASE 0 1796#define HFI1_USER_MINOR_BASE 0
1829#define HFI1_TRACE_MINOR 127 1797#define HFI1_TRACE_MINOR 127
1830#define HFI1_DIAGPKT_MINOR 128
1831#define HFI1_DIAG_MINOR_BASE 129
1832#define HFI1_SNOOP_CAPTURE_BASE 200
1833#define HFI1_NMINORS 255 1798#define HFI1_NMINORS 255
1834 1799
1835#define PCI_VENDOR_ID_INTEL 0x8086 1800#define PCI_VENDOR_ID_INTEL 0x8086
@@ -1848,7 +1813,13 @@ extern struct mutex hfi1_mutex;
1848static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, 1813static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
1849 u16 ctxt_type) 1814 u16 ctxt_type)
1850{ 1815{
1851 u64 base_sc_integrity = 1816 u64 base_sc_integrity;
1817
1818 /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
1819 if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
1820 return 0;
1821
1822 base_sc_integrity =
1852 SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 1823 SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
1853 | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK 1824 | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
1854 | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK 1825 | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
@@ -1863,7 +1834,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
1863 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 1834 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
1864 | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK 1835 | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
1865 | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK 1836 | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
1866 | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
1867 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK 1837 | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
1868 | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; 1838 | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
1869 1839
@@ -1872,18 +1842,23 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
1872 else 1842 else
1873 base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; 1843 base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
1874 1844
1875 if (is_ax(dd)) 1845 /* turn on send-side job key checks if !A0 */
1876 /* turn off send-side job key checks - A0 */ 1846 if (!is_ax(dd))
1877 return base_sc_integrity & 1847 base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
1878 ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1848
1879 return base_sc_integrity; 1849 return base_sc_integrity;
1880} 1850}
1881 1851
1882static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) 1852static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
1883{ 1853{
1884 u64 base_sdma_integrity = 1854 u64 base_sdma_integrity;
1855
1856 /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
1857 if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
1858 return 0;
1859
1860 base_sdma_integrity =
1885 SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK 1861 SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
1886 | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
1887 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK 1862 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
1888 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK 1863 | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
1889 | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK 1864 | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
@@ -1895,14 +1870,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
1895 | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK 1870 | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
1896 | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK 1871 | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
1897 | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK 1872 | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
1898 | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK
1899 | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK 1873 | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
1900 | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; 1874 | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
1901 1875
1902 if (is_ax(dd)) 1876 if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
1903 /* turn off send-side job key checks - A0 */ 1877 base_sdma_integrity |=
1904 return base_sdma_integrity & 1878 SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
1905 ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; 1879
1880 /* turn on send-side job key checks if !A0 */
1881 if (!is_ax(dd))
1882 base_sdma_integrity |=
1883 SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
1884
1906 return base_sdma_integrity; 1885 return base_sdma_integrity;
1907} 1886}
1908 1887
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 60db61536fed..e3b5bc93bc70 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
144 struct hfi1_ctxtdata *rcd; 144 struct hfi1_ctxtdata *rcd;
145 145
146 ppd = dd->pport + (i % dd->num_pports); 146 ppd = dd->pport + (i % dd->num_pports);
147
148 /* dd->rcd[i] gets assigned inside the callee */
147 rcd = hfi1_create_ctxtdata(ppd, i, dd->node); 149 rcd = hfi1_create_ctxtdata(ppd, i, dd->node);
148 if (!rcd) { 150 if (!rcd) {
149 dd_dev_err(dd, 151 dd_dev_err(dd,
@@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
169 if (!rcd->sc) { 171 if (!rcd->sc) {
170 dd_dev_err(dd, 172 dd_dev_err(dd,
171 "Unable to allocate kernel send context, failing\n"); 173 "Unable to allocate kernel send context, failing\n");
172 dd->rcd[rcd->ctxt] = NULL;
173 hfi1_free_ctxtdata(dd, rcd);
174 goto nomem; 174 goto nomem;
175 } 175 }
176 176
@@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
178 if (ret < 0) { 178 if (ret < 0) {
179 dd_dev_err(dd, 179 dd_dev_err(dd,
180 "Failed to setup kernel receive context, failing\n"); 180 "Failed to setup kernel receive context, failing\n");
181 sc_free(rcd->sc);
182 dd->rcd[rcd->ctxt] = NULL;
183 hfi1_free_ctxtdata(dd, rcd);
184 ret = -EFAULT; 181 ret = -EFAULT;
185 goto bail; 182 goto bail;
186 } 183 }
@@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd)
196nomem: 193nomem:
197 ret = -ENOMEM; 194 ret = -ENOMEM;
198bail: 195bail:
196 if (dd->rcd) {
197 for (i = 0; i < dd->num_rcv_contexts; ++i)
198 hfi1_free_ctxtdata(dd, dd->rcd[i]);
199 }
199 kfree(dd->rcd); 200 kfree(dd->rcd);
200 dd->rcd = NULL; 201 dd->rcd = NULL;
201 return ret; 202 return ret;
@@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
216 dd->num_rcv_contexts - dd->first_user_ctxt) 217 dd->num_rcv_contexts - dd->first_user_ctxt)
217 kctxt_ngroups = (dd->rcv_entries.nctxt_extra - 218 kctxt_ngroups = (dd->rcv_entries.nctxt_extra -
218 (dd->num_rcv_contexts - dd->first_user_ctxt)); 219 (dd->num_rcv_contexts - dd->first_user_ctxt));
219 rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); 220 rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa);
220 if (rcd) { 221 if (rcd) {
221 u32 rcvtids, max_entries; 222 u32 rcvtids, max_entries;
222 223
@@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt,
261 } 262 }
262 rcd->eager_base = base * dd->rcv_entries.group_size; 263 rcd->eager_base = base * dd->rcv_entries.group_size;
263 264
264 /* Validate and initialize Rcv Hdr Q variables */
265 if (rcvhdrcnt % HDRQ_INCREMENT) {
266 dd_dev_err(dd,
267 "ctxt%u: header queue count %d must be divisible by %lu\n",
268 rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT);
269 goto bail;
270 }
271 rcd->rcvhdrq_cnt = rcvhdrcnt; 265 rcd->rcvhdrq_cnt = rcvhdrcnt;
272 rcd->rcvhdrqentsize = hfi1_hdrq_entsize; 266 rcd->rcvhdrqentsize = hfi1_hdrq_entsize;
273 /* 267 /*
@@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
506 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); 500 INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
507 501
508 mutex_init(&ppd->hls_lock); 502 mutex_init(&ppd->hls_lock);
509 spin_lock_init(&ppd->sdma_alllock);
510 spin_lock_init(&ppd->qsfp_info.qsfp_lock); 503 spin_lock_init(&ppd->qsfp_info.qsfp_lock);
511 504
512 ppd->qsfp_info.ppd = ppd; 505 ppd->qsfp_info.ppd = ppd;
@@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
1399 hfi1_free_devdata(dd); 1392 hfi1_free_devdata(dd);
1400} 1393}
1401 1394
1395static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
1396{
1397 if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
1398 hfi1_early_err(dev, "Receive header queue count too small\n");
1399 return -EINVAL;
1400 }
1401
1402 if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
1403 hfi1_early_err(dev,
1404 "Receive header queue count cannot be greater than %u\n",
1405 HFI1_MAX_HDRQ_EGRBUF_CNT);
1406 return -EINVAL;
1407 }
1408
1409 if (thecnt % HDRQ_INCREMENT) {
1410 hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
1411 thecnt, HDRQ_INCREMENT);
1412 return -EINVAL;
1413 }
1414
1415 return 0;
1416}
1417
1402static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1418static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1403{ 1419{
1404 int ret = 0, j, pidx, initfail; 1420 int ret = 0, j, pidx, initfail;
1405 struct hfi1_devdata *dd = ERR_PTR(-EINVAL); 1421 struct hfi1_devdata *dd;
1406 struct hfi1_pportdata *ppd; 1422 struct hfi1_pportdata *ppd;
1407 1423
1408 /* First, lock the non-writable module parameters */ 1424 /* First, lock the non-writable module parameters */
1409 HFI1_CAP_LOCK(); 1425 HFI1_CAP_LOCK();
1410 1426
1411 /* Validate some global module parameters */ 1427 /* Validate some global module parameters */
1412 if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { 1428 ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
1413 hfi1_early_err(&pdev->dev, "Header queue count too small\n"); 1429 if (ret)
1414 ret = -EINVAL;
1415 goto bail;
1416 }
1417 if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
1418 hfi1_early_err(&pdev->dev,
1419 "Receive header queue count cannot be greater than %u\n",
1420 HFI1_MAX_HDRQ_EGRBUF_CNT);
1421 ret = -EINVAL;
1422 goto bail; 1430 goto bail;
1423 } 1431
1424 /* use the encoding function as a sanitization check */ 1432 /* use the encoding function as a sanitization check */
1425 if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { 1433 if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
1426 hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", 1434 hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
@@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1461 if (ret) 1469 if (ret)
1462 goto bail; 1470 goto bail;
1463 1471
1464 /* 1472 if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
1465 * Do device-specific initialization, function table setup, dd 1473 ent->device == PCI_DEVICE_ID_INTEL1)) {
1466 * allocation, etc.
1467 */
1468 switch (ent->device) {
1469 case PCI_DEVICE_ID_INTEL0:
1470 case PCI_DEVICE_ID_INTEL1:
1471 dd = hfi1_init_dd(pdev, ent);
1472 break;
1473 default:
1474 hfi1_early_err(&pdev->dev, 1474 hfi1_early_err(&pdev->dev,
1475 "Failing on unknown Intel deviceid 0x%x\n", 1475 "Failing on unknown Intel deviceid 0x%x\n",
1476 ent->device); 1476 ent->device);
1477 ret = -ENODEV; 1477 ret = -ENODEV;
1478 goto clean_bail;
1478 } 1479 }
1479 1480
1480 if (IS_ERR(dd)) 1481 /*
1482 * Do device-specific initialization, function table setup, dd
1483 * allocation, etc.
1484 */
1485 dd = hfi1_init_dd(pdev, ent);
1486
1487 if (IS_ERR(dd)) {
1481 ret = PTR_ERR(dd); 1488 ret = PTR_ERR(dd);
1482 if (ret)
1483 goto clean_bail; /* error already printed */ 1489 goto clean_bail; /* error already printed */
1490 }
1484 1491
1485 ret = create_workqueues(dd); 1492 ret = create_workqueues(dd);
1486 if (ret) 1493 if (ret)
@@ -1538,12 +1545,31 @@ bail:
1538 return ret; 1545 return ret;
1539} 1546}
1540 1547
1548static void wait_for_clients(struct hfi1_devdata *dd)
1549{
1550 /*
1551 * Remove the device init value and complete the device if there is
1552 * no clients or wait for active clients to finish.
1553 */
1554 if (atomic_dec_and_test(&dd->user_refcount))
1555 complete(&dd->user_comp);
1556
1557 wait_for_completion(&dd->user_comp);
1558}
1559
1541static void remove_one(struct pci_dev *pdev) 1560static void remove_one(struct pci_dev *pdev)
1542{ 1561{
1543 struct hfi1_devdata *dd = pci_get_drvdata(pdev); 1562 struct hfi1_devdata *dd = pci_get_drvdata(pdev);
1544 1563
1545 /* close debugfs files before ib unregister */ 1564 /* close debugfs files before ib unregister */
1546 hfi1_dbg_ibdev_exit(&dd->verbs_dev); 1565 hfi1_dbg_ibdev_exit(&dd->verbs_dev);
1566
1567 /* remove the /dev hfi1 interface */
1568 hfi1_device_remove(dd);
1569
1570 /* wait for existing user space clients to finish */
1571 wait_for_clients(dd);
1572
1547 /* unregister from IB core */ 1573 /* unregister from IB core */
1548 hfi1_unregister_ib_device(dd); 1574 hfi1_unregister_ib_device(dd);
1549 1575
@@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev)
1558 /* wait until all of our (qsfp) queue_work() calls complete */ 1584 /* wait until all of our (qsfp) queue_work() calls complete */
1559 flush_workqueue(ib_wq); 1585 flush_workqueue(ib_wq);
1560 1586
1561 hfi1_device_remove(dd);
1562
1563 postinit_cleanup(dd); 1587 postinit_cleanup(dd);
1564} 1588}
1565 1589
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 89c68da1c273..4ac8f330c5cb 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev)
157 * fields required to re-initialize after a chip reset, or for 157 * fields required to re-initialize after a chip reset, or for
158 * various other purposes 158 * various other purposes
159 */ 159 */
160int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, 160int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
161 const struct pci_device_id *ent)
162{ 161{
163 unsigned long len; 162 unsigned long len;
164 resource_size_t addr; 163 resource_size_t addr;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 50a3a36d9363..d89b8745d4c1 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold)
668void set_pio_integrity(struct send_context *sc) 668void set_pio_integrity(struct send_context *sc)
669{ 669{
670 struct hfi1_devdata *dd = sc->dd; 670 struct hfi1_devdata *dd = sc->dd;
671 u64 reg = 0;
672 u32 hw_context = sc->hw_context; 671 u32 hw_context = sc->hw_context;
673 int type = sc->type; 672 int type = sc->type;
674 673
675 /* 674 write_kctxt_csr(dd, hw_context,
676 * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if 675 SC(CHECK_ENABLE),
677 * we're snooping. 676 hfi1_pkt_default_send_ctxt_mask(dd, type));
678 */
679 if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
680 dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE)
681 reg = hfi1_pkt_default_send_ctxt_mask(dd, type);
682
683 write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg);
684} 677}
685 678
686static u32 get_buffers_allocated(struct send_context *sc) 679static u32 get_buffers_allocated(struct send_context *sc)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 8bc5013f39a1..83198a8a8797 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
89 89
90 lockdep_assert_held(&qp->s_lock); 90 lockdep_assert_held(&qp->s_lock);
91 qp->s_flags |= RVT_S_WAIT_RNR; 91 qp->s_flags |= RVT_S_WAIT_RNR;
92 qp->s_timer.expires = jiffies + usecs_to_jiffies(to); 92 priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
93 add_timer(&priv->s_rnr_timer); 93 add_timer(&priv->s_rnr_timer);
94} 94}
95 95
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index fd39bcaa062d..9cbe52d21077 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
2009 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); 2009 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
2010} 2010}
2011 2011
2012#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
2013(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
2014
2015#define SET_STATIC_RATE_CONTROL_SMASK(r) \
2016(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
2017/* 2012/*
2018 * set_sdma_integrity 2013 * set_sdma_integrity
2019 * 2014 *
@@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde)
2022static void set_sdma_integrity(struct sdma_engine *sde) 2017static void set_sdma_integrity(struct sdma_engine *sde)
2023{ 2018{
2024 struct hfi1_devdata *dd = sde->dd; 2019 struct hfi1_devdata *dd = sde->dd;
2025 u64 reg;
2026
2027 if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
2028 return;
2029
2030 reg = hfi1_pkt_base_sdma_integrity(dd);
2031
2032 if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
2033 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
2034 else
2035 SET_STATIC_RATE_CONTROL_SMASK(reg);
2036 2020
2037 write_sde_csr(sde, SD(CHECK_ENABLE), reg); 2021 write_sde_csr(sde, SD(CHECK_ENABLE),
2022 hfi1_pkt_base_sdma_integrity(dd));
2038} 2023}
2039 2024
2040static void init_sdma_regs( 2025static void init_sdma_regs(
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index edba22461a9c..919a5474e651 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -49,7 +49,6 @@
49#include "hfi.h" 49#include "hfi.h"
50#include "mad.h" 50#include "mad.h"
51#include "trace.h" 51#include "trace.h"
52#include "affinity.h"
53 52
54/* 53/*
55 * Start of per-port congestion control structures and support code 54 * Start of per-port congestion control structures and support code
@@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device,
623 return ret; 622 return ret;
624} 623}
625 624
626static ssize_t show_sdma_affinity(struct device *device,
627 struct device_attribute *attr, char *buf)
628{
629 struct hfi1_ibdev *dev =
630 container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
631 struct hfi1_devdata *dd = dd_from_dev(dev);
632
633 return hfi1_get_sdma_affinity(dd, buf);
634}
635
636static ssize_t store_sdma_affinity(struct device *device,
637 struct device_attribute *attr,
638 const char *buf, size_t count)
639{
640 struct hfi1_ibdev *dev =
641 container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
642 struct hfi1_devdata *dd = dd_from_dev(dev);
643
644 return hfi1_set_sdma_affinity(dd, buf, count);
645}
646
647/* 625/*
648 * end of per-unit (or driver, in some cases, but replicated 626 * end of per-unit (or driver, in some cases, but replicated
649 * per unit) functions 627 * per unit) functions
@@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
658static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 636static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
659static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); 637static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
660static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); 638static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
661static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity,
662 store_sdma_affinity);
663 639
664static struct device_attribute *hfi1_attributes[] = { 640static struct device_attribute *hfi1_attributes[] = {
665 &dev_attr_hw_rev, 641 &dev_attr_hw_rev,
@@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = {
670 &dev_attr_boardversion, 646 &dev_attr_boardversion,
671 &dev_attr_tempsense, 647 &dev_attr_tempsense,
672 &dev_attr_chip_reset, 648 &dev_attr_chip_reset,
673 &dev_attr_sdma_affinity,
674}; 649};
675 650
676int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, 651int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h
index 11e02b228922..f77e59fb43fe 100644
--- a/drivers/infiniband/hw/hfi1/trace_rx.h
+++ b/drivers/infiniband/hw/hfi1/trace_rx.h
@@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate,
253 ) 253 )
254 ); 254 );
255 255
256#define SNOOP_PRN \
257 "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \
258 "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]"
259
260TRACE_EVENT(snoop_capture,
261 TP_PROTO(struct hfi1_devdata *dd,
262 int hdr_len,
263 struct ib_header *hdr,
264 int data_len,
265 void *data),
266 TP_ARGS(dd, hdr_len, hdr, data_len, data),
267 TP_STRUCT__entry(
268 DD_DEV_ENTRY(dd)
269 __field(u16, slid)
270 __field(u16, dlid)
271 __field(u32, qpn)
272 __field(u8, opcode)
273 __field(u8, sl)
274 __field(u16, pkey)
275 __field(u32, hdr_len)
276 __field(u32, data_len)
277 __field(u8, lnh)
278 __dynamic_array(u8, raw_hdr, hdr_len)
279 __dynamic_array(u8, raw_pkt, data_len)
280 ),
281 TP_fast_assign(
282 struct ib_other_headers *ohdr;
283
284 __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3);
285 if (__entry->lnh == HFI1_LRH_BTH)
286 ohdr = &hdr->u.oth;
287 else
288 ohdr = &hdr->u.l.oth;
289 DD_DEV_ASSIGN(dd);
290 __entry->slid = be16_to_cpu(hdr->lrh[3]);
291 __entry->dlid = be16_to_cpu(hdr->lrh[1]);
292 __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
293 __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff;
294 __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf;
295 __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff;
296 __entry->hdr_len = hdr_len;
297 __entry->data_len = data_len;
298 memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len);
299 memcpy(__get_dynamic_array(raw_pkt), data, data_len);
300 ),
301 TP_printk(
302 "[%s] " SNOOP_PRN,
303 __get_str(dev),
304 __entry->slid,
305 __entry->dlid,
306 __entry->qpn,
307 __entry->opcode,
308 show_ib_opcode(__entry->opcode),
309 __entry->sl,
310 __entry->pkey,
311 __entry->hdr_len,
312 __entry->data_len
313 )
314);
315
316#endif /* __HFI1_TRACE_RX_H */ 256#endif /* __HFI1_TRACE_RX_H */
317 257
318#undef TRACE_INCLUDE_PATH 258#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a761f804111e..77697d690f3e 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1144,7 +1144,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
1144 rb_node = hfi1_mmu_rb_extract(pq->handler, 1144 rb_node = hfi1_mmu_rb_extract(pq->handler,
1145 (unsigned long)iovec->iov.iov_base, 1145 (unsigned long)iovec->iov.iov_base,
1146 iovec->iov.iov_len); 1146 iovec->iov.iov_len);
1147 if (rb_node && !IS_ERR(rb_node)) 1147 if (rb_node)
1148 node = container_of(rb_node, struct sdma_mmu_node, rb); 1148 node = container_of(rb_node, struct sdma_mmu_node, rb);
1149 else 1149 else
1150 rb_node = NULL; 1150 rb_node = NULL;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 5fc623362731..b9bf0759f10a 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
102 if (vlan_tag < 0x1000) 102 if (vlan_tag < 0x1000)
103 vlan_tag |= (ah_attr->sl & 7) << 13; 103 vlan_tag |= (ah_attr->sl & 7) << 13;
104 ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); 104 ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
105 ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); 105 ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
106 if (ret < 0)
107 return ERR_PTR(ret);
108 ah->av.eth.gid_index = ret;
106 ah->av.eth.vlan = cpu_to_be16(vlan_tag); 109 ah->av.eth.vlan = cpu_to_be16(vlan_tag);
107 ah->av.eth.hop_limit = ah_attr->grh.hop_limit; 110 ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
108 if (ah_attr->static_rate) { 111 if (ah_attr->static_rate) {
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1ea686b9e0f9..6a0fec357dae 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
253 if (context) 253 if (context)
254 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { 254 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
255 err = -EFAULT; 255 err = -EFAULT;
256 goto err_dbmap; 256 goto err_cq_free;
257 } 257 }
258 258
259 return &cq->ibcq; 259 return &cq->ibcq;
260 260
261err_cq_free:
262 mlx4_cq_free(dev->dev, &cq->mcq);
263
261err_dbmap: 264err_dbmap:
262 if (context) 265 if (context)
263 mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); 266 mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 79d017baf6f4..fcd04b881ec1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
932 if (err) 932 if (err)
933 goto err_create; 933 goto err_create;
934 } else { 934 } else {
935 /* for now choose 64 bytes till we have a proper interface */ 935 cqe_size = cache_line_size() == 128 ? 128 : 64;
936 cqe_size = 64;
937 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, 936 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
938 &index, &inlen); 937 &index, &inlen);
939 if (err) 938 if (err)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 63036c731626..32b09f059c84 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
2311{ 2311{
2312 struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; 2312 struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
2313 struct ib_event ibev; 2313 struct ib_event ibev;
2314 2314 bool fatal = false;
2315 u8 port = 0; 2315 u8 port = 0;
2316 2316
2317 switch (event) { 2317 switch (event) {
2318 case MLX5_DEV_EVENT_SYS_ERROR: 2318 case MLX5_DEV_EVENT_SYS_ERROR:
2319 ibdev->ib_active = false;
2320 ibev.event = IB_EVENT_DEVICE_FATAL; 2319 ibev.event = IB_EVENT_DEVICE_FATAL;
2321 mlx5_ib_handle_internal_error(ibdev); 2320 mlx5_ib_handle_internal_error(ibdev);
2321 fatal = true;
2322 break; 2322 break;
2323 2323
2324 case MLX5_DEV_EVENT_PORT_UP: 2324 case MLX5_DEV_EVENT_PORT_UP:
@@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
2370 2370
2371 if (ibdev->ib_active) 2371 if (ibdev->ib_active)
2372 ib_dispatch_event(&ibev); 2372 ib_dispatch_event(&ibev);
2373
2374 if (fatal)
2375 ibdev->ib_active = false;
2373} 2376}
2374 2377
2375static void get_ext_port_caps(struct mlx5_ib_dev *dev) 2378static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
3115 } 3118 }
3116 err = init_node_data(dev); 3119 err = init_node_data(dev);
3117 if (err) 3120 if (err)
3118 goto err_dealloc; 3121 goto err_free_port;
3119 3122
3120 mutex_init(&dev->flow_db.lock); 3123 mutex_init(&dev->flow_db.lock);
3121 mutex_init(&dev->cap_mask_mutex); 3124 mutex_init(&dev->cap_mask_mutex);
@@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
3125 if (ll == IB_LINK_LAYER_ETHERNET) { 3128 if (ll == IB_LINK_LAYER_ETHERNET) {
3126 err = mlx5_enable_roce(dev); 3129 err = mlx5_enable_roce(dev);
3127 if (err) 3130 if (err)
3128 goto err_dealloc; 3131 goto err_free_port;
3129 } 3132 }
3130 3133
3131 err = create_dev_resources(&dev->devr); 3134 err = create_dev_resources(&dev->devr);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index dcdcd195fe53..7d689903c87c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -626,6 +626,8 @@ struct mlx5_ib_dev {
626 struct mlx5_ib_resources devr; 626 struct mlx5_ib_resources devr;
627 struct mlx5_mr_cache cache; 627 struct mlx5_mr_cache cache;
628 struct timer_list delay_timer; 628 struct timer_list delay_timer;
629 /* Prevents soft lock on massive reg MRs */
630 struct mutex slow_path_mutex;
629 int fill_delay; 631 int fill_delay;
630#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 632#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
631 struct ib_odp_caps odp_caps; 633 struct ib_odp_caps odp_caps;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d4ad672b905b..4e9012463c37 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
610 int err; 610 int err;
611 int i; 611 int i;
612 612
613 mutex_init(&dev->slow_path_mutex);
613 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); 614 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
614 if (!cache->wq) { 615 if (!cache->wq) {
615 mlx5_ib_warn(dev, "failed to create work queue\n"); 616 mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1182 goto error; 1183 goto error;
1183 } 1184 }
1184 1185
1185 if (!mr) 1186 if (!mr) {
1187 mutex_lock(&dev->slow_path_mutex);
1186 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, 1188 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
1187 page_shift, access_flags); 1189 page_shift, access_flags);
1190 mutex_unlock(&dev->slow_path_mutex);
1191 }
1188 1192
1189 if (IS_ERR(mr)) { 1193 if (IS_ERR(mr)) {
1190 err = PTR_ERR(mr); 1194 err = PTR_ERR(mr);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7ce97daf26c6..d1e921816bfe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2051,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
2051 2051
2052 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", 2052 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
2053 qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, 2053 qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
2054 to_mcq(init_attr->recv_cq)->mcq.cqn, 2054 init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
2055 to_mcq(init_attr->send_cq)->mcq.cqn); 2055 init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
2056 2056
2057 qp->trans_qp.xrcdn = xrcdn; 2057 qp->trans_qp.xrcdn = xrcdn;
2058 2058
@@ -4814,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
4814 udata->inlen)) 4814 udata->inlen))
4815 return ERR_PTR(-EOPNOTSUPP); 4815 return ERR_PTR(-EOPNOTSUPP);
4816 4816
4817 if (init_attr->log_ind_tbl_size >
4818 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
4819 mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
4820 init_attr->log_ind_tbl_size,
4821 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
4822 return ERR_PTR(-EINVAL);
4823 }
4824
4817 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 4825 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
4818 if (udata->outlen && udata->outlen < min_resp_len) 4826 if (udata->outlen && udata->outlen < min_resp_len)
4819 return ERR_PTR(-EINVAL); 4827 return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
index 01f71caa3ac4..f2cefb0d9180 100644
--- a/drivers/infiniband/sw/rdmavt/dma.c
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
90 if (WARN_ON(!valid_dma_direction(direction))) 90 if (WARN_ON(!valid_dma_direction(direction)))
91 return BAD_DMA_ADDRESS; 91 return BAD_DMA_ADDRESS;
92 92
93 if (offset + size > PAGE_SIZE)
94 return BAD_DMA_ADDRESS;
95
96 addr = (u64)page_address(page); 93 addr = (u64)page_address(page);
97 if (addr) 94 if (addr)
98 addr += offset; 95 addr += offset;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index b8258e4f0aea..ffff5a54cb34 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
243{ 243{
244 int err; 244 int err;
245 struct socket *sock; 245 struct socket *sock;
246 struct udp_port_cfg udp_cfg; 246 struct udp_port_cfg udp_cfg = {0};
247 struct udp_tunnel_sock_cfg tnl_cfg; 247 struct udp_tunnel_sock_cfg tnl_cfg = {0};
248
249 memset(&udp_cfg, 0, sizeof(udp_cfg));
250 248
251 if (ipv6) { 249 if (ipv6) {
252 udp_cfg.family = AF_INET6; 250 udp_cfg.family = AF_INET6;
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
264 return ERR_PTR(err); 262 return ERR_PTR(err);
265 } 263 }
266 264
267 tnl_cfg.sk_user_data = NULL;
268 tnl_cfg.encap_type = 1; 265 tnl_cfg.encap_type = 1;
269 tnl_cfg.encap_rcv = rxe_udp_encap_recv; 266 tnl_cfg.encap_rcv = rxe_udp_encap_recv;
270 tnl_cfg.encap_destroy = NULL;
271 267
272 /* Setup UDP tunnel */ 268 /* Setup UDP tunnel */
273 setup_udp_tunnel_sock(net, sock, &tnl_cfg); 269 setup_udp_tunnel_sock(net, sock, &tnl_cfg);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index b8036cfbce04..c3e60e4bde6e 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
522 if (qp->sq.queue) { 522 if (qp->sq.queue) {
523 __rxe_do_task(&qp->comp.task); 523 __rxe_do_task(&qp->comp.task);
524 __rxe_do_task(&qp->req.task); 524 __rxe_do_task(&qp->req.task);
525 rxe_queue_reset(qp->sq.queue);
525 } 526 }
526 527
527 /* cleanup attributes */ 528 /* cleanup attributes */
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
573{ 574{
574 qp->req.state = QP_STATE_ERROR; 575 qp->req.state = QP_STATE_ERROR;
575 qp->resp.state = QP_STATE_ERROR; 576 qp->resp.state = QP_STATE_ERROR;
577 qp->attr.qp_state = IB_QPS_ERR;
576 578
577 /* drain work and packet queues */ 579 /* drain work and packet queues */
578 rxe_run_task(&qp->resp.task, 1); 580 rxe_run_task(&qp->resp.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 08274254eb88..d14bf496d62d 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -84,6 +84,15 @@ err1:
84 return -EINVAL; 84 return -EINVAL;
85} 85}
86 86
87inline void rxe_queue_reset(struct rxe_queue *q)
88{
89 /* queue is comprised from header and the memory
90 * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h
91 * reset only the queue itself and not the management header
92 */
93 memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
94}
95
87struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, 96struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
88 int *num_elem, 97 int *num_elem,
89 unsigned int elem_size) 98 unsigned int elem_size)
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 239fd609c31e..8c8641c87817 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
84 size_t buf_size, 84 size_t buf_size,
85 struct rxe_mmap_info **ip_p); 85 struct rxe_mmap_info **ip_p);
86 86
87void rxe_queue_reset(struct rxe_queue *q);
88
87struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, 89struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
88 int *num_elem, 90 int *num_elem,
89 unsigned int elem_size); 91 unsigned int elem_size);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 832846b73ea0..22bd9630dcd9 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -696,7 +696,8 @@ next_wqe:
696 qp->req.wqe_index); 696 qp->req.wqe_index);
697 wqe->state = wqe_state_done; 697 wqe->state = wqe_state_done;
698 wqe->status = IB_WC_SUCCESS; 698 wqe->status = IB_WC_SUCCESS;
699 goto complete; 699 __rxe_do_task(&qp->comp.task);
700 return 0;
700 } 701 }
701 payload = mtu; 702 payload = mtu;
702 } 703 }
@@ -745,13 +746,17 @@ err:
745 wqe->status = IB_WC_LOC_PROT_ERR; 746 wqe->status = IB_WC_LOC_PROT_ERR;
746 wqe->state = wqe_state_error; 747 wqe->state = wqe_state_error;
747 748
748complete: 749 /*
749 if (qp_type(qp) != IB_QPT_RC) { 750 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
750 while (rxe_completer(qp) == 0) 751 * ---------8<---------8<-------------
751 ; 752 * ...Note that if a completion error occurs, a Work Completion
752 } 753 * will always be generated, even if the signaling
753 754 * indicator requests an Unsignaled Completion.
754 return 0; 755 * ---------8<---------8<-------------
756 */
757 wqe->wr.send_flags |= IB_SEND_SIGNALED;
758 __rxe_do_task(&qp->comp.task);
759 return -EAGAIN;
755 760
756exit: 761exit:
757 return -EAGAIN; 762 return -EAGAIN;
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index fb4b185dea96..bee267424972 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1115,10 +1115,6 @@ static int psmouse_extensions(struct psmouse *psmouse,
1115 if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2, 1115 if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
1116 &max_proto, set_properties, true)) 1116 &max_proto, set_properties, true))
1117 return PSMOUSE_TOUCHKIT_PS2; 1117 return PSMOUSE_TOUCHKIT_PS2;
1118
1119 if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
1120 &max_proto, set_properties, true))
1121 return PSMOUSE_BYD;
1122 } 1118 }
1123 1119
1124 /* 1120 /*
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 58470f5ced04..8c53748a769d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
338 struct pci_dev *pdev = to_pci_dev(data); 338 struct pci_dev *pdev = to_pci_dev(data);
339 struct dmar_pci_notify_info *info; 339 struct dmar_pci_notify_info *info;
340 340
341 /* Only care about add/remove events for physical functions */ 341 /* Only care about add/remove events for physical functions.
342 * For VFs we actually do the lookup based on the corresponding
343 * PF in device_to_iommu() anyway. */
342 if (pdev->is_virtfn) 344 if (pdev->is_virtfn)
343 return NOTIFY_DONE; 345 return NOTIFY_DONE;
344 if (action != BUS_NOTIFY_ADD_DEVICE && 346 if (action != BUS_NOTIFY_ADD_DEVICE &&
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 3965e73db51c..d8376c2d18b3 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
892 return NULL; 892 return NULL;
893 893
894 if (dev_is_pci(dev)) { 894 if (dev_is_pci(dev)) {
895 struct pci_dev *pf_pdev;
896
895 pdev = to_pci_dev(dev); 897 pdev = to_pci_dev(dev);
898 /* VFs aren't listed in scope tables; we need to look up
899 * the PF instead to find the IOMMU. */
900 pf_pdev = pci_physfn(pdev);
901 dev = &pf_pdev->dev;
896 segment = pci_domain_nr(pdev->bus); 902 segment = pci_domain_nr(pdev->bus);
897 } else if (has_acpi_companion(dev)) 903 } else if (has_acpi_companion(dev))
898 dev = &ACPI_COMPANION(dev)->dev; 904 dev = &ACPI_COMPANION(dev)->dev;
@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
905 for_each_active_dev_scope(drhd->devices, 911 for_each_active_dev_scope(drhd->devices,
906 drhd->devices_cnt, i, tmp) { 912 drhd->devices_cnt, i, tmp) {
907 if (tmp == dev) { 913 if (tmp == dev) {
914 /* For a VF use its original BDF# not that of the PF
915 * which we used for the IOMMU lookup. Strictly speaking
916 * we could do this for all PCI devices; we only need to
917 * get the BDF# from the scope table for ACPI matches. */
918 if (pdev->is_virtfn)
919 goto got_pdev;
920
908 *bus = drhd->devices[i].bus; 921 *bus = drhd->devices[i].bus;
909 *devfn = drhd->devices[i].devfn; 922 *devfn = drhd->devices[i].devfn;
910 goto out; 923 goto out;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 8ebb3530afa7..cb72e0011310 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
39 struct page *pages; 39 struct page *pages;
40 int order; 40 int order;
41 41
42 order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; 42 /* Start at 2 because it's defined as 2^(1+PSS) */
43 if (order < 0) 43 iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
44 order = 0; 44
45 45 /* Eventually I'm promised we will get a multi-level PASID table
46 * and it won't have to be physically contiguous. Until then,
47 * limit the size because 8MiB contiguous allocations can be hard
48 * to come by. The limit of 0x20000, which is 1MiB for each of
49 * the PASID and PASID-state tables, is somewhat arbitrary. */
50 if (iommu->pasid_max > 0x20000)
51 iommu->pasid_max = 0x20000;
52
53 order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
46 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 54 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
47 if (!pages) { 55 if (!pages) {
48 pr_warn("IOMMU: %s: Failed to allocate PASID table\n", 56 pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
53 pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); 61 pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
54 62
55 if (ecap_dis(iommu->ecap)) { 63 if (ecap_dis(iommu->ecap)) {
64 /* Just making it explicit... */
65 BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
56 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 66 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
57 if (pages) 67 if (pages)
58 iommu->pasid_state_table = page_address(pages); 68 iommu->pasid_state_table = page_address(pages);
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
68 78
69int intel_svm_free_pasid_tables(struct intel_iommu *iommu) 79int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
70{ 80{
71 int order; 81 int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
72
73 order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
74 if (order < 0)
75 order = 0;
76 82
77 if (iommu->pasid_table) { 83 if (iommu->pasid_table) {
78 free_pages((unsigned long)iommu->pasid_table, order); 84 free_pages((unsigned long)iommu->pasid_table, order);
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
371 } 377 }
372 svm->iommu = iommu; 378 svm->iommu = iommu;
373 379
374 if (pasid_max > 2 << ecap_pss(iommu->ecap)) 380 if (pasid_max > iommu->pasid_max)
375 pasid_max = 2 << ecap_pss(iommu->ecap); 381 pasid_max = iommu->pasid_max;
376 382
377 /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ 383 /* Do not use PASID 0 in caching mode (virtualised IOMMU) */
378 ret = idr_alloc(&iommu->pasid_idr, svm, 384 ret = idr_alloc(&iommu->pasid_idr, svm,
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 08c87fadca8c..1f32688c312d 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -65,6 +65,7 @@
65#include <linux/mailbox_controller.h> 65#include <linux/mailbox_controller.h>
66#include <linux/mailbox_client.h> 66#include <linux/mailbox_client.h>
67#include <linux/io-64-nonatomic-lo-hi.h> 67#include <linux/io-64-nonatomic-lo-hi.h>
68#include <acpi/pcc.h>
68 69
69#include "mailbox.h" 70#include "mailbox.h"
70 71
@@ -267,6 +268,8 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
267 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) 268 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
268 chan->txdone_method |= TXDONE_BY_ACK; 269 chan->txdone_method |= TXDONE_BY_ACK;
269 270
271 spin_unlock_irqrestore(&chan->lock, flags);
272
270 if (pcc_doorbell_irq[subspace_id] > 0) { 273 if (pcc_doorbell_irq[subspace_id] > 0) {
271 int rc; 274 int rc;
272 275
@@ -275,12 +278,11 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
275 if (unlikely(rc)) { 278 if (unlikely(rc)) {
276 dev_err(dev, "failed to register PCC interrupt %d\n", 279 dev_err(dev, "failed to register PCC interrupt %d\n",
277 pcc_doorbell_irq[subspace_id]); 280 pcc_doorbell_irq[subspace_id]);
281 pcc_mbox_free_channel(chan);
278 chan = ERR_PTR(rc); 282 chan = ERR_PTR(rc);
279 } 283 }
280 } 284 }
281 285
282 spin_unlock_irqrestore(&chan->lock, flags);
283
284 return chan; 286 return chan;
285} 287}
286EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); 288EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
@@ -304,20 +306,19 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
304 return; 306 return;
305 } 307 }
306 308
309 if (pcc_doorbell_irq[id] > 0)
310 devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
311
307 spin_lock_irqsave(&chan->lock, flags); 312 spin_lock_irqsave(&chan->lock, flags);
308 chan->cl = NULL; 313 chan->cl = NULL;
309 chan->active_req = NULL; 314 chan->active_req = NULL;
310 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) 315 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
311 chan->txdone_method = TXDONE_BY_POLL; 316 chan->txdone_method = TXDONE_BY_POLL;
312 317
313 if (pcc_doorbell_irq[id] > 0)
314 devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
315
316 spin_unlock_irqrestore(&chan->lock, flags); 318 spin_unlock_irqrestore(&chan->lock, flags);
317} 319}
318EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); 320EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
319 321
320
321/** 322/**
322 * pcc_send_data - Called from Mailbox Controller code. Used 323 * pcc_send_data - Called from Mailbox Controller code. Used
323 * here only to ring the channel doorbell. The PCC client 324 * here only to ring the channel doorbell. The PCC client
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c
index be19afeed7a9..93f59bfea092 100644
--- a/drivers/media/dvb-frontends/gp8psk-fe.c
+++ b/drivers/media/dvb-frontends/gp8psk-fe.c
@@ -1,5 +1,5 @@
1/* DVB USB compliant Linux driver for the 1/*
2 * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module 2 * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module
3 * 3 *
4 * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) 4 * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com)
5 * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) 5 * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com)
@@ -8,11 +8,9 @@
8 * 8 *
9 * This module is based off the vp7045 and vp702x modules 9 * This module is based off the vp7045 and vp702x modules
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free 12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation, version 2. 13 * Software Foundation, version 2.
14 *
15 * see Documentation/dvb/README.dvb-usb for more information
16 */ 14 */
17 15
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -395,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = {
395 .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, 393 .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd,
396 .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage 394 .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage
397}; 395};
396
397MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
398MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S");
399MODULE_VERSION("1.1");
400MODULE_LICENSE("GPL");
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 317ef63ee789..8d96a22647b3 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv)
281 int i; 281 int i;
282 tuner_dbg("%s called\n", __func__); 282 tuner_dbg("%s called\n", __func__);
283 283
284 /* free allocated f/w string */
285 if (priv->fname != firmware_name)
286 kfree(priv->fname);
287 priv->fname = NULL;
288
289 priv->state = XC2028_NO_FIRMWARE;
290 memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
291
284 if (!priv->firm) 292 if (!priv->firm)
285 return; 293 return;
286 294
@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv)
291 299
292 priv->firm = NULL; 300 priv->firm = NULL;
293 priv->firm_size = 0; 301 priv->firm_size = 0;
294 priv->state = XC2028_NO_FIRMWARE;
295
296 memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
297} 302}
298 303
299static int load_all_firmwares(struct dvb_frontend *fe, 304static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@ read_not_reliable:
884 return 0; 889 return 0;
885 890
886fail: 891fail:
887 priv->state = XC2028_NO_FIRMWARE; 892 free_firmware(priv);
888 893
889 memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
890 if (retry_count < 8) { 894 if (retry_count < 8) {
891 msleep(50); 895 msleep(50);
892 retry_count++; 896 retry_count++;
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
1332 mutex_lock(&xc2028_list_mutex); 1336 mutex_lock(&xc2028_list_mutex);
1333 1337
1334 /* only perform final cleanup if this is the last instance */ 1338 /* only perform final cleanup if this is the last instance */
1335 if (hybrid_tuner_report_instance_count(priv) == 1) { 1339 if (hybrid_tuner_report_instance_count(priv) == 1)
1336 free_firmware(priv); 1340 free_firmware(priv);
1337 kfree(priv->ctrl.fname);
1338 priv->ctrl.fname = NULL;
1339 }
1340 1341
1341 if (priv) 1342 if (priv)
1342 hybrid_tuner_release_state(priv); 1343 hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1399 1400
1400 /* 1401 /*
1401 * Copy the config data. 1402 * Copy the config data.
1402 * For the firmware name, keep a local copy of the string,
1403 * in order to avoid troubles during device release.
1404 */ 1403 */
1405 kfree(priv->ctrl.fname);
1406 priv->ctrl.fname = NULL;
1407 memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); 1404 memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
1408 if (p->fname) {
1409 priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
1410 if (priv->ctrl.fname == NULL) {
1411 rc = -ENOMEM;
1412 goto unlock;
1413 }
1414 }
1415 1405
1416 /* 1406 /*
1417 * If firmware name changed, frees firmware. As free_firmware will 1407 * If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
1426 1416
1427 if (priv->state == XC2028_NO_FIRMWARE) { 1417 if (priv->state == XC2028_NO_FIRMWARE) {
1428 if (!firmware_name[0]) 1418 if (!firmware_name[0])
1429 priv->fname = priv->ctrl.fname; 1419 priv->fname = kstrdup(p->fname, GFP_KERNEL);
1430 else 1420 else
1431 priv->fname = firmware_name; 1421 priv->fname = firmware_name;
1432 1422
1423 if (!priv->fname) {
1424 rc = -ENOMEM;
1425 goto unlock;
1426 }
1427
1433 rc = request_firmware_nowait(THIS_MODULE, 1, 1428 rc = request_firmware_nowait(THIS_MODULE, 1,
1434 priv->fname, 1429 priv->fname,
1435 priv->i2c_props.adap->dev.parent, 1430 priv->i2c_props.adap->dev.parent,
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 3228fd182a99..9ff243970e93 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -123,19 +123,6 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
123 .properties = apl_i2c_properties, 123 .properties = apl_i2c_properties,
124}; 124};
125 125
126static const struct intel_lpss_platform_info kbl_info = {
127 .clk_rate = 120000000,
128};
129
130static const struct intel_lpss_platform_info kbl_uart_info = {
131 .clk_rate = 120000000,
132 .clk_con_id = "baudclk",
133};
134
135static const struct intel_lpss_platform_info kbl_i2c_info = {
136 .clk_rate = 133000000,
137};
138
139static const struct pci_device_id intel_lpss_pci_ids[] = { 126static const struct pci_device_id intel_lpss_pci_ids[] = {
140 /* BXT A-Step */ 127 /* BXT A-Step */
141 { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, 128 { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
@@ -207,15 +194,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
207 { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, 194 { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info },
208 { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, 195 { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
209 /* KBL-H */ 196 /* KBL-H */
210 { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info }, 197 { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info },
211 { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info }, 198 { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info },
212 { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info }, 199 { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info },
213 { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info }, 200 { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info },
214 { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info }, 201 { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info },
215 { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info }, 202 { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info },
216 { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info }, 203 { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info },
217 { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info }, 204 { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info },
218 { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info }, 205 { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info },
219 { } 206 { }
220}; 207};
221MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); 208MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 41b113875d64..70c646b0097d 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev)
502 for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) 502 for (i = 0; i < LPSS_PRIV_REG_COUNT; i++)
503 lpss->priv_ctx[i] = readl(lpss->priv + i * 4); 503 lpss->priv_ctx[i] = readl(lpss->priv + i * 4);
504 504
505 /* Put the device into reset state */
506 writel(0, lpss->priv + LPSS_PRIV_RESETS);
507
508 return 0; 505 return 0;
509} 506}
510EXPORT_SYMBOL_GPL(intel_lpss_suspend); 507EXPORT_SYMBOL_GPL(intel_lpss_suspend);
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 43e54b7e908f..f9a8c5203873 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -86,6 +86,7 @@ enum bxtwc_irqs_level2 {
86 BXTWC_THRM2_IRQ, 86 BXTWC_THRM2_IRQ,
87 BXTWC_BCU_IRQ, 87 BXTWC_BCU_IRQ,
88 BXTWC_ADC_IRQ, 88 BXTWC_ADC_IRQ,
89 BXTWC_USBC_IRQ,
89 BXTWC_CHGR0_IRQ, 90 BXTWC_CHGR0_IRQ,
90 BXTWC_CHGR1_IRQ, 91 BXTWC_CHGR1_IRQ,
91 BXTWC_GPIO0_IRQ, 92 BXTWC_GPIO0_IRQ,
@@ -111,7 +112,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
111 REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), 112 REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff),
112 REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), 113 REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f),
113 REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), 114 REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff),
114 REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f), 115 REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)),
116 REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f),
115 REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), 117 REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f),
116 REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), 118 REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff),
117 REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), 119 REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f),
@@ -146,7 +148,7 @@ static struct resource adc_resources[] = {
146}; 148};
147 149
148static struct resource usbc_resources[] = { 150static struct resource usbc_resources[] = {
149 DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"), 151 DEFINE_RES_IRQ(BXTWC_USBC_IRQ),
150}; 152};
151 153
152static struct resource charger_resources[] = { 154static struct resource charger_resources[] = {
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 3ac486a597f3..c57e407020f1 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
399 clones[i]); 399 clones[i]);
400 } 400 }
401 401
402 put_device(dev);
403
402 return 0; 404 return 0;
403} 405}
404EXPORT_SYMBOL(mfd_clone_cell); 406EXPORT_SYMBOL(mfd_clone_cell);
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index cfdae8a3d779..b0c7bcdaf5df 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -851,6 +851,8 @@ static int stmpe_reset(struct stmpe *stmpe)
851 if (ret < 0) 851 if (ret < 0)
852 return ret; 852 return ret;
853 853
854 msleep(10);
855
854 timeout = jiffies + msecs_to_jiffies(100); 856 timeout = jiffies + msecs_to_jiffies(100);
855 while (time_before(jiffies, timeout)) { 857 while (time_before(jiffies, timeout)) {
856 ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]); 858 ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]);
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 2f2225e845ef..b93fe4c4957a 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -73,8 +73,10 @@ static struct syscon *of_syscon_register(struct device_node *np)
73 /* Parse the device's DT node for an endianness specification */ 73 /* Parse the device's DT node for an endianness specification */
74 if (of_property_read_bool(np, "big-endian")) 74 if (of_property_read_bool(np, "big-endian"))
75 syscon_config.val_format_endian = REGMAP_ENDIAN_BIG; 75 syscon_config.val_format_endian = REGMAP_ENDIAN_BIG;
76 else if (of_property_read_bool(np, "little-endian")) 76 else if (of_property_read_bool(np, "little-endian"))
77 syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE; 77 syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE;
78 else if (of_property_read_bool(np, "native-endian"))
79 syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE;
78 80
79 /* 81 /*
80 * search for reg-io-width property in DT. If it is not provided, 82 * search for reg-io-width property in DT. If it is not provided,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 7eec619a6023..8588dbad3301 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -393,8 +393,13 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
393 BUG(); 393 BUG();
394 goto err; 394 goto err;
395 } 395 }
396 396
397 ret = devm_regulator_bulk_get(wm8994->dev, wm8994->num_supplies, 397 /*
398 * Can't use devres helper here as some of the supplies are provided by
399 * wm8994->dev's children (regulators) and those regulators are
400 * unregistered by the devres core before the supplies are freed.
401 */
402 ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
398 wm8994->supplies); 403 wm8994->supplies);
399 if (ret != 0) { 404 if (ret != 0) {
400 dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret); 405 dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
@@ -405,7 +410,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
405 wm8994->supplies); 410 wm8994->supplies);
406 if (ret != 0) { 411 if (ret != 0) {
407 dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret); 412 dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
408 goto err; 413 goto err_regulator_free;
409 } 414 }
410 415
411 ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET); 416 ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET);
@@ -596,6 +601,8 @@ err_irq:
596err_enable: 601err_enable:
597 regulator_bulk_disable(wm8994->num_supplies, 602 regulator_bulk_disable(wm8994->num_supplies,
598 wm8994->supplies); 603 wm8994->supplies);
604err_regulator_free:
605 regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
599err: 606err:
600 mfd_remove_devices(wm8994->dev); 607 mfd_remove_devices(wm8994->dev);
601 return ret; 608 return ret;
@@ -604,10 +611,11 @@ err:
604static void wm8994_device_exit(struct wm8994 *wm8994) 611static void wm8994_device_exit(struct wm8994 *wm8994)
605{ 612{
606 pm_runtime_disable(wm8994->dev); 613 pm_runtime_disable(wm8994->dev);
607 mfd_remove_devices(wm8994->dev);
608 wm8994_irq_exit(wm8994); 614 wm8994_irq_exit(wm8994);
609 regulator_bulk_disable(wm8994->num_supplies, 615 regulator_bulk_disable(wm8994->num_supplies,
610 wm8994->supplies); 616 wm8994->supplies);
617 regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
618 mfd_remove_devices(wm8994->dev);
611} 619}
612 620
613static const struct of_device_id wm8994_of_match[] = { 621static const struct of_device_id wm8994_of_match[] = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 50a674be6655..df478ae72e23 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1058,6 +1058,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1058 spin_unlock_irqrestore(&host->irq_lock, irqflags); 1058 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1059 1059
1060 if (host->dma_ops->start(host, sg_len)) { 1060 if (host->dma_ops->start(host, sg_len)) {
1061 host->dma_ops->stop(host);
1061 /* We can't do DMA, try PIO for this one */ 1062 /* We can't do DMA, try PIO for this one */
1062 dev_dbg(host->dev, 1063 dev_dbg(host->dev,
1063 "%s: fall back to PIO mode for current transfer\n", 1064 "%s: fall back to PIO mode for current transfer\n",
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index fb71c866eacc..1bb11e4a9fe5 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
66 return ret; 66 return ret;
67 } 67 }
68 } 68 }
69 /*
70 * The DAT[3:0] line signal levels and the CMD line signal level are
71 * not compatible with standard SDHC register. The line signal levels
72 * DAT[7:0] are at bits 31:24 and the command line signal level is at
73 * bit 23. All other bits are the same as in the standard SDHC
74 * register.
75 */
76 if (spec_reg == SDHCI_PRESENT_STATE) {
77 ret = value & 0x000fffff;
78 ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
79 ret |= (value << 1) & SDHCI_CMD_LVL;
80 return ret;
81 }
82
69 ret = value; 83 ret = value;
70 return ret; 84 return ret;
71} 85}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 766df17fb7eb..2570455b219a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -73,6 +73,7 @@
73#define SDHCI_DATA_LVL_MASK 0x00F00000 73#define SDHCI_DATA_LVL_MASK 0x00F00000
74#define SDHCI_DATA_LVL_SHIFT 20 74#define SDHCI_DATA_LVL_SHIFT 20
75#define SDHCI_DATA_0_LVL_MASK 0x00100000 75#define SDHCI_DATA_0_LVL_MASK 0x00100000
76#define SDHCI_CMD_LVL 0x01000000
76 77
77#define SDHCI_HOST_CONTROL 0x28 78#define SDHCI_HOST_CONTROL 0x28
78#define SDHCI_CTRL_LED 0x01 79#define SDHCI_CTRL_LED 0x01
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 3eb7430dffbf..f8ff25c8ee2e 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -142,6 +142,9 @@ struct plx_pci_card {
142#define CTI_PCI_VENDOR_ID 0x12c4 142#define CTI_PCI_VENDOR_ID 0x12c4
143#define CTI_PCI_DEVICE_ID_CRG001 0x0900 143#define CTI_PCI_DEVICE_ID_CRG001 0x0900
144 144
145#define MOXA_PCI_VENDOR_ID 0x1393
146#define MOXA_PCI_DEVICE_ID 0x0100
147
145static void plx_pci_reset_common(struct pci_dev *pdev); 148static void plx_pci_reset_common(struct pci_dev *pdev);
146static void plx9056_pci_reset_common(struct pci_dev *pdev); 149static void plx9056_pci_reset_common(struct pci_dev *pdev);
147static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); 150static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = {
258 /* based on PLX9030 */ 261 /* based on PLX9030 */
259}; 262};
260 263
264static struct plx_pci_card_info plx_pci_card_info_moxa = {
265 "MOXA", 2,
266 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
267 {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} },
268 &plx_pci_reset_common
269 /* based on PLX9052 */
270};
271
261static const struct pci_device_id plx_pci_tbl[] = { 272static const struct pci_device_id plx_pci_tbl[] = {
262 { 273 {
263 /* Adlink PCI-7841/cPCI-7841 */ 274 /* Adlink PCI-7841/cPCI-7841 */
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = {
357 0, 0, 368 0, 0,
358 (kernel_ulong_t)&plx_pci_card_info_elcus 369 (kernel_ulong_t)&plx_pci_card_info_elcus
359 }, 370 },
371 {
372 /* moxa */
373 MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID,
374 PCI_ANY_ID, PCI_ANY_ID,
375 0, 0,
376 (kernel_ulong_t)&plx_pci_card_info_moxa
377 },
360 { 0,} 378 { 0,}
361}; 379};
362MODULE_DEVICE_TABLE(pci, plx_pci_tbl); 380MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index e8fc4952c6b0..2147678f0225 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -43,11 +43,22 @@ struct __packed pucan_command {
43 u16 args[3]; 43 u16 args[3];
44}; 44};
45 45
46#define PUCAN_TSLOW_BRP_BITS 10
47#define PUCAN_TSLOW_TSGEG1_BITS 8
48#define PUCAN_TSLOW_TSGEG2_BITS 7
49#define PUCAN_TSLOW_SJW_BITS 7
50
51#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1)
52#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1)
53#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1)
54#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1)
55
46/* uCAN TIMING_SLOW command fields */ 56/* uCAN TIMING_SLOW command fields */
47#define PUCAN_TSLOW_SJW_T(s, t) (((s) & 0xf) | ((!!(t)) << 7)) 57#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \
48#define PUCAN_TSLOW_TSEG2(t) ((t) & 0xf) 58 ((!!(t)) << 7))
49#define PUCAN_TSLOW_TSEG1(t) ((t) & 0x3f) 59#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK)
50#define PUCAN_TSLOW_BRP(b) ((b) & 0x3ff) 60#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK)
61#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK)
51 62
52struct __packed pucan_timing_slow { 63struct __packed pucan_timing_slow {
53 __le16 opcode_channel; 64 __le16 opcode_channel;
@@ -60,11 +71,21 @@ struct __packed pucan_timing_slow {
60 __le16 brp; /* BaudRate Prescaler */ 71 __le16 brp; /* BaudRate Prescaler */
61}; 72};
62 73
74#define PUCAN_TFAST_BRP_BITS 10
75#define PUCAN_TFAST_TSGEG1_BITS 5
76#define PUCAN_TFAST_TSGEG2_BITS 4
77#define PUCAN_TFAST_SJW_BITS 4
78
79#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1)
80#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1)
81#define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1)
82#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1)
83
63/* uCAN TIMING_FAST command fields */ 84/* uCAN TIMING_FAST command fields */
64#define PUCAN_TFAST_SJW(s) ((s) & 0x3) 85#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK)
65#define PUCAN_TFAST_TSEG2(t) ((t) & 0x7) 86#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK)
66#define PUCAN_TFAST_TSEG1(t) ((t) & 0xf) 87#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK)
67#define PUCAN_TFAST_BRP(b) ((b) & 0x3ff) 88#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK)
68 89
69struct __packed pucan_timing_fast { 90struct __packed pucan_timing_fast {
70 __le16 opcode_channel; 91 __le16 opcode_channel;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c06382cdfdfe..f3141ca56bc3 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = {
39 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, 39 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
40 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, 40 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
41 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, 41 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
42 {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
42 {} /* Terminating entry */ 43 {} /* Terminating entry */
43}; 44};
44 45
@@ -50,6 +51,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = {
50 &pcan_usb_pro, 51 &pcan_usb_pro,
51 &pcan_usb_fd, 52 &pcan_usb_fd,
52 &pcan_usb_pro_fd, 53 &pcan_usb_pro_fd,
54 &pcan_usb_x6,
53}; 55};
54 56
55/* 57/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 506fe506c9d3..3cbfb069893d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -27,6 +27,7 @@
27#define PCAN_USBPRO_PRODUCT_ID 0x000d 27#define PCAN_USBPRO_PRODUCT_ID 0x000d
28#define PCAN_USBPROFD_PRODUCT_ID 0x0011 28#define PCAN_USBPROFD_PRODUCT_ID 0x0011
29#define PCAN_USBFD_PRODUCT_ID 0x0012 29#define PCAN_USBFD_PRODUCT_ID 0x0012
30#define PCAN_USBX6_PRODUCT_ID 0x0014
30 31
31#define PCAN_USB_DRIVER_NAME "peak_usb" 32#define PCAN_USB_DRIVER_NAME "peak_usb"
32 33
@@ -90,6 +91,7 @@ extern const struct peak_usb_adapter pcan_usb;
90extern const struct peak_usb_adapter pcan_usb_pro; 91extern const struct peak_usb_adapter pcan_usb_pro;
91extern const struct peak_usb_adapter pcan_usb_fd; 92extern const struct peak_usb_adapter pcan_usb_fd;
92extern const struct peak_usb_adapter pcan_usb_pro_fd; 93extern const struct peak_usb_adapter pcan_usb_pro_fd;
94extern const struct peak_usb_adapter pcan_usb_x6;
93 95
94struct peak_time_ref { 96struct peak_time_ref {
95 struct timeval tv_host_0, tv_host; 97 struct timeval tv_host_0, tv_host;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index ce44a033f63b..304732550f0a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -993,24 +993,24 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
993static const struct can_bittiming_const pcan_usb_fd_const = { 993static const struct can_bittiming_const pcan_usb_fd_const = {
994 .name = "pcan_usb_fd", 994 .name = "pcan_usb_fd",
995 .tseg1_min = 1, 995 .tseg1_min = 1,
996 .tseg1_max = 64, 996 .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
997 .tseg2_min = 1, 997 .tseg2_min = 1,
998 .tseg2_max = 16, 998 .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
999 .sjw_max = 16, 999 .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
1000 .brp_min = 1, 1000 .brp_min = 1,
1001 .brp_max = 1024, 1001 .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
1002 .brp_inc = 1, 1002 .brp_inc = 1,
1003}; 1003};
1004 1004
1005static const struct can_bittiming_const pcan_usb_fd_data_const = { 1005static const struct can_bittiming_const pcan_usb_fd_data_const = {
1006 .name = "pcan_usb_fd", 1006 .name = "pcan_usb_fd",
1007 .tseg1_min = 1, 1007 .tseg1_min = 1,
1008 .tseg1_max = 16, 1008 .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
1009 .tseg2_min = 1, 1009 .tseg2_min = 1,
1010 .tseg2_max = 8, 1010 .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
1011 .sjw_max = 4, 1011 .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
1012 .brp_min = 1, 1012 .brp_min = 1,
1013 .brp_max = 1024, 1013 .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
1014 .brp_inc = 1, 1014 .brp_inc = 1,
1015}; 1015};
1016 1016
@@ -1065,24 +1065,24 @@ const struct peak_usb_adapter pcan_usb_fd = {
1065static const struct can_bittiming_const pcan_usb_pro_fd_const = { 1065static const struct can_bittiming_const pcan_usb_pro_fd_const = {
1066 .name = "pcan_usb_pro_fd", 1066 .name = "pcan_usb_pro_fd",
1067 .tseg1_min = 1, 1067 .tseg1_min = 1,
1068 .tseg1_max = 64, 1068 .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
1069 .tseg2_min = 1, 1069 .tseg2_min = 1,
1070 .tseg2_max = 16, 1070 .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
1071 .sjw_max = 16, 1071 .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
1072 .brp_min = 1, 1072 .brp_min = 1,
1073 .brp_max = 1024, 1073 .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
1074 .brp_inc = 1, 1074 .brp_inc = 1,
1075}; 1075};
1076 1076
1077static const struct can_bittiming_const pcan_usb_pro_fd_data_const = { 1077static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
1078 .name = "pcan_usb_pro_fd", 1078 .name = "pcan_usb_pro_fd",
1079 .tseg1_min = 1, 1079 .tseg1_min = 1,
1080 .tseg1_max = 16, 1080 .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
1081 .tseg2_min = 1, 1081 .tseg2_min = 1,
1082 .tseg2_max = 8, 1082 .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
1083 .sjw_max = 4, 1083 .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
1084 .brp_min = 1, 1084 .brp_min = 1,
1085 .brp_max = 1024, 1085 .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
1086 .brp_inc = 1, 1086 .brp_inc = 1,
1087}; 1087};
1088 1088
@@ -1132,3 +1132,75 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
1132 1132
1133 .do_get_berr_counter = pcan_usb_fd_get_berr_counter, 1133 .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
1134}; 1134};
1135
1136/* describes the PCAN-USB X6 adapter */
1137static const struct can_bittiming_const pcan_usb_x6_const = {
1138 .name = "pcan_usb_x6",
1139 .tseg1_min = 1,
1140 .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
1141 .tseg2_min = 1,
1142 .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
1143 .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
1144 .brp_min = 1,
1145 .brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
1146 .brp_inc = 1,
1147};
1148
1149static const struct can_bittiming_const pcan_usb_x6_data_const = {
1150 .name = "pcan_usb_x6",
1151 .tseg1_min = 1,
1152 .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
1153 .tseg2_min = 1,
1154 .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
1155 .sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
1156 .brp_min = 1,
1157 .brp_max = (1 << PUCAN_TFAST_BRP_BITS),
1158 .brp_inc = 1,
1159};
1160
1161const struct peak_usb_adapter pcan_usb_x6 = {
1162 .name = "PCAN-USB X6",
1163 .device_id = PCAN_USBX6_PRODUCT_ID,
1164 .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
1165 .ctrlmode_supported = CAN_CTRLMODE_FD |
1166 CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
1167 .clock = {
1168 .freq = PCAN_UFD_CRYSTAL_HZ,
1169 },
1170 .bittiming_const = &pcan_usb_x6_const,
1171 .data_bittiming_const = &pcan_usb_x6_data_const,
1172
1173 /* size of device private data */
1174 .sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
1175
1176 /* timestamps usage */
1177 .ts_used_bits = 32,
1178 .ts_period = 1000000, /* calibration period in ts. */
1179 .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
1180 .us_per_ts_shift = 0,
1181
1182 /* give here messages in/out endpoints */
1183 .ep_msg_in = PCAN_USBPRO_EP_MSGIN,
1184 .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1},
1185
1186 /* size of rx/tx usb buffers */
1187 .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
1188 .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
1189
1190 /* device callbacks */
1191 .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
1192 .dev_init = pcan_usb_fd_init,
1193
1194 .dev_exit = pcan_usb_fd_exit,
1195 .dev_free = pcan_usb_fd_free,
1196 .dev_set_bus = pcan_usb_fd_set_bus,
1197 .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
1198 .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
1199 .dev_decode_buf = pcan_usb_fd_decode_buf,
1200 .dev_start = pcan_usb_fd_start,
1201 .dev_stop = pcan_usb_fd_stop,
1202 .dev_restart_async = pcan_usb_fd_restart_async,
1203 .dev_encode_msg = pcan_usb_fd_encode_msg,
1204
1205 .do_get_berr_counter = pcan_usb_fd_get_berr_counter,
1206};
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 7717b19dc806..947adda3397d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
962 962
963 vl->members |= BIT(port) | BIT(cpu_port); 963 vl->members |= BIT(port) | BIT(cpu_port);
964 if (untagged) 964 if (untagged)
965 vl->untag |= BIT(port) | BIT(cpu_port); 965 vl->untag |= BIT(port);
966 else 966 else
967 vl->untag &= ~(BIT(port) | BIT(cpu_port)); 967 vl->untag &= ~BIT(port);
968 vl->untag &= ~BIT(cpu_port);
968 969
969 b53_set_vlan_entry(dev, vid, vl); 970 b53_set_vlan_entry(dev, vid, vl);
970 b53_fast_age_vlan(dev, vid); 971 b53_fast_age_vlan(dev, vid);
@@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
973 if (pvid) { 974 if (pvid) {
974 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), 975 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
975 vlan->vid_end); 976 vlan->vid_end);
976 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
977 vlan->vid_end);
978 b53_fast_age_vlan(dev, vid); 977 b53_fast_age_vlan(dev, vid);
979 } 978 }
980} 979}
@@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
984{ 983{
985 struct b53_device *dev = ds->priv; 984 struct b53_device *dev = ds->priv;
986 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; 985 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
987 unsigned int cpu_port = dev->cpu_port;
988 struct b53_vlan *vl; 986 struct b53_vlan *vl;
989 u16 vid; 987 u16 vid;
990 u16 pvid; 988 u16 pvid;
@@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
997 b53_get_vlan_entry(dev, vid, vl); 995 b53_get_vlan_entry(dev, vid, vl);
998 996
999 vl->members &= ~BIT(port); 997 vl->members &= ~BIT(port);
1000 if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
1001 vl->members = 0;
1002 998
1003 if (pvid == vid) { 999 if (pvid == vid) {
1004 if (is5325(dev) || is5365(dev)) 1000 if (is5325(dev) || is5365(dev))
@@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
1007 pvid = 0; 1003 pvid = 0;
1008 } 1004 }
1009 1005
1010 if (untagged) { 1006 if (untagged)
1011 vl->untag &= ~(BIT(port)); 1007 vl->untag &= ~(BIT(port));
1012 if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
1013 vl->untag = 0;
1014 }
1015 1008
1016 b53_set_vlan_entry(dev, vid, vl); 1009 b53_set_vlan_entry(dev, vid, vl);
1017 b53_fast_age_vlan(dev, vid); 1010 b53_fast_age_vlan(dev, vid);
1018 } 1011 }
1019 1012
1020 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); 1013 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
1021 b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
1022 b53_fast_age_vlan(dev, pvid); 1014 b53_fast_age_vlan(dev, pvid);
1023 1015
1024 return 0; 1016 return 0;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e3ee27ce13dd..9ec33b51a0ed 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -588,6 +588,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
588 struct phy_device *phydev) 588 struct phy_device *phydev)
589{ 589{
590 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 590 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
591 struct ethtool_eee *p = &priv->port_sts[port].eee;
591 u32 id_mode_dis = 0, port_mode; 592 u32 id_mode_dis = 0, port_mode;
592 const char *str = NULL; 593 const char *str = NULL;
593 u32 reg; 594 u32 reg;
@@ -662,6 +663,9 @@ force_link:
662 reg |= DUPLX_MODE; 663 reg |= DUPLX_MODE;
663 664
664 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); 665 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
666
667 if (!phydev->is_pseudo_fixed_link)
668 p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
665} 669}
666 670
667static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, 671static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index bda31f308cc2..a0eee7218695 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -400,12 +400,6 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
400 400
401 skb_put(skb, pktlength); 401 skb_put(skb, pktlength);
402 402
403 /* make cache consistent with receive packet buffer */
404 dma_sync_single_for_cpu(priv->device,
405 priv->rx_ring[entry].dma_addr,
406 priv->rx_ring[entry].len,
407 DMA_FROM_DEVICE);
408
409 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, 403 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
410 priv->rx_ring[entry].len, DMA_FROM_DEVICE); 404 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
411 405
@@ -469,7 +463,6 @@ static int tse_tx_complete(struct altera_tse_private *priv)
469 463
470 if (unlikely(netif_queue_stopped(priv->dev) && 464 if (unlikely(netif_queue_stopped(priv->dev) &&
471 tse_tx_avail(priv) > TSE_TX_THRESH(priv))) { 465 tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
472 netif_tx_lock(priv->dev);
473 if (netif_queue_stopped(priv->dev) && 466 if (netif_queue_stopped(priv->dev) &&
474 tse_tx_avail(priv) > TSE_TX_THRESH(priv)) { 467 tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
475 if (netif_msg_tx_done(priv)) 468 if (netif_msg_tx_done(priv))
@@ -477,7 +470,6 @@ static int tse_tx_complete(struct altera_tse_private *priv)
477 __func__); 470 __func__);
478 netif_wake_queue(priv->dev); 471 netif_wake_queue(priv->dev);
479 } 472 }
480 netif_tx_unlock(priv->dev);
481 } 473 }
482 474
483 spin_unlock(&priv->tx_lock); 475 spin_unlock(&priv->tx_lock);
@@ -592,10 +584,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
592 buffer->dma_addr = dma_addr; 584 buffer->dma_addr = dma_addr;
593 buffer->len = nopaged_len; 585 buffer->len = nopaged_len;
594 586
595 /* Push data out of the cache hierarchy into main memory */
596 dma_sync_single_for_device(priv->device, buffer->dma_addr,
597 buffer->len, DMA_TO_DEVICE);
598
599 priv->dmaops->tx_buffer(priv, buffer); 587 priv->dmaops->tx_buffer(priv, buffer);
600 588
601 skb_tx_timestamp(skb); 589 skb_tx_timestamp(skb);
@@ -819,6 +807,8 @@ static int init_phy(struct net_device *dev)
819 807
820 if (!phydev) { 808 if (!phydev) {
821 netdev_err(dev, "Could not find the PHY\n"); 809 netdev_err(dev, "Could not find the PHY\n");
810 if (fixed_link)
811 of_phy_deregister_fixed_link(priv->device->of_node);
822 return -ENODEV; 812 return -ENODEV;
823 } 813 }
824 814
@@ -1545,10 +1535,15 @@ err_free_netdev:
1545static int altera_tse_remove(struct platform_device *pdev) 1535static int altera_tse_remove(struct platform_device *pdev)
1546{ 1536{
1547 struct net_device *ndev = platform_get_drvdata(pdev); 1537 struct net_device *ndev = platform_get_drvdata(pdev);
1538 struct altera_tse_private *priv = netdev_priv(ndev);
1548 1539
1549 if (ndev->phydev) 1540 if (ndev->phydev) {
1550 phy_disconnect(ndev->phydev); 1541 phy_disconnect(ndev->phydev);
1551 1542
1543 if (of_phy_is_fixed_link(priv->device->of_node))
1544 of_phy_deregister_fixed_link(priv->device->of_node);
1545 }
1546
1552 platform_set_drvdata(pdev, NULL); 1547 platform_set_drvdata(pdev, NULL);
1553 altera_tse_mdio_destroy(ndev); 1548 altera_tse_mdio_destroy(ndev);
1554 unregister_netdev(ndev); 1549 unregister_netdev(ndev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 9de078819aa6..4f7635178200 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -829,7 +829,7 @@ static int xgbe_remove(struct platform_device *pdev)
829 return 0; 829 return 0;
830} 830}
831 831
832#ifdef CONFIG_PM 832#ifdef CONFIG_PM_SLEEP
833static int xgbe_suspend(struct device *dev) 833static int xgbe_suspend(struct device *dev)
834{ 834{
835 struct net_device *netdev = dev_get_drvdata(dev); 835 struct net_device *netdev = dev_get_drvdata(dev);
@@ -874,7 +874,7 @@ static int xgbe_resume(struct device *dev)
874 874
875 return ret; 875 return ret;
876} 876}
877#endif /* CONFIG_PM */ 877#endif /* CONFIG_PM_SLEEP */
878 878
879#ifdef CONFIG_ACPI 879#ifdef CONFIG_ACPI
880static const struct acpi_device_id xgbe_acpi_match[] = { 880static const struct acpi_device_id xgbe_acpi_match[] = {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index c481f104a8fe..5390ae89136c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
204 return num_msgs; 204 return num_msgs;
205} 205}
206 206
207static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
208{
209 u32 data = 0x7777;
210
211 xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
212 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
213 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
214 xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
215 xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
216}
217
218void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, 207void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
219 struct xgene_enet_pdata *pdata, 208 struct xgene_enet_pdata *pdata,
220 enum xgene_enet_err_code status) 209 enum xgene_enet_err_code status)
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = {
929 .clear = xgene_enet_clear_ring, 918 .clear = xgene_enet_clear_ring,
930 .wr_cmd = xgene_enet_wr_cmd, 919 .wr_cmd = xgene_enet_wr_cmd,
931 .len = xgene_enet_ring_len, 920 .len = xgene_enet_ring_len,
932 .coalesce = xgene_enet_setup_coalescing,
933}; 921};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8456337a237d..06e598c8bc16 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -55,8 +55,10 @@ enum xgene_enet_rm {
55#define PREFETCH_BUF_EN BIT(21) 55#define PREFETCH_BUF_EN BIT(21)
56#define CSR_RING_ID_BUF 0x000c 56#define CSR_RING_ID_BUF 0x000c
57#define CSR_PBM_COAL 0x0014 57#define CSR_PBM_COAL 0x0014
58#define CSR_PBM_CTICK0 0x0018
58#define CSR_PBM_CTICK1 0x001c 59#define CSR_PBM_CTICK1 0x001c
59#define CSR_PBM_CTICK2 0x0020 60#define CSR_PBM_CTICK2 0x0020
61#define CSR_PBM_CTICK3 0x0024
60#define CSR_THRESHOLD0_SET1 0x0030 62#define CSR_THRESHOLD0_SET1 0x0030
61#define CSR_THRESHOLD1_SET1 0x0034 63#define CSR_THRESHOLD1_SET1 0x0034
62#define CSR_RING_NE_INT_MODE 0x017c 64#define CSR_RING_NE_INT_MODE 0x017c
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 429f18fc5503..8158d4698734 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
1188 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); 1188 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1189 } 1189 }
1190 1190
1191 pdata->ring_ops->coalesce(pdata->tx_ring[0]); 1191 if (pdata->ring_ops->coalesce)
1192 pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1192 pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; 1193 pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1193 1194
1194 return 0; 1195 return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
index 2b76732add5d..af51dd5844ce 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
30 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); 30 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
31 ring_cfg[3] |= SET_BIT(X2_DEQINTEN); 31 ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
32 } 32 }
33 ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1); 33 ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);
34 34
35 addr >>= 8; 35 addr >>= 8;
36 ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); 36 ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
192 192
193static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) 193static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
194{ 194{
195 u32 data = 0x7777; 195 u32 data = 0x77777777;
196 196
197 xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); 197 xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
198 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
198 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); 199 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
199 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); 200 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
200 xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); 201 xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
201 xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); 202 xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
203 xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
202} 204}
203 205
204struct xgene_ring_ops xgene_ring2_ops = { 206struct xgene_ring_ops xgene_ring2_ops = {
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b0da9693f28a..be865b4dada2 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
460 if (ndev->flags & IFF_ALLMULTI) { 460 if (ndev->flags & IFF_ALLMULTI) {
461 arc_reg_set(priv, R_LAFL, ~0); 461 arc_reg_set(priv, R_LAFL, ~0);
462 arc_reg_set(priv, R_LAFH, ~0); 462 arc_reg_set(priv, R_LAFH, ~0);
463 } else { 463 } else if (ndev->flags & IFF_MULTICAST) {
464 struct netdev_hw_addr *ha; 464 struct netdev_hw_addr *ha;
465 unsigned int filter[2] = { 0, 0 }; 465 unsigned int filter[2] = { 0, 0 };
466 int bit; 466 int bit;
@@ -472,6 +472,9 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
472 472
473 arc_reg_set(priv, R_LAFL, filter[0]); 473 arc_reg_set(priv, R_LAFL, filter[0]);
474 arc_reg_set(priv, R_LAFH, filter[1]); 474 arc_reg_set(priv, R_LAFH, filter[1]);
475 } else {
476 arc_reg_set(priv, R_LAFL, 0);
477 arc_reg_set(priv, R_LAFH, 0);
475 } 478 }
476 } 479 }
477} 480}
@@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface)
764 ndev->netdev_ops = &arc_emac_netdev_ops; 767 ndev->netdev_ops = &arc_emac_netdev_ops;
765 ndev->ethtool_ops = &arc_emac_ethtool_ops; 768 ndev->ethtool_ops = &arc_emac_ethtool_ops;
766 ndev->watchdog_timeo = TX_TIMEOUT; 769 ndev->watchdog_timeo = TX_TIMEOUT;
767 /* FIXME :: no multicast support yet */
768 ndev->flags &= ~IFF_MULTICAST;
769 770
770 priv = netdev_priv(ndev); 771 priv = netdev_priv(ndev);
771 priv->dev = dev; 772 priv->dev = dev;
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 00c38bf151e6..e078d8da978c 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1466,12 +1466,12 @@ static int nb8800_probe(struct platform_device *pdev)
1466 1466
1467 ret = nb8800_hw_init(dev); 1467 ret = nb8800_hw_init(dev);
1468 if (ret) 1468 if (ret)
1469 goto err_free_bus; 1469 goto err_deregister_fixed_link;
1470 1470
1471 if (ops && ops->init) { 1471 if (ops && ops->init) {
1472 ret = ops->init(dev); 1472 ret = ops->init(dev);
1473 if (ret) 1473 if (ret)
1474 goto err_free_bus; 1474 goto err_deregister_fixed_link;
1475 } 1475 }
1476 1476
1477 dev->netdev_ops = &nb8800_netdev_ops; 1477 dev->netdev_ops = &nb8800_netdev_ops;
@@ -1504,6 +1504,9 @@ static int nb8800_probe(struct platform_device *pdev)
1504 1504
1505err_free_dma: 1505err_free_dma:
1506 nb8800_dma_free(dev); 1506 nb8800_dma_free(dev);
1507err_deregister_fixed_link:
1508 if (of_phy_is_fixed_link(pdev->dev.of_node))
1509 of_phy_deregister_fixed_link(pdev->dev.of_node);
1507err_free_bus: 1510err_free_bus:
1508 of_node_put(priv->phy_node); 1511 of_node_put(priv->phy_node);
1509 mdiobus_unregister(bus); 1512 mdiobus_unregister(bus);
@@ -1521,6 +1524,8 @@ static int nb8800_remove(struct platform_device *pdev)
1521 struct nb8800_priv *priv = netdev_priv(ndev); 1524 struct nb8800_priv *priv = netdev_priv(ndev);
1522 1525
1523 unregister_netdev(ndev); 1526 unregister_netdev(ndev);
1527 if (of_phy_is_fixed_link(pdev->dev.of_node))
1528 of_phy_deregister_fixed_link(pdev->dev.of_node);
1524 of_node_put(priv->phy_node); 1529 of_node_put(priv->phy_node);
1525 1530
1526 mdiobus_unregister(priv->mii_bus); 1531 mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c3354b9941d1..25d1eb4933d0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1755,13 +1755,13 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1755 if (priv->irq0 <= 0 || priv->irq1 <= 0) { 1755 if (priv->irq0 <= 0 || priv->irq1 <= 0) {
1756 dev_err(&pdev->dev, "invalid interrupts\n"); 1756 dev_err(&pdev->dev, "invalid interrupts\n");
1757 ret = -EINVAL; 1757 ret = -EINVAL;
1758 goto err; 1758 goto err_free_netdev;
1759 } 1759 }
1760 1760
1761 priv->base = devm_ioremap_resource(&pdev->dev, r); 1761 priv->base = devm_ioremap_resource(&pdev->dev, r);
1762 if (IS_ERR(priv->base)) { 1762 if (IS_ERR(priv->base)) {
1763 ret = PTR_ERR(priv->base); 1763 ret = PTR_ERR(priv->base);
1764 goto err; 1764 goto err_free_netdev;
1765 } 1765 }
1766 1766
1767 priv->netdev = dev; 1767 priv->netdev = dev;
@@ -1779,7 +1779,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1779 ret = of_phy_register_fixed_link(dn); 1779 ret = of_phy_register_fixed_link(dn);
1780 if (ret) { 1780 if (ret) {
1781 dev_err(&pdev->dev, "failed to register fixed PHY\n"); 1781 dev_err(&pdev->dev, "failed to register fixed PHY\n");
1782 goto err; 1782 goto err_free_netdev;
1783 } 1783 }
1784 1784
1785 priv->phy_dn = dn; 1785 priv->phy_dn = dn;
@@ -1821,7 +1821,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1821 ret = register_netdev(dev); 1821 ret = register_netdev(dev);
1822 if (ret) { 1822 if (ret) {
1823 dev_err(&pdev->dev, "failed to register net_device\n"); 1823 dev_err(&pdev->dev, "failed to register net_device\n");
1824 goto err; 1824 goto err_deregister_fixed_link;
1825 } 1825 }
1826 1826
1827 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; 1827 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -1832,7 +1832,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1832 priv->base, priv->irq0, priv->irq1, txq, rxq); 1832 priv->base, priv->irq0, priv->irq1, txq, rxq);
1833 1833
1834 return 0; 1834 return 0;
1835err: 1835
1836err_deregister_fixed_link:
1837 if (of_phy_is_fixed_link(dn))
1838 of_phy_deregister_fixed_link(dn);
1839err_free_netdev:
1836 free_netdev(dev); 1840 free_netdev(dev);
1837 return ret; 1841 return ret;
1838} 1842}
@@ -1840,11 +1844,14 @@ err:
1840static int bcm_sysport_remove(struct platform_device *pdev) 1844static int bcm_sysport_remove(struct platform_device *pdev)
1841{ 1845{
1842 struct net_device *dev = dev_get_drvdata(&pdev->dev); 1846 struct net_device *dev = dev_get_drvdata(&pdev->dev);
1847 struct device_node *dn = pdev->dev.of_node;
1843 1848
1844 /* Not much to do, ndo_close has been called 1849 /* Not much to do, ndo_close has been called
1845 * and we use managed allocations 1850 * and we use managed allocations
1846 */ 1851 */
1847 unregister_netdev(dev); 1852 unregister_netdev(dev);
1853 if (of_phy_is_fixed_link(dn))
1854 of_phy_deregister_fixed_link(dn);
1848 free_netdev(dev); 1855 free_netdev(dev);
1849 dev_set_drvdata(&pdev->dev, NULL); 1856 dev_set_drvdata(&pdev->dev, NULL);
1850 1857
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 31ca204b38d2..49f4cafe5438 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
307 u32 ctl; 307 u32 ctl;
308 308
309 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); 309 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
310
311 /* preserve ONLY bits 16-17 from current hardware value */
312 ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
313
310 if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { 314 if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
311 ctl &= ~BGMAC_DMA_RX_BL_MASK; 315 ctl &= ~BGMAC_DMA_RX_BL_MASK;
312 ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; 316 ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
317 ctl &= ~BGMAC_DMA_RX_PT_MASK; 321 ctl &= ~BGMAC_DMA_RX_PT_MASK;
318 ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; 322 ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
319 } 323 }
320 ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
321 ctl |= BGMAC_DMA_RX_ENABLE; 324 ctl |= BGMAC_DMA_RX_ENABLE;
322 ctl |= BGMAC_DMA_RX_PARITY_DISABLE; 325 ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
323 ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; 326 ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
@@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac)
1046 1049
1047 mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> 1050 mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
1048 BGMAC_DS_MM_SHIFT; 1051 BGMAC_DS_MM_SHIFT;
1049 if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0) 1052 if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
1050 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); 1053 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
1051 if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) 1054 if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
1052 bgmac_cco_ctl_maskset(bgmac, 1, ~0, 1055 bgmac_cco_ctl_maskset(bgmac, 1, ~0,
1053 BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); 1056 BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
1054 1057
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b3791b394715..1f7034d739b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -49,6 +49,7 @@
49#include <linux/firmware.h> 49#include <linux/firmware.h>
50#include <linux/log2.h> 50#include <linux/log2.h>
51#include <linux/aer.h> 51#include <linux/aer.h>
52#include <linux/crash_dump.h>
52 53
53#if IS_ENABLED(CONFIG_CNIC) 54#if IS_ENABLED(CONFIG_CNIC)
54#define BCM_CNIC 1 55#define BCM_CNIC 1
@@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp)
4764 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); 4765 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4765} 4766}
4766 4767
4767static int 4768static void
4768bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) 4769bnx2_wait_dma_complete(struct bnx2 *bp)
4769{ 4770{
4770 u32 val; 4771 u32 val;
4771 int i, rc = 0; 4772 int i;
4772 u8 old_port;
4773 4773
4774 /* Wait for the current PCI transaction to complete before 4774 /*
4775 * issuing a reset. */ 4775 * Wait for the current PCI transaction to complete before
4776 * issuing a reset.
4777 */
4776 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || 4778 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4777 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { 4779 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4778 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, 4780 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
@@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4796 } 4798 }
4797 } 4799 }
4798 4800
4801 return;
4802}
4803
4804
4805static int
4806bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4807{
4808 u32 val;
4809 int i, rc = 0;
4810 u8 old_port;
4811
4812 /* Wait for the current PCI transaction to complete before
4813 * issuing a reset. */
4814 bnx2_wait_dma_complete(bp);
4815
4799 /* Wait for the firmware to tell us it is ok to issue a reset. */ 4816 /* Wait for the firmware to tell us it is ok to issue a reset. */
4800 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); 4817 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4801 4818
@@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev)
6361 struct bnx2 *bp = netdev_priv(dev); 6378 struct bnx2 *bp = netdev_priv(dev);
6362 int rc; 6379 int rc;
6363 6380
6381 rc = bnx2_request_firmware(bp);
6382 if (rc < 0)
6383 goto out;
6384
6364 netif_carrier_off(dev); 6385 netif_carrier_off(dev);
6365 6386
6366 bnx2_disable_int(bp); 6387 bnx2_disable_int(bp);
@@ -6429,6 +6450,7 @@ open_err:
6429 bnx2_free_irq(bp); 6450 bnx2_free_irq(bp);
6430 bnx2_free_mem(bp); 6451 bnx2_free_mem(bp);
6431 bnx2_del_napi(bp); 6452 bnx2_del_napi(bp);
6453 bnx2_release_firmware(bp);
6432 goto out; 6454 goto out;
6433} 6455}
6434 6456
@@ -8575,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8575 8597
8576 pci_set_drvdata(pdev, dev); 8598 pci_set_drvdata(pdev, dev);
8577 8599
8578 rc = bnx2_request_firmware(bp); 8600 /*
8579 if (rc < 0) 8601 * In-flight DMA from 1st kernel could continue going in kdump kernel.
8580 goto error; 8602 * New io-page table has been created before bnx2 does reset at open stage.
8581 8603 * We have to wait for the in-flight DMA to complete to avoid it look up
8604 * into the newly created io-page table.
8605 */
8606 if (is_kdump_kernel())
8607 bnx2_wait_dma_complete(bp);
8582 8608
8583 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
8584 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); 8609 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8585 8610
8586 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 8611 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8613,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8613 return 0; 8638 return 0;
8614 8639
8615error: 8640error:
8616 bnx2_release_firmware(bp);
8617 pci_iounmap(pdev, bp->regview); 8641 pci_iounmap(pdev, bp->regview);
8618 pci_release_regions(pdev); 8642 pci_release_regions(pdev);
8619 pci_disable_device(pdev); 8643 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a9f9f3738022..ee1a803aa11a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1811,6 +1811,9 @@ static int bnxt_busy_poll(struct napi_struct *napi)
1811 if (atomic_read(&bp->intr_sem) != 0) 1811 if (atomic_read(&bp->intr_sem) != 0)
1812 return LL_FLUSH_FAILED; 1812 return LL_FLUSH_FAILED;
1813 1813
1814 if (!bp->link_info.link_up)
1815 return LL_FLUSH_FAILED;
1816
1814 if (!bnxt_lock_poll(bnapi)) 1817 if (!bnxt_lock_poll(bnapi))
1815 return LL_FLUSH_BUSY; 1818 return LL_FLUSH_BUSY;
1816 1819
@@ -3210,11 +3213,17 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3210 goto err_out; 3213 goto err_out;
3211 } 3214 }
3212 3215
3213 if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN) 3216 switch (tunnel_type) {
3217 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3214 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; 3218 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3215 3219 break;
3216 else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE) 3220 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3217 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; 3221 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3222 break;
3223 default:
3224 break;
3225 }
3226
3218err_out: 3227err_out:
3219 mutex_unlock(&bp->hwrm_cmd_lock); 3228 mutex_unlock(&bp->hwrm_cmd_lock);
3220 return rc; 3229 return rc;
@@ -4934,6 +4943,10 @@ static void bnxt_del_napi(struct bnxt *bp)
4934 napi_hash_del(&bnapi->napi); 4943 napi_hash_del(&bnapi->napi);
4935 netif_napi_del(&bnapi->napi); 4944 netif_napi_del(&bnapi->napi);
4936 } 4945 }
4946 /* We called napi_hash_del() before netif_napi_del(), we need
4947 * to respect an RCU grace period before freeing napi structures.
4948 */
4949 synchronize_net();
4937} 4950}
4938 4951
4939static void bnxt_init_napi(struct bnxt *bp) 4952static void bnxt_init_napi(struct bnxt *bp)
@@ -6309,6 +6322,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6309 struct tc_to_netdev *ntc) 6322 struct tc_to_netdev *ntc)
6310{ 6323{
6311 struct bnxt *bp = netdev_priv(dev); 6324 struct bnxt *bp = netdev_priv(dev);
6325 bool sh = false;
6312 u8 tc; 6326 u8 tc;
6313 6327
6314 if (ntc->type != TC_SETUP_MQPRIO) 6328 if (ntc->type != TC_SETUP_MQPRIO)
@@ -6325,12 +6339,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6325 if (netdev_get_num_tc(dev) == tc) 6339 if (netdev_get_num_tc(dev) == tc)
6326 return 0; 6340 return 0;
6327 6341
6342 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6343 sh = true;
6344
6328 if (tc) { 6345 if (tc) {
6329 int max_rx_rings, max_tx_rings, rc; 6346 int max_rx_rings, max_tx_rings, rc;
6330 bool sh = false;
6331
6332 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6333 sh = true;
6334 6347
6335 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); 6348 rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6336 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) 6349 if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
@@ -6348,7 +6361,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
6348 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 6361 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
6349 netdev_reset_tc(dev); 6362 netdev_reset_tc(dev);
6350 } 6363 }
6351 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); 6364 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
6365 bp->tx_nr_rings + bp->rx_nr_rings;
6352 bp->num_stat_ctxs = bp->cp_nr_rings; 6366 bp->num_stat_ctxs = bp->cp_nr_rings;
6353 6367
6354 if (netif_running(bp->dev)) 6368 if (netif_running(bp->dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index ec6cd18842c3..60e2af8678bd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
774 774
775 if (vf->flags & BNXT_VF_LINK_UP) { 775 if (vf->flags & BNXT_VF_LINK_UP) {
776 /* if physical link is down, force link up on VF */ 776 /* if physical link is down, force link up on VF */
777 if (phy_qcfg_resp.link == 777 if (phy_qcfg_resp.link !=
778 PORT_PHY_QCFG_RESP_LINK_NO_LINK) { 778 PORT_PHY_QCFG_RESP_LINK_LINK) {
779 phy_qcfg_resp.link = 779 phy_qcfg_resp.link =
780 PORT_PHY_QCFG_RESP_LINK_LINK; 780 PORT_PHY_QCFG_RESP_LINK_LINK;
781 phy_qcfg_resp.link_speed = cpu_to_le16( 781 phy_qcfg_resp.link_speed = cpu_to_le16(
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 4464bc5db934..a4e60e56c14f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1172,6 +1172,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1172 struct bcmgenet_tx_ring *ring) 1172 struct bcmgenet_tx_ring *ring)
1173{ 1173{
1174 struct bcmgenet_priv *priv = netdev_priv(dev); 1174 struct bcmgenet_priv *priv = netdev_priv(dev);
1175 struct device *kdev = &priv->pdev->dev;
1175 struct enet_cb *tx_cb_ptr; 1176 struct enet_cb *tx_cb_ptr;
1176 struct netdev_queue *txq; 1177 struct netdev_queue *txq;
1177 unsigned int pkts_compl = 0; 1178 unsigned int pkts_compl = 0;
@@ -1199,13 +1200,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1199 if (tx_cb_ptr->skb) { 1200 if (tx_cb_ptr->skb) {
1200 pkts_compl++; 1201 pkts_compl++;
1201 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; 1202 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
1202 dma_unmap_single(&dev->dev, 1203 dma_unmap_single(kdev,
1203 dma_unmap_addr(tx_cb_ptr, dma_addr), 1204 dma_unmap_addr(tx_cb_ptr, dma_addr),
1204 dma_unmap_len(tx_cb_ptr, dma_len), 1205 dma_unmap_len(tx_cb_ptr, dma_len),
1205 DMA_TO_DEVICE); 1206 DMA_TO_DEVICE);
1206 bcmgenet_free_cb(tx_cb_ptr); 1207 bcmgenet_free_cb(tx_cb_ptr);
1207 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { 1208 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1208 dma_unmap_page(&dev->dev, 1209 dma_unmap_page(kdev,
1209 dma_unmap_addr(tx_cb_ptr, dma_addr), 1210 dma_unmap_addr(tx_cb_ptr, dma_addr),
1210 dma_unmap_len(tx_cb_ptr, dma_len), 1211 dma_unmap_len(tx_cb_ptr, dma_len),
1211 DMA_TO_DEVICE); 1212 DMA_TO_DEVICE);
@@ -1775,6 +1776,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1775 1776
1776static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) 1777static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1777{ 1778{
1779 struct device *kdev = &priv->pdev->dev;
1778 struct enet_cb *cb; 1780 struct enet_cb *cb;
1779 int i; 1781 int i;
1780 1782
@@ -1782,7 +1784,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1782 cb = &priv->rx_cbs[i]; 1784 cb = &priv->rx_cbs[i];
1783 1785
1784 if (dma_unmap_addr(cb, dma_addr)) { 1786 if (dma_unmap_addr(cb, dma_addr)) {
1785 dma_unmap_single(&priv->dev->dev, 1787 dma_unmap_single(kdev,
1786 dma_unmap_addr(cb, dma_addr), 1788 dma_unmap_addr(cb, dma_addr),
1787 priv->rx_buf_len, DMA_FROM_DEVICE); 1789 priv->rx_buf_len, DMA_FROM_DEVICE);
1788 dma_unmap_addr_set(cb, dma_addr, 0); 1790 dma_unmap_addr_set(cb, dma_addr, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 457c3bc8cfff..e87607621e62 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -542,8 +542,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
542 /* Make sure we initialize MoCA PHYs with a link down */ 542 /* Make sure we initialize MoCA PHYs with a link down */
543 if (phy_mode == PHY_INTERFACE_MODE_MOCA) { 543 if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
544 phydev = of_phy_find_device(dn); 544 phydev = of_phy_find_device(dn);
545 if (phydev) 545 if (phydev) {
546 phydev->link = 0; 546 phydev->link = 0;
547 put_device(&phydev->mdio.dev);
548 }
547 } 549 }
548 550
549 return 0; 551 return 0;
@@ -625,6 +627,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
625int bcmgenet_mii_init(struct net_device *dev) 627int bcmgenet_mii_init(struct net_device *dev)
626{ 628{
627 struct bcmgenet_priv *priv = netdev_priv(dev); 629 struct bcmgenet_priv *priv = netdev_priv(dev);
630 struct device_node *dn = priv->pdev->dev.of_node;
628 int ret; 631 int ret;
629 632
630 ret = bcmgenet_mii_alloc(priv); 633 ret = bcmgenet_mii_alloc(priv);
@@ -638,6 +641,8 @@ int bcmgenet_mii_init(struct net_device *dev)
638 return 0; 641 return 0;
639 642
640out: 643out:
644 if (of_phy_is_fixed_link(dn))
645 of_phy_deregister_fixed_link(dn);
641 of_node_put(priv->phy_dn); 646 of_node_put(priv->phy_dn);
642 mdiobus_unregister(priv->mii_bus); 647 mdiobus_unregister(priv->mii_bus);
643 mdiobus_free(priv->mii_bus); 648 mdiobus_free(priv->mii_bus);
@@ -647,7 +652,10 @@ out:
647void bcmgenet_mii_exit(struct net_device *dev) 652void bcmgenet_mii_exit(struct net_device *dev)
648{ 653{
649 struct bcmgenet_priv *priv = netdev_priv(dev); 654 struct bcmgenet_priv *priv = netdev_priv(dev);
655 struct device_node *dn = priv->pdev->dev.of_node;
650 656
657 if (of_phy_is_fixed_link(dn))
658 of_phy_deregister_fixed_link(dn);
651 of_node_put(priv->phy_dn); 659 of_node_put(priv->phy_dn);
652 mdiobus_unregister(priv->mii_bus); 660 mdiobus_unregister(priv->mii_bus);
653 mdiobus_free(priv->mii_bus); 661 mdiobus_free(priv->mii_bus);
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index f9df4b5ae90e..f42f672b0e7e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
177 return 0; 177 return 0;
178 178
179 hw_cons = *(tcb->hw_consumer_index); 179 hw_cons = *(tcb->hw_consumer_index);
180 rmb();
180 cons = tcb->consumer_index; 181 cons = tcb->consumer_index;
181 q_depth = tcb->q_depth; 182 q_depth = tcb->q_depth;
182 183
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3094 BNA_QE_INDX_INC(prod, q_depth); 3095 BNA_QE_INDX_INC(prod, q_depth);
3095 tcb->producer_index = prod; 3096 tcb->producer_index = prod;
3096 3097
3097 smp_mb(); 3098 wmb();
3098 3099
3099 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 3100 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3100 return NETDEV_TX_OK; 3101 return NETDEV_TX_OK;
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
3102 skb_tx_timestamp(skb); 3103 skb_tx_timestamp(skb);
3103 3104
3104 bna_txq_prod_indx_doorbell(tcb); 3105 bna_txq_prod_indx_doorbell(tcb);
3105 smp_mb();
3106 3106
3107 return NETDEV_TX_OK; 3107 return NETDEV_TX_OK;
3108} 3108}
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index b32444a3ed79..ec09fcece711 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -975,6 +975,7 @@ static inline void macb_init_rx_ring(struct macb *bp)
975 addr += bp->rx_buffer_size; 975 addr += bp->rx_buffer_size;
976 } 976 }
977 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); 977 bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
978 bp->rx_tail = 0;
978} 979}
979 980
980static int macb_rx(struct macb *bp, int budget) 981static int macb_rx(struct macb *bp, int budget)
@@ -1156,6 +1157,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1156 if (status & MACB_BIT(RXUBR)) { 1157 if (status & MACB_BIT(RXUBR)) {
1157 ctrl = macb_readl(bp, NCR); 1158 ctrl = macb_readl(bp, NCR);
1158 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); 1159 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1160 wmb();
1159 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); 1161 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1160 1162
1161 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1163 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1616,8 +1618,6 @@ static void macb_init_rings(struct macb *bp)
1616 bp->queues[0].tx_head = 0; 1618 bp->queues[0].tx_head = 0;
1617 bp->queues[0].tx_tail = 0; 1619 bp->queues[0].tx_tail = 0;
1618 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 1620 bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
1619
1620 bp->rx_tail = 0;
1621} 1621}
1622 1622
1623static void macb_reset_hw(struct macb *bp) 1623static void macb_reset_hw(struct macb *bp)
@@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
2673 lp->skb_length = skb->len; 2673 lp->skb_length = skb->len;
2674 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, 2674 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
2675 DMA_TO_DEVICE); 2675 DMA_TO_DEVICE);
2676 if (dma_mapping_error(NULL, lp->skb_physaddr)) {
2677 dev_kfree_skb_any(skb);
2678 dev->stats.tx_dropped++;
2679 netdev_err(dev, "%s: DMA mapping error\n", __func__);
2680 return NETDEV_TX_OK;
2681 }
2676 2682
2677 /* Set address of the data in the Transmit Address register */ 2683 /* Set address of the data in the Transmit Address register */
2678 macb_writel(lp, TAR, lp->skb_physaddr); 2684 macb_writel(lp, TAR, lp->skb_physaddr);
@@ -2764,6 +2770,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
2764 if (intstatus & MACB_BIT(RXUBR)) { 2770 if (intstatus & MACB_BIT(RXUBR)) {
2765 ctl = macb_readl(lp, NCR); 2771 ctl = macb_readl(lp, NCR);
2766 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); 2772 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
2773 wmb();
2767 macb_writel(lp, NCR, ctl | MACB_BIT(RE)); 2774 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
2768 } 2775 }
2769 2776
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 30426109711c..86bd93ce2ea3 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -47,7 +47,7 @@
47 47
48/* Min/Max packet size */ 48/* Min/Max packet size */
49#define NIC_HW_MIN_FRS 64 49#define NIC_HW_MIN_FRS 64
50#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ 50#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
51 51
52/* Max pkinds */ 52/* Max pkinds */
53#define NIC_MAX_PKIND 16 53#define NIC_MAX_PKIND 16
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset {
178 178
179struct nicvf_hw_stats { 179struct nicvf_hw_stats {
180 u64 rx_bytes; 180 u64 rx_bytes;
181 u64 rx_frames;
181 u64 rx_ucast_frames; 182 u64 rx_ucast_frames;
182 u64 rx_bcast_frames; 183 u64 rx_bcast_frames;
183 u64 rx_mcast_frames; 184 u64 rx_mcast_frames;
184 u64 rx_fcs_errors; 185 u64 rx_drops;
185 u64 rx_l2_errors;
186 u64 rx_drop_red; 186 u64 rx_drop_red;
187 u64 rx_drop_red_bytes; 187 u64 rx_drop_red_bytes;
188 u64 rx_drop_overrun; 188 u64 rx_drop_overrun;
@@ -191,6 +191,19 @@ struct nicvf_hw_stats {
191 u64 rx_drop_mcast; 191 u64 rx_drop_mcast;
192 u64 rx_drop_l3_bcast; 192 u64 rx_drop_l3_bcast;
193 u64 rx_drop_l3_mcast; 193 u64 rx_drop_l3_mcast;
194 u64 rx_fcs_errors;
195 u64 rx_l2_errors;
196
197 u64 tx_bytes;
198 u64 tx_frames;
199 u64 tx_ucast_frames;
200 u64 tx_bcast_frames;
201 u64 tx_mcast_frames;
202 u64 tx_drops;
203};
204
205struct nicvf_drv_stats {
206 /* CQE Rx errs */
194 u64 rx_bgx_truncated_pkts; 207 u64 rx_bgx_truncated_pkts;
195 u64 rx_jabber_errs; 208 u64 rx_jabber_errs;
196 u64 rx_fcs_errs; 209 u64 rx_fcs_errs;
@@ -216,34 +229,30 @@ struct nicvf_hw_stats {
216 u64 rx_l4_pclp; 229 u64 rx_l4_pclp;
217 u64 rx_truncated_pkts; 230 u64 rx_truncated_pkts;
218 231
219 u64 tx_bytes_ok; 232 /* CQE Tx errs */
220 u64 tx_ucast_frames_ok; 233 u64 tx_desc_fault;
221 u64 tx_bcast_frames_ok; 234 u64 tx_hdr_cons_err;
222 u64 tx_mcast_frames_ok; 235 u64 tx_subdesc_err;
223 u64 tx_drops; 236 u64 tx_max_size_exceeded;
224}; 237 u64 tx_imm_size_oflow;
225 238 u64 tx_data_seq_err;
226struct nicvf_drv_stats { 239 u64 tx_mem_seq_err;
227 /* Rx */ 240 u64 tx_lock_viol;
228 u64 rx_frames_ok; 241 u64 tx_data_fault;
229 u64 rx_frames_64; 242 u64 tx_tstmp_conflict;
230 u64 rx_frames_127; 243 u64 tx_tstmp_timeout;
231 u64 rx_frames_255; 244 u64 tx_mem_fault;
232 u64 rx_frames_511; 245 u64 tx_csum_overlap;
233 u64 rx_frames_1023; 246 u64 tx_csum_overflow;
234 u64 rx_frames_1518; 247
235 u64 rx_frames_jumbo; 248 /* driver debug stats */
236 u64 rx_drops;
237
238 u64 rcv_buffer_alloc_failures; 249 u64 rcv_buffer_alloc_failures;
239
240 /* Tx */
241 u64 tx_frames_ok;
242 u64 tx_drops;
243 u64 tx_tso; 250 u64 tx_tso;
244 u64 tx_timeout; 251 u64 tx_timeout;
245 u64 txq_stop; 252 u64 txq_stop;
246 u64 txq_wake; 253 u64 txq_wake;
254
255 struct u64_stats_sync syncp;
247}; 256};
248 257
249struct nicvf { 258struct nicvf {
@@ -282,7 +291,6 @@ struct nicvf {
282 291
283 u8 node; 292 u8 node;
284 u8 cpi_alg; 293 u8 cpi_alg;
285 u16 mtu;
286 bool link_up; 294 bool link_up;
287 u8 duplex; 295 u8 duplex;
288 u32 speed; 296 u32 speed;
@@ -298,7 +306,7 @@ struct nicvf {
298 306
299 /* Stats */ 307 /* Stats */
300 struct nicvf_hw_stats hw_stats; 308 struct nicvf_hw_stats hw_stats;
301 struct nicvf_drv_stats drv_stats; 309 struct nicvf_drv_stats __percpu *drv_stats;
302 struct bgx_stats bgx_stats; 310 struct bgx_stats bgx_stats;
303 311
304 /* MSI-X */ 312 /* MSI-X */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 2bbf4cbf08b2..6677b96e1f3f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -11,6 +11,7 @@
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include <linux/of.h> 13#include <linux/of.h>
14#include <linux/if_vlan.h>
14 15
15#include "nic_reg.h" 16#include "nic_reg.h"
16#include "nic.h" 17#include "nic.h"
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
260/* Update hardware min/max frame size */ 261/* Update hardware min/max frame size */
261static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) 262static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
262{ 263{
263 if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { 264 int bgx, lmac, lmac_cnt;
264 dev_err(&nic->pdev->dev, 265 u64 lmac_credits;
265 "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", 266
266 vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); 267 if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
267 return 1; 268 return 1;
268 }
269 new_frs += ETH_HLEN;
270 if (new_frs <= nic->pkind.maxlen)
271 return 0;
272 269
273 nic->pkind.maxlen = new_frs; 270 bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
274 nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); 271 lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
272 lmac += bgx * MAX_LMAC_PER_BGX;
273
274 new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
275
276 /* Update corresponding LMAC credits */
277 lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
278 lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
279 lmac_credits &= ~(0xFFFFFULL << 12);
280 lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
281 nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
282
283 /* Enforce MTU in HW
284 * This config is supported only from 88xx pass 2.0 onwards.
285 */
286 if (!pass1_silicon(nic->pdev))
287 nic_reg_write(nic,
288 NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
275 return 0; 289 return 0;
276} 290}
277 291
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic)
464 478
465 /* PKIND configuration */ 479 /* PKIND configuration */
466 nic->pkind.minlen = 0; 480 nic->pkind.minlen = 0;
467 nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; 481 nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
468 nic->pkind.lenerr_en = 1; 482 nic->pkind.lenerr_en = 1;
469 nic->pkind.rx_hdr = 0; 483 nic->pkind.rx_hdr = 0;
470 nic->pkind.hdr_sl = 0; 484 nic->pkind.hdr_sl = 0;
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic,
837 nic_reg_write(nic, reg_addr, 0); 851 nic_reg_write(nic, reg_addr, 0);
838 } 852 }
839 } 853 }
854
840 return 0; 855 return 0;
841} 856}
842 857
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index edf779f5a227..80d46337cf29 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -106,6 +106,7 @@
106#define NIC_PF_MPI_0_2047_CFG (0x210000) 106#define NIC_PF_MPI_0_2047_CFG (0x210000)
107#define NIC_PF_RSSI_0_4097_RQ (0x220000) 107#define NIC_PF_RSSI_0_4097_RQ (0x220000)
108#define NIC_PF_LMAC_0_7_CFG (0x240000) 108#define NIC_PF_LMAC_0_7_CFG (0x240000)
109#define NIC_PF_LMAC_0_7_CFG2 (0x240100)
109#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) 110#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
110#define NIC_PF_LMAC_0_7_CREDIT (0x244000) 111#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
111#define NIC_PF_CHAN_0_255_TX_CFG (0x400000) 112#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index ad4fddb55421..432bf6be57cb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -36,11 +36,11 @@ struct nicvf_stat {
36 36
37static const struct nicvf_stat nicvf_hw_stats[] = { 37static const struct nicvf_stat nicvf_hw_stats[] = {
38 NICVF_HW_STAT(rx_bytes), 38 NICVF_HW_STAT(rx_bytes),
39 NICVF_HW_STAT(rx_frames),
39 NICVF_HW_STAT(rx_ucast_frames), 40 NICVF_HW_STAT(rx_ucast_frames),
40 NICVF_HW_STAT(rx_bcast_frames), 41 NICVF_HW_STAT(rx_bcast_frames),
41 NICVF_HW_STAT(rx_mcast_frames), 42 NICVF_HW_STAT(rx_mcast_frames),
42 NICVF_HW_STAT(rx_fcs_errors), 43 NICVF_HW_STAT(rx_drops),
43 NICVF_HW_STAT(rx_l2_errors),
44 NICVF_HW_STAT(rx_drop_red), 44 NICVF_HW_STAT(rx_drop_red),
45 NICVF_HW_STAT(rx_drop_red_bytes), 45 NICVF_HW_STAT(rx_drop_red_bytes),
46 NICVF_HW_STAT(rx_drop_overrun), 46 NICVF_HW_STAT(rx_drop_overrun),
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = {
49 NICVF_HW_STAT(rx_drop_mcast), 49 NICVF_HW_STAT(rx_drop_mcast),
50 NICVF_HW_STAT(rx_drop_l3_bcast), 50 NICVF_HW_STAT(rx_drop_l3_bcast),
51 NICVF_HW_STAT(rx_drop_l3_mcast), 51 NICVF_HW_STAT(rx_drop_l3_mcast),
52 NICVF_HW_STAT(rx_bgx_truncated_pkts), 52 NICVF_HW_STAT(rx_fcs_errors),
53 NICVF_HW_STAT(rx_jabber_errs), 53 NICVF_HW_STAT(rx_l2_errors),
54 NICVF_HW_STAT(rx_fcs_errs), 54 NICVF_HW_STAT(tx_bytes),
55 NICVF_HW_STAT(rx_bgx_errs), 55 NICVF_HW_STAT(tx_frames),
56 NICVF_HW_STAT(rx_prel2_errs), 56 NICVF_HW_STAT(tx_ucast_frames),
57 NICVF_HW_STAT(rx_l2_hdr_malformed), 57 NICVF_HW_STAT(tx_bcast_frames),
58 NICVF_HW_STAT(rx_oversize), 58 NICVF_HW_STAT(tx_mcast_frames),
59 NICVF_HW_STAT(rx_undersize), 59 NICVF_HW_STAT(tx_drops),
60 NICVF_HW_STAT(rx_l2_len_mismatch),
61 NICVF_HW_STAT(rx_l2_pclp),
62 NICVF_HW_STAT(rx_ip_ver_errs),
63 NICVF_HW_STAT(rx_ip_csum_errs),
64 NICVF_HW_STAT(rx_ip_hdr_malformed),
65 NICVF_HW_STAT(rx_ip_payload_malformed),
66 NICVF_HW_STAT(rx_ip_ttl_errs),
67 NICVF_HW_STAT(rx_l3_pclp),
68 NICVF_HW_STAT(rx_l4_malformed),
69 NICVF_HW_STAT(rx_l4_csum_errs),
70 NICVF_HW_STAT(rx_udp_len_errs),
71 NICVF_HW_STAT(rx_l4_port_errs),
72 NICVF_HW_STAT(rx_tcp_flag_errs),
73 NICVF_HW_STAT(rx_tcp_offset_errs),
74 NICVF_HW_STAT(rx_l4_pclp),
75 NICVF_HW_STAT(rx_truncated_pkts),
76 NICVF_HW_STAT(tx_bytes_ok),
77 NICVF_HW_STAT(tx_ucast_frames_ok),
78 NICVF_HW_STAT(tx_bcast_frames_ok),
79 NICVF_HW_STAT(tx_mcast_frames_ok),
80}; 60};
81 61
82static const struct nicvf_stat nicvf_drv_stats[] = { 62static const struct nicvf_stat nicvf_drv_stats[] = {
83 NICVF_DRV_STAT(rx_frames_ok), 63 NICVF_DRV_STAT(rx_bgx_truncated_pkts),
84 NICVF_DRV_STAT(rx_frames_64), 64 NICVF_DRV_STAT(rx_jabber_errs),
85 NICVF_DRV_STAT(rx_frames_127), 65 NICVF_DRV_STAT(rx_fcs_errs),
86 NICVF_DRV_STAT(rx_frames_255), 66 NICVF_DRV_STAT(rx_bgx_errs),
87 NICVF_DRV_STAT(rx_frames_511), 67 NICVF_DRV_STAT(rx_prel2_errs),
88 NICVF_DRV_STAT(rx_frames_1023), 68 NICVF_DRV_STAT(rx_l2_hdr_malformed),
89 NICVF_DRV_STAT(rx_frames_1518), 69 NICVF_DRV_STAT(rx_oversize),
90 NICVF_DRV_STAT(rx_frames_jumbo), 70 NICVF_DRV_STAT(rx_undersize),
91 NICVF_DRV_STAT(rx_drops), 71 NICVF_DRV_STAT(rx_l2_len_mismatch),
72 NICVF_DRV_STAT(rx_l2_pclp),
73 NICVF_DRV_STAT(rx_ip_ver_errs),
74 NICVF_DRV_STAT(rx_ip_csum_errs),
75 NICVF_DRV_STAT(rx_ip_hdr_malformed),
76 NICVF_DRV_STAT(rx_ip_payload_malformed),
77 NICVF_DRV_STAT(rx_ip_ttl_errs),
78 NICVF_DRV_STAT(rx_l3_pclp),
79 NICVF_DRV_STAT(rx_l4_malformed),
80 NICVF_DRV_STAT(rx_l4_csum_errs),
81 NICVF_DRV_STAT(rx_udp_len_errs),
82 NICVF_DRV_STAT(rx_l4_port_errs),
83 NICVF_DRV_STAT(rx_tcp_flag_errs),
84 NICVF_DRV_STAT(rx_tcp_offset_errs),
85 NICVF_DRV_STAT(rx_l4_pclp),
86 NICVF_DRV_STAT(rx_truncated_pkts),
87
88 NICVF_DRV_STAT(tx_desc_fault),
89 NICVF_DRV_STAT(tx_hdr_cons_err),
90 NICVF_DRV_STAT(tx_subdesc_err),
91 NICVF_DRV_STAT(tx_max_size_exceeded),
92 NICVF_DRV_STAT(tx_imm_size_oflow),
93 NICVF_DRV_STAT(tx_data_seq_err),
94 NICVF_DRV_STAT(tx_mem_seq_err),
95 NICVF_DRV_STAT(tx_lock_viol),
96 NICVF_DRV_STAT(tx_data_fault),
97 NICVF_DRV_STAT(tx_tstmp_conflict),
98 NICVF_DRV_STAT(tx_tstmp_timeout),
99 NICVF_DRV_STAT(tx_mem_fault),
100 NICVF_DRV_STAT(tx_csum_overlap),
101 NICVF_DRV_STAT(tx_csum_overflow),
102
92 NICVF_DRV_STAT(rcv_buffer_alloc_failures), 103 NICVF_DRV_STAT(rcv_buffer_alloc_failures),
93 NICVF_DRV_STAT(tx_frames_ok),
94 NICVF_DRV_STAT(tx_tso), 104 NICVF_DRV_STAT(tx_tso),
95 NICVF_DRV_STAT(tx_drops),
96 NICVF_DRV_STAT(tx_timeout), 105 NICVF_DRV_STAT(tx_timeout),
97 NICVF_DRV_STAT(txq_stop), 106 NICVF_DRV_STAT(txq_stop),
98 NICVF_DRV_STAT(txq_wake), 107 NICVF_DRV_STAT(txq_wake),
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
278 struct ethtool_stats *stats, u64 *data) 287 struct ethtool_stats *stats, u64 *data)
279{ 288{
280 struct nicvf *nic = netdev_priv(netdev); 289 struct nicvf *nic = netdev_priv(netdev);
281 int stat; 290 int stat, tmp_stats;
282 int sqs; 291 int sqs, cpu;
283 292
284 nicvf_update_stats(nic); 293 nicvf_update_stats(nic);
285 294
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
289 for (stat = 0; stat < nicvf_n_hw_stats; stat++) 298 for (stat = 0; stat < nicvf_n_hw_stats; stat++)
290 *(data++) = ((u64 *)&nic->hw_stats) 299 *(data++) = ((u64 *)&nic->hw_stats)
291 [nicvf_hw_stats[stat].index]; 300 [nicvf_hw_stats[stat].index];
292 for (stat = 0; stat < nicvf_n_drv_stats; stat++) 301 for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
293 *(data++) = ((u64 *)&nic->drv_stats) 302 tmp_stats = 0;
294 [nicvf_drv_stats[stat].index]; 303 for_each_possible_cpu(cpu)
304 tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
305 [nicvf_drv_stats[stat].index];
306 *(data++) = tmp_stats;
307 }
295 308
296 nicvf_get_qset_stats(nic, stats, &data); 309 nicvf_get_qset_stats(nic, stats, &data);
297 310
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 45a13f718863..8a37012c9c89 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
69 return qidx; 69 return qidx;
70} 70}
71 71
72static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
73 struct sk_buff *skb)
74{
75 if (skb->len <= 64)
76 nic->drv_stats.rx_frames_64++;
77 else if (skb->len <= 127)
78 nic->drv_stats.rx_frames_127++;
79 else if (skb->len <= 255)
80 nic->drv_stats.rx_frames_255++;
81 else if (skb->len <= 511)
82 nic->drv_stats.rx_frames_511++;
83 else if (skb->len <= 1023)
84 nic->drv_stats.rx_frames_1023++;
85 else if (skb->len <= 1518)
86 nic->drv_stats.rx_frames_1518++;
87 else
88 nic->drv_stats.rx_frames_jumbo++;
89}
90
91/* The Cavium ThunderX network controller can *only* be found in SoCs 72/* The Cavium ThunderX network controller can *only* be found in SoCs
92 * containing the ThunderX ARM64 CPU implementation. All accesses to the device 73 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
93 * registers on this platform are implicitly strongly ordered with respect 74 * registers on this platform are implicitly strongly ordered with respect
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev,
492static int nicvf_init_resources(struct nicvf *nic) 473static int nicvf_init_resources(struct nicvf *nic)
493{ 474{
494 int err; 475 int err;
495 union nic_mbx mbx = {};
496
497 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
498 476
499 /* Enable Qset */ 477 /* Enable Qset */
500 nicvf_qset_config(nic, true); 478 nicvf_qset_config(nic, true);
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic)
507 return err; 485 return err;
508 } 486 }
509 487
510 /* Send VF config done msg to PF */
511 nicvf_write_to_mbx(nic, &mbx);
512
513 return 0; 488 return 0;
514} 489}
515 490
516static void nicvf_snd_pkt_handler(struct net_device *netdev, 491static void nicvf_snd_pkt_handler(struct net_device *netdev,
517 struct cmp_queue *cq,
518 struct cqe_send_t *cqe_tx, 492 struct cqe_send_t *cqe_tx,
519 int cqe_type, int budget, 493 int cqe_type, int budget,
520 unsigned int *tx_pkts, unsigned int *tx_bytes) 494 unsigned int *tx_pkts, unsigned int *tx_bytes)
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
536 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, 510 __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
537 cqe_tx->sqe_ptr, hdr->subdesc_cnt); 511 cqe_tx->sqe_ptr, hdr->subdesc_cnt);
538 512
539 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); 513 nicvf_check_cqe_tx_errs(nic, cqe_tx);
540 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; 514 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
541 if (skb) { 515 if (skb) {
542 /* Check for dummy descriptor used for HW TSO offload on 88xx */ 516 /* Check for dummy descriptor used for HW TSO offload on 88xx */
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
630 return; 604 return;
631 } 605 }
632 606
633 nicvf_set_rx_frame_cnt(nic, skb);
634
635 nicvf_set_rxhash(netdev, cqe_rx, skb); 607 nicvf_set_rxhash(netdev, cqe_rx, skb);
636 608
637 skb_record_rx_queue(skb, rq_idx); 609 skb_record_rx_queue(skb, rq_idx);
@@ -703,7 +675,7 @@ loop:
703 work_done++; 675 work_done++;
704 break; 676 break;
705 case CQE_TYPE_SEND: 677 case CQE_TYPE_SEND:
706 nicvf_snd_pkt_handler(netdev, cq, 678 nicvf_snd_pkt_handler(netdev,
707 (void *)cq_desc, CQE_TYPE_SEND, 679 (void *)cq_desc, CQE_TYPE_SEND,
708 budget, &tx_pkts, &tx_bytes); 680 budget, &tx_pkts, &tx_bytes);
709 tx_done++; 681 tx_done++;
@@ -740,7 +712,7 @@ done:
740 nic = nic->pnicvf; 712 nic = nic->pnicvf;
741 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { 713 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
742 netif_tx_start_queue(txq); 714 netif_tx_start_queue(txq);
743 nic->drv_stats.txq_wake++; 715 this_cpu_inc(nic->drv_stats->txq_wake);
744 if (netif_msg_tx_err(nic)) 716 if (netif_msg_tx_err(nic))
745 netdev_warn(netdev, 717 netdev_warn(netdev,
746 "%s: Transmit queue wakeup SQ%d\n", 718 "%s: Transmit queue wakeup SQ%d\n",
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
1084 1056
1085 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { 1057 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
1086 netif_tx_stop_queue(txq); 1058 netif_tx_stop_queue(txq);
1087 nic->drv_stats.txq_stop++; 1059 this_cpu_inc(nic->drv_stats->txq_stop);
1088 if (netif_msg_tx_err(nic)) 1060 if (netif_msg_tx_err(nic))
1089 netdev_warn(netdev, 1061 netdev_warn(netdev,
1090 "%s: Transmit ring full, stopping SQ%d\n", 1062 "%s: Transmit ring full, stopping SQ%d\n",
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev)
1189 return 0; 1161 return 0;
1190} 1162}
1191 1163
1164static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1165{
1166 union nic_mbx mbx = {};
1167
1168 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1169 mbx.frs.max_frs = mtu;
1170 mbx.frs.vf_id = nic->vf_id;
1171
1172 return nicvf_send_msg_to_pf(nic, &mbx);
1173}
1174
1192int nicvf_open(struct net_device *netdev) 1175int nicvf_open(struct net_device *netdev)
1193{ 1176{
1194 int err, qidx; 1177 int cpu, err, qidx;
1195 struct nicvf *nic = netdev_priv(netdev); 1178 struct nicvf *nic = netdev_priv(netdev);
1196 struct queue_set *qs = nic->qs; 1179 struct queue_set *qs = nic->qs;
1197 struct nicvf_cq_poll *cq_poll = NULL; 1180 struct nicvf_cq_poll *cq_poll = NULL;
1198 1181 union nic_mbx mbx = {};
1199 nic->mtu = netdev->mtu;
1200 1182
1201 netif_carrier_off(netdev); 1183 netif_carrier_off(netdev);
1202 1184
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev)
1248 if (nic->sqs_mode) 1230 if (nic->sqs_mode)
1249 nicvf_get_primary_vf_struct(nic); 1231 nicvf_get_primary_vf_struct(nic);
1250 1232
1251 /* Configure receive side scaling */ 1233 /* Configure receive side scaling and MTU */
1252 if (!nic->sqs_mode) 1234 if (!nic->sqs_mode) {
1253 nicvf_rss_init(nic); 1235 nicvf_rss_init(nic);
1236 if (nicvf_update_hw_max_frs(nic, netdev->mtu))
1237 goto cleanup;
1238
1239 /* Clear percpu stats */
1240 for_each_possible_cpu(cpu)
1241 memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
1242 sizeof(struct nicvf_drv_stats));
1243 }
1254 1244
1255 err = nicvf_register_interrupts(nic); 1245 err = nicvf_register_interrupts(nic);
1256 if (err) 1246 if (err)
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev)
1276 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1266 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1277 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1267 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1278 1268
1279 nic->drv_stats.txq_stop = 0; 1269 /* Send VF config done msg to PF */
1280 nic->drv_stats.txq_wake = 0; 1270 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
1271 nicvf_write_to_mbx(nic, &mbx);
1281 1272
1282 return 0; 1273 return 0;
1283cleanup: 1274cleanup:
@@ -1297,17 +1288,6 @@ napi_del:
1297 return err; 1288 return err;
1298} 1289}
1299 1290
1300static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1301{
1302 union nic_mbx mbx = {};
1303
1304 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1305 mbx.frs.max_frs = mtu;
1306 mbx.frs.vf_id = nic->vf_id;
1307
1308 return nicvf_send_msg_to_pf(nic, &mbx);
1309}
1310
1311static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) 1291static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1312{ 1292{
1313 struct nicvf *nic = netdev_priv(netdev); 1293 struct nicvf *nic = netdev_priv(netdev);
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1318 if (new_mtu < NIC_HW_MIN_FRS) 1298 if (new_mtu < NIC_HW_MIN_FRS)
1319 return -EINVAL; 1299 return -EINVAL;
1320 1300
1301 netdev->mtu = new_mtu;
1302
1303 if (!netif_running(netdev))
1304 return 0;
1305
1321 if (nicvf_update_hw_max_frs(nic, new_mtu)) 1306 if (nicvf_update_hw_max_frs(nic, new_mtu))
1322 return -EINVAL; 1307 return -EINVAL;
1323 netdev->mtu = new_mtu;
1324 nic->mtu = new_mtu;
1325 1308
1326 return 0; 1309 return 0;
1327} 1310}
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic)
1379 1362
1380void nicvf_update_stats(struct nicvf *nic) 1363void nicvf_update_stats(struct nicvf *nic)
1381{ 1364{
1382 int qidx; 1365 int qidx, cpu;
1366 u64 tmp_stats = 0;
1383 struct nicvf_hw_stats *stats = &nic->hw_stats; 1367 struct nicvf_hw_stats *stats = &nic->hw_stats;
1384 struct nicvf_drv_stats *drv_stats = &nic->drv_stats; 1368 struct nicvf_drv_stats *drv_stats;
1385 struct queue_set *qs = nic->qs; 1369 struct queue_set *qs = nic->qs;
1386 1370
1387#define GET_RX_STATS(reg) \ 1371#define GET_RX_STATS(reg) \
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic)
1404 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); 1388 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1405 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); 1389 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1406 1390
1407 stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); 1391 stats->tx_bytes = GET_TX_STATS(TX_OCTS);
1408 stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); 1392 stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
1409 stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); 1393 stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
1410 stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); 1394 stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
1411 stats->tx_drops = GET_TX_STATS(TX_DROP); 1395 stats->tx_drops = GET_TX_STATS(TX_DROP);
1412 1396
1413 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1397 /* On T88 pass 2.0, the dummy SQE added for TSO notification
1414 stats->tx_bcast_frames_ok + 1398 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
1415 stats->tx_mcast_frames_ok; 1399 * pointed by dummy SQE and results in tx_drops counter being
1416 drv_stats->rx_frames_ok = stats->rx_ucast_frames + 1400 * incremented. Subtracting it from tx_tso counter will give
1417 stats->rx_bcast_frames + 1401 * exact tx_drops counter.
1418 stats->rx_mcast_frames; 1402 */
1419 drv_stats->rx_drops = stats->rx_drop_red + 1403 if (nic->t88 && nic->hw_tso) {
1420 stats->rx_drop_overrun; 1404 for_each_possible_cpu(cpu) {
1421 drv_stats->tx_drops = stats->tx_drops; 1405 drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
1406 tmp_stats += drv_stats->tx_tso;
1407 }
1408 stats->tx_drops = tmp_stats - stats->tx_drops;
1409 }
1410 stats->tx_frames = stats->tx_ucast_frames +
1411 stats->tx_bcast_frames +
1412 stats->tx_mcast_frames;
1413 stats->rx_frames = stats->rx_ucast_frames +
1414 stats->rx_bcast_frames +
1415 stats->rx_mcast_frames;
1416 stats->rx_drops = stats->rx_drop_red +
1417 stats->rx_drop_overrun;
1422 1418
1423 /* Update RQ and SQ stats */ 1419 /* Update RQ and SQ stats */
1424 for (qidx = 0; qidx < qs->rq_cnt; qidx++) 1420 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
1432{ 1428{
1433 struct nicvf *nic = netdev_priv(netdev); 1429 struct nicvf *nic = netdev_priv(netdev);
1434 struct nicvf_hw_stats *hw_stats = &nic->hw_stats; 1430 struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
1435 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1436 1431
1437 nicvf_update_stats(nic); 1432 nicvf_update_stats(nic);
1438 1433
1439 stats->rx_bytes = hw_stats->rx_bytes; 1434 stats->rx_bytes = hw_stats->rx_bytes;
1440 stats->rx_packets = drv_stats->rx_frames_ok; 1435 stats->rx_packets = hw_stats->rx_frames;
1441 stats->rx_dropped = drv_stats->rx_drops; 1436 stats->rx_dropped = hw_stats->rx_drops;
1442 stats->multicast = hw_stats->rx_mcast_frames; 1437 stats->multicast = hw_stats->rx_mcast_frames;
1443 1438
1444 stats->tx_bytes = hw_stats->tx_bytes_ok; 1439 stats->tx_bytes = hw_stats->tx_bytes;
1445 stats->tx_packets = drv_stats->tx_frames_ok; 1440 stats->tx_packets = hw_stats->tx_frames;
1446 stats->tx_dropped = drv_stats->tx_drops; 1441 stats->tx_dropped = hw_stats->tx_drops;
1447 1442
1448 return stats; 1443 return stats;
1449} 1444}
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
1456 netdev_warn(dev, "%s: Transmit timed out, resetting\n", 1451 netdev_warn(dev, "%s: Transmit timed out, resetting\n",
1457 dev->name); 1452 dev->name);
1458 1453
1459 nic->drv_stats.tx_timeout++; 1454 this_cpu_inc(nic->drv_stats->tx_timeout);
1460 schedule_work(&nic->reset_task); 1455 schedule_work(&nic->reset_task);
1461} 1456}
1462 1457
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1590 goto err_free_netdev; 1585 goto err_free_netdev;
1591 } 1586 }
1592 1587
1588 nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
1589 if (!nic->drv_stats) {
1590 err = -ENOMEM;
1591 goto err_free_netdev;
1592 }
1593
1593 err = nicvf_set_qset_resources(nic); 1594 err = nicvf_set_qset_resources(nic);
1594 if (err) 1595 if (err)
1595 goto err_free_netdev; 1596 goto err_free_netdev;
@@ -1648,6 +1649,8 @@ err_unregister_interrupts:
1648 nicvf_unregister_interrupts(nic); 1649 nicvf_unregister_interrupts(nic);
1649err_free_netdev: 1650err_free_netdev:
1650 pci_set_drvdata(pdev, NULL); 1651 pci_set_drvdata(pdev, NULL);
1652 if (nic->drv_stats)
1653 free_percpu(nic->drv_stats);
1651 free_netdev(netdev); 1654 free_netdev(netdev);
1652err_release_regions: 1655err_release_regions:
1653 pci_release_regions(pdev); 1656 pci_release_regions(pdev);
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev)
1675 unregister_netdev(pnetdev); 1678 unregister_netdev(pnetdev);
1676 nicvf_unregister_interrupts(nic); 1679 nicvf_unregister_interrupts(nic);
1677 pci_set_drvdata(pdev, NULL); 1680 pci_set_drvdata(pdev, NULL);
1681 if (nic->drv_stats)
1682 free_percpu(nic->drv_stats);
1678 free_netdev(netdev); 1683 free_netdev(netdev);
1679 pci_release_regions(pdev); 1684 pci_release_regions(pdev);
1680 pci_disable_device(pdev); 1685 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a4fc50155881..747ef0882976 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 104 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
105 order); 105 order);
106 if (!nic->rb_page) { 106 if (!nic->rb_page) {
107 nic->drv_stats.rcv_buffer_alloc_failures++; 107 this_cpu_inc(nic->pnicvf->drv_stats->
108 rcv_buffer_alloc_failures);
108 return -ENOMEM; 109 return -ENOMEM;
109 } 110 }
110 nic->rb_page_offset = 0; 111 nic->rb_page_offset = 0;
@@ -270,7 +271,8 @@ refill:
270 rbdr_idx, new_rb); 271 rbdr_idx, new_rb);
271next_rbdr: 272next_rbdr:
272 /* Re-enable RBDR interrupts only if buffer allocation is success */ 273 /* Re-enable RBDR interrupts only if buffer allocation is success */
273 if (!nic->rb_alloc_fail && rbdr->enable) 274 if (!nic->rb_alloc_fail && rbdr->enable &&
275 netif_running(nic->pnicvf->netdev))
274 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); 276 nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
275 277
276 if (rbdr_idx) 278 if (rbdr_idx)
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
361 363
362static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) 364static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
363{ 365{
366 struct sk_buff *skb;
367
364 if (!sq) 368 if (!sq)
365 return; 369 return;
366 if (!sq->dmem.base) 370 if (!sq->dmem.base)
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
371 sq->dmem.q_len * TSO_HEADER_SIZE, 375 sq->dmem.q_len * TSO_HEADER_SIZE,
372 sq->tso_hdrs, sq->tso_hdrs_phys); 376 sq->tso_hdrs, sq->tso_hdrs_phys);
373 377
378 /* Free pending skbs in the queue */
379 smp_rmb();
380 while (sq->head != sq->tail) {
381 skb = (struct sk_buff *)sq->skbuff[sq->head];
382 if (skb)
383 dev_kfree_skb_any(skb);
384 sq->head++;
385 sq->head &= (sq->dmem.q_len - 1);
386 }
374 kfree(sq->skbuff); 387 kfree(sq->skbuff);
375 nicvf_free_q_desc_mem(nic, &sq->dmem); 388 nicvf_free_q_desc_mem(nic, &sq->dmem);
376} 389}
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
483{ 496{
484 union nic_mbx mbx = {}; 497 union nic_mbx mbx = {};
485 498
486 /* Reset all RXQ's stats */ 499 /* Reset all RQ/SQ and VF stats */
487 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; 500 mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
501 mbx.reset_stat.rx_stat_mask = 0x3FFF;
502 mbx.reset_stat.tx_stat_mask = 0x1F;
488 mbx.reset_stat.rq_stat_mask = 0xFFFF; 503 mbx.reset_stat.rq_stat_mask = 0xFFFF;
504 mbx.reset_stat.sq_stat_mask = 0xFFFF;
489 nicvf_send_msg_to_pf(nic, &mbx); 505 nicvf_send_msg_to_pf(nic, &mbx);
490} 506}
491 507
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
538 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); 554 mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
539 nicvf_send_msg_to_pf(nic, &mbx); 555 nicvf_send_msg_to_pf(nic, &mbx);
540 556
541 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); 557 if (!nic->sqs_mode && (qidx == 0)) {
542 if (!nic->sqs_mode) 558 /* Enable checking L3/L4 length and TCP/UDP checksums */
559 nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
560 (BIT(24) | BIT(23) | BIT(21)));
543 nicvf_config_vlan_stripping(nic, nic->netdev->features); 561 nicvf_config_vlan_stripping(nic, nic->netdev->features);
562 }
544 563
545 /* Enable Receive queue */ 564 /* Enable Receive queue */
546 memset(&rq_cfg, 0, sizeof(struct rq_cfg)); 565 memset(&rq_cfg, 0, sizeof(struct rq_cfg));
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
1029 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; 1048 hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
1030 /* For non-tunneled pkts, point this to L2 ethertype */ 1049 /* For non-tunneled pkts, point this to L2 ethertype */
1031 hdr->inner_l3_offset = skb_network_offset(skb) - 2; 1050 hdr->inner_l3_offset = skb_network_offset(skb) - 2;
1032 nic->drv_stats.tx_tso++; 1051 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1033 } 1052 }
1034} 1053}
1035 1054
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1161 1180
1162 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); 1181 nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
1163 1182
1164 nic->drv_stats.tx_tso++; 1183 this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
1165 return 1; 1184 return 1;
1166} 1185}
1167 1186
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1422/* Check for errors in the receive cmp.queue entry */ 1441/* Check for errors in the receive cmp.queue entry */
1423int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) 1442int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1424{ 1443{
1425 struct nicvf_hw_stats *stats = &nic->hw_stats;
1426
1427 if (!cqe_rx->err_level && !cqe_rx->err_opcode) 1444 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1428 return 0; 1445 return 0;
1429 1446
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1435 1452
1436 switch (cqe_rx->err_opcode) { 1453 switch (cqe_rx->err_opcode) {
1437 case CQ_RX_ERROP_RE_PARTIAL: 1454 case CQ_RX_ERROP_RE_PARTIAL:
1438 stats->rx_bgx_truncated_pkts++; 1455 this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
1439 break; 1456 break;
1440 case CQ_RX_ERROP_RE_JABBER: 1457 case CQ_RX_ERROP_RE_JABBER:
1441 stats->rx_jabber_errs++; 1458 this_cpu_inc(nic->drv_stats->rx_jabber_errs);
1442 break; 1459 break;
1443 case CQ_RX_ERROP_RE_FCS: 1460 case CQ_RX_ERROP_RE_FCS:
1444 stats->rx_fcs_errs++; 1461 this_cpu_inc(nic->drv_stats->rx_fcs_errs);
1445 break; 1462 break;
1446 case CQ_RX_ERROP_RE_RX_CTL: 1463 case CQ_RX_ERROP_RE_RX_CTL:
1447 stats->rx_bgx_errs++; 1464 this_cpu_inc(nic->drv_stats->rx_bgx_errs);
1448 break; 1465 break;
1449 case CQ_RX_ERROP_PREL2_ERR: 1466 case CQ_RX_ERROP_PREL2_ERR:
1450 stats->rx_prel2_errs++; 1467 this_cpu_inc(nic->drv_stats->rx_prel2_errs);
1451 break; 1468 break;
1452 case CQ_RX_ERROP_L2_MAL: 1469 case CQ_RX_ERROP_L2_MAL:
1453 stats->rx_l2_hdr_malformed++; 1470 this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
1454 break; 1471 break;
1455 case CQ_RX_ERROP_L2_OVERSIZE: 1472 case CQ_RX_ERROP_L2_OVERSIZE:
1456 stats->rx_oversize++; 1473 this_cpu_inc(nic->drv_stats->rx_oversize);
1457 break; 1474 break;
1458 case CQ_RX_ERROP_L2_UNDERSIZE: 1475 case CQ_RX_ERROP_L2_UNDERSIZE:
1459 stats->rx_undersize++; 1476 this_cpu_inc(nic->drv_stats->rx_undersize);
1460 break; 1477 break;
1461 case CQ_RX_ERROP_L2_LENMISM: 1478 case CQ_RX_ERROP_L2_LENMISM:
1462 stats->rx_l2_len_mismatch++; 1479 this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
1463 break; 1480 break;
1464 case CQ_RX_ERROP_L2_PCLP: 1481 case CQ_RX_ERROP_L2_PCLP:
1465 stats->rx_l2_pclp++; 1482 this_cpu_inc(nic->drv_stats->rx_l2_pclp);
1466 break; 1483 break;
1467 case CQ_RX_ERROP_IP_NOT: 1484 case CQ_RX_ERROP_IP_NOT:
1468 stats->rx_ip_ver_errs++; 1485 this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
1469 break; 1486 break;
1470 case CQ_RX_ERROP_IP_CSUM_ERR: 1487 case CQ_RX_ERROP_IP_CSUM_ERR:
1471 stats->rx_ip_csum_errs++; 1488 this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
1472 break; 1489 break;
1473 case CQ_RX_ERROP_IP_MAL: 1490 case CQ_RX_ERROP_IP_MAL:
1474 stats->rx_ip_hdr_malformed++; 1491 this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
1475 break; 1492 break;
1476 case CQ_RX_ERROP_IP_MALD: 1493 case CQ_RX_ERROP_IP_MALD:
1477 stats->rx_ip_payload_malformed++; 1494 this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
1478 break; 1495 break;
1479 case CQ_RX_ERROP_IP_HOP: 1496 case CQ_RX_ERROP_IP_HOP:
1480 stats->rx_ip_ttl_errs++; 1497 this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
1481 break; 1498 break;
1482 case CQ_RX_ERROP_L3_PCLP: 1499 case CQ_RX_ERROP_L3_PCLP:
1483 stats->rx_l3_pclp++; 1500 this_cpu_inc(nic->drv_stats->rx_l3_pclp);
1484 break; 1501 break;
1485 case CQ_RX_ERROP_L4_MAL: 1502 case CQ_RX_ERROP_L4_MAL:
1486 stats->rx_l4_malformed++; 1503 this_cpu_inc(nic->drv_stats->rx_l4_malformed);
1487 break; 1504 break;
1488 case CQ_RX_ERROP_L4_CHK: 1505 case CQ_RX_ERROP_L4_CHK:
1489 stats->rx_l4_csum_errs++; 1506 this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
1490 break; 1507 break;
1491 case CQ_RX_ERROP_UDP_LEN: 1508 case CQ_RX_ERROP_UDP_LEN:
1492 stats->rx_udp_len_errs++; 1509 this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
1493 break; 1510 break;
1494 case CQ_RX_ERROP_L4_PORT: 1511 case CQ_RX_ERROP_L4_PORT:
1495 stats->rx_l4_port_errs++; 1512 this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
1496 break; 1513 break;
1497 case CQ_RX_ERROP_TCP_FLAG: 1514 case CQ_RX_ERROP_TCP_FLAG:
1498 stats->rx_tcp_flag_errs++; 1515 this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
1499 break; 1516 break;
1500 case CQ_RX_ERROP_TCP_OFFSET: 1517 case CQ_RX_ERROP_TCP_OFFSET:
1501 stats->rx_tcp_offset_errs++; 1518 this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
1502 break; 1519 break;
1503 case CQ_RX_ERROP_L4_PCLP: 1520 case CQ_RX_ERROP_L4_PCLP:
1504 stats->rx_l4_pclp++; 1521 this_cpu_inc(nic->drv_stats->rx_l4_pclp);
1505 break; 1522 break;
1506 case CQ_RX_ERROP_RBDR_TRUNC: 1523 case CQ_RX_ERROP_RBDR_TRUNC:
1507 stats->rx_truncated_pkts++; 1524 this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
1508 break; 1525 break;
1509 } 1526 }
1510 1527
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1512} 1529}
1513 1530
1514/* Check for errors in the send cmp.queue entry */ 1531/* Check for errors in the send cmp.queue entry */
1515int nicvf_check_cqe_tx_errs(struct nicvf *nic, 1532int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
1516 struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
1517{ 1533{
1518 struct cmp_queue_stats *stats = &cq->stats;
1519
1520 switch (cqe_tx->send_status) { 1534 switch (cqe_tx->send_status) {
1521 case CQ_TX_ERROP_GOOD: 1535 case CQ_TX_ERROP_GOOD:
1522 stats->tx.good++;
1523 return 0; 1536 return 0;
1524 case CQ_TX_ERROP_DESC_FAULT: 1537 case CQ_TX_ERROP_DESC_FAULT:
1525 stats->tx.desc_fault++; 1538 this_cpu_inc(nic->drv_stats->tx_desc_fault);
1526 break; 1539 break;
1527 case CQ_TX_ERROP_HDR_CONS_ERR: 1540 case CQ_TX_ERROP_HDR_CONS_ERR:
1528 stats->tx.hdr_cons_err++; 1541 this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
1529 break; 1542 break;
1530 case CQ_TX_ERROP_SUBDC_ERR: 1543 case CQ_TX_ERROP_SUBDC_ERR:
1531 stats->tx.subdesc_err++; 1544 this_cpu_inc(nic->drv_stats->tx_subdesc_err);
1545 break;
1546 case CQ_TX_ERROP_MAX_SIZE_VIOL:
1547 this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
1532 break; 1548 break;
1533 case CQ_TX_ERROP_IMM_SIZE_OFLOW: 1549 case CQ_TX_ERROP_IMM_SIZE_OFLOW:
1534 stats->tx.imm_size_oflow++; 1550 this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
1535 break; 1551 break;
1536 case CQ_TX_ERROP_DATA_SEQUENCE_ERR: 1552 case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
1537 stats->tx.data_seq_err++; 1553 this_cpu_inc(nic->drv_stats->tx_data_seq_err);
1538 break; 1554 break;
1539 case CQ_TX_ERROP_MEM_SEQUENCE_ERR: 1555 case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
1540 stats->tx.mem_seq_err++; 1556 this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
1541 break; 1557 break;
1542 case CQ_TX_ERROP_LOCK_VIOL: 1558 case CQ_TX_ERROP_LOCK_VIOL:
1543 stats->tx.lock_viol++; 1559 this_cpu_inc(nic->drv_stats->tx_lock_viol);
1544 break; 1560 break;
1545 case CQ_TX_ERROP_DATA_FAULT: 1561 case CQ_TX_ERROP_DATA_FAULT:
1546 stats->tx.data_fault++; 1562 this_cpu_inc(nic->drv_stats->tx_data_fault);
1547 break; 1563 break;
1548 case CQ_TX_ERROP_TSTMP_CONFLICT: 1564 case CQ_TX_ERROP_TSTMP_CONFLICT:
1549 stats->tx.tstmp_conflict++; 1565 this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
1550 break; 1566 break;
1551 case CQ_TX_ERROP_TSTMP_TIMEOUT: 1567 case CQ_TX_ERROP_TSTMP_TIMEOUT:
1552 stats->tx.tstmp_timeout++; 1568 this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
1553 break; 1569 break;
1554 case CQ_TX_ERROP_MEM_FAULT: 1570 case CQ_TX_ERROP_MEM_FAULT:
1555 stats->tx.mem_fault++; 1571 this_cpu_inc(nic->drv_stats->tx_mem_fault);
1556 break; 1572 break;
1557 case CQ_TX_ERROP_CK_OVERLAP: 1573 case CQ_TX_ERROP_CK_OVERLAP:
1558 stats->tx.csum_overlap++; 1574 this_cpu_inc(nic->drv_stats->tx_csum_overlap);
1559 break; 1575 break;
1560 case CQ_TX_ERROP_CK_OFLOW: 1576 case CQ_TX_ERROP_CK_OFLOW:
1561 stats->tx.csum_overflow++; 1577 this_cpu_inc(nic->drv_stats->tx_csum_overflow);
1562 break; 1578 break;
1563 } 1579 }
1564 1580
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 869f3386028b..2e3c940c1093 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E {
158 CQ_TX_ERROP_DESC_FAULT = 0x10, 158 CQ_TX_ERROP_DESC_FAULT = 0x10,
159 CQ_TX_ERROP_HDR_CONS_ERR = 0x11, 159 CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
160 CQ_TX_ERROP_SUBDC_ERR = 0x12, 160 CQ_TX_ERROP_SUBDC_ERR = 0x12,
161 CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
161 CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, 162 CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
162 CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, 163 CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
163 CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, 164 CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E {
171 CQ_TX_ERROP_ENUM_LAST = 0x8a, 172 CQ_TX_ERROP_ENUM_LAST = 0x8a,
172}; 173};
173 174
174struct cmp_queue_stats {
175 struct tx_stats {
176 u64 good;
177 u64 desc_fault;
178 u64 hdr_cons_err;
179 u64 subdesc_err;
180 u64 imm_size_oflow;
181 u64 data_seq_err;
182 u64 mem_seq_err;
183 u64 lock_viol;
184 u64 data_fault;
185 u64 tstmp_conflict;
186 u64 tstmp_timeout;
187 u64 mem_fault;
188 u64 csum_overlap;
189 u64 csum_overflow;
190 } tx;
191} ____cacheline_aligned_in_smp;
192
193enum RQ_SQ_STATS { 175enum RQ_SQ_STATS {
194 RQ_SQ_STATS_OCTS, 176 RQ_SQ_STATS_OCTS,
195 RQ_SQ_STATS_PKTS, 177 RQ_SQ_STATS_PKTS,
@@ -241,7 +223,6 @@ struct cmp_queue {
241 spinlock_t lock; /* lock to serialize processing CQEs */ 223 spinlock_t lock; /* lock to serialize processing CQEs */
242 void *desc; 224 void *desc;
243 struct q_desc_mem dmem; 225 struct q_desc_mem dmem;
244 struct cmp_queue_stats stats;
245 int irq; 226 int irq;
246} ____cacheline_aligned_in_smp; 227} ____cacheline_aligned_in_smp;
247 228
@@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
336void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 317void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
337void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 318void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
338int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); 319int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
339int nicvf_check_cqe_tx_errs(struct nicvf *nic, 320int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
340 struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
341#endif /* NICVF_QUEUES_H */ 321#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 8bbaedbb7b94..050e21fbb147 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1242 1242
1243 pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); 1243 pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
1244 if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { 1244 if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
1245 bgx->bgx_id = 1245 bgx->bgx_id = (pci_resource_start(pdev,
1246 (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; 1246 PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
1247 bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; 1247 bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
1248 bgx->max_lmac = MAX_LMAC_PER_BGX; 1248 bgx->max_lmac = MAX_LMAC_PER_BGX;
1249 bgx_vnic[bgx->bgx_id] = bgx; 1249 bgx_vnic[bgx->bgx_id] = bgx;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index d59c71e4a000..01cc7c859131 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -28,6 +28,8 @@
28#define MAX_DMAC_PER_LMAC 8 28#define MAX_DMAC_PER_LMAC 8
29#define MAX_FRAME_SIZE 9216 29#define MAX_FRAME_SIZE 9216
30 30
31#define BGX_ID_MASK 0x3
32
31#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 33#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
32 34
33/* Registers */ 35/* Registers */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e74fd6085df..e19a0ca8e5dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2951 rq->cntxt_id, fl_id, 0xffff); 2951 rq->cntxt_id, fl_id, 0xffff);
2952 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, 2952 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2953 rq->desc, rq->phys_addr); 2953 rq->desc, rq->phys_addr);
2954 napi_hash_del(&rq->napi);
2955 netif_napi_del(&rq->napi); 2954 netif_napi_del(&rq->napi);
2956 rq->netdev = NULL; 2955 rq->netdev = NULL;
2957 rq->cntxt_id = rq->abs_id = 0; 2956 rq->cntxt_id = rq->abs_id = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 50812a1d67bd..ecf3ccc257bc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -168,6 +168,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
168 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 168 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
169 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ 169 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
170 CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/ 170 CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
171 CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/
171 172
172 /* T6 adapters: 173 /* T6 adapters:
173 */ 174 */
@@ -178,9 +179,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
178 CH_PCI_ID_TABLE_FENTRY(0x6005), 179 CH_PCI_ID_TABLE_FENTRY(0x6005),
179 CH_PCI_ID_TABLE_FENTRY(0x6006), 180 CH_PCI_ID_TABLE_FENTRY(0x6006),
180 CH_PCI_ID_TABLE_FENTRY(0x6007), 181 CH_PCI_ID_TABLE_FENTRY(0x6007),
182 CH_PCI_ID_TABLE_FENTRY(0x6008),
181 CH_PCI_ID_TABLE_FENTRY(0x6009), 183 CH_PCI_ID_TABLE_FENTRY(0x6009),
182 CH_PCI_ID_TABLE_FENTRY(0x600d), 184 CH_PCI_ID_TABLE_FENTRY(0x600d),
183 CH_PCI_ID_TABLE_FENTRY(0x6010),
184 CH_PCI_ID_TABLE_FENTRY(0x6011), 185 CH_PCI_ID_TABLE_FENTRY(0x6011),
185 CH_PCI_ID_TABLE_FENTRY(0x6014), 186 CH_PCI_ID_TABLE_FENTRY(0x6014),
186 CH_PCI_ID_TABLE_FENTRY(0x6015), 187 CH_PCI_ID_TABLE_FENTRY(0x6015),
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index cece8a08edca..93aa2939142a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2813,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
2813 if (eqo->q.created) { 2813 if (eqo->q.created) {
2814 be_eq_clean(eqo); 2814 be_eq_clean(eqo);
2815 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2815 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2816 napi_hash_del(&eqo->napi);
2817 netif_napi_del(&eqo->napi); 2816 netif_napi_del(&eqo->napi);
2818 free_cpumask_var(eqo->affinity_mask); 2817 free_cpumask_var(eqo->affinity_mask);
2819 } 2818 }
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c865135f3cb9..5ea740b4cf14 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -574,6 +574,8 @@ struct fec_enet_private {
574 unsigned int reload_period; 574 unsigned int reload_period;
575 int pps_enable; 575 int pps_enable;
576 unsigned int next_counter; 576 unsigned int next_counter;
577
578 u64 ethtool_stats[0];
577}; 579};
578 580
579void fec_ptp_init(struct platform_device *pdev); 581void fec_ptp_init(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5aa9d4ded214..5f77caa59534 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2313,14 +2313,24 @@ static const struct fec_stat {
2313 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2313 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2314}; 2314};
2315 2315
2316static void fec_enet_get_ethtool_stats(struct net_device *dev, 2316static void fec_enet_update_ethtool_stats(struct net_device *dev)
2317 struct ethtool_stats *stats, u64 *data)
2318{ 2317{
2319 struct fec_enet_private *fep = netdev_priv(dev); 2318 struct fec_enet_private *fep = netdev_priv(dev);
2320 int i; 2319 int i;
2321 2320
2322 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2321 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2323 data[i] = readl(fep->hwp + fec_stats[i].offset); 2322 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2323}
2324
2325static void fec_enet_get_ethtool_stats(struct net_device *dev,
2326 struct ethtool_stats *stats, u64 *data)
2327{
2328 struct fec_enet_private *fep = netdev_priv(dev);
2329
2330 if (netif_running(dev))
2331 fec_enet_update_ethtool_stats(dev);
2332
2333 memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64));
2324} 2334}
2325 2335
2326static void fec_enet_get_strings(struct net_device *netdev, 2336static void fec_enet_get_strings(struct net_device *netdev,
@@ -2874,6 +2884,8 @@ fec_enet_close(struct net_device *ndev)
2874 if (fep->quirks & FEC_QUIRK_ERR006687) 2884 if (fep->quirks & FEC_QUIRK_ERR006687)
2875 imx6q_cpuidle_fec_irqs_unused(); 2885 imx6q_cpuidle_fec_irqs_unused();
2876 2886
2887 fec_enet_update_ethtool_stats(ndev);
2888
2877 fec_enet_clk_enable(ndev, false); 2889 fec_enet_clk_enable(ndev, false);
2878 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2890 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2879 pm_runtime_mark_last_busy(&fep->pdev->dev); 2891 pm_runtime_mark_last_busy(&fep->pdev->dev);
@@ -3180,6 +3192,8 @@ static int fec_enet_init(struct net_device *ndev)
3180 3192
3181 fec_restart(ndev); 3193 fec_restart(ndev);
3182 3194
3195 fec_enet_update_ethtool_stats(ndev);
3196
3183 return 0; 3197 return 0;
3184} 3198}
3185 3199
@@ -3278,7 +3292,8 @@ fec_probe(struct platform_device *pdev)
3278 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 3292 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
3279 3293
3280 /* Init network device */ 3294 /* Init network device */
3281 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), 3295 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
3296 ARRAY_SIZE(fec_stats) * sizeof(u64),
3282 num_tx_qs, num_rx_qs); 3297 num_tx_qs, num_rx_qs);
3283 if (!ndev) 3298 if (!ndev)
3284 return -ENOMEM; 3299 return -ENOMEM;
@@ -3475,6 +3490,8 @@ failed_regulator:
3475failed_clk_ipg: 3490failed_clk_ipg:
3476 fec_enet_clk_enable(ndev, false); 3491 fec_enet_clk_enable(ndev, false);
3477failed_clk: 3492failed_clk:
3493 if (of_phy_is_fixed_link(np))
3494 of_phy_deregister_fixed_link(np);
3478failed_phy: 3495failed_phy:
3479 of_node_put(phy_node); 3496 of_node_put(phy_node);
3480failed_ioremap: 3497failed_ioremap:
@@ -3488,6 +3505,7 @@ fec_drv_remove(struct platform_device *pdev)
3488{ 3505{
3489 struct net_device *ndev = platform_get_drvdata(pdev); 3506 struct net_device *ndev = platform_get_drvdata(pdev);
3490 struct fec_enet_private *fep = netdev_priv(ndev); 3507 struct fec_enet_private *fep = netdev_priv(ndev);
3508 struct device_node *np = pdev->dev.of_node;
3491 3509
3492 cancel_work_sync(&fep->tx_timeout_work); 3510 cancel_work_sync(&fep->tx_timeout_work);
3493 fec_ptp_stop(pdev); 3511 fec_ptp_stop(pdev);
@@ -3495,6 +3513,8 @@ fec_drv_remove(struct platform_device *pdev)
3495 fec_enet_mii_remove(fep); 3513 fec_enet_mii_remove(fep);
3496 if (fep->reg_phy) 3514 if (fep->reg_phy)
3497 regulator_disable(fep->reg_phy); 3515 regulator_disable(fep->reg_phy);
3516 if (of_phy_is_fixed_link(np))
3517 of_phy_deregister_fixed_link(np);
3498 of_node_put(fep->phy_node); 3518 of_node_put(fep->phy_node);
3499 free_netdev(ndev); 3519 free_netdev(ndev);
3500 3520
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 53ef51e3bd9e..71a5ded9d1de 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1107,6 +1107,9 @@ int memac_free(struct fman_mac *memac)
1107{ 1107{
1108 free_init_resources(memac); 1108 free_init_resources(memac);
1109 1109
1110 if (memac->pcsphy)
1111 put_device(&memac->pcsphy->mdio.dev);
1112
1110 kfree(memac->memac_drv_param); 1113 kfree(memac->memac_drv_param);
1111 kfree(memac); 1114 kfree(memac);
1112 1115
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index efabb04a1ae8..4b0f3a50b293 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -722,9 +722,6 @@ int tgec_free(struct fman_mac *tgec)
722{ 722{
723 free_init_resources(tgec); 723 free_init_resources(tgec);
724 724
725 if (tgec->cfg)
726 tgec->cfg = NULL;
727
728 kfree(tgec->cfg); 725 kfree(tgec->cfg);
729 kfree(tgec); 726 kfree(tgec);
730 727
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 8fe6b3e253fa..736db9d9b0ad 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -892,6 +892,8 @@ static int mac_probe(struct platform_device *_of_dev)
892 priv->fixed_link->duplex = phy->duplex; 892 priv->fixed_link->duplex = phy->duplex;
893 priv->fixed_link->pause = phy->pause; 893 priv->fixed_link->pause = phy->pause;
894 priv->fixed_link->asym_pause = phy->asym_pause; 894 priv->fixed_link->asym_pause = phy->asym_pause;
895
896 put_device(&phy->mdio.dev);
895 } 897 }
896 898
897 err = mac_dev->init(mac_dev); 899 err = mac_dev->init(mac_dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc120c148d97..4b86260584a0 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -980,7 +980,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
980 err = clk_prepare_enable(clk); 980 err = clk_prepare_enable(clk);
981 if (err) { 981 if (err) {
982 ret = err; 982 ret = err;
983 goto out_free_fpi; 983 goto out_deregister_fixed_link;
984 } 984 }
985 fpi->clk_per = clk; 985 fpi->clk_per = clk;
986 } 986 }
@@ -1061,6 +1061,9 @@ out_put:
1061 of_node_put(fpi->phy_node); 1061 of_node_put(fpi->phy_node);
1062 if (fpi->clk_per) 1062 if (fpi->clk_per)
1063 clk_disable_unprepare(fpi->clk_per); 1063 clk_disable_unprepare(fpi->clk_per);
1064out_deregister_fixed_link:
1065 if (of_phy_is_fixed_link(ofdev->dev.of_node))
1066 of_phy_deregister_fixed_link(ofdev->dev.of_node);
1064out_free_fpi: 1067out_free_fpi:
1065 kfree(fpi); 1068 kfree(fpi);
1066 return ret; 1069 return ret;
@@ -1079,6 +1082,8 @@ static int fs_enet_remove(struct platform_device *ofdev)
1079 of_node_put(fep->fpi->phy_node); 1082 of_node_put(fep->fpi->phy_node);
1080 if (fep->fpi->clk_per) 1083 if (fep->fpi->clk_per)
1081 clk_disable_unprepare(fep->fpi->clk_per); 1084 clk_disable_unprepare(fep->fpi->clk_per);
1085 if (of_phy_is_fixed_link(ofdev->dev.of_node))
1086 of_phy_deregister_fixed_link(ofdev->dev.of_node);
1082 free_netdev(ndev); 1087 free_netdev(ndev);
1083 return 0; 1088 return 0;
1084} 1089}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b4f5bc0e279..9061c2f82b9c 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1312,6 +1312,7 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv)
1312 */ 1312 */
1313static int gfar_probe(struct platform_device *ofdev) 1313static int gfar_probe(struct platform_device *ofdev)
1314{ 1314{
1315 struct device_node *np = ofdev->dev.of_node;
1315 struct net_device *dev = NULL; 1316 struct net_device *dev = NULL;
1316 struct gfar_private *priv = NULL; 1317 struct gfar_private *priv = NULL;
1317 int err = 0, i; 1318 int err = 0, i;
@@ -1462,6 +1463,8 @@ static int gfar_probe(struct platform_device *ofdev)
1462 return 0; 1463 return 0;
1463 1464
1464register_fail: 1465register_fail:
1466 if (of_phy_is_fixed_link(np))
1467 of_phy_deregister_fixed_link(np);
1465 unmap_group_regs(priv); 1468 unmap_group_regs(priv);
1466 gfar_free_rx_queues(priv); 1469 gfar_free_rx_queues(priv);
1467 gfar_free_tx_queues(priv); 1470 gfar_free_tx_queues(priv);
@@ -1474,11 +1477,16 @@ register_fail:
1474static int gfar_remove(struct platform_device *ofdev) 1477static int gfar_remove(struct platform_device *ofdev)
1475{ 1478{
1476 struct gfar_private *priv = platform_get_drvdata(ofdev); 1479 struct gfar_private *priv = platform_get_drvdata(ofdev);
1480 struct device_node *np = ofdev->dev.of_node;
1477 1481
1478 of_node_put(priv->phy_node); 1482 of_node_put(priv->phy_node);
1479 of_node_put(priv->tbi_node); 1483 of_node_put(priv->tbi_node);
1480 1484
1481 unregister_netdev(priv->ndev); 1485 unregister_netdev(priv->ndev);
1486
1487 if (of_phy_is_fixed_link(np))
1488 of_phy_deregister_fixed_link(np);
1489
1482 unmap_group_regs(priv); 1490 unmap_group_regs(priv);
1483 gfar_free_rx_queues(priv); 1491 gfar_free_rx_queues(priv);
1484 gfar_free_tx_queues(priv); 1492 gfar_free_tx_queues(priv);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 186ef8f16c80..f76d33279454 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3868,9 +3868,8 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3868 dev = alloc_etherdev(sizeof(*ugeth)); 3868 dev = alloc_etherdev(sizeof(*ugeth));
3869 3869
3870 if (dev == NULL) { 3870 if (dev == NULL) {
3871 of_node_put(ug_info->tbi_node); 3871 err = -ENOMEM;
3872 of_node_put(ug_info->phy_node); 3872 goto err_deregister_fixed_link;
3873 return -ENOMEM;
3874 } 3873 }
3875 3874
3876 ugeth = netdev_priv(dev); 3875 ugeth = netdev_priv(dev);
@@ -3907,10 +3906,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3907 if (netif_msg_probe(ugeth)) 3906 if (netif_msg_probe(ugeth))
3908 pr_err("%s: Cannot register net device, aborting\n", 3907 pr_err("%s: Cannot register net device, aborting\n",
3909 dev->name); 3908 dev->name);
3910 free_netdev(dev); 3909 goto err_free_netdev;
3911 of_node_put(ug_info->tbi_node);
3912 of_node_put(ug_info->phy_node);
3913 return err;
3914 } 3910 }
3915 3911
3916 mac_addr = of_get_mac_address(np); 3912 mac_addr = of_get_mac_address(np);
@@ -3923,16 +3919,29 @@ static int ucc_geth_probe(struct platform_device* ofdev)
3923 ugeth->node = np; 3919 ugeth->node = np;
3924 3920
3925 return 0; 3921 return 0;
3922
3923err_free_netdev:
3924 free_netdev(dev);
3925err_deregister_fixed_link:
3926 if (of_phy_is_fixed_link(np))
3927 of_phy_deregister_fixed_link(np);
3928 of_node_put(ug_info->tbi_node);
3929 of_node_put(ug_info->phy_node);
3930
3931 return err;
3926} 3932}
3927 3933
3928static int ucc_geth_remove(struct platform_device* ofdev) 3934static int ucc_geth_remove(struct platform_device* ofdev)
3929{ 3935{
3930 struct net_device *dev = platform_get_drvdata(ofdev); 3936 struct net_device *dev = platform_get_drvdata(ofdev);
3931 struct ucc_geth_private *ugeth = netdev_priv(dev); 3937 struct ucc_geth_private *ugeth = netdev_priv(dev);
3938 struct device_node *np = ofdev->dev.of_node;
3932 3939
3933 unregister_netdev(dev); 3940 unregister_netdev(dev);
3934 free_netdev(dev); 3941 free_netdev(dev);
3935 ucc_geth_memclean(ugeth); 3942 ucc_geth_memclean(ugeth);
3943 if (of_phy_is_fixed_link(np))
3944 of_phy_deregister_fixed_link(np);
3936 of_node_put(ugeth->ug_info->tbi_node); 3945 of_node_put(ugeth->ug_info->tbi_node);
3937 of_node_put(ugeth->ug_info->phy_node); 3946 of_node_put(ugeth->ug_info->phy_node);
3938 3947
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index c54c6fac0d1d..b6ed818f78ff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
332 return ERR_PTR(-ENODEV); 332 return ERR_PTR(-ENODEV);
333 333
334 handle = dev->ops->get_handle(dev, port_id); 334 handle = dev->ops->get_handle(dev, port_id);
335 if (IS_ERR(handle)) 335 if (IS_ERR(handle)) {
336 put_device(&dev->cls_dev);
336 return handle; 337 return handle;
338 }
337 339
338 handle->dev = dev; 340 handle->dev = dev;
339 handle->owner_dev = owner_dev; 341 handle->owner_dev = owner_dev;
@@ -356,6 +358,8 @@ out_when_init_queue:
356 for (j = i - 1; j >= 0; j--) 358 for (j = i - 1; j >= 0; j--)
357 hnae_fini_queue(handle->qs[j]); 359 hnae_fini_queue(handle->qs[j]);
358 360
361 put_device(&dev->cls_dev);
362
359 return ERR_PTR(-ENOMEM); 363 return ERR_PTR(-ENOMEM);
360} 364}
361EXPORT_SYMBOL(hnae_get_handle); 365EXPORT_SYMBOL(hnae_get_handle);
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h)
377 dev->ops->put_handle(h); 381 dev->ops->put_handle(h);
378 382
379 module_put(dev->owner); 383 module_put(dev->owner);
384
385 put_device(&dev->cls_dev);
380} 386}
381EXPORT_SYMBOL(hnae_put_handle); 387EXPORT_SYMBOL(hnae_put_handle);
382 388
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 54efa9a5167b..bd719e25dd76 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev)
2446 2446
2447 netif_info(port, ifup, dev, "enabling port\n"); 2447 netif_info(port, ifup, dev, "enabling port\n");
2448 2448
2449 netif_carrier_off(dev);
2450
2449 ret = ehea_up(dev); 2451 ret = ehea_up(dev);
2450 if (!ret) { 2452 if (!ret) {
2451 port_napi_enable(port); 2453 port_napi_enable(port);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5f44c5520fbc..0fbf686f5e7c 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -74,7 +74,6 @@
74#include <asm/iommu.h> 74#include <asm/iommu.h>
75#include <linux/uaccess.h> 75#include <linux/uaccess.h>
76#include <asm/firmware.h> 76#include <asm/firmware.h>
77#include <linux/seq_file.h>
78#include <linux/workqueue.h> 77#include <linux/workqueue.h>
79 78
80#include "ibmvnic.h" 79#include "ibmvnic.h"
@@ -1505,9 +1504,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1505 adapter->max_rx_add_entries_per_subcrq > entries_page ? 1504 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1506 entries_page : adapter->max_rx_add_entries_per_subcrq; 1505 entries_page : adapter->max_rx_add_entries_per_subcrq;
1507 1506
1508 /* Choosing the maximum number of queues supported by firmware*/ 1507 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1509 adapter->req_tx_queues = adapter->max_tx_queues; 1508 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1510 adapter->req_rx_queues = adapter->max_rx_queues;
1511 adapter->req_rx_add_queues = adapter->max_rx_add_queues; 1509 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1512 1510
1513 adapter->req_mtu = adapter->max_mtu; 1511 adapter->req_mtu = adapter->max_mtu;
@@ -3706,7 +3704,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3706 struct net_device *netdev; 3704 struct net_device *netdev;
3707 unsigned char *mac_addr_p; 3705 unsigned char *mac_addr_p;
3708 struct dentry *ent; 3706 struct dentry *ent;
3709 char buf[16]; /* debugfs name buf */ 3707 char buf[17]; /* debugfs name buf */
3710 int rc; 3708 int rc;
3711 3709
3712 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", 3710 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -3845,6 +3843,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
3845 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) 3843 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3846 debugfs_remove_recursive(adapter->debugfs_dir); 3844 debugfs_remove_recursive(adapter->debugfs_dir);
3847 3845
3846 dma_unmap_single(&dev->dev, adapter->stats_token,
3847 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3848
3848 if (adapter->ras_comps) 3849 if (adapter->ras_comps)
3849 dma_free_coherent(&dev->dev, 3850 dma_free_coherent(&dev->dev,
3850 adapter->ras_comp_num * 3851 adapter->ras_comp_num *
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index edc9a6ac5169..9affd7c198bd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4931,11 +4931,15 @@ static int igb_tso(struct igb_ring *tx_ring,
4931 4931
4932 /* initialize outer IP header fields */ 4932 /* initialize outer IP header fields */
4933 if (ip.v4->version == 4) { 4933 if (ip.v4->version == 4) {
4934 unsigned char *csum_start = skb_checksum_start(skb);
4935 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
4936
4934 /* IP header will have to cancel out any data that 4937 /* IP header will have to cancel out any data that
4935 * is not a part of the outer IP header 4938 * is not a part of the outer IP header
4936 */ 4939 */
4937 ip.v4->check = csum_fold(csum_add(lco_csum(skb), 4940 ip.v4->check = csum_fold(csum_partial(trans_start,
4938 csum_unfold(l4.tcp->check))); 4941 csum_start - trans_start,
4942 0));
4939 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 4943 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4940 4944
4941 ip.v4->tot_len = 0; 4945 ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 12bb877df860..7dff7f6239cd 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1965,11 +1965,15 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
1965 1965
1966 /* initialize outer IP header fields */ 1966 /* initialize outer IP header fields */
1967 if (ip.v4->version == 4) { 1967 if (ip.v4->version == 4) {
1968 unsigned char *csum_start = skb_checksum_start(skb);
1969 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1970
1968 /* IP header will have to cancel out any data that 1971 /* IP header will have to cancel out any data that
1969 * is not a part of the outer IP header 1972 * is not a part of the outer IP header
1970 */ 1973 */
1971 ip.v4->check = csum_fold(csum_add(lco_csum(skb), 1974 ip.v4->check = csum_fold(csum_partial(trans_start,
1972 csum_unfold(l4.tcp->check))); 1975 csum_start - trans_start,
1976 0));
1973 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 1977 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
1974 1978
1975 ip.v4->tot_len = 0; 1979 ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bd93d823cc25..fee1f2918ead 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7277,11 +7277,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
7277 7277
7278 /* initialize outer IP header fields */ 7278 /* initialize outer IP header fields */
7279 if (ip.v4->version == 4) { 7279 if (ip.v4->version == 4) {
7280 unsigned char *csum_start = skb_checksum_start(skb);
7281 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
7282
7280 /* IP header will have to cancel out any data that 7283 /* IP header will have to cancel out any data that
7281 * is not a part of the outer IP header 7284 * is not a part of the outer IP header
7282 */ 7285 */
7283 ip.v4->check = csum_fold(csum_add(lco_csum(skb), 7286 ip.v4->check = csum_fold(csum_partial(trans_start,
7284 csum_unfold(l4.tcp->check))); 7287 csum_start - trans_start,
7288 0));
7285 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 7289 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7286 7290
7287 ip.v4->tot_len = 0; 7291 ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 7eaac3234049..cbf70fe4028a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3329,11 +3329,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3329 3329
3330 /* initialize outer IP header fields */ 3330 /* initialize outer IP header fields */
3331 if (ip.v4->version == 4) { 3331 if (ip.v4->version == 4) {
3332 unsigned char *csum_start = skb_checksum_start(skb);
3333 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3334
3332 /* IP header will have to cancel out any data that 3335 /* IP header will have to cancel out any data that
3333 * is not a part of the outer IP header 3336 * is not a part of the outer IP header
3334 */ 3337 */
3335 ip.v4->check = csum_fold(csum_add(lco_csum(skb), 3338 ip.v4->check = csum_fold(csum_partial(trans_start,
3336 csum_unfold(l4.tcp->check))); 3339 csum_start - trans_start,
3340 0));
3337 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 3341 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3338 3342
3339 ip.v4->tot_len = 0; 3343 ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index bf5cc55ba24c..5b12022adf1f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1381 temp = (val & 0x003fff00) >> 8; 1381 temp = (val & 0x003fff00) >> 8;
1382 1382
1383 temp *= 64000000; 1383 temp *= 64000000;
1384 temp += mp->t_clk / 2;
1384 do_div(temp, mp->t_clk); 1385 do_div(temp, mp->t_clk);
1385 1386
1386 return (unsigned int)temp; 1387 return (unsigned int)temp;
@@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1417 1418
1418 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1419 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1419 temp *= 64000000; 1420 temp *= 64000000;
1421 temp += mp->t_clk / 2;
1420 do_div(temp, mp->t_clk); 1422 do_div(temp, mp->t_clk);
1421 1423
1422 return (unsigned int)temp; 1424 return (unsigned int)temp;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5cb07c2017bf..707bc4680b9b 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4151,7 +4151,7 @@ static int mvneta_probe(struct platform_device *pdev)
4151 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 4151 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
4152 dev->hw_features |= dev->features; 4152 dev->hw_features |= dev->features;
4153 dev->vlan_features |= dev->features; 4153 dev->vlan_features |= dev->features;
4154 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; 4154 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4155 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; 4155 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
4156 4156
4157 err = register_netdev(dev); 4157 err = register_netdev(dev);
@@ -4191,6 +4191,8 @@ err_clk:
4191 clk_disable_unprepare(pp->clk); 4191 clk_disable_unprepare(pp->clk);
4192err_put_phy_node: 4192err_put_phy_node:
4193 of_node_put(phy_node); 4193 of_node_put(phy_node);
4194 if (of_phy_is_fixed_link(dn))
4195 of_phy_deregister_fixed_link(dn);
4194err_free_irq: 4196err_free_irq:
4195 irq_dispose_mapping(dev->irq); 4197 irq_dispose_mapping(dev->irq);
4196err_free_netdev: 4198err_free_netdev:
@@ -4202,6 +4204,7 @@ err_free_netdev:
4202static int mvneta_remove(struct platform_device *pdev) 4204static int mvneta_remove(struct platform_device *pdev)
4203{ 4205{
4204 struct net_device *dev = platform_get_drvdata(pdev); 4206 struct net_device *dev = platform_get_drvdata(pdev);
4207 struct device_node *dn = pdev->dev.of_node;
4205 struct mvneta_port *pp = netdev_priv(dev); 4208 struct mvneta_port *pp = netdev_priv(dev);
4206 4209
4207 unregister_netdev(dev); 4210 unregister_netdev(dev);
@@ -4209,6 +4212,8 @@ static int mvneta_remove(struct platform_device *pdev)
4209 clk_disable_unprepare(pp->clk); 4212 clk_disable_unprepare(pp->clk);
4210 free_percpu(pp->ports); 4213 free_percpu(pp->ports);
4211 free_percpu(pp->stats); 4214 free_percpu(pp->stats);
4215 if (of_phy_is_fixed_link(dn))
4216 of_phy_deregister_fixed_link(dn);
4212 irq_dispose_mapping(dev->irq); 4217 irq_dispose_mapping(dev->irq);
4213 of_node_put(pp->phy_node); 4218 of_node_put(pp->phy_node);
4214 free_netdev(dev); 4219 free_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 60227a3452a4..1026c452e39d 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3293,7 +3293,7 @@ static void mvpp2_cls_init(struct mvpp2 *priv)
3293 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 3293 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3294 3294
3295 /* Clear classifier flow table */ 3295 /* Clear classifier flow table */
3296 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 3296 memset(&fe.data, 0, sizeof(fe.data));
3297 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 3297 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3298 fe.index = index; 3298 fe.index = index;
3299 mvpp2_cls_flow_write(priv, &fe); 3299 mvpp2_cls_flow_write(priv, &fe);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index f05ea56dcff2..941c8e2c944e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
5220 5220
5221static void sky2_shutdown(struct pci_dev *pdev) 5221static void sky2_shutdown(struct pci_dev *pdev)
5222{ 5222{
5223 struct sky2_hw *hw = pci_get_drvdata(pdev);
5224 int port;
5225
5226 for (port = 0; port < hw->ports; port++) {
5227 struct net_device *ndev = hw->dev[port];
5228
5229 rtnl_lock();
5230 if (netif_running(ndev)) {
5231 dev_close(ndev);
5232 netif_device_detach(ndev);
5233 }
5234 rtnl_unlock();
5235 }
5223 sky2_suspend(&pdev->dev); 5236 sky2_suspend(&pdev->dev);
5224 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); 5237 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
5225 pci_set_power_state(pdev, PCI_D3hot); 5238 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4a62ffd7729d..86a89cbd3ec9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -318,6 +318,8 @@ static int mtk_phy_connect(struct net_device *dev)
318 return 0; 318 return 0;
319 319
320err_phy: 320err_phy:
321 if (of_phy_is_fixed_link(mac->of_node))
322 of_phy_deregister_fixed_link(mac->of_node);
321 of_node_put(np); 323 of_node_put(np);
322 dev_err(eth->dev, "%s: invalid phy\n", __func__); 324 dev_err(eth->dev, "%s: invalid phy\n", __func__);
323 return -EINVAL; 325 return -EINVAL;
@@ -1923,6 +1925,8 @@ static void mtk_uninit(struct net_device *dev)
1923 struct mtk_eth *eth = mac->hw; 1925 struct mtk_eth *eth = mac->hw;
1924 1926
1925 phy_disconnect(dev->phydev); 1927 phy_disconnect(dev->phydev);
1928 if (of_phy_is_fixed_link(mac->of_node))
1929 of_phy_deregister_fixed_link(mac->of_node);
1926 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); 1930 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
1927 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); 1931 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
1928} 1932}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 12c99a2655f2..fb8bb027b69c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -129,6 +129,9 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
129 } 129 }
130}; 130};
131 131
132/* Must not acquire state_lock, as its corresponding work_sync
133 * is done under it.
134 */
132static void mlx4_en_filter_work(struct work_struct *work) 135static void mlx4_en_filter_work(struct work_struct *work)
133{ 136{
134 struct mlx4_en_filter *filter = container_of(work, 137 struct mlx4_en_filter *filter = container_of(work,
@@ -2076,13 +2079,6 @@ err:
2076 return -ENOMEM; 2079 return -ENOMEM;
2077} 2080}
2078 2081
2079static void mlx4_en_shutdown(struct net_device *dev)
2080{
2081 rtnl_lock();
2082 netif_device_detach(dev);
2083 mlx4_en_close(dev);
2084 rtnl_unlock();
2085}
2086 2082
2087static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, 2083static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
2088 struct mlx4_en_priv *src, 2084 struct mlx4_en_priv *src,
@@ -2159,8 +2155,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2159{ 2155{
2160 struct mlx4_en_priv *priv = netdev_priv(dev); 2156 struct mlx4_en_priv *priv = netdev_priv(dev);
2161 struct mlx4_en_dev *mdev = priv->mdev; 2157 struct mlx4_en_dev *mdev = priv->mdev;
2162 bool shutdown = mdev->dev->persist->interface_state &
2163 MLX4_INTERFACE_STATE_SHUTDOWN;
2164 2158
2165 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 2159 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
2166 2160
@@ -2168,10 +2162,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2168 if (priv->registered) { 2162 if (priv->registered) {
2169 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, 2163 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2170 priv->port)); 2164 priv->port));
2171 if (shutdown) 2165 unregister_netdev(dev);
2172 mlx4_en_shutdown(dev);
2173 else
2174 unregister_netdev(dev);
2175 } 2166 }
2176 2167
2177 if (priv->allocated) 2168 if (priv->allocated)
@@ -2189,20 +2180,18 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
2189 mutex_lock(&mdev->state_lock); 2180 mutex_lock(&mdev->state_lock);
2190 mdev->pndev[priv->port] = NULL; 2181 mdev->pndev[priv->port] = NULL;
2191 mdev->upper[priv->port] = NULL; 2182 mdev->upper[priv->port] = NULL;
2192 mutex_unlock(&mdev->state_lock);
2193 2183
2194#ifdef CONFIG_RFS_ACCEL 2184#ifdef CONFIG_RFS_ACCEL
2195 mlx4_en_cleanup_filters(priv); 2185 mlx4_en_cleanup_filters(priv);
2196#endif 2186#endif
2197 2187
2198 mlx4_en_free_resources(priv); 2188 mlx4_en_free_resources(priv);
2189 mutex_unlock(&mdev->state_lock);
2199 2190
2200 kfree(priv->tx_ring); 2191 kfree(priv->tx_ring);
2201 kfree(priv->tx_cq); 2192 kfree(priv->tx_cq);
2202 2193
2203 if (!shutdown) 2194 free_netdev(dev);
2204 free_netdev(dev);
2205 dev->ethtool_ops = NULL;
2206} 2195}
2207 2196
2208static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 2197static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6f4e67bc3538..75d07fa9d0b1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4147,11 +4147,8 @@ static void mlx4_shutdown(struct pci_dev *pdev)
4147 4147
4148 mlx4_info(persist->dev, "mlx4_shutdown was called\n"); 4148 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
4149 mutex_lock(&persist->interface_state_mutex); 4149 mutex_lock(&persist->interface_state_mutex);
4150 if (persist->interface_state & MLX4_INTERFACE_STATE_UP) { 4150 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
4151 /* Notify mlx4 clients that the kernel is being shut down */
4152 persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
4153 mlx4_unload_one(pdev); 4151 mlx4_unload_one(pdev);
4154 }
4155 mutex_unlock(&persist->interface_state_mutex); 4152 mutex_unlock(&persist->interface_state_mutex);
4156} 4153}
4157 4154
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 94b891c118c1..1a670b681555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1457,7 +1457,12 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
1457int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, 1457int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
1458 u32 qpn, enum mlx4_net_trans_promisc_mode mode) 1458 u32 qpn, enum mlx4_net_trans_promisc_mode mode)
1459{ 1459{
1460 struct mlx4_net_trans_rule rule; 1460 struct mlx4_net_trans_rule rule = {
1461 .queue_mode = MLX4_NET_TRANS_Q_FIFO,
1462 .exclusive = 0,
1463 .allow_loopback = 1,
1464 };
1465
1461 u64 *regid_p; 1466 u64 *regid_p;
1462 1467
1463 switch (mode) { 1468 switch (mode) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f4c687ce4c59..84e8b250e2af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1445 c->netdev = priv->netdev; 1445 c->netdev = priv->netdev;
1446 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); 1446 c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
1447 c->num_tc = priv->params.num_tc; 1447 c->num_tc = priv->params.num_tc;
1448 c->xdp = !!priv->xdp_prog;
1448 1449
1449 if (priv->params.rx_am_enabled) 1450 if (priv->params.rx_am_enabled)
1450 rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); 1451 rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1468 if (err) 1469 if (err)
1469 goto err_close_tx_cqs; 1470 goto err_close_tx_cqs;
1470 1471
1472 /* XDP SQ CQ params are same as normal TXQ sq CQ params */
1473 err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
1474 priv->params.tx_cq_moderation) : 0;
1475 if (err)
1476 goto err_close_rx_cq;
1477
1471 napi_enable(&c->napi); 1478 napi_enable(&c->napi);
1472 1479
1473 err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); 1480 err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1488 } 1495 }
1489 } 1496 }
1490 1497
1491 if (priv->xdp_prog) { 1498 err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
1492 /* XDP SQ CQ params are same as normal TXQ sq CQ params */ 1499 if (err)
1493 err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, 1500 goto err_close_sqs;
1494 priv->params.tx_cq_moderation);
1495 if (err)
1496 goto err_close_sqs;
1497
1498 err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
1499 if (err) {
1500 mlx5e_close_cq(&c->xdp_sq.cq);
1501 goto err_close_sqs;
1502 }
1503 }
1504 1501
1505 c->xdp = !!priv->xdp_prog;
1506 err = mlx5e_open_rq(c, &cparam->rq, &c->rq); 1502 err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1507 if (err) 1503 if (err)
1508 goto err_close_xdp_sq; 1504 goto err_close_xdp_sq;
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1512 1508
1513 return 0; 1509 return 0;
1514err_close_xdp_sq: 1510err_close_xdp_sq:
1515 mlx5e_close_sq(&c->xdp_sq); 1511 if (c->xdp)
1512 mlx5e_close_sq(&c->xdp_sq);
1516 1513
1517err_close_sqs: 1514err_close_sqs:
1518 mlx5e_close_sqs(c); 1515 mlx5e_close_sqs(c);
@@ -1522,6 +1519,10 @@ err_close_icosq:
1522 1519
1523err_disable_napi: 1520err_disable_napi:
1524 napi_disable(&c->napi); 1521 napi_disable(&c->napi);
1522 if (c->xdp)
1523 mlx5e_close_cq(&c->xdp_sq.cq);
1524
1525err_close_rx_cq:
1525 mlx5e_close_cq(&c->rq.cq); 1526 mlx5e_close_cq(&c->rq.cq);
1526 1527
1527err_close_tx_cqs: 1528err_close_tx_cqs:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7fe6559e4ab3..bf1c09ca73c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
308 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; 308 netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
309#endif 309#endif
310 310
311 netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; 311 netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
312 netdev->hw_features |= NETIF_F_HW_TC; 312 netdev->hw_features |= NETIF_F_HW_TC;
313 313
314 eth_hw_addr_random(netdev); 314 eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ce8c54d18906..6bb21b31cfeb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
237 skb_flow_dissector_target(f->dissector, 237 skb_flow_dissector_target(f->dissector,
238 FLOW_DISSECTOR_KEY_VLAN, 238 FLOW_DISSECTOR_KEY_VLAN,
239 f->mask); 239 f->mask);
240 if (mask->vlan_id) { 240 if (mask->vlan_id || mask->vlan_priority) {
241 MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); 241 MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
242 MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); 242 MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
243 243
244 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); 244 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
245 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); 245 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
246
247 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
248 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
246 } 249 }
247 } 250 }
248 251
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c55ad8d00c05..d239f5d0ea36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
57 if (esw->mode != SRIOV_OFFLOADS) 57 if (esw->mode != SRIOV_OFFLOADS)
58 return ERR_PTR(-EOPNOTSUPP); 58 return ERR_PTR(-EOPNOTSUPP);
59 59
60 action = attr->action; 60 /* per flow vlan pop/push is emulated, don't set that into the firmware */
61 action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
61 62
62 if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 63 if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
63 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 64 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 89696048b045..914e5466f729 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1690,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering)
1690{ 1690{
1691 1691
1692 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); 1692 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
1693 if (IS_ERR_OR_NULL(steering->root_ns)) 1693 if (!steering->root_ns)
1694 goto cleanup; 1694 goto cleanup;
1695 1695
1696 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) 1696 if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d5433c49b2b0..3b7c6a9f2b5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -46,7 +46,6 @@
46#include <linux/mlx5/srq.h> 46#include <linux/mlx5/srq.h>
47#include <linux/debugfs.h> 47#include <linux/debugfs.h>
48#include <linux/kmod.h> 48#include <linux/kmod.h>
49#include <linux/delay.h>
50#include <linux/mlx5/mlx5_ifc.h> 49#include <linux/mlx5/mlx5_ifc.h>
51#ifdef CONFIG_RFS_ACCEL 50#ifdef CONFIG_RFS_ACCEL
52#include <linux/cpu_rmap.h> 51#include <linux/cpu_rmap.h>
@@ -1226,6 +1225,9 @@ static int init_one(struct pci_dev *pdev,
1226 1225
1227 pci_set_drvdata(pdev, dev); 1226 pci_set_drvdata(pdev, dev);
1228 1227
1228 dev->pdev = pdev;
1229 dev->event = mlx5_core_event;
1230
1229 if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { 1231 if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
1230 mlx5_core_warn(dev, 1232 mlx5_core_warn(dev,
1231 "selected profile out of range, selecting default (%d)\n", 1233 "selected profile out of range, selecting default (%d)\n",
@@ -1233,8 +1235,6 @@ static int init_one(struct pci_dev *pdev,
1233 prof_sel = MLX5_DEFAULT_PROF; 1235 prof_sel = MLX5_DEFAULT_PROF;
1234 } 1236 }
1235 dev->profile = &profile[prof_sel]; 1237 dev->profile = &profile[prof_sel];
1236 dev->pdev = pdev;
1237 dev->event = mlx5_core_event;
1238 1238
1239 INIT_LIST_HEAD(&priv->ctx_list); 1239 INIT_LIST_HEAD(&priv->ctx_list);
1240 spin_lock_init(&priv->ctx_lock); 1240 spin_lock_init(&priv->ctx_lock);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1ec0a4ce3c46..dda5761e91bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
231 231
232 span_entry->used = true; 232 span_entry->used = true;
233 span_entry->id = index; 233 span_entry->id = index;
234 span_entry->ref_count = 0; 234 span_entry->ref_count = 1;
235 span_entry->local_port = local_port; 235 span_entry->local_port = local_port;
236 return span_entry; 236 return span_entry;
237} 237}
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry
270 270
271 span_entry = mlxsw_sp_span_entry_find(port); 271 span_entry = mlxsw_sp_span_entry_find(port);
272 if (span_entry) { 272 if (span_entry) {
273 /* Already exists, just take a reference */
273 span_entry->ref_count++; 274 span_entry->ref_count++;
274 return span_entry; 275 return span_entry;
275 } 276 }
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry
280static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, 281static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
281 struct mlxsw_sp_span_entry *span_entry) 282 struct mlxsw_sp_span_entry *span_entry)
282{ 283{
284 WARN_ON(!span_entry->ref_count);
283 if (--span_entry->ref_count == 0) 285 if (--span_entry->ref_count == 0)
284 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); 286 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
285 return 0; 287 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 9b22863a924b..97bbc1d21df8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif {
115struct mlxsw_sp_mid { 115struct mlxsw_sp_mid {
116 struct list_head list; 116 struct list_head list;
117 unsigned char addr[ETH_ALEN]; 117 unsigned char addr[ETH_ALEN];
118 u16 vid; 118 u16 fid;
119 u16 mid; 119 u16 mid;
120 unsigned int ref_count; 120 unsigned int ref_count;
121}; 121};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4573da2c5560..e83072da6272 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -594,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
594 return 0; 594 return 0;
595} 595}
596 596
597static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
598
597static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) 599static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
598{ 600{
601 mlxsw_sp_router_fib_flush(mlxsw_sp);
599 kfree(mlxsw_sp->router.vrs); 602 kfree(mlxsw_sp->router.vrs);
600} 603}
601 604
602struct mlxsw_sp_neigh_key { 605struct mlxsw_sp_neigh_key {
603 unsigned char addr[sizeof(struct in6_addr)]; 606 struct neighbour *n;
604 struct net_device *dev;
605}; 607};
606 608
607struct mlxsw_sp_neigh_entry { 609struct mlxsw_sp_neigh_entry {
608 struct rhash_head ht_node; 610 struct rhash_head ht_node;
609 struct mlxsw_sp_neigh_key key; 611 struct mlxsw_sp_neigh_key key;
610 u16 rif; 612 u16 rif;
611 struct neighbour *n;
612 bool offloaded; 613 bool offloaded;
613 struct delayed_work dw; 614 struct delayed_work dw;
614 struct mlxsw_sp_port *mlxsw_sp_port; 615 struct mlxsw_sp_port *mlxsw_sp_port;
@@ -646,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
646static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); 647static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
647 648
648static struct mlxsw_sp_neigh_entry * 649static struct mlxsw_sp_neigh_entry *
649mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, 650mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
650 struct net_device *dev, u16 rif,
651 struct neighbour *n)
652{ 651{
653 struct mlxsw_sp_neigh_entry *neigh_entry; 652 struct mlxsw_sp_neigh_entry *neigh_entry;
654 653
655 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); 654 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
656 if (!neigh_entry) 655 if (!neigh_entry)
657 return NULL; 656 return NULL;
658 memcpy(neigh_entry->key.addr, addr, addr_len); 657 neigh_entry->key.n = n;
659 neigh_entry->key.dev = dev;
660 neigh_entry->rif = rif; 658 neigh_entry->rif = rif;
661 neigh_entry->n = n;
662 INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); 659 INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
663 INIT_LIST_HEAD(&neigh_entry->nexthop_list); 660 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
664 return neigh_entry; 661 return neigh_entry;
@@ -671,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
671} 668}
672 669
673static struct mlxsw_sp_neigh_entry * 670static struct mlxsw_sp_neigh_entry *
674mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, 671mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
675 size_t addr_len, struct net_device *dev)
676{ 672{
677 struct mlxsw_sp_neigh_key key = {{ 0 } }; 673 struct mlxsw_sp_neigh_key key;
678 674
679 memcpy(key.addr, addr, addr_len); 675 key.n = n;
680 key.dev = dev;
681 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, 676 return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
682 &key, mlxsw_sp_neigh_ht_params); 677 &key, mlxsw_sp_neigh_ht_params);
683} 678}
@@ -689,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
689 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
690 struct mlxsw_sp_neigh_entry *neigh_entry; 685 struct mlxsw_sp_neigh_entry *neigh_entry;
691 struct mlxsw_sp_rif *r; 686 struct mlxsw_sp_rif *r;
692 u32 dip;
693 int err; 687 int err;
694 688
695 if (n->tbl != &arp_tbl) 689 if (n->tbl != &arp_tbl)
696 return 0; 690 return 0;
697 691
698 dip = ntohl(*((__be32 *) n->primary_key)); 692 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
699 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), 693 if (neigh_entry)
700 n->dev);
701 if (neigh_entry) {
702 WARN_ON(neigh_entry->n != n);
703 return 0; 694 return 0;
704 }
705 695
706 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); 696 r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
707 if (WARN_ON(!r)) 697 if (WARN_ON(!r))
708 return -EINVAL; 698 return -EINVAL;
709 699
710 neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, 700 neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
711 r->rif, n);
712 if (!neigh_entry) 701 if (!neigh_entry)
713 return -ENOMEM; 702 return -ENOMEM;
714 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); 703 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
@@ -727,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
727 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); 716 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
728 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 717 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
729 struct mlxsw_sp_neigh_entry *neigh_entry; 718 struct mlxsw_sp_neigh_entry *neigh_entry;
730 u32 dip;
731 719
732 if (n->tbl != &arp_tbl) 720 if (n->tbl != &arp_tbl)
733 return; 721 return;
734 722
735 dip = ntohl(*((__be32 *) n->primary_key)); 723 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
736 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
737 n->dev);
738 if (!neigh_entry) 724 if (!neigh_entry)
739 return; 725 return;
740 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); 726 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
@@ -817,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
817 } 803 }
818} 804}
819 805
806static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
807{
808 u8 num_rec, last_rec_index, num_entries;
809
810 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
811 last_rec_index = num_rec - 1;
812
813 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
814 return false;
815 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
816 MLXSW_REG_RAUHTD_TYPE_IPV6)
817 return true;
818
819 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
820 last_rec_index);
821 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
822 return true;
823 return false;
824}
825
820static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) 826static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
821{ 827{
822 char *rauhtd_pl; 828 char *rauhtd_pl;
@@ -843,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
843 for (i = 0; i < num_rec; i++) 849 for (i = 0; i < num_rec; i++)
844 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, 850 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
845 i); 851 i);
846 } while (num_rec); 852 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
847 rtnl_unlock(); 853 rtnl_unlock();
848 854
849 kfree(rauhtd_pl); 855 kfree(rauhtd_pl);
@@ -862,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
862 * is active regardless of the traffic. 868 * is active regardless of the traffic.
863 */ 869 */
864 if (!list_empty(&neigh_entry->nexthop_list)) 870 if (!list_empty(&neigh_entry->nexthop_list))
865 neigh_event_send(neigh_entry->n, NULL); 871 neigh_event_send(neigh_entry->key.n, NULL);
866 } 872 }
867 rtnl_unlock(); 873 rtnl_unlock();
868} 874}
@@ -908,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
908 rtnl_lock(); 914 rtnl_lock();
909 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, 915 list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
910 nexthop_neighs_list_node) { 916 nexthop_neighs_list_node) {
911 if (!(neigh_entry->n->nud_state & NUD_VALID) && 917 if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
912 !list_empty(&neigh_entry->nexthop_list)) 918 !list_empty(&neigh_entry->nexthop_list))
913 neigh_event_send(neigh_entry->n, NULL); 919 neigh_event_send(neigh_entry->key.n, NULL);
914 } 920 }
915 rtnl_unlock(); 921 rtnl_unlock();
916 922
@@ -927,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
927{ 933{
928 struct mlxsw_sp_neigh_entry *neigh_entry = 934 struct mlxsw_sp_neigh_entry *neigh_entry =
929 container_of(work, struct mlxsw_sp_neigh_entry, dw.work); 935 container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
930 struct neighbour *n = neigh_entry->n; 936 struct neighbour *n = neigh_entry->key.n;
931 struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; 937 struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
932 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 938 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
933 char rauht_pl[MLXSW_REG_RAUHT_LEN]; 939 char rauht_pl[MLXSW_REG_RAUHT_LEN];
@@ -1030,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
1030 1036
1031 mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 1037 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1032 dip = ntohl(*((__be32 *) n->primary_key)); 1038 dip = ntohl(*((__be32 *) n->primary_key));
1033 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, 1039 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1034 &dip, 1040 if (WARN_ON(!neigh_entry)) {
1035 sizeof(__be32),
1036 dev);
1037 if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
1038 mlxsw_sp_port_dev_put(mlxsw_sp_port); 1041 mlxsw_sp_port_dev_put(mlxsw_sp_port);
1039 return NOTIFY_DONE; 1042 return NOTIFY_DONE;
1040 } 1043 }
@@ -1343,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
1343 struct fib_nh *fib_nh) 1346 struct fib_nh *fib_nh)
1344{ 1347{
1345 struct mlxsw_sp_neigh_entry *neigh_entry; 1348 struct mlxsw_sp_neigh_entry *neigh_entry;
1346 u32 gwip = ntohl(fib_nh->nh_gw);
1347 struct net_device *dev = fib_nh->nh_dev; 1349 struct net_device *dev = fib_nh->nh_dev;
1348 struct neighbour *n; 1350 struct neighbour *n;
1349 u8 nud_state; 1351 u8 nud_state;
1350 1352
1351 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, 1353 /* Take a reference of neigh here ensuring that neigh would
1352 sizeof(gwip), dev); 1354 * not be detructed before the nexthop entry is finished.
1353 if (!neigh_entry) { 1355 * The reference is taken either in neigh_lookup() or
1354 __be32 gwipn = htonl(gwip); 1356 * in neith_create() in case n is not found.
1355 1357 */
1356 n = neigh_create(&arp_tbl, &gwipn, dev); 1358 n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
1359 if (!n) {
1360 n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
1357 if (IS_ERR(n)) 1361 if (IS_ERR(n))
1358 return PTR_ERR(n); 1362 return PTR_ERR(n);
1359 neigh_event_send(n, NULL); 1363 neigh_event_send(n, NULL);
1360 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, 1364 }
1361 sizeof(gwip), dev); 1365 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
1362 if (!neigh_entry) { 1366 if (!neigh_entry) {
1363 neigh_release(n); 1367 neigh_release(n);
1364 return -EINVAL; 1368 return -EINVAL;
1365 }
1366 } else {
1367 /* Take a reference of neigh here ensuring that neigh would
1368 * not be detructed before the nexthop entry is finished.
1369 * The second branch takes the reference in neith_create()
1370 */
1371 n = neigh_entry->n;
1372 neigh_clone(n);
1373 } 1369 }
1374 1370
1375 /* If that is the first nexthop connected to that neigh, add to 1371 /* If that is the first nexthop connected to that neigh, add to
@@ -1403,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
1403 if (list_empty(&nh->neigh_entry->nexthop_list)) 1399 if (list_empty(&nh->neigh_entry->nexthop_list))
1404 list_del(&nh->neigh_entry->nexthop_neighs_list_node); 1400 list_del(&nh->neigh_entry->nexthop_neighs_list_node);
1405 1401
1406 neigh_release(neigh_entry->n); 1402 neigh_release(neigh_entry->key.n);
1407} 1403}
1408 1404
1409static struct mlxsw_sp_nexthop_group * 1405static struct mlxsw_sp_nexthop_group *
@@ -1463,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
1463 1459
1464 for (i = 0; i < fi->fib_nhs; i++) { 1460 for (i = 0; i < fi->fib_nhs; i++) {
1465 struct fib_nh *fib_nh = &fi->fib_nh[i]; 1461 struct fib_nh *fib_nh = &fi->fib_nh[i];
1466 u32 gwip = ntohl(fib_nh->nh_gw); 1462 struct neighbour *n = nh->neigh_entry->key.n;
1467 1463
1468 if (memcmp(nh->neigh_entry->key.addr, 1464 if (memcmp(n->primary_key, &fib_nh->nh_gw,
1469 &gwip, sizeof(u32)) == 0 && 1465 sizeof(fib_nh->nh_gw)) == 0 &&
1470 nh->neigh_entry->key.dev == fib_nh->nh_dev) 1466 n->dev == fib_nh->nh_dev)
1471 return true; 1467 return true;
1472 } 1468 }
1473 return false; 1469 return false;
@@ -1874,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
1874 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); 1870 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
1875} 1871}
1876 1872
1877static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) 1873static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
1878{ 1874{
1879 struct mlxsw_resources *resources; 1875 struct mlxsw_resources *resources;
1880 struct mlxsw_sp_fib_entry *fib_entry; 1876 struct mlxsw_sp_fib_entry *fib_entry;
1881 struct mlxsw_sp_fib_entry *tmp; 1877 struct mlxsw_sp_fib_entry *tmp;
1882 struct mlxsw_sp_vr *vr; 1878 struct mlxsw_sp_vr *vr;
1883 int i; 1879 int i;
1884 int err;
1885 1880
1886 resources = mlxsw_core_resources_get(mlxsw_sp->core); 1881 resources = mlxsw_core_resources_get(mlxsw_sp->core);
1887 for (i = 0; i < resources->max_virtual_routers; i++) { 1882 for (i = 0; i < resources->max_virtual_routers; i++) {
1888 vr = &mlxsw_sp->router.vrs[i]; 1883 vr = &mlxsw_sp->router.vrs[i];
1884
1889 if (!vr->used) 1885 if (!vr->used)
1890 continue; 1886 continue;
1891 1887
@@ -1901,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
1901 break; 1897 break;
1902 } 1898 }
1903 } 1899 }
1900}
1901
1902static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
1903{
1904 int err;
1905
1906 mlxsw_sp_router_fib_flush(mlxsw_sp);
1904 mlxsw_sp->router.aborted = true; 1907 mlxsw_sp->router.aborted = true;
1905 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); 1908 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
1906 if (err) 1909 if (err)
@@ -1958,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
1958 struct fib_entry_notifier_info *fen_info = ptr; 1961 struct fib_entry_notifier_info *fen_info = ptr;
1959 int err; 1962 int err;
1960 1963
1964 if (!net_eq(fen_info->info.net, &init_net))
1965 return NOTIFY_DONE;
1966
1961 switch (event) { 1967 switch (event) {
1962 case FIB_EVENT_ENTRY_ADD: 1968 case FIB_EVENT_ENTRY_ADD:
1963 err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); 1969 err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 5e00c79e8133..1e2c8eca3af1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
929 929
930static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, 930static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
931 const unsigned char *addr, 931 const unsigned char *addr,
932 u16 vid) 932 u16 fid)
933{ 933{
934 struct mlxsw_sp_mid *mid; 934 struct mlxsw_sp_mid *mid;
935 935
936 list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { 936 list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
937 if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) 937 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
938 return mid; 938 return mid;
939 } 939 }
940 return NULL; 940 return NULL;
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
942 942
943static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, 943static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
944 const unsigned char *addr, 944 const unsigned char *addr,
945 u16 vid) 945 u16 fid)
946{ 946{
947 struct mlxsw_sp_mid *mid; 947 struct mlxsw_sp_mid *mid;
948 u16 mid_idx; 948 u16 mid_idx;
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
958 958
959 set_bit(mid_idx, mlxsw_sp->br_mids.mapped); 959 set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
960 ether_addr_copy(mid->addr, addr); 960 ether_addr_copy(mid->addr, addr);
961 mid->vid = vid; 961 mid->fid = fid;
962 mid->mid = mid_idx; 962 mid->mid = mid_idx;
963 mid->ref_count = 0; 963 mid->ref_count = 0;
964 list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); 964 list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
991 if (switchdev_trans_ph_prepare(trans)) 991 if (switchdev_trans_ph_prepare(trans))
992 return 0; 992 return 0;
993 993
994 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); 994 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
995 if (!mid) { 995 if (!mid) {
996 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); 996 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
997 if (!mid) { 997 if (!mid) {
998 netdev_err(dev, "Unable to allocate MC group\n"); 998 netdev_err(dev, "Unable to allocate MC group\n");
999 return -ENOMEM; 999 return -ENOMEM;
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1137 u16 mid_idx; 1137 u16 mid_idx;
1138 int err = 0; 1138 int err = 0;
1139 1139
1140 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); 1140 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
1141 if (!mid) { 1141 if (!mid) {
1142 netdev_err(dev, "Unable to remove port from MC DB\n"); 1142 netdev_err(dev, "Unable to remove port from MC DB\n");
1143 return -EINVAL; 1143 return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 72eee29c677f..2777d5bb4380 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,9 +727,6 @@ struct core_tx_bd_flags {
727#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 727#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
728#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 728#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
729#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 729#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
730#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
731#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
732
733}; 730};
734 731
735struct core_tx_bd { 732struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 63e1a1b0ef8e..f95385cbbd40 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1119 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << 1119 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1120 CORE_TX_BD_FLAGS_START_BD_SHIFT; 1120 CORE_TX_BD_FLAGS_START_BD_SHIFT;
1121 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); 1121 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1122 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
1122 DMA_REGPAIR_LE(start_bd->addr, first_frag); 1123 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1123 start_bd->nbytes = cpu_to_le16(first_frag_len); 1124 start_bd->nbytes = cpu_to_le16(first_frag_len);
1124 1125
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c418360ba02a..333c7442e48a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -839,20 +839,19 @@ static void qed_update_pf_params(struct qed_dev *cdev,
839{ 839{
840 int i; 840 int i;
841 841
842 if (IS_ENABLED(CONFIG_QED_RDMA)) {
843 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
844 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
845 /* divide by 3 the MRs to avoid MF ILT overflow */
846 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
847 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
848 }
849
842 for (i = 0; i < cdev->num_hwfns; i++) { 850 for (i = 0; i < cdev->num_hwfns; i++) {
843 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 851 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
844 852
845 p_hwfn->pf_params = *params; 853 p_hwfn->pf_params = *params;
846 } 854 }
847
848 if (!IS_ENABLED(CONFIG_QED_RDMA))
849 return;
850
851 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
852 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
853 /* divide by 3 the MRs to avoid MF ILT overflow */
854 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
855 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
856} 855}
857 856
858static int qed_slowpath_start(struct qed_dev *cdev, 857static int qed_slowpath_start(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 12251a1032d1..7567cc464b88 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
175 for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { 175 for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) {
176 int tc; 176 int tc;
177 177
178 for (j = 0; j < QEDE_NUM_RQSTATS; j++) 178 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
179 sprintf(buf + (k + j) * ETH_GSTRING_LEN, 179 for (j = 0; j < QEDE_NUM_RQSTATS; j++)
180 "%d: %s", i, qede_rqstats_arr[j].string);
181 k += QEDE_NUM_RQSTATS;
182 for (tc = 0; tc < edev->num_tc; tc++) {
183 for (j = 0; j < QEDE_NUM_TQSTATS; j++)
184 sprintf(buf + (k + j) * ETH_GSTRING_LEN, 180 sprintf(buf + (k + j) * ETH_GSTRING_LEN,
185 "%d.%d: %s", i, tc, 181 "%d: %s", i,
186 qede_tqstats_arr[j].string); 182 qede_rqstats_arr[j].string);
187 k += QEDE_NUM_TQSTATS; 183 k += QEDE_NUM_RQSTATS;
184 }
185
186 if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
187 for (tc = 0; tc < edev->num_tc; tc++) {
188 for (j = 0; j < QEDE_NUM_TQSTATS; j++)
189 sprintf(buf + (k + j) *
190 ETH_GSTRING_LEN,
191 "%d.%d: %s", i, tc,
192 qede_tqstats_arr[j].string);
193 k += QEDE_NUM_TQSTATS;
194 }
188 } 195 }
189 } 196 }
190 197
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 7def29aaf65c..85f46dbecd5b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -2839,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
2839 } 2839 }
2840 2840
2841 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, 2841 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
2842 rxq->rx_buf_size, DMA_FROM_DEVICE); 2842 PAGE_SIZE, DMA_FROM_DEVICE);
2843 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { 2843 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
2844 DP_NOTICE(edev, 2844 DP_NOTICE(edev,
2845 "Failed to map TPA replacement buffer\n"); 2845 "Failed to map TPA replacement buffer\n");
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 6fb3bee904d3..0b4deb31e742 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt)
575 575
576 mac |= TXEN | RXEN; /* enable RX/TX */ 576 mac |= TXEN | RXEN; /* enable RX/TX */
577 577
578 /* We don't have ethtool support yet, so force flow-control mode 578 /* Configure MAC flow control to match the PHY's settings. */
579 * to 'full' always. 579 if (phydev->pause)
580 */ 580 mac |= RXFC;
581 mac |= TXFC | RXFC; 581 if (phydev->pause != phydev->asym_pause)
582 mac |= TXFC;
582 583
583 /* setup link speed */ 584 /* setup link speed */
584 mac &= ~SPEED_MASK; 585 mac &= ~SPEED_MASK;
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt)
1003 writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); 1004 writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
1004 writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); 1005 writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
1005 1006
1007 /* Enable pause frames. Without this feature, the EMAC has been shown
1008 * to receive (and drop) frames with FCS errors at gigabit connections.
1009 */
1010 adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1011 adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1012
1006 adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 1013 adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
1007 phy_start(adpt->phydev); 1014 phy_start(adpt->phydev);
1008 1015
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index da4e90db4d98..99a14df28b96 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -212,6 +212,7 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
212 212
213 phy_np = of_parse_phandle(np, "phy-handle", 0); 213 phy_np = of_parse_phandle(np, "phy-handle", 0);
214 adpt->phydev = of_phy_find_device(phy_np); 214 adpt->phydev = of_phy_find_device(phy_np);
215 of_node_put(phy_np);
215 } 216 }
216 217
217 if (!adpt->phydev) { 218 if (!adpt->phydev) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index 75c1b530e39e..72fe343c7a36 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = {
421 /* CDR Settings */ 421 /* CDR Settings */
422 {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, 422 {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
423 UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, 423 UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)},
424 {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, 424 {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)},
425 {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, 425 {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
426 426
427 /* TX/RX Settings */ 427 /* TX/RX Settings */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 4fede4b86538..57b35aeac51a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -711,6 +711,8 @@ static int emac_probe(struct platform_device *pdev)
711err_undo_napi: 711err_undo_napi:
712 netif_napi_del(&adpt->rx_q.napi); 712 netif_napi_del(&adpt->rx_q.napi);
713err_undo_mdiobus: 713err_undo_mdiobus:
714 if (!has_acpi_companion(&pdev->dev))
715 put_device(&adpt->phydev->mdio.dev);
714 mdiobus_unregister(adpt->mii_bus); 716 mdiobus_unregister(adpt->mii_bus);
715err_undo_clocks: 717err_undo_clocks:
716 emac_clks_teardown(adpt); 718 emac_clks_teardown(adpt);
@@ -730,6 +732,8 @@ static int emac_remove(struct platform_device *pdev)
730 732
731 emac_clks_teardown(adpt); 733 emac_clks_teardown(adpt);
732 734
735 if (!has_acpi_companion(&pdev->dev))
736 put_device(&adpt->phydev->mdio.dev);
733 mdiobus_unregister(adpt->mii_bus); 737 mdiobus_unregister(adpt->mii_bus);
734 free_netdev(netdev); 738 free_netdev(netdev);
735 739
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 630536bc72f9..d6a217874a8b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1008,20 +1008,18 @@ static int ravb_phy_init(struct net_device *ndev)
1008 of_node_put(pn); 1008 of_node_put(pn);
1009 if (!phydev) { 1009 if (!phydev) {
1010 netdev_err(ndev, "failed to connect PHY\n"); 1010 netdev_err(ndev, "failed to connect PHY\n");
1011 return -ENOENT; 1011 err = -ENOENT;
1012 goto err_deregister_fixed_link;
1012 } 1013 }
1013 1014
1014 /* This driver only support 10/100Mbit speeds on Gen3 1015 /* This driver only support 10/100Mbit speeds on Gen3
1015 * at this time. 1016 * at this time.
1016 */ 1017 */
1017 if (priv->chip_id == RCAR_GEN3) { 1018 if (priv->chip_id == RCAR_GEN3) {
1018 int err;
1019
1020 err = phy_set_max_speed(phydev, SPEED_100); 1019 err = phy_set_max_speed(phydev, SPEED_100);
1021 if (err) { 1020 if (err) {
1022 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); 1021 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
1023 phy_disconnect(phydev); 1022 goto err_phy_disconnect;
1024 return err;
1025 } 1023 }
1026 1024
1027 netdev_info(ndev, "limited PHY to 100Mbit/s\n"); 1025 netdev_info(ndev, "limited PHY to 100Mbit/s\n");
@@ -1033,6 +1031,14 @@ static int ravb_phy_init(struct net_device *ndev)
1033 phy_attached_info(phydev); 1031 phy_attached_info(phydev);
1034 1032
1035 return 0; 1033 return 0;
1034
1035err_phy_disconnect:
1036 phy_disconnect(phydev);
1037err_deregister_fixed_link:
1038 if (of_phy_is_fixed_link(np))
1039 of_phy_deregister_fixed_link(np);
1040
1041 return err;
1036} 1042}
1037 1043
1038/* PHY control start function */ 1044/* PHY control start function */
@@ -1634,6 +1640,7 @@ static void ravb_set_rx_mode(struct net_device *ndev)
1634/* Device close function for Ethernet AVB */ 1640/* Device close function for Ethernet AVB */
1635static int ravb_close(struct net_device *ndev) 1641static int ravb_close(struct net_device *ndev)
1636{ 1642{
1643 struct device_node *np = ndev->dev.parent->of_node;
1637 struct ravb_private *priv = netdev_priv(ndev); 1644 struct ravb_private *priv = netdev_priv(ndev);
1638 struct ravb_tstamp_skb *ts_skb, *ts_skb2; 1645 struct ravb_tstamp_skb *ts_skb, *ts_skb2;
1639 1646
@@ -1663,6 +1670,8 @@ static int ravb_close(struct net_device *ndev)
1663 if (ndev->phydev) { 1670 if (ndev->phydev) {
1664 phy_stop(ndev->phydev); 1671 phy_stop(ndev->phydev);
1665 phy_disconnect(ndev->phydev); 1672 phy_disconnect(ndev->phydev);
1673 if (of_phy_is_fixed_link(np))
1674 of_phy_deregister_fixed_link(np);
1666 } 1675 }
1667 1676
1668 if (priv->chip_id != RCAR_GEN2) { 1677 if (priv->chip_id != RCAR_GEN2) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 05b0dc55de77..1a92de705199 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -518,7 +518,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
518 518
519 .ecsr_value = ECSR_ICD, 519 .ecsr_value = ECSR_ICD,
520 .ecsipr_value = ECSIPR_ICDIP, 520 .ecsipr_value = ECSIPR_ICDIP,
521 .eesipr_value = 0xff7f009f, 521 .eesipr_value = 0xe77f009f,
522 522
523 .tx_check = EESR_TC1 | EESR_FTC, 523 .tx_check = EESR_TC1 | EESR_FTC,
524 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | 524 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cf3557106c2..6b89e4a7b164 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel)
485 *channel = *old_channel; 485 *channel = *old_channel;
486 486
487 channel->napi_dev = NULL; 487 channel->napi_dev = NULL;
488 INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
489 channel->napi_str.napi_id = 0;
490 channel->napi_str.state = 0;
488 memset(&channel->eventq, 0, sizeof(channel->eventq)); 491 memset(&channel->eventq, 0, sizeof(channel->eventq));
489 492
490 for (j = 0; j < EFX_TXQ_TYPES; j++) { 493 for (j = 0; j < EFX_TXQ_TYPES; j++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 3818c5e06eba..4b78168a5f3c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -107,7 +107,7 @@ config DWMAC_STI
107config DWMAC_STM32 107config DWMAC_STM32
108 tristate "STM32 DWMAC support" 108 tristate "STM32 DWMAC support"
109 default ARCH_STM32 109 default ARCH_STM32
110 depends on OF && HAS_IOMEM 110 depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
111 select MFD_SYSCON 111 select MFD_SYSCON
112 ---help--- 112 ---help---
113 Support for ethernet controller on STM32 SOCs. 113 Support for ethernet controller on STM32 SOCs.
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index 2920e2ee3864..489ef146201e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -63,8 +63,8 @@
63#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 63#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40
64#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 64#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003
65#define TSE_PCS_SW_RESET_TIMEOUT 100 65#define TSE_PCS_SW_RESET_TIMEOUT 100
66#define TSE_PCS_USE_SGMII_AN_MASK BIT(2) 66#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
67#define TSE_PCS_USE_SGMII_ENA BIT(1) 67#define TSE_PCS_USE_SGMII_ENA BIT(0)
68 68
69#define SGMII_ADAPTER_CTRL_REG 0x00 69#define SGMII_ADAPTER_CTRL_REG 0x00
70#define SGMII_ADAPTER_DISABLE 0x0001 70#define SGMII_ADAPTER_DISABLE 0x0001
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index d3292c4a6eda..6d2de4e01f6d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -120,14 +120,17 @@ struct stmmac_extra_stats {
120 unsigned long ip_csum_bypassed; 120 unsigned long ip_csum_bypassed;
121 unsigned long ipv4_pkt_rcvd; 121 unsigned long ipv4_pkt_rcvd;
122 unsigned long ipv6_pkt_rcvd; 122 unsigned long ipv6_pkt_rcvd;
123 unsigned long rx_msg_type_ext_no_ptp; 123 unsigned long no_ptp_rx_msg_type_ext;
124 unsigned long rx_msg_type_sync; 124 unsigned long ptp_rx_msg_type_sync;
125 unsigned long rx_msg_type_follow_up; 125 unsigned long ptp_rx_msg_type_follow_up;
126 unsigned long rx_msg_type_delay_req; 126 unsigned long ptp_rx_msg_type_delay_req;
127 unsigned long rx_msg_type_delay_resp; 127 unsigned long ptp_rx_msg_type_delay_resp;
128 unsigned long rx_msg_type_pdelay_req; 128 unsigned long ptp_rx_msg_type_pdelay_req;
129 unsigned long rx_msg_type_pdelay_resp; 129 unsigned long ptp_rx_msg_type_pdelay_resp;
130 unsigned long rx_msg_type_pdelay_follow_up; 130 unsigned long ptp_rx_msg_type_pdelay_follow_up;
131 unsigned long ptp_rx_msg_type_announce;
132 unsigned long ptp_rx_msg_type_management;
133 unsigned long ptp_rx_msg_pkt_reserved_type;
131 unsigned long ptp_frame_type; 134 unsigned long ptp_frame_type;
132 unsigned long ptp_ver; 135 unsigned long ptp_ver;
133 unsigned long timestamp_dropped; 136 unsigned long timestamp_dropped;
@@ -482,11 +485,12 @@ struct stmmac_ops {
482/* PTP and HW Timer helpers */ 485/* PTP and HW Timer helpers */
483struct stmmac_hwtimestamp { 486struct stmmac_hwtimestamp {
484 void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); 487 void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
485 u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate); 488 u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
489 int gmac4);
486 int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); 490 int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
487 int (*config_addend) (void __iomem *ioaddr, u32 addend); 491 int (*config_addend) (void __iomem *ioaddr, u32 addend);
488 int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, 492 int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
489 int add_sub); 493 int add_sub, int gmac4);
490 u64(*get_systime) (void __iomem *ioaddr); 494 u64(*get_systime) (void __iomem *ioaddr);
491}; 495};
492 496
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 2e4c171a2b41..e3c86d422109 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -155,14 +155,18 @@
155#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) 155#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
156 156
157/* Extended RDES4 message type definitions */ 157/* Extended RDES4 message type definitions */
158#define RDES_EXT_NO_PTP 0 158#define RDES_EXT_NO_PTP 0x0
159#define RDES_EXT_SYNC 1 159#define RDES_EXT_SYNC 0x1
160#define RDES_EXT_FOLLOW_UP 2 160#define RDES_EXT_FOLLOW_UP 0x2
161#define RDES_EXT_DELAY_REQ 3 161#define RDES_EXT_DELAY_REQ 0x3
162#define RDES_EXT_DELAY_RESP 4 162#define RDES_EXT_DELAY_RESP 0x4
163#define RDES_EXT_PDELAY_REQ 5 163#define RDES_EXT_PDELAY_REQ 0x5
164#define RDES_EXT_PDELAY_RESP 6 164#define RDES_EXT_PDELAY_RESP 0x6
165#define RDES_EXT_PDELAY_FOLLOW_UP 7 165#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
166#define RDES_PTP_ANNOUNCE 0x8
167#define RDES_PTP_MANAGEMENT 0x9
168#define RDES_PTP_SIGNALING 0xa
169#define RDES_PTP_PKT_RESERVED_TYPE 0xf
166 170
167/* Basic descriptor structure for normal and alternate descriptors */ 171/* Basic descriptor structure for normal and alternate descriptors */
168struct dma_desc { 172struct dma_desc {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index b1e5f24708c9..e6e6c2fcc4b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -50,10 +50,23 @@ static int dwmac_generic_probe(struct platform_device *pdev)
50 if (plat_dat->init) { 50 if (plat_dat->init) {
51 ret = plat_dat->init(pdev, plat_dat->bsp_priv); 51 ret = plat_dat->init(pdev, plat_dat->bsp_priv);
52 if (ret) 52 if (ret)
53 return ret; 53 goto err_remove_config_dt;
54 } 54 }
55 55
56 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 56 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
57 if (ret)
58 goto err_exit;
59
60 return 0;
61
62err_exit:
63 if (plat_dat->exit)
64 plat_dat->exit(pdev, plat_dat->bsp_priv);
65err_remove_config_dt:
66 if (pdev->dev.of_node)
67 stmmac_remove_config_dt(pdev, plat_dat);
68
69 return ret;
57} 70}
58 71
59static const struct of_device_id dwmac_generic_match[] = { 72static const struct of_device_id dwmac_generic_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 36d3355f2fb0..866444b6c82f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -271,15 +271,17 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
271 return PTR_ERR(plat_dat); 271 return PTR_ERR(plat_dat);
272 272
273 gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); 273 gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
274 if (!gmac) 274 if (!gmac) {
275 return -ENOMEM; 275 err = -ENOMEM;
276 goto err_remove_config_dt;
277 }
276 278
277 gmac->pdev = pdev; 279 gmac->pdev = pdev;
278 280
279 err = ipq806x_gmac_of_parse(gmac); 281 err = ipq806x_gmac_of_parse(gmac);
280 if (err) { 282 if (err) {
281 dev_err(dev, "device tree parsing error\n"); 283 dev_err(dev, "device tree parsing error\n");
282 return err; 284 goto err_remove_config_dt;
283 } 285 }
284 286
285 regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, 287 regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -300,7 +302,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
300 default: 302 default:
301 dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", 303 dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
302 phy_modes(gmac->phy_mode)); 304 phy_modes(gmac->phy_mode));
303 return -EINVAL; 305 err = -EINVAL;
306 goto err_remove_config_dt;
304 } 307 }
305 regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); 308 regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
306 309
@@ -319,7 +322,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
319 default: 322 default:
320 dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", 323 dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
321 phy_modes(gmac->phy_mode)); 324 phy_modes(gmac->phy_mode));
322 return -EINVAL; 325 err = -EINVAL;
326 goto err_remove_config_dt;
323 } 327 }
324 regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); 328 regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
325 329
@@ -346,7 +350,16 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
346 plat_dat->bsp_priv = gmac; 350 plat_dat->bsp_priv = gmac;
347 plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; 351 plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
348 352
349 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 353 err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
354 if (err)
355 goto err_remove_config_dt;
356
357 return 0;
358
359err_remove_config_dt:
360 stmmac_remove_config_dt(pdev, plat_dat);
361
362 return err;
350} 363}
351 364
352static const struct of_device_id ipq806x_gmac_dwmac_match[] = { 365static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 78e9d1861896..3d3f43d91b98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -46,7 +46,8 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
46 reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); 46 reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
47 if (IS_ERR(reg)) { 47 if (IS_ERR(reg)) {
48 dev_err(&pdev->dev, "syscon lookup failed\n"); 48 dev_err(&pdev->dev, "syscon lookup failed\n");
49 return PTR_ERR(reg); 49 ret = PTR_ERR(reg);
50 goto err_remove_config_dt;
50 } 51 }
51 52
52 if (plat_dat->interface == PHY_INTERFACE_MODE_MII) { 53 if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
@@ -55,13 +56,23 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
55 ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; 56 ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
56 } else { 57 } else {
57 dev_err(&pdev->dev, "Only MII and RMII mode supported\n"); 58 dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
58 return -EINVAL; 59 ret = -EINVAL;
60 goto err_remove_config_dt;
59 } 61 }
60 62
61 regmap_update_bits(reg, LPC18XX_CREG_CREG6, 63 regmap_update_bits(reg, LPC18XX_CREG_CREG6,
62 LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); 64 LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
63 65
64 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 66 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
67 if (ret)
68 goto err_remove_config_dt;
69
70 return 0;
71
72err_remove_config_dt:
73 stmmac_remove_config_dt(pdev, plat_dat);
74
75 return ret;
65} 76}
66 77
67static const struct of_device_id lpc18xx_dwmac_match[] = { 78static const struct of_device_id lpc18xx_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 309d99536a2c..7fdd1760a74c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -64,18 +64,31 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
64 return PTR_ERR(plat_dat); 64 return PTR_ERR(plat_dat);
65 65
66 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 66 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
67 if (!dwmac) 67 if (!dwmac) {
68 return -ENOMEM; 68 ret = -ENOMEM;
69 goto err_remove_config_dt;
70 }
69 71
70 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 72 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
71 dwmac->reg = devm_ioremap_resource(&pdev->dev, res); 73 dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
72 if (IS_ERR(dwmac->reg)) 74 if (IS_ERR(dwmac->reg)) {
73 return PTR_ERR(dwmac->reg); 75 ret = PTR_ERR(dwmac->reg);
76 goto err_remove_config_dt;
77 }
74 78
75 plat_dat->bsp_priv = dwmac; 79 plat_dat->bsp_priv = dwmac;
76 plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; 80 plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
77 81
78 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 82 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
83 if (ret)
84 goto err_remove_config_dt;
85
86 return 0;
87
88err_remove_config_dt:
89 stmmac_remove_config_dt(pdev, plat_dat);
90
91 return ret;
79} 92}
80 93
81static const struct of_device_id meson6_dwmac_match[] = { 94static const struct of_device_id meson6_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 250e4ceafc8d..ffaed1f35efe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -264,32 +264,48 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
264 return PTR_ERR(plat_dat); 264 return PTR_ERR(plat_dat);
265 265
266 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 266 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
267 if (!dwmac) 267 if (!dwmac) {
268 return -ENOMEM; 268 ret = -ENOMEM;
269 goto err_remove_config_dt;
270 }
269 271
270 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 272 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
271 dwmac->regs = devm_ioremap_resource(&pdev->dev, res); 273 dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
272 if (IS_ERR(dwmac->regs)) 274 if (IS_ERR(dwmac->regs)) {
273 return PTR_ERR(dwmac->regs); 275 ret = PTR_ERR(dwmac->regs);
276 goto err_remove_config_dt;
277 }
274 278
275 dwmac->pdev = pdev; 279 dwmac->pdev = pdev;
276 dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node); 280 dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
277 if (dwmac->phy_mode < 0) { 281 if (dwmac->phy_mode < 0) {
278 dev_err(&pdev->dev, "missing phy-mode property\n"); 282 dev_err(&pdev->dev, "missing phy-mode property\n");
279 return -EINVAL; 283 ret = -EINVAL;
284 goto err_remove_config_dt;
280 } 285 }
281 286
282 ret = meson8b_init_clk(dwmac); 287 ret = meson8b_init_clk(dwmac);
283 if (ret) 288 if (ret)
284 return ret; 289 goto err_remove_config_dt;
285 290
286 ret = meson8b_init_prg_eth(dwmac); 291 ret = meson8b_init_prg_eth(dwmac);
287 if (ret) 292 if (ret)
288 return ret; 293 goto err_remove_config_dt;
289 294
290 plat_dat->bsp_priv = dwmac; 295 plat_dat->bsp_priv = dwmac;
291 296
292 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 297 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
298 if (ret)
299 goto err_clk_disable;
300
301 return 0;
302
303err_clk_disable:
304 clk_disable_unprepare(dwmac->m25_div_clk);
305err_remove_config_dt:
306 stmmac_remove_config_dt(pdev, plat_dat);
307
308 return ret;
293} 309}
294 310
295static int meson8b_dwmac_remove(struct platform_device *pdev) 311static int meson8b_dwmac_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 3740a4417fa0..d80c88bd2bba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -981,14 +981,27 @@ static int rk_gmac_probe(struct platform_device *pdev)
981 plat_dat->resume = rk_gmac_resume; 981 plat_dat->resume = rk_gmac_resume;
982 982
983 plat_dat->bsp_priv = rk_gmac_setup(pdev, data); 983 plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
984 if (IS_ERR(plat_dat->bsp_priv)) 984 if (IS_ERR(plat_dat->bsp_priv)) {
985 return PTR_ERR(plat_dat->bsp_priv); 985 ret = PTR_ERR(plat_dat->bsp_priv);
986 goto err_remove_config_dt;
987 }
986 988
987 ret = rk_gmac_init(pdev, plat_dat->bsp_priv); 989 ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
988 if (ret) 990 if (ret)
989 return ret; 991 goto err_remove_config_dt;
992
993 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
994 if (ret)
995 goto err_gmac_exit;
996
997 return 0;
998
999err_gmac_exit:
1000 rk_gmac_exit(pdev, plat_dat->bsp_priv);
1001err_remove_config_dt:
1002 stmmac_remove_config_dt(pdev, plat_dat);
990 1003
991 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 1004 return ret;
992} 1005}
993 1006
994static const struct of_device_id rk_gmac_dwmac_match[] = { 1007static const struct of_device_id rk_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index bec6963ac71e..0c420e97de1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -304,6 +304,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
304 struct device *dev = &pdev->dev; 304 struct device *dev = &pdev->dev;
305 int ret; 305 int ret;
306 struct socfpga_dwmac *dwmac; 306 struct socfpga_dwmac *dwmac;
307 struct net_device *ndev;
308 struct stmmac_priv *stpriv;
307 309
308 ret = stmmac_get_platform_resources(pdev, &stmmac_res); 310 ret = stmmac_get_platform_resources(pdev, &stmmac_res);
309 if (ret) 311 if (ret)
@@ -314,32 +316,43 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
314 return PTR_ERR(plat_dat); 316 return PTR_ERR(plat_dat);
315 317
316 dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); 318 dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
317 if (!dwmac) 319 if (!dwmac) {
318 return -ENOMEM; 320 ret = -ENOMEM;
321 goto err_remove_config_dt;
322 }
319 323
320 ret = socfpga_dwmac_parse_data(dwmac, dev); 324 ret = socfpga_dwmac_parse_data(dwmac, dev);
321 if (ret) { 325 if (ret) {
322 dev_err(dev, "Unable to parse OF data\n"); 326 dev_err(dev, "Unable to parse OF data\n");
323 return ret; 327 goto err_remove_config_dt;
324 } 328 }
325 329
326 plat_dat->bsp_priv = dwmac; 330 plat_dat->bsp_priv = dwmac;
327 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; 331 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
328 332
329 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 333 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
334 if (ret)
335 goto err_remove_config_dt;
330 336
331 if (!ret) { 337 ndev = platform_get_drvdata(pdev);
332 struct net_device *ndev = platform_get_drvdata(pdev); 338 stpriv = netdev_priv(ndev);
333 struct stmmac_priv *stpriv = netdev_priv(ndev);
334 339
335 /* The socfpga driver needs to control the stmmac reset to 340 /* The socfpga driver needs to control the stmmac reset to set the phy
336 * set the phy mode. Create a copy of the core reset handel 341 * mode. Create a copy of the core reset handle so it can be used by
337 * so it can be used by the driver later. 342 * the driver later.
338 */ 343 */
339 dwmac->stmmac_rst = stpriv->stmmac_rst; 344 dwmac->stmmac_rst = stpriv->stmmac_rst;
340 345
341 ret = socfpga_dwmac_set_phy_mode(dwmac); 346 ret = socfpga_dwmac_set_phy_mode(dwmac);
342 } 347 if (ret)
348 goto err_dvr_remove;
349
350 return 0;
351
352err_dvr_remove:
353 stmmac_dvr_remove(&pdev->dev);
354err_remove_config_dt:
355 stmmac_remove_config_dt(pdev, plat_dat);
343 356
344 return ret; 357 return ret;
345} 358}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 58c05acc2aab..060b98c37a85 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -345,13 +345,15 @@ static int sti_dwmac_probe(struct platform_device *pdev)
345 return PTR_ERR(plat_dat); 345 return PTR_ERR(plat_dat);
346 346
347 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 347 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
348 if (!dwmac) 348 if (!dwmac) {
349 return -ENOMEM; 349 ret = -ENOMEM;
350 goto err_remove_config_dt;
351 }
350 352
351 ret = sti_dwmac_parse_data(dwmac, pdev); 353 ret = sti_dwmac_parse_data(dwmac, pdev);
352 if (ret) { 354 if (ret) {
353 dev_err(&pdev->dev, "Unable to parse OF data\n"); 355 dev_err(&pdev->dev, "Unable to parse OF data\n");
354 return ret; 356 goto err_remove_config_dt;
355 } 357 }
356 358
357 dwmac->fix_retime_src = data->fix_retime_src; 359 dwmac->fix_retime_src = data->fix_retime_src;
@@ -363,9 +365,20 @@ static int sti_dwmac_probe(struct platform_device *pdev)
363 365
364 ret = sti_dwmac_init(pdev, plat_dat->bsp_priv); 366 ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
365 if (ret) 367 if (ret)
366 return ret; 368 goto err_remove_config_dt;
369
370 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
371 if (ret)
372 goto err_dwmac_exit;
373
374 return 0;
375
376err_dwmac_exit:
377 sti_dwmac_exit(pdev, plat_dat->bsp_priv);
378err_remove_config_dt:
379 stmmac_remove_config_dt(pdev, plat_dat);
367 380
368 return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 381 return ret;
369} 382}
370 383
371static const struct sti_dwmac_of_data stih4xx_dwmac_data = { 384static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index e5a926b8bee7..61cb24810d10 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -107,24 +107,33 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
107 return PTR_ERR(plat_dat); 107 return PTR_ERR(plat_dat);
108 108
109 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); 109 dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
110 if (!dwmac) 110 if (!dwmac) {
111 return -ENOMEM; 111 ret = -ENOMEM;
112 goto err_remove_config_dt;
113 }
112 114
113 ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); 115 ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
114 if (ret) { 116 if (ret) {
115 dev_err(&pdev->dev, "Unable to parse OF data\n"); 117 dev_err(&pdev->dev, "Unable to parse OF data\n");
116 return ret; 118 goto err_remove_config_dt;
117 } 119 }
118 120
119 plat_dat->bsp_priv = dwmac; 121 plat_dat->bsp_priv = dwmac;
120 122
121 ret = stm32_dwmac_init(plat_dat); 123 ret = stm32_dwmac_init(plat_dat);
122 if (ret) 124 if (ret)
123 return ret; 125 goto err_remove_config_dt;
124 126
125 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 127 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
126 if (ret) 128 if (ret)
127 stm32_dwmac_clk_disable(dwmac); 129 goto err_clk_disable;
130
131 return 0;
132
133err_clk_disable:
134 stm32_dwmac_clk_disable(dwmac);
135err_remove_config_dt:
136 stmmac_remove_config_dt(pdev, plat_dat);
128 137
129 return ret; 138 return ret;
130} 139}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index adff46375a32..d07520fb969e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -120,22 +120,27 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
120 return PTR_ERR(plat_dat); 120 return PTR_ERR(plat_dat);
121 121
122 gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); 122 gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
123 if (!gmac) 123 if (!gmac) {
124 return -ENOMEM; 124 ret = -ENOMEM;
125 goto err_remove_config_dt;
126 }
125 127
126 gmac->interface = of_get_phy_mode(dev->of_node); 128 gmac->interface = of_get_phy_mode(dev->of_node);
127 129
128 gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); 130 gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
129 if (IS_ERR(gmac->tx_clk)) { 131 if (IS_ERR(gmac->tx_clk)) {
130 dev_err(dev, "could not get tx clock\n"); 132 dev_err(dev, "could not get tx clock\n");
131 return PTR_ERR(gmac->tx_clk); 133 ret = PTR_ERR(gmac->tx_clk);
134 goto err_remove_config_dt;
132 } 135 }
133 136
134 /* Optional regulator for PHY */ 137 /* Optional regulator for PHY */
135 gmac->regulator = devm_regulator_get_optional(dev, "phy"); 138 gmac->regulator = devm_regulator_get_optional(dev, "phy");
136 if (IS_ERR(gmac->regulator)) { 139 if (IS_ERR(gmac->regulator)) {
137 if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) 140 if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) {
138 return -EPROBE_DEFER; 141 ret = -EPROBE_DEFER;
142 goto err_remove_config_dt;
143 }
139 dev_info(dev, "no regulator found\n"); 144 dev_info(dev, "no regulator found\n");
140 gmac->regulator = NULL; 145 gmac->regulator = NULL;
141 } 146 }
@@ -151,11 +156,18 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
151 156
152 ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); 157 ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
153 if (ret) 158 if (ret)
154 return ret; 159 goto err_remove_config_dt;
155 160
156 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 161 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
157 if (ret) 162 if (ret)
158 sun7i_gmac_exit(pdev, plat_dat->bsp_priv); 163 goto err_gmac_exit;
164
165 return 0;
166
167err_gmac_exit:
168 sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
169err_remove_config_dt:
170 stmmac_remove_config_dt(pdev, plat_dat);
159 171
160 return ret; 172 return ret;
161} 173}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index a1b17cd7886b..a601f8d43b75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
123 x->ipv4_pkt_rcvd++; 123 x->ipv4_pkt_rcvd++;
124 if (rdes1 & RDES1_IPV6_HEADER) 124 if (rdes1 & RDES1_IPV6_HEADER)
125 x->ipv6_pkt_rcvd++; 125 x->ipv6_pkt_rcvd++;
126 if (message_type == RDES_EXT_SYNC) 126
127 x->rx_msg_type_sync++; 127 if (message_type == RDES_EXT_NO_PTP)
128 x->no_ptp_rx_msg_type_ext++;
129 else if (message_type == RDES_EXT_SYNC)
130 x->ptp_rx_msg_type_sync++;
128 else if (message_type == RDES_EXT_FOLLOW_UP) 131 else if (message_type == RDES_EXT_FOLLOW_UP)
129 x->rx_msg_type_follow_up++; 132 x->ptp_rx_msg_type_follow_up++;
130 else if (message_type == RDES_EXT_DELAY_REQ) 133 else if (message_type == RDES_EXT_DELAY_REQ)
131 x->rx_msg_type_delay_req++; 134 x->ptp_rx_msg_type_delay_req++;
132 else if (message_type == RDES_EXT_DELAY_RESP) 135 else if (message_type == RDES_EXT_DELAY_RESP)
133 x->rx_msg_type_delay_resp++; 136 x->ptp_rx_msg_type_delay_resp++;
134 else if (message_type == RDES_EXT_PDELAY_REQ) 137 else if (message_type == RDES_EXT_PDELAY_REQ)
135 x->rx_msg_type_pdelay_req++; 138 x->ptp_rx_msg_type_pdelay_req++;
136 else if (message_type == RDES_EXT_PDELAY_RESP) 139 else if (message_type == RDES_EXT_PDELAY_RESP)
137 x->rx_msg_type_pdelay_resp++; 140 x->ptp_rx_msg_type_pdelay_resp++;
138 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) 141 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
139 x->rx_msg_type_pdelay_follow_up++; 142 x->ptp_rx_msg_type_pdelay_follow_up++;
140 else 143 else if (message_type == RDES_PTP_ANNOUNCE)
141 x->rx_msg_type_ext_no_ptp++; 144 x->ptp_rx_msg_type_announce++;
145 else if (message_type == RDES_PTP_MANAGEMENT)
146 x->ptp_rx_msg_type_management++;
147 else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
148 x->ptp_rx_msg_pkt_reserved_type++;
142 149
143 if (rdes1 & RDES1_PTP_PACKET_TYPE) 150 if (rdes1 & RDES1_PTP_PACKET_TYPE)
144 x->ptp_frame_type++; 151 x->ptp_frame_type++;
@@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
204 211
205static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) 212static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
206{ 213{
207 return (p->des3 & TDES3_TIMESTAMP_STATUS) 214 /* Context type from W/B descriptor must be zero */
208 >> TDES3_TIMESTAMP_STATUS_SHIFT; 215 if (p->des3 & TDES3_CONTEXT_TYPE)
216 return -EINVAL;
217
218 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
219 if (p->des3 & TDES3_TIMESTAMP_STATUS)
220 return 0;
221
222 return 1;
209} 223}
210 224
211/* NOTE: For RX CTX bit has to be checked before 225static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
212 * HAVE a specific function for TX and another one for RX
213 */
214static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
215{ 226{
216 struct dma_desc *p = (struct dma_desc *)desc; 227 struct dma_desc *p = (struct dma_desc *)desc;
217 u64 ns; 228 u64 ns;
@@ -223,12 +234,54 @@ static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
223 return ns; 234 return ns;
224} 235}
225 236
226static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) 237static int dwmac4_rx_check_timestamp(void *desc)
238{
239 struct dma_desc *p = (struct dma_desc *)desc;
240 u32 own, ctxt;
241 int ret = 1;
242
243 own = p->des3 & RDES3_OWN;
244 ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
245 >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
246
247 if (likely(!own && ctxt)) {
248 if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
249 /* Corrupted value */
250 ret = -EINVAL;
251 else
252 /* A valid Timestamp is ready to be read */
253 ret = 0;
254 }
255
256 /* Timestamp not ready */
257 return ret;
258}
259
260static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
227{ 261{
228 struct dma_desc *p = (struct dma_desc *)desc; 262 struct dma_desc *p = (struct dma_desc *)desc;
263 int ret = -EINVAL;
264
265 /* Get the status from normal w/b descriptor */
266 if (likely(p->des3 & TDES3_RS1V)) {
267 if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
268 int i = 0;
269
270 /* Check if timestamp is OK from context descriptor */
271 do {
272 ret = dwmac4_rx_check_timestamp(desc);
273 if (ret < 0)
274 goto exit;
275 i++;
229 276
230 return (p->des1 & RDES1_TIMESTAMP_AVAILABLE) 277 } while ((ret == 1) || (i < 10));
231 >> RDES1_TIMESTAMP_AVAILABLE_SHIFT; 278
279 if (i == 10)
280 ret = -EBUSY;
281 }
282 }
283exit:
284 return ret;
232} 285}
233 286
234static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 287static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -373,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = {
373 .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, 426 .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
374 .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, 427 .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
375 .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, 428 .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
376 .get_timestamp = dwmac4_wrback_get_timestamp, 429 .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
377 .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, 430 .get_timestamp = dwmac4_get_timestamp,
378 .set_tx_ic = dwmac4_rd_set_tx_ic, 431 .set_tx_ic = dwmac4_rd_set_tx_ic,
379 .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, 432 .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
380 .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, 433 .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
index 0902a2edeaa9..9736c505211a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -59,10 +59,13 @@
59#define TDES3_CTXT_TCMSSV BIT(26) 59#define TDES3_CTXT_TCMSSV BIT(26)
60 60
61/* TDES3 Common */ 61/* TDES3 Common */
62#define TDES3_RS1V BIT(26)
63#define TDES3_RS1V_SHIFT 26
62#define TDES3_LAST_DESCRIPTOR BIT(28) 64#define TDES3_LAST_DESCRIPTOR BIT(28)
63#define TDES3_LAST_DESCRIPTOR_SHIFT 28 65#define TDES3_LAST_DESCRIPTOR_SHIFT 28
64#define TDES3_FIRST_DESCRIPTOR BIT(29) 66#define TDES3_FIRST_DESCRIPTOR BIT(29)
65#define TDES3_CONTEXT_TYPE BIT(30) 67#define TDES3_CONTEXT_TYPE BIT(30)
68#define TDES3_CONTEXT_TYPE_SHIFT 30
66 69
67/* TDS3 use for both format (read and write back) */ 70/* TDS3 use for both format (read and write back) */
68#define TDES3_OWN BIT(31) 71#define TDES3_OWN BIT(31)
@@ -117,6 +120,7 @@
117#define RDES3_LAST_DESCRIPTOR BIT(28) 120#define RDES3_LAST_DESCRIPTOR BIT(28)
118#define RDES3_FIRST_DESCRIPTOR BIT(29) 121#define RDES3_FIRST_DESCRIPTOR BIT(29)
119#define RDES3_CONTEXT_DESCRIPTOR BIT(30) 122#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
123#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30
120 124
121/* RDES3 (read format) */ 125/* RDES3 (read format) */
122#define RDES3_BUFFER1_VALID_ADDR BIT(24) 126#define RDES3_BUFFER1_VALID_ADDR BIT(24)
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 38f19c99cf59..e75549327c34 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
150 x->ipv4_pkt_rcvd++; 150 x->ipv4_pkt_rcvd++;
151 if (rdes4 & ERDES4_IPV6_PKT_RCVD) 151 if (rdes4 & ERDES4_IPV6_PKT_RCVD)
152 x->ipv6_pkt_rcvd++; 152 x->ipv6_pkt_rcvd++;
153 if (message_type == RDES_EXT_SYNC) 153
154 x->rx_msg_type_sync++; 154 if (message_type == RDES_EXT_NO_PTP)
155 x->no_ptp_rx_msg_type_ext++;
156 else if (message_type == RDES_EXT_SYNC)
157 x->ptp_rx_msg_type_sync++;
155 else if (message_type == RDES_EXT_FOLLOW_UP) 158 else if (message_type == RDES_EXT_FOLLOW_UP)
156 x->rx_msg_type_follow_up++; 159 x->ptp_rx_msg_type_follow_up++;
157 else if (message_type == RDES_EXT_DELAY_REQ) 160 else if (message_type == RDES_EXT_DELAY_REQ)
158 x->rx_msg_type_delay_req++; 161 x->ptp_rx_msg_type_delay_req++;
159 else if (message_type == RDES_EXT_DELAY_RESP) 162 else if (message_type == RDES_EXT_DELAY_RESP)
160 x->rx_msg_type_delay_resp++; 163 x->ptp_rx_msg_type_delay_resp++;
161 else if (message_type == RDES_EXT_PDELAY_REQ) 164 else if (message_type == RDES_EXT_PDELAY_REQ)
162 x->rx_msg_type_pdelay_req++; 165 x->ptp_rx_msg_type_pdelay_req++;
163 else if (message_type == RDES_EXT_PDELAY_RESP) 166 else if (message_type == RDES_EXT_PDELAY_RESP)
164 x->rx_msg_type_pdelay_resp++; 167 x->ptp_rx_msg_type_pdelay_resp++;
165 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) 168 else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
166 x->rx_msg_type_pdelay_follow_up++; 169 x->ptp_rx_msg_type_pdelay_follow_up++;
167 else 170 else if (message_type == RDES_PTP_ANNOUNCE)
168 x->rx_msg_type_ext_no_ptp++; 171 x->ptp_rx_msg_type_announce++;
172 else if (message_type == RDES_PTP_MANAGEMENT)
173 x->ptp_rx_msg_type_management++;
174 else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
175 x->ptp_rx_msg_pkt_reserved_type++;
176
169 if (rdes4 & ERDES4_PTP_FRAME_TYPE) 177 if (rdes4 & ERDES4_PTP_FRAME_TYPE)
170 x->ptp_frame_type++; 178 x->ptp_frame_type++;
171 if (rdes4 & ERDES4_PTP_VER) 179 if (rdes4 & ERDES4_PTP_VER)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b15fc55f1b96..4d2a759b8465 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -129,6 +129,7 @@ struct stmmac_priv {
129 int irq_wake; 129 int irq_wake;
130 spinlock_t ptp_lock; 130 spinlock_t ptp_lock;
131 void __iomem *mmcaddr; 131 void __iomem *mmcaddr;
132 void __iomem *ptpaddr;
132 u32 rx_tail_addr; 133 u32 rx_tail_addr;
133 u32 tx_tail_addr; 134 u32 tx_tail_addr;
134 u32 mss; 135 u32 mss;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1e06173fc9d7..c5d0142adda2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
115 STMMAC_STAT(ip_csum_bypassed), 115 STMMAC_STAT(ip_csum_bypassed),
116 STMMAC_STAT(ipv4_pkt_rcvd), 116 STMMAC_STAT(ipv4_pkt_rcvd),
117 STMMAC_STAT(ipv6_pkt_rcvd), 117 STMMAC_STAT(ipv6_pkt_rcvd),
118 STMMAC_STAT(rx_msg_type_ext_no_ptp), 118 STMMAC_STAT(no_ptp_rx_msg_type_ext),
119 STMMAC_STAT(rx_msg_type_sync), 119 STMMAC_STAT(ptp_rx_msg_type_sync),
120 STMMAC_STAT(rx_msg_type_follow_up), 120 STMMAC_STAT(ptp_rx_msg_type_follow_up),
121 STMMAC_STAT(rx_msg_type_delay_req), 121 STMMAC_STAT(ptp_rx_msg_type_delay_req),
122 STMMAC_STAT(rx_msg_type_delay_resp), 122 STMMAC_STAT(ptp_rx_msg_type_delay_resp),
123 STMMAC_STAT(rx_msg_type_pdelay_req), 123 STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
124 STMMAC_STAT(rx_msg_type_pdelay_resp), 124 STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
125 STMMAC_STAT(rx_msg_type_pdelay_follow_up), 125 STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
126 STMMAC_STAT(ptp_rx_msg_type_announce),
127 STMMAC_STAT(ptp_rx_msg_type_management),
128 STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
126 STMMAC_STAT(ptp_frame_type), 129 STMMAC_STAT(ptp_frame_type),
127 STMMAC_STAT(ptp_ver), 130 STMMAC_STAT(ptp_ver),
128 STMMAC_STAT(timestamp_dropped), 131 STMMAC_STAT(timestamp_dropped),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index a77f68918010..10d6059b2f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
34} 34}
35 35
36static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, 36static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
37 u32 ptp_clock) 37 u32 ptp_clock, int gmac4)
38{ 38{
39 u32 value = readl(ioaddr + PTP_TCR); 39 u32 value = readl(ioaddr + PTP_TCR);
40 unsigned long data; 40 unsigned long data;
41 41
42 /* Convert the ptp_clock to nano second 42 /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
43 * formula = (2/ptp_clock) * 1000000000 43 * formula = (1/ptp_clock) * 1000000000
44 * where, ptp_clock = 50MHz. 44 * where ptp_clock is 50MHz if fine method is used to update system
45 */ 45 */
46 data = (2000000000ULL / ptp_clock); 46 if (value & PTP_TCR_TSCFUPDT)
47 data = (1000000000ULL / 50000000);
48 else
49 data = (1000000000ULL / ptp_clock);
47 50
48 /* 0.465ns accuracy */ 51 /* 0.465ns accuracy */
49 if (!(value & PTP_TCR_TSCTRLSSR)) 52 if (!(value & PTP_TCR_TSCTRLSSR))
50 data = (data * 1000) / 465; 53 data = (data * 1000) / 465;
51 54
55 data &= PTP_SSIR_SSINC_MASK;
56
57 if (gmac4)
58 data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
59
52 writel(data, ioaddr + PTP_SSIR); 60 writel(data, ioaddr + PTP_SSIR);
53 61
54 return data; 62 return data;
@@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
104} 112}
105 113
106static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, 114static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
107 int add_sub) 115 int add_sub, int gmac4)
108{ 116{
109 u32 value; 117 u32 value;
110 int limit; 118 int limit;
111 119
120 if (add_sub) {
121 /* If the new sec value needs to be subtracted with
122 * the system time, then MAC_STSUR reg should be
123 * programmed with (2^32 – <new_sec_value>)
124 */
125 if (gmac4)
126 sec = (100000000ULL - sec);
127
128 value = readl(ioaddr + PTP_TCR);
129 if (value & PTP_TCR_TSCTRLSSR)
130 nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
131 else
132 nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
133 }
134
112 writel(sec, ioaddr + PTP_STSUR); 135 writel(sec, ioaddr + PTP_STSUR);
113 writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec), 136 value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
114 ioaddr + PTP_STNSUR); 137 writel(value, ioaddr + PTP_STNSUR);
138
115 /* issue command to initialize the system time value */ 139 /* issue command to initialize the system time value */
116 value = readl(ioaddr + PTP_TCR); 140 value = readl(ioaddr + PTP_TCR);
117 value |= PTP_TCR_TSUPDT; 141 value |= PTP_TCR_TSUPDT;
@@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr)
134{ 158{
135 u64 ns; 159 u64 ns;
136 160
161 /* Get the TSSS value */
137 ns = readl(ioaddr + PTP_STNSR); 162 ns = readl(ioaddr + PTP_STNSR);
138 /* convert sec time value to nanosecond */ 163 /* Get the TSS and convert sec time value to nanosecond */
139 ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; 164 ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
140 165
141 return ns; 166 return ns;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48e71fad4210..caf069a465f2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -340,18 +340,17 @@ out:
340 340
341/* stmmac_get_tx_hwtstamp - get HW TX timestamps 341/* stmmac_get_tx_hwtstamp - get HW TX timestamps
342 * @priv: driver private structure 342 * @priv: driver private structure
343 * @entry : descriptor index to be used. 343 * @p : descriptor pointer
344 * @skb : the socket buffer 344 * @skb : the socket buffer
345 * Description : 345 * Description :
346 * This function will read timestamp from the descriptor & pass it to stack. 346 * This function will read timestamp from the descriptor & pass it to stack.
347 * and also perform some sanity checks. 347 * and also perform some sanity checks.
348 */ 348 */
349static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, 349static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
350 unsigned int entry, struct sk_buff *skb) 350 struct dma_desc *p, struct sk_buff *skb)
351{ 351{
352 struct skb_shared_hwtstamps shhwtstamp; 352 struct skb_shared_hwtstamps shhwtstamp;
353 u64 ns; 353 u64 ns;
354 void *desc = NULL;
355 354
356 if (!priv->hwts_tx_en) 355 if (!priv->hwts_tx_en)
357 return; 356 return;
@@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
360 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) 359 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
361 return; 360 return;
362 361
363 if (priv->adv_ts)
364 desc = (priv->dma_etx + entry);
365 else
366 desc = (priv->dma_tx + entry);
367
368 /* check tx tstamp status */ 362 /* check tx tstamp status */
369 if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) 363 if (!priv->hw->desc->get_tx_timestamp_status(p)) {
370 return; 364 /* get the valid tstamp */
365 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
371 366
372 /* get the valid tstamp */ 367 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
373 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 368 shhwtstamp.hwtstamp = ns_to_ktime(ns);
374 369
375 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 370 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
376 shhwtstamp.hwtstamp = ns_to_ktime(ns); 371 /* pass tstamp to stack */
377 /* pass tstamp to stack */ 372 skb_tstamp_tx(skb, &shhwtstamp);
378 skb_tstamp_tx(skb, &shhwtstamp); 373 }
379 374
380 return; 375 return;
381} 376}
382 377
383/* stmmac_get_rx_hwtstamp - get HW RX timestamps 378/* stmmac_get_rx_hwtstamp - get HW RX timestamps
384 * @priv: driver private structure 379 * @priv: driver private structure
385 * @entry : descriptor index to be used. 380 * @p : descriptor pointer
381 * @np : next descriptor pointer
386 * @skb : the socket buffer 382 * @skb : the socket buffer
387 * Description : 383 * Description :
388 * This function will read received packet's timestamp from the descriptor 384 * This function will read received packet's timestamp from the descriptor
389 * and pass it to stack. It also perform some sanity checks. 385 * and pass it to stack. It also perform some sanity checks.
390 */ 386 */
391static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, 387static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
392 unsigned int entry, struct sk_buff *skb) 388 struct dma_desc *np, struct sk_buff *skb)
393{ 389{
394 struct skb_shared_hwtstamps *shhwtstamp = NULL; 390 struct skb_shared_hwtstamps *shhwtstamp = NULL;
395 u64 ns; 391 u64 ns;
396 void *desc = NULL;
397 392
398 if (!priv->hwts_rx_en) 393 if (!priv->hwts_rx_en)
399 return; 394 return;
400 395
401 if (priv->adv_ts) 396 /* Check if timestamp is available */
402 desc = (priv->dma_erx + entry); 397 if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
403 else 398 /* For GMAC4, the valid timestamp is from CTX next desc. */
404 desc = (priv->dma_rx + entry); 399 if (priv->plat->has_gmac4)
405 400 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
406 /* exit if rx tstamp is not valid */ 401 else
407 if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) 402 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
408 return;
409 403
410 /* get valid tstamp */ 404 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
411 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); 405 shhwtstamp = skb_hwtstamps(skb);
412 shhwtstamp = skb_hwtstamps(skb); 406 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
413 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 407 shhwtstamp->hwtstamp = ns_to_ktime(ns);
414 shhwtstamp->hwtstamp = ns_to_ktime(ns); 408 } else {
409 netdev_err(priv->dev, "cannot get RX hw timestamp\n");
410 }
415} 411}
416 412
417/** 413/**
@@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
598 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 594 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
599 595
600 if (!priv->hwts_tx_en && !priv->hwts_rx_en) 596 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
601 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); 597 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
602 else { 598 else {
603 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | 599 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
604 tstamp_all | ptp_v2 | ptp_over_ethernet | 600 tstamp_all | ptp_v2 | ptp_over_ethernet |
605 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 601 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
606 ts_master_en | snap_type_sel); 602 ts_master_en | snap_type_sel);
607 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); 603 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
608 604
609 /* program Sub Second Increment reg */ 605 /* program Sub Second Increment reg */
610 sec_inc = priv->hw->ptp->config_sub_second_increment( 606 sec_inc = priv->hw->ptp->config_sub_second_increment(
611 priv->ioaddr, priv->clk_ptp_rate); 607 priv->ptpaddr, priv->clk_ptp_rate,
608 priv->plat->has_gmac4);
612 temp = div_u64(1000000000ULL, sec_inc); 609 temp = div_u64(1000000000ULL, sec_inc);
613 610
614 /* calculate default added value: 611 /* calculate default added value:
@@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
618 */ 615 */
619 temp = (u64)(temp << 32); 616 temp = (u64)(temp << 32);
620 priv->default_addend = div_u64(temp, priv->clk_ptp_rate); 617 priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
621 priv->hw->ptp->config_addend(priv->ioaddr, 618 priv->hw->ptp->config_addend(priv->ptpaddr,
622 priv->default_addend); 619 priv->default_addend);
623 620
624 /* initialize system time */ 621 /* initialize system time */
625 ktime_get_real_ts64(&now); 622 ktime_get_real_ts64(&now);
626 623
627 /* lower 32 bits of tv_sec are safe until y2106 */ 624 /* lower 32 bits of tv_sec are safe until y2106 */
628 priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec, 625 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
629 now.tv_nsec); 626 now.tv_nsec);
630 } 627 }
631 628
@@ -880,6 +877,13 @@ static int stmmac_init_phy(struct net_device *dev)
880 return -ENODEV; 877 return -ENODEV;
881 } 878 }
882 879
880 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
881 * subsequent PHY polling, make sure we force a link transition if
882 * we have a UP/DOWN/UP transition
883 */
884 if (phydev->is_pseudo_fixed_link)
885 phydev->irq = PHY_POLL;
886
883 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" 887 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
884 " Link = %d\n", dev->name, phydev->phy_id, phydev->link); 888 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
885 889
@@ -1333,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1333 priv->dev->stats.tx_packets++; 1337 priv->dev->stats.tx_packets++;
1334 priv->xstats.tx_pkt_n++; 1338 priv->xstats.tx_pkt_n++;
1335 } 1339 }
1336 stmmac_get_tx_hwtstamp(priv, entry, skb); 1340 stmmac_get_tx_hwtstamp(priv, p, skb);
1337 } 1341 }
1338 1342
1339 if (likely(priv->tx_skbuff_dma[entry].buf)) { 1343 if (likely(priv->tx_skbuff_dma[entry].buf)) {
@@ -1479,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
1479 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 1483 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1480 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 1484 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1481 1485
1482 if (priv->synopsys_id >= DWMAC_CORE_4_00) 1486 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1487 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1483 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; 1488 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1484 else 1489 } else {
1490 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1485 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; 1491 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1492 }
1486 1493
1487 dwmac_mmc_intr_all_mask(priv->mmcaddr); 1494 dwmac_mmc_intr_all_mask(priv->mmcaddr);
1488 1495
@@ -2477,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2477 if (netif_msg_rx_status(priv)) { 2484 if (netif_msg_rx_status(priv)) {
2478 void *rx_head; 2485 void *rx_head;
2479 2486
2480 pr_debug("%s: descriptor ring:\n", __func__); 2487 pr_info(">>>>>> %s: descriptor ring:\n", __func__);
2481 if (priv->extend_desc) 2488 if (priv->extend_desc)
2482 rx_head = (void *)priv->dma_erx; 2489 rx_head = (void *)priv->dma_erx;
2483 else 2490 else
@@ -2488,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2488 while (count < limit) { 2495 while (count < limit) {
2489 int status; 2496 int status;
2490 struct dma_desc *p; 2497 struct dma_desc *p;
2498 struct dma_desc *np;
2491 2499
2492 if (priv->extend_desc) 2500 if (priv->extend_desc)
2493 p = (struct dma_desc *)(priv->dma_erx + entry); 2501 p = (struct dma_desc *)(priv->dma_erx + entry);
@@ -2507,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2507 next_entry = priv->cur_rx; 2515 next_entry = priv->cur_rx;
2508 2516
2509 if (priv->extend_desc) 2517 if (priv->extend_desc)
2510 prefetch(priv->dma_erx + next_entry); 2518 np = (struct dma_desc *)(priv->dma_erx + next_entry);
2511 else 2519 else
2512 prefetch(priv->dma_rx + next_entry); 2520 np = priv->dma_rx + next_entry;
2521
2522 prefetch(np);
2513 2523
2514 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) 2524 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2515 priv->hw->desc->rx_extended_status(&priv->dev->stats, 2525 priv->hw->desc->rx_extended_status(&priv->dev->stats,
@@ -2561,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2561 frame_len -= ETH_FCS_LEN; 2571 frame_len -= ETH_FCS_LEN;
2562 2572
2563 if (netif_msg_rx_status(priv)) { 2573 if (netif_msg_rx_status(priv)) {
2564 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", 2574 pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
2565 p, entry, des); 2575 p, entry, des);
2566 if (frame_len > ETH_FRAME_LEN) 2576 if (frame_len > ETH_FRAME_LEN)
2567 pr_debug("\tframe size %d, COE: %d\n", 2577 pr_debug("\tframe size %d, COE: %d\n",
@@ -2618,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2618 DMA_FROM_DEVICE); 2628 DMA_FROM_DEVICE);
2619 } 2629 }
2620 2630
2621 stmmac_get_rx_hwtstamp(priv, entry, skb);
2622
2623 if (netif_msg_pktdata(priv)) { 2631 if (netif_msg_pktdata(priv)) {
2624 pr_debug("frame received (%dbytes)", frame_len); 2632 pr_debug("frame received (%dbytes)", frame_len);
2625 print_pkt(skb->data, frame_len); 2633 print_pkt(skb->data, frame_len);
2626 } 2634 }
2627 2635
2636 stmmac_get_rx_hwtstamp(priv, p, np, skb);
2637
2628 stmmac_rx_vlan(priv->dev, skb); 2638 stmmac_rx_vlan(priv->dev, skb);
2629 2639
2630 skb->protocol = eth_type_trans(skb, priv->dev); 2640 skb->protocol = eth_type_trans(skb, priv->dev);
@@ -3406,7 +3416,6 @@ int stmmac_dvr_remove(struct device *dev)
3406 stmmac_set_mac(priv->ioaddr, false); 3416 stmmac_set_mac(priv->ioaddr, false);
3407 netif_carrier_off(ndev); 3417 netif_carrier_off(ndev);
3408 unregister_netdev(ndev); 3418 unregister_netdev(ndev);
3409 of_node_put(priv->plat->phy_node);
3410 if (priv->stmmac_rst) 3419 if (priv->stmmac_rst)
3411 reset_control_assert(priv->stmmac_rst); 3420 reset_control_assert(priv->stmmac_rst);
3412 clk_disable_unprepare(priv->pclk); 3421 clk_disable_unprepare(priv->pclk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 0a0d6a86f397..a840818bf4df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -200,7 +200,6 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
200/** 200/**
201 * stmmac_probe_config_dt - parse device-tree driver parameters 201 * stmmac_probe_config_dt - parse device-tree driver parameters
202 * @pdev: platform_device structure 202 * @pdev: platform_device structure
203 * @plat: driver data platform structure
204 * @mac: MAC address to use 203 * @mac: MAC address to use
205 * Description: 204 * Description:
206 * this function is to read the driver parameters from device-tree and 205 * this function is to read the driver parameters from device-tree and
@@ -306,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
306 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), 305 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
307 GFP_KERNEL); 306 GFP_KERNEL);
308 if (!dma_cfg) { 307 if (!dma_cfg) {
309 of_node_put(plat->phy_node); 308 stmmac_remove_config_dt(pdev, plat);
310 return ERR_PTR(-ENOMEM); 309 return ERR_PTR(-ENOMEM);
311 } 310 }
312 plat->dma_cfg = dma_cfg; 311 plat->dma_cfg = dma_cfg;
@@ -329,14 +328,37 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
329 328
330 return plat; 329 return plat;
331} 330}
331
332/**
333 * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
334 * @pdev: platform_device structure
335 * @plat: driver data platform structure
336 *
337 * Release resources claimed by stmmac_probe_config_dt().
338 */
339void stmmac_remove_config_dt(struct platform_device *pdev,
340 struct plat_stmmacenet_data *plat)
341{
342 struct device_node *np = pdev->dev.of_node;
343
344 if (of_phy_is_fixed_link(np))
345 of_phy_deregister_fixed_link(np);
346 of_node_put(plat->phy_node);
347}
332#else 348#else
333struct plat_stmmacenet_data * 349struct plat_stmmacenet_data *
334stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) 350stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
335{ 351{
336 return ERR_PTR(-ENOSYS); 352 return ERR_PTR(-ENOSYS);
337} 353}
354
355void stmmac_remove_config_dt(struct platform_device *pdev,
356 struct plat_stmmacenet_data *plat)
357{
358}
338#endif /* CONFIG_OF */ 359#endif /* CONFIG_OF */
339EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); 360EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
361EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
340 362
341int stmmac_get_platform_resources(struct platform_device *pdev, 363int stmmac_get_platform_resources(struct platform_device *pdev,
342 struct stmmac_resources *stmmac_res) 364 struct stmmac_resources *stmmac_res)
@@ -392,10 +414,13 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
392{ 414{
393 struct net_device *ndev = platform_get_drvdata(pdev); 415 struct net_device *ndev = platform_get_drvdata(pdev);
394 struct stmmac_priv *priv = netdev_priv(ndev); 416 struct stmmac_priv *priv = netdev_priv(ndev);
417 struct plat_stmmacenet_data *plat = priv->plat;
395 int ret = stmmac_dvr_remove(&pdev->dev); 418 int ret = stmmac_dvr_remove(&pdev->dev);
396 419
397 if (priv->plat->exit) 420 if (plat->exit)
398 priv->plat->exit(pdev, priv->plat->bsp_priv); 421 plat->exit(pdev, plat->bsp_priv);
422
423 stmmac_remove_config_dt(pdev, plat);
399 424
400 return ret; 425 return ret;
401} 426}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 64e147f53a9c..b72eb0de57b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -23,6 +23,8 @@
23 23
24struct plat_stmmacenet_data * 24struct plat_stmmacenet_data *
25stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); 25stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
26void stmmac_remove_config_dt(struct platform_device *pdev,
27 struct plat_stmmacenet_data *plat);
26 28
27int stmmac_get_platform_resources(struct platform_device *pdev, 29int stmmac_get_platform_resources(struct platform_device *pdev,
28 struct stmmac_resources *stmmac_res); 30 struct stmmac_resources *stmmac_res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 1477471f8d44..3eb281d1db08 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
54 54
55 spin_lock_irqsave(&priv->ptp_lock, flags); 55 spin_lock_irqsave(&priv->ptp_lock, flags);
56 56
57 priv->hw->ptp->config_addend(priv->ioaddr, addend); 57 priv->hw->ptp->config_addend(priv->ptpaddr, addend);
58 58
59 spin_unlock_irqrestore(&priv->ptp_lock, flags); 59 spin_unlock_irqrestore(&priv->ptp_lock, flags);
60 60
@@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
89 89
90 spin_lock_irqsave(&priv->ptp_lock, flags); 90 spin_lock_irqsave(&priv->ptp_lock, flags);
91 91
92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); 92 priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
93 priv->plat->has_gmac4);
93 94
94 spin_unlock_irqrestore(&priv->ptp_lock, flags); 95 spin_unlock_irqrestore(&priv->ptp_lock, flags);
95 96
@@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
114 115
115 spin_lock_irqsave(&priv->ptp_lock, flags); 116 spin_lock_irqsave(&priv->ptp_lock, flags);
116 117
117 ns = priv->hw->ptp->get_systime(priv->ioaddr); 118 ns = priv->hw->ptp->get_systime(priv->ptpaddr);
118 119
119 spin_unlock_irqrestore(&priv->ptp_lock, flags); 120 spin_unlock_irqrestore(&priv->ptp_lock, flags);
120 121
@@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp,
141 142
142 spin_lock_irqsave(&priv->ptp_lock, flags); 143 spin_lock_irqsave(&priv->ptp_lock, flags);
143 144
144 priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); 145 priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
145 146
146 spin_unlock_irqrestore(&priv->ptp_lock, flags); 147 spin_unlock_irqrestore(&priv->ptp_lock, flags);
147 148
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 4535df37c227..c06938c47af5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -22,51 +22,53 @@
22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> 22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
23******************************************************************************/ 23******************************************************************************/
24 24
25#ifndef __STMMAC_PTP_H__ 25#ifndef __STMMAC_PTP_H__
26#define __STMMAC_PTP_H__ 26#define __STMMAC_PTP_H__
27 27
28/* IEEE 1588 PTP register offsets */ 28#define PTP_GMAC4_OFFSET 0xb00
29#define PTP_TCR 0x0700 /* Timestamp Control Reg */ 29#define PTP_GMAC3_X_OFFSET 0x700
30#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
31#define PTP_STSR 0x0708 /* System Time – Seconds Regr */
32#define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */
33#define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */
34#define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */
35#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
36#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
37#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
38#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
39#define PTP_TSR 0x0728 /* Timestamp Status */
40 30
41#define PTP_STNSUR_ADDSUB_SHIFT 31 31/* IEEE 1588 PTP register offsets */
32#define PTP_TCR 0x00 /* Timestamp Control Reg */
33#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */
34#define PTP_STSR 0x08 /* System Time – Seconds Regr */
35#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */
36#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */
37#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */
38#define PTP_TAR 0x18 /* Timestamp Addend Reg */
42 39
43/* PTP TCR defines */ 40#define PTP_STNSUR_ADDSUB_SHIFT 31
44#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */ 41#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */
45#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */ 42#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */
46#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
47#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
48/* Timestamp Interrupt Trigger Enable */
49#define PTP_TCR_TSTRIG 0x00000010
50#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
51#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
52/* Timestamp Digital or Binary Rollover Control */
53#define PTP_TCR_TSCTRLSSR 0x00000200
54 43
44/* PTP Timestamp control register defines */
45#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */
46#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */
47#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */
48#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */
49#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */
50#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */
51#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */
52#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */
55/* Enable PTP packet Processing for Version 2 Format */ 53/* Enable PTP packet Processing for Version 2 Format */
56#define PTP_TCR_TSVER2ENA 0x00000400 54#define PTP_TCR_TSVER2ENA BIT(10)
57/* Enable Processing of PTP over Ethernet Frames */ 55/* Enable Processing of PTP over Ethernet Frames */
58#define PTP_TCR_TSIPENA 0x00000800 56#define PTP_TCR_TSIPENA BIT(11)
59/* Enable Processing of PTP Frames Sent over IPv6-UDP */ 57/* Enable Processing of PTP Frames Sent over IPv6-UDP */
60#define PTP_TCR_TSIPV6ENA 0x00001000 58#define PTP_TCR_TSIPV6ENA BIT(12)
61/* Enable Processing of PTP Frames Sent over IPv4-UDP */ 59/* Enable Processing of PTP Frames Sent over IPv4-UDP */
62#define PTP_TCR_TSIPV4ENA 0x00002000 60#define PTP_TCR_TSIPV4ENA BIT(13)
63/* Enable Timestamp Snapshot for Event Messages */ 61/* Enable Timestamp Snapshot for Event Messages */
64#define PTP_TCR_TSEVNTENA 0x00004000 62#define PTP_TCR_TSEVNTENA BIT(14)
65/* Enable Snapshot for Messages Relevant to Master */ 63/* Enable Snapshot for Messages Relevant to Master */
66#define PTP_TCR_TSMSTRENA 0x00008000 64#define PTP_TCR_TSMSTRENA BIT(15)
67/* Select PTP packets for Taking Snapshots */ 65/* Select PTP packets for Taking Snapshots */
68#define PTP_TCR_SNAPTYPSEL_1 0x00010000 66#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
69/* Enable MAC address for PTP Frame Filtering */ 67/* Enable MAC address for PTP Frame Filtering */
70#define PTP_TCR_TSENMACADDR 0x00040000 68#define PTP_TCR_TSENMACADDR BIT(18)
69
70/* SSIR defines */
71#define PTP_SSIR_SSINC_MASK 0xff
72#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
71 73
72#endif /* __STMMAC_PTP_H__ */ 74#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index aa4f9d2d8fa9..02f452730d52 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
623 void __iomem *gregs = bp->gregs; 623 void __iomem *gregs = bp->gregs;
624 void __iomem *cregs = bp->creg; 624 void __iomem *cregs = bp->creg;
625 void __iomem *bregs = bp->bregs; 625 void __iomem *bregs = bp->bregs;
626 __u32 bblk_dvma = (__u32)bp->bblock_dvma;
626 unsigned char *e = &bp->dev->dev_addr[0]; 627 unsigned char *e = &bp->dev->dev_addr[0];
627 628
628 /* Latch current counters into statistics. */ 629 /* Latch current counters into statistics. */
@@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
671 bregs + BMAC_XIFCFG); 672 bregs + BMAC_XIFCFG);
672 673
673 /* Tell the QEC where the ring descriptors are. */ 674 /* Tell the QEC where the ring descriptors are. */
674 sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), 675 sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
675 cregs + CREG_RXDS); 676 cregs + CREG_RXDS);
676 sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), 677 sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
677 cregs + CREG_TXDS); 678 cregs + CREG_TXDS);
678 679
679 /* Setup the FIFO pointers into QEC local memory. */ 680 /* Setup the FIFO pointers into QEC local memory. */
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h
index 06dd21707353..532fc56830cf 100644
--- a/drivers/net/ethernet/sun/sunbmac.h
+++ b/drivers/net/ethernet/sun/sunbmac.h
@@ -291,7 +291,7 @@ struct bigmac {
291 void __iomem *bregs; /* BigMAC Registers */ 291 void __iomem *bregs; /* BigMAC Registers */
292 void __iomem *tregs; /* BigMAC Transceiver */ 292 void __iomem *tregs; /* BigMAC Transceiver */
293 struct bmac_init_block *bmac_block; /* RX and TX descriptors */ 293 struct bmac_init_block *bmac_block; /* RX and TX descriptors */
294 __u32 bblock_dvma; /* RX and TX descriptors */ 294 dma_addr_t bblock_dvma; /* RX and TX descriptors */
295 295
296 spinlock_t lock; 296 spinlock_t lock;
297 297
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 9b825780b3be..9582948145c1 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep)
124{ 124{
125 struct qe_init_block *qb = qep->qe_block; 125 struct qe_init_block *qb = qep->qe_block;
126 struct sunqe_buffers *qbufs = qep->buffers; 126 struct sunqe_buffers *qbufs = qep->buffers;
127 __u32 qbufs_dvma = qep->buffers_dvma; 127 __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
128 int i; 128 int i;
129 129
130 qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; 130 qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
144 void __iomem *mregs = qep->mregs; 144 void __iomem *mregs = qep->mregs;
145 void __iomem *gregs = qecp->gregs; 145 void __iomem *gregs = qecp->gregs;
146 unsigned char *e = &qep->dev->dev_addr[0]; 146 unsigned char *e = &qep->dev->dev_addr[0];
147 __u32 qblk_dvma = (__u32)qep->qblock_dvma;
147 u32 tmp; 148 u32 tmp;
148 int i; 149 int i;
149 150
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq)
152 return -EAGAIN; 153 return -EAGAIN;
153 154
154 /* Setup initial rx/tx init block pointers. */ 155 /* Setup initial rx/tx init block pointers. */
155 sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); 156 sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
156 sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); 157 sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
157 158
158 /* Enable/mask the various irq's. */ 159 /* Enable/mask the various irq's. */
159 sbus_writel(0, cregs + CREG_RIMASK); 160 sbus_writel(0, cregs + CREG_RIMASK);
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
413 struct net_device *dev = qep->dev; 414 struct net_device *dev = qep->dev;
414 struct qe_rxd *this; 415 struct qe_rxd *this;
415 struct sunqe_buffers *qbufs = qep->buffers; 416 struct sunqe_buffers *qbufs = qep->buffers;
416 __u32 qbufs_dvma = qep->buffers_dvma; 417 __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
417 int elem = qep->rx_new; 418 int elem = qep->rx_new;
418 u32 flags; 419 u32 flags;
419 420
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
572{ 573{
573 struct sunqe *qep = netdev_priv(dev); 574 struct sunqe *qep = netdev_priv(dev);
574 struct sunqe_buffers *qbufs = qep->buffers; 575 struct sunqe_buffers *qbufs = qep->buffers;
575 __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; 576 __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
576 unsigned char *txbuf; 577 unsigned char *txbuf;
577 int len, entry; 578 int len, entry;
578 579
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h
index 581781b6b2fa..ae190b77431b 100644
--- a/drivers/net/ethernet/sun/sunqe.h
+++ b/drivers/net/ethernet/sun/sunqe.h
@@ -334,12 +334,12 @@ struct sunqe {
334 void __iomem *qcregs; /* QEC per-channel Registers */ 334 void __iomem *qcregs; /* QEC per-channel Registers */
335 void __iomem *mregs; /* Per-channel MACE Registers */ 335 void __iomem *mregs; /* Per-channel MACE Registers */
336 struct qe_init_block *qe_block; /* RX and TX descriptors */ 336 struct qe_init_block *qe_block; /* RX and TX descriptors */
337 __u32 qblock_dvma; /* RX and TX descriptors */ 337 dma_addr_t qblock_dvma; /* RX and TX descriptors */
338 spinlock_t lock; /* Protects txfull state */ 338 spinlock_t lock; /* Protects txfull state */
339 int rx_new, rx_old; /* RX ring extents */ 339 int rx_new, rx_old; /* RX ring extents */
340 int tx_new, tx_old; /* TX ring extents */ 340 int tx_new, tx_old; /* TX ring extents */
341 struct sunqe_buffers *buffers; /* CPU visible address. */ 341 struct sunqe_buffers *buffers; /* CPU visible address. */
342 __u32 buffers_dvma; /* DVMA visible address. */ 342 dma_addr_t buffers_dvma; /* DVMA visible address. */
343 struct sunqec *parent; 343 struct sunqec *parent;
344 u8 mconfig; /* Base MACE mconfig value */ 344 u8 mconfig; /* Base MACE mconfig value */
345 struct platform_device *op; /* QE's OF device struct */ 345 struct platform_device *op; /* QE's OF device struct */
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 5eedac495077..97d64bfed465 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -33,7 +33,6 @@
33#include <linux/stat.h> 33#include <linux/stat.h>
34#include <linux/types.h> 34#include <linux/types.h>
35 35
36#include <linux/types.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38#include <linux/delay.h> 37#include <linux/delay.h>
39#include <linux/mm.h> 38#include <linux/mm.h>
@@ -43,7 +42,6 @@
43 42
44#include <linux/phy.h> 43#include <linux/phy.h>
45#include <linux/mii.h> 44#include <linux/mii.h>
46#include <linux/delay.h>
47#include <linux/dma-mapping.h> 45#include <linux/dma-mapping.h>
48#include <linux/vmalloc.h> 46#include <linux/vmalloc.h>
49 47
@@ -2883,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2883 ret = of_get_phy_mode(lp->pdev->dev.of_node); 2881 ret = of_get_phy_mode(lp->pdev->dev.of_node);
2884 if (ret < 0) { 2882 if (ret < 0) {
2885 dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); 2883 dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
2886 goto err_out_clk_dis_phy; 2884 goto err_out_deregister_fixed_link;
2887 } 2885 }
2888 2886
2889 lp->phy_interface = ret; 2887 lp->phy_interface = ret;
@@ -2891,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
2891 ret = dwceqos_mii_init(lp); 2889 ret = dwceqos_mii_init(lp);
2892 if (ret) { 2890 if (ret) {
2893 dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); 2891 dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
2894 goto err_out_clk_dis_phy; 2892 goto err_out_deregister_fixed_link;
2895 } 2893 }
2896 2894
2897 ret = dwceqos_mii_probe(ndev); 2895 ret = dwceqos_mii_probe(ndev);
2898 if (ret != 0) { 2896 if (ret != 0) {
2899 netdev_err(ndev, "mii_probe fail.\n"); 2897 netdev_err(ndev, "mii_probe fail.\n");
2900 ret = -ENXIO; 2898 ret = -ENXIO;
2901 goto err_out_clk_dis_phy; 2899 goto err_out_deregister_fixed_link;
2902 } 2900 }
2903 2901
2904 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); 2902 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2916,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2916 if (ret) { 2914 if (ret) {
2917 dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", 2915 dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
2918 ret); 2916 ret);
2919 goto err_out_clk_dis_phy; 2917 goto err_out_deregister_fixed_link;
2920 } 2918 }
2921 dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", 2919 dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
2922 pdev->id, ndev->base_addr, ndev->irq); 2920 pdev->id, ndev->base_addr, ndev->irq);
@@ -2926,7 +2924,7 @@ static int dwceqos_probe(struct platform_device *pdev)
2926 if (ret) { 2924 if (ret) {
2927 dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", 2925 dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
2928 ndev->irq, ret); 2926 ndev->irq, ret);
2929 goto err_out_clk_dis_phy; 2927 goto err_out_deregister_fixed_link;
2930 } 2928 }
2931 2929
2932 if (netif_msg_probe(lp)) 2930 if (netif_msg_probe(lp))
@@ -2937,11 +2935,14 @@ static int dwceqos_probe(struct platform_device *pdev)
2937 ret = register_netdev(ndev); 2935 ret = register_netdev(ndev);
2938 if (ret) { 2936 if (ret) {
2939 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 2937 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2940 goto err_out_clk_dis_phy; 2938 goto err_out_deregister_fixed_link;
2941 } 2939 }
2942 2940
2943 return 0; 2941 return 0;
2944 2942
2943err_out_deregister_fixed_link:
2944 if (of_phy_is_fixed_link(pdev->dev.of_node))
2945 of_phy_deregister_fixed_link(pdev->dev.of_node);
2945err_out_clk_dis_phy: 2946err_out_clk_dis_phy:
2946 clk_disable_unprepare(lp->phy_ref_clk); 2947 clk_disable_unprepare(lp->phy_ref_clk);
2947err_out_clk_dis_aper: 2948err_out_clk_dis_aper:
@@ -2961,8 +2962,11 @@ static int dwceqos_remove(struct platform_device *pdev)
2961 if (ndev) { 2962 if (ndev) {
2962 lp = netdev_priv(ndev); 2963 lp = netdev_priv(ndev);
2963 2964
2964 if (ndev->phydev) 2965 if (ndev->phydev) {
2965 phy_disconnect(ndev->phydev); 2966 phy_disconnect(ndev->phydev);
2967 if (of_phy_is_fixed_link(pdev->dev.of_node))
2968 of_phy_deregister_fixed_link(pdev->dev.of_node);
2969 }
2966 mdiobus_unregister(lp->mii_bus); 2970 mdiobus_unregister(lp->mii_bus);
2967 mdiobus_free(lp->mii_bus); 2971 mdiobus_free(lp->mii_bus);
2968 2972
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 054a8dd23dae..ba1e45ff6aae 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
176 } 176 }
177 177
178 dev = bus_find_device(&platform_bus_type, NULL, node, match); 178 dev = bus_find_device(&platform_bus_type, NULL, node, match);
179 of_node_put(node);
179 priv = dev_get_drvdata(dev); 180 priv = dev_get_drvdata(dev);
180 181
181 priv->cpsw_phy_sel(priv, phy_mode, slave); 182 priv->cpsw_phy_sel(priv, phy_mode, slave);
183
184 put_device(dev);
182} 185}
183EXPORT_SYMBOL_GPL(cpsw_phy_sel); 186EXPORT_SYMBOL_GPL(cpsw_phy_sel);
184 187
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c6cff3d2ff05..b9087b828eff 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2375 * to the PHY is the Ethernet MAC DT node. 2375 * to the PHY is the Ethernet MAC DT node.
2376 */ 2376 */
2377 ret = of_phy_register_fixed_link(slave_node); 2377 ret = of_phy_register_fixed_link(slave_node);
2378 if (ret) 2378 if (ret) {
2379 if (ret != -EPROBE_DEFER)
2380 dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
2379 return ret; 2381 return ret;
2382 }
2380 slave_data->phy_node = of_node_get(slave_node); 2383 slave_data->phy_node = of_node_get(slave_node);
2381 } else if (parp) { 2384 } else if (parp) {
2382 u32 phyid; 2385 u32 phyid;
@@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2397 } 2400 }
2398 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2401 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2399 PHY_ID_FMT, mdio->name, phyid); 2402 PHY_ID_FMT, mdio->name, phyid);
2403 put_device(&mdio->dev);
2400 } else { 2404 } else {
2401 dev_err(&pdev->dev, 2405 dev_err(&pdev->dev,
2402 "No slave[%d] phy_id, phy-handle, or fixed-link property\n", 2406 "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
@@ -2440,6 +2444,34 @@ no_phy_slave:
2440 return 0; 2444 return 0;
2441} 2445}
2442 2446
2447static void cpsw_remove_dt(struct platform_device *pdev)
2448{
2449 struct net_device *ndev = platform_get_drvdata(pdev);
2450 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2451 struct cpsw_platform_data *data = &cpsw->data;
2452 struct device_node *node = pdev->dev.of_node;
2453 struct device_node *slave_node;
2454 int i = 0;
2455
2456 for_each_available_child_of_node(node, slave_node) {
2457 struct cpsw_slave_data *slave_data = &data->slave_data[i];
2458
2459 if (strcmp(slave_node->name, "slave"))
2460 continue;
2461
2462 if (of_phy_is_fixed_link(slave_node))
2463 of_phy_deregister_fixed_link(slave_node);
2464
2465 of_node_put(slave_data->phy_node);
2466
2467 i++;
2468 if (i == data->slaves)
2469 break;
2470 }
2471
2472 of_platform_depopulate(&pdev->dev);
2473}
2474
2443static int cpsw_probe_dual_emac(struct cpsw_priv *priv) 2475static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
2444{ 2476{
2445 struct cpsw_common *cpsw = priv->cpsw; 2477 struct cpsw_common *cpsw = priv->cpsw;
@@ -2547,6 +2579,9 @@ static int cpsw_probe(struct platform_device *pdev)
2547 int irq; 2579 int irq;
2548 2580
2549 cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); 2581 cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
2582 if (!cpsw)
2583 return -ENOMEM;
2584
2550 cpsw->dev = &pdev->dev; 2585 cpsw->dev = &pdev->dev;
2551 2586
2552 ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); 2587 ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
@@ -2584,11 +2619,19 @@ static int cpsw_probe(struct platform_device *pdev)
2584 /* Select default pin state */ 2619 /* Select default pin state */
2585 pinctrl_pm_select_default_state(&pdev->dev); 2620 pinctrl_pm_select_default_state(&pdev->dev);
2586 2621
2587 if (cpsw_probe_dt(&cpsw->data, pdev)) { 2622 /* Need to enable clocks with runtime PM api to access module
2588 dev_err(&pdev->dev, "cpsw: platform data missing\n"); 2623 * registers
2589 ret = -ENODEV; 2624 */
2625 ret = pm_runtime_get_sync(&pdev->dev);
2626 if (ret < 0) {
2627 pm_runtime_put_noidle(&pdev->dev);
2590 goto clean_runtime_disable_ret; 2628 goto clean_runtime_disable_ret;
2591 } 2629 }
2630
2631 ret = cpsw_probe_dt(&cpsw->data, pdev);
2632 if (ret)
2633 goto clean_dt_ret;
2634
2592 data = &cpsw->data; 2635 data = &cpsw->data;
2593 cpsw->rx_ch_num = 1; 2636 cpsw->rx_ch_num = 1;
2594 cpsw->tx_ch_num = 1; 2637 cpsw->tx_ch_num = 1;
@@ -2608,7 +2651,7 @@ static int cpsw_probe(struct platform_device *pdev)
2608 GFP_KERNEL); 2651 GFP_KERNEL);
2609 if (!cpsw->slaves) { 2652 if (!cpsw->slaves) {
2610 ret = -ENOMEM; 2653 ret = -ENOMEM;
2611 goto clean_runtime_disable_ret; 2654 goto clean_dt_ret;
2612 } 2655 }
2613 for (i = 0; i < data->slaves; i++) 2656 for (i = 0; i < data->slaves; i++)
2614 cpsw->slaves[i].slave_num = i; 2657 cpsw->slaves[i].slave_num = i;
@@ -2620,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev)
2620 if (IS_ERR(clk)) { 2663 if (IS_ERR(clk)) {
2621 dev_err(priv->dev, "fck is not found\n"); 2664 dev_err(priv->dev, "fck is not found\n");
2622 ret = -ENODEV; 2665 ret = -ENODEV;
2623 goto clean_runtime_disable_ret; 2666 goto clean_dt_ret;
2624 } 2667 }
2625 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; 2668 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
2626 2669
@@ -2628,26 +2671,17 @@ static int cpsw_probe(struct platform_device *pdev)
2628 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); 2671 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
2629 if (IS_ERR(ss_regs)) { 2672 if (IS_ERR(ss_regs)) {
2630 ret = PTR_ERR(ss_regs); 2673 ret = PTR_ERR(ss_regs);
2631 goto clean_runtime_disable_ret; 2674 goto clean_dt_ret;
2632 } 2675 }
2633 cpsw->regs = ss_regs; 2676 cpsw->regs = ss_regs;
2634 2677
2635 /* Need to enable clocks with runtime PM api to access module
2636 * registers
2637 */
2638 ret = pm_runtime_get_sync(&pdev->dev);
2639 if (ret < 0) {
2640 pm_runtime_put_noidle(&pdev->dev);
2641 goto clean_runtime_disable_ret;
2642 }
2643 cpsw->version = readl(&cpsw->regs->id_ver); 2678 cpsw->version = readl(&cpsw->regs->id_ver);
2644 pm_runtime_put_sync(&pdev->dev);
2645 2679
2646 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2680 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2647 cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); 2681 cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
2648 if (IS_ERR(cpsw->wr_regs)) { 2682 if (IS_ERR(cpsw->wr_regs)) {
2649 ret = PTR_ERR(cpsw->wr_regs); 2683 ret = PTR_ERR(cpsw->wr_regs);
2650 goto clean_runtime_disable_ret; 2684 goto clean_dt_ret;
2651 } 2685 }
2652 2686
2653 memset(&dma_params, 0, sizeof(dma_params)); 2687 memset(&dma_params, 0, sizeof(dma_params));
@@ -2684,7 +2718,7 @@ static int cpsw_probe(struct platform_device *pdev)
2684 default: 2718 default:
2685 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); 2719 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
2686 ret = -ENODEV; 2720 ret = -ENODEV;
2687 goto clean_runtime_disable_ret; 2721 goto clean_dt_ret;
2688 } 2722 }
2689 for (i = 0; i < cpsw->data.slaves; i++) { 2723 for (i = 0; i < cpsw->data.slaves; i++) {
2690 struct cpsw_slave *slave = &cpsw->slaves[i]; 2724 struct cpsw_slave *slave = &cpsw->slaves[i];
@@ -2713,7 +2747,7 @@ static int cpsw_probe(struct platform_device *pdev)
2713 if (!cpsw->dma) { 2747 if (!cpsw->dma) {
2714 dev_err(priv->dev, "error initializing dma\n"); 2748 dev_err(priv->dev, "error initializing dma\n");
2715 ret = -ENOMEM; 2749 ret = -ENOMEM;
2716 goto clean_runtime_disable_ret; 2750 goto clean_dt_ret;
2717 } 2751 }
2718 2752
2719 cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); 2753 cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
@@ -2811,16 +2845,23 @@ static int cpsw_probe(struct platform_device *pdev)
2811 ret = cpsw_probe_dual_emac(priv); 2845 ret = cpsw_probe_dual_emac(priv);
2812 if (ret) { 2846 if (ret) {
2813 cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); 2847 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
2814 goto clean_ale_ret; 2848 goto clean_unregister_netdev_ret;
2815 } 2849 }
2816 } 2850 }
2817 2851
2852 pm_runtime_put(&pdev->dev);
2853
2818 return 0; 2854 return 0;
2819 2855
2856clean_unregister_netdev_ret:
2857 unregister_netdev(ndev);
2820clean_ale_ret: 2858clean_ale_ret:
2821 cpsw_ale_destroy(cpsw->ale); 2859 cpsw_ale_destroy(cpsw->ale);
2822clean_dma_ret: 2860clean_dma_ret:
2823 cpdma_ctlr_destroy(cpsw->dma); 2861 cpdma_ctlr_destroy(cpsw->dma);
2862clean_dt_ret:
2863 cpsw_remove_dt(pdev);
2864 pm_runtime_put_sync(&pdev->dev);
2824clean_runtime_disable_ret: 2865clean_runtime_disable_ret:
2825 pm_runtime_disable(&pdev->dev); 2866 pm_runtime_disable(&pdev->dev);
2826clean_ndev_ret: 2867clean_ndev_ret:
@@ -2846,7 +2887,7 @@ static int cpsw_remove(struct platform_device *pdev)
2846 2887
2847 cpsw_ale_destroy(cpsw->ale); 2888 cpsw_ale_destroy(cpsw->ale);
2848 cpdma_ctlr_destroy(cpsw->dma); 2889 cpdma_ctlr_destroy(cpsw->dma);
2849 of_platform_depopulate(&pdev->dev); 2890 cpsw_remove_dt(pdev);
2850 pm_runtime_put_sync(&pdev->dev); 2891 pm_runtime_put_sync(&pdev->dev);
2851 pm_runtime_disable(&pdev->dev); 2892 pm_runtime_disable(&pdev->dev);
2852 if (cpsw->data.dual_emac) 2893 if (cpsw->data.dual_emac)
@@ -2889,6 +2930,8 @@ static int cpsw_resume(struct device *dev)
2889 /* Select default pin state */ 2930 /* Select default pin state */
2890 pinctrl_pm_select_default_state(dev); 2931 pinctrl_pm_select_default_state(dev);
2891 2932
2933 /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
2934 rtnl_lock();
2892 if (cpsw->data.dual_emac) { 2935 if (cpsw->data.dual_emac) {
2893 int i; 2936 int i;
2894 2937
@@ -2900,6 +2943,8 @@ static int cpsw_resume(struct device *dev)
2900 if (netif_running(ndev)) 2943 if (netif_running(ndev))
2901 cpsw_ndo_open(ndev); 2944 cpsw_ndo_open(ndev);
2902 } 2945 }
2946 rtnl_unlock();
2947
2903 return 0; 2948 return 0;
2904} 2949}
2905#endif 2950#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2fd94a5bc1f3..481c7bf0395b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev)
1410 int i = 0; 1410 int i = 0;
1411 struct emac_priv *priv = netdev_priv(ndev); 1411 struct emac_priv *priv = netdev_priv(ndev);
1412 struct phy_device *phydev = NULL; 1412 struct phy_device *phydev = NULL;
1413 struct device *phy = NULL;
1413 1414
1414 ret = pm_runtime_get_sync(&priv->pdev->dev); 1415 ret = pm_runtime_get_sync(&priv->pdev->dev);
1415 if (ret < 0) { 1416 if (ret < 0) {
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev)
1488 1489
1489 /* use the first phy on the bus if pdata did not give us a phy id */ 1490 /* use the first phy on the bus if pdata did not give us a phy id */
1490 if (!phydev && !priv->phy_id) { 1491 if (!phydev && !priv->phy_id) {
1491 struct device *phy;
1492
1493 phy = bus_find_device(&mdio_bus_type, NULL, NULL, 1492 phy = bus_find_device(&mdio_bus_type, NULL, NULL,
1494 match_first_device); 1493 match_first_device);
1495 if (phy) 1494 if (phy) {
1496 priv->phy_id = dev_name(phy); 1495 priv->phy_id = dev_name(phy);
1496 if (!priv->phy_id || !*priv->phy_id)
1497 put_device(phy);
1498 }
1497 } 1499 }
1498 1500
1499 if (!phydev && priv->phy_id && *priv->phy_id) { 1501 if (!phydev && priv->phy_id && *priv->phy_id) {
1500 phydev = phy_connect(ndev, priv->phy_id, 1502 phydev = phy_connect(ndev, priv->phy_id,
1501 &emac_adjust_link, 1503 &emac_adjust_link,
1502 PHY_INTERFACE_MODE_MII); 1504 PHY_INTERFACE_MODE_MII);
1503 1505 put_device(phy); /* reference taken by bus_find_device */
1504 if (IS_ERR(phydev)) { 1506 if (IS_ERR(phydev)) {
1505 dev_err(emac_dev, "could not connect to phy %s\n", 1507 dev_err(emac_dev, "could not connect to phy %s\n",
1506 priv->phy_id); 1508 priv->phy_id);
@@ -1765,6 +1767,7 @@ static int davinci_emac_try_get_mac(struct platform_device *pdev,
1765 */ 1767 */
1766static int davinci_emac_probe(struct platform_device *pdev) 1768static int davinci_emac_probe(struct platform_device *pdev)
1767{ 1769{
1770 struct device_node *np = pdev->dev.of_node;
1768 int rc = 0; 1771 int rc = 0;
1769 struct resource *res, *res_ctrl; 1772 struct resource *res, *res_ctrl;
1770 struct net_device *ndev; 1773 struct net_device *ndev;
@@ -1803,7 +1806,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1803 if (!pdata) { 1806 if (!pdata) {
1804 dev_err(&pdev->dev, "no platform data\n"); 1807 dev_err(&pdev->dev, "no platform data\n");
1805 rc = -ENODEV; 1808 rc = -ENODEV;
1806 goto no_pdata; 1809 goto err_free_netdev;
1807 } 1810 }
1808 1811
1809 /* MAC addr and PHY mask , RMII enable info from platform_data */ 1812 /* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1939,6 +1942,10 @@ no_cpdma_chan:
1939 cpdma_chan_destroy(priv->rxchan); 1942 cpdma_chan_destroy(priv->rxchan);
1940 cpdma_ctlr_destroy(priv->dma); 1943 cpdma_ctlr_destroy(priv->dma);
1941no_pdata: 1944no_pdata:
1945 if (of_phy_is_fixed_link(np))
1946 of_phy_deregister_fixed_link(np);
1947 of_node_put(priv->phy_node);
1948err_free_netdev:
1942 free_netdev(ndev); 1949 free_netdev(ndev);
1943 return rc; 1950 return rc;
1944} 1951}
@@ -1954,6 +1961,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
1954{ 1961{
1955 struct net_device *ndev = platform_get_drvdata(pdev); 1962 struct net_device *ndev = platform_get_drvdata(pdev);
1956 struct emac_priv *priv = netdev_priv(ndev); 1963 struct emac_priv *priv = netdev_priv(ndev);
1964 struct device_node *np = pdev->dev.of_node;
1957 1965
1958 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); 1966 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
1959 1967
@@ -1966,6 +1974,8 @@ static int davinci_emac_remove(struct platform_device *pdev)
1966 unregister_netdev(ndev); 1974 unregister_netdev(ndev);
1967 of_node_put(priv->phy_node); 1975 of_node_put(priv->phy_node);
1968 pm_runtime_disable(&pdev->dev); 1976 pm_runtime_disable(&pdev->dev);
1977 if (of_phy_is_fixed_link(np))
1978 of_phy_deregister_fixed_link(np);
1969 free_netdev(ndev); 1979 free_netdev(ndev);
1970 1980
1971 return 0; 1981 return 0;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 446ea580ad42..928c1dca2673 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
1694 pr_debug("%s: bssid matched\n", __func__); 1694 pr_debug("%s: bssid matched\n", __func__);
1695 break; 1695 break;
1696 } else { 1696 } else {
1697 pr_debug("%s: bssid unmached\n", __func__); 1697 pr_debug("%s: bssid unmatched\n", __func__);
1698 continue; 1698 continue;
1699 } 1699 }
1700 } 1700 }
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 7f127dc1b7ba..fa32391720fe 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
708 if (!qmgr_stat_below_low_watermark(rxq) && 708 if (!qmgr_stat_below_low_watermark(rxq) &&
709 napi_reschedule(napi)) { /* not empty again */ 709 napi_reschedule(napi)) { /* not empty again */
710#if DEBUG_RX 710#if DEBUG_RX
711 printk(KERN_DEBUG "%s: eth_poll" 711 printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
712 " napi_reschedule successed\n",
713 dev->name); 712 dev->name);
714#endif 713#endif
715 qmgr_disable_irq(rxq); 714 qmgr_disable_irq(rxq);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 42edd7b7902f..8b4822ad27cb 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -859,7 +859,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
859 struct geneve_dev *geneve = netdev_priv(dev); 859 struct geneve_dev *geneve = netdev_priv(dev);
860 struct geneve_sock *gs4; 860 struct geneve_sock *gs4;
861 struct rtable *rt = NULL; 861 struct rtable *rt = NULL;
862 const struct iphdr *iip; /* interior IP header */
863 int err = -EINVAL; 862 int err = -EINVAL;
864 struct flowi4 fl4; 863 struct flowi4 fl4;
865 __u8 tos, ttl; 864 __u8 tos, ttl;
@@ -890,8 +889,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
890 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 889 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
891 skb_reset_mac_header(skb); 890 skb_reset_mac_header(skb);
892 891
893 iip = ip_hdr(skb);
894
895 if (info) { 892 if (info) {
896 const struct ip_tunnel_key *key = &info->key; 893 const struct ip_tunnel_key *key = &info->key;
897 u8 *opts = NULL; 894 u8 *opts = NULL;
@@ -911,7 +908,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
911 if (unlikely(err)) 908 if (unlikely(err))
912 goto tx_error; 909 goto tx_error;
913 910
914 tos = ip_tunnel_ecn_encap(key->tos, iip, skb); 911 tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
915 ttl = key->ttl; 912 ttl = key->ttl;
916 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 913 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
917 } else { 914 } else {
@@ -920,7 +917,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
920 if (unlikely(err)) 917 if (unlikely(err))
921 goto tx_error; 918 goto tx_error;
922 919
923 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); 920 tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
924 ttl = geneve->ttl; 921 ttl = geneve->ttl;
925 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) 922 if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
926 ttl = 1; 923 ttl = 1;
@@ -952,7 +949,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
952{ 949{
953 struct geneve_dev *geneve = netdev_priv(dev); 950 struct geneve_dev *geneve = netdev_priv(dev);
954 struct dst_entry *dst = NULL; 951 struct dst_entry *dst = NULL;
955 const struct iphdr *iip; /* interior IP header */
956 struct geneve_sock *gs6; 952 struct geneve_sock *gs6;
957 int err = -EINVAL; 953 int err = -EINVAL;
958 struct flowi6 fl6; 954 struct flowi6 fl6;
@@ -982,8 +978,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
982 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); 978 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
983 skb_reset_mac_header(skb); 979 skb_reset_mac_header(skb);
984 980
985 iip = ip_hdr(skb);
986
987 if (info) { 981 if (info) {
988 const struct ip_tunnel_key *key = &info->key; 982 const struct ip_tunnel_key *key = &info->key;
989 u8 *opts = NULL; 983 u8 *opts = NULL;
@@ -1004,7 +998,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1004 if (unlikely(err)) 998 if (unlikely(err))
1005 goto tx_error; 999 goto tx_error;
1006 1000
1007 prio = ip_tunnel_ecn_encap(key->tos, iip, skb); 1001 prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
1008 ttl = key->ttl; 1002 ttl = key->ttl;
1009 label = info->key.label; 1003 label = info->key.label;
1010 } else { 1004 } else {
@@ -1014,7 +1008,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
1014 goto tx_error; 1008 goto tx_error;
1015 1009
1016 prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), 1010 prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
1017 iip, skb); 1011 ip_hdr(skb), skb);
1018 ttl = geneve->ttl; 1012 ttl = geneve->ttl;
1019 if (!ttl && ipv6_addr_is_multicast(&fl6.daddr)) 1013 if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
1020 ttl = 1; 1014 ttl = 1;
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 9fa7ac9f8e68..f355df7cf84a 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -20,7 +20,6 @@
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/delay.h>
24#include <linux/debugfs.h> 23#include <linux/debugfs.h>
25#include <linux/bitops.h> 24#include <linux/bitops.h>
26#include <linux/ieee802154.h> 25#include <linux/ieee802154.h>
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f442eb366863..0fef17874d50 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -497,6 +497,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
497 struct net_device *phy_dev; 497 struct net_device *phy_dev;
498 int err; 498 int err;
499 u16 mode = IPVLAN_MODE_L3; 499 u16 mode = IPVLAN_MODE_L3;
500 bool create = false;
500 501
501 if (!tb[IFLA_LINK]) 502 if (!tb[IFLA_LINK])
502 return -EINVAL; 503 return -EINVAL;
@@ -513,6 +514,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
513 err = ipvlan_port_create(phy_dev); 514 err = ipvlan_port_create(phy_dev);
514 if (err < 0) 515 if (err < 0)
515 return err; 516 return err;
517 create = true;
516 } 518 }
517 519
518 if (data && data[IFLA_IPVLAN_MODE]) 520 if (data && data[IFLA_IPVLAN_MODE])
@@ -536,22 +538,27 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
536 538
537 err = register_netdevice(dev); 539 err = register_netdevice(dev);
538 if (err < 0) 540 if (err < 0)
539 return err; 541 goto destroy_ipvlan_port;
540 542
541 err = netdev_upper_dev_link(phy_dev, dev); 543 err = netdev_upper_dev_link(phy_dev, dev);
542 if (err) { 544 if (err) {
543 unregister_netdevice(dev); 545 goto unregister_netdev;
544 return err;
545 } 546 }
546 err = ipvlan_set_port_mode(port, mode); 547 err = ipvlan_set_port_mode(port, mode);
547 if (err) { 548 if (err) {
548 unregister_netdevice(dev); 549 goto unregister_netdev;
549 return err;
550 } 550 }
551 551
552 list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans); 552 list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
553 netif_stacked_transfer_operstate(phy_dev, dev); 553 netif_stacked_transfer_operstate(phy_dev, dev);
554 return 0; 554 return 0;
555
556unregister_netdev:
557 unregister_netdevice(dev);
558destroy_ipvlan_port:
559 if (create)
560 ipvlan_port_destroy(phy_dev);
561 return err;
555} 562}
556 563
557static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) 564static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 4e3d2e7c697c..e8c3a8c32534 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -518,7 +518,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
518 518
519 mtt = irda_get_mtt(skb); 519 mtt = irda_get_mtt(skb);
520 pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); 520 pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
521 if (mtt) 521 if (mtt > 1000)
522 mdelay(mtt/1000);
523 else if (mtt)
522 udelay(mtt); 524 udelay(mtt);
523 525
524 /* Enable DMA interrupt */ 526 /* Enable DMA interrupt */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3234fcdea317..26d6f0bbe14b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -623,7 +623,8 @@ hash_add:
623 return 0; 623 return 0;
624 624
625clear_multi: 625clear_multi:
626 dev_set_allmulti(lowerdev, -1); 626 if (dev->flags & IFF_ALLMULTI)
627 dev_set_allmulti(lowerdev, -1);
627del_unicast: 628del_unicast:
628 dev_uc_del(lowerdev, dev->dev_addr); 629 dev_uc_del(lowerdev, dev->dev_addr);
629out: 630out:
@@ -1278,6 +1279,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1278 struct net_device *lowerdev; 1279 struct net_device *lowerdev;
1279 int err; 1280 int err;
1280 int macmode; 1281 int macmode;
1282 bool create = false;
1281 1283
1282 if (!tb[IFLA_LINK]) 1284 if (!tb[IFLA_LINK])
1283 return -EINVAL; 1285 return -EINVAL;
@@ -1304,12 +1306,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1304 err = macvlan_port_create(lowerdev); 1306 err = macvlan_port_create(lowerdev);
1305 if (err < 0) 1307 if (err < 0)
1306 return err; 1308 return err;
1309 create = true;
1307 } 1310 }
1308 port = macvlan_port_get_rtnl(lowerdev); 1311 port = macvlan_port_get_rtnl(lowerdev);
1309 1312
1310 /* Only 1 macvlan device can be created in passthru mode */ 1313 /* Only 1 macvlan device can be created in passthru mode */
1311 if (port->passthru) 1314 if (port->passthru) {
1312 return -EINVAL; 1315 /* The macvlan port must be not created this time,
1316 * still goto destroy_macvlan_port for readability.
1317 */
1318 err = -EINVAL;
1319 goto destroy_macvlan_port;
1320 }
1313 1321
1314 vlan->lowerdev = lowerdev; 1322 vlan->lowerdev = lowerdev;
1315 vlan->dev = dev; 1323 vlan->dev = dev;
@@ -1325,24 +1333,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1325 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1333 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
1326 1334
1327 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 1335 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
1328 if (port->count) 1336 if (port->count) {
1329 return -EINVAL; 1337 err = -EINVAL;
1338 goto destroy_macvlan_port;
1339 }
1330 port->passthru = true; 1340 port->passthru = true;
1331 eth_hw_addr_inherit(dev, lowerdev); 1341 eth_hw_addr_inherit(dev, lowerdev);
1332 } 1342 }
1333 1343
1334 if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { 1344 if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
1335 if (vlan->mode != MACVLAN_MODE_SOURCE) 1345 if (vlan->mode != MACVLAN_MODE_SOURCE) {
1336 return -EINVAL; 1346 err = -EINVAL;
1347 goto destroy_macvlan_port;
1348 }
1337 macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); 1349 macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
1338 err = macvlan_changelink_sources(vlan, macmode, data); 1350 err = macvlan_changelink_sources(vlan, macmode, data);
1339 if (err) 1351 if (err)
1340 return err; 1352 goto destroy_macvlan_port;
1341 } 1353 }
1342 1354
1343 err = register_netdevice(dev); 1355 err = register_netdevice(dev);
1344 if (err < 0) 1356 if (err < 0)
1345 return err; 1357 goto destroy_macvlan_port;
1346 1358
1347 dev->priv_flags |= IFF_MACVLAN; 1359 dev->priv_flags |= IFF_MACVLAN;
1348 err = netdev_upper_dev_link(lowerdev, dev); 1360 err = netdev_upper_dev_link(lowerdev, dev);
@@ -1357,7 +1369,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
1357 1369
1358unregister_netdev: 1370unregister_netdev:
1359 unregister_netdevice(dev); 1371 unregister_netdevice(dev);
1360 1372destroy_macvlan_port:
1373 if (create)
1374 macvlan_port_destroy(port->dev);
1361 return err; 1375 return err;
1362} 1376}
1363EXPORT_SYMBOL_GPL(macvlan_common_newlink); 1377EXPORT_SYMBOL_GPL(macvlan_common_newlink);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 070e3290aa6e..7869b0651576 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -491,7 +491,13 @@ static int macvtap_newlink(struct net *src_net,
491 /* Don't put anything that may fail after macvlan_common_newlink 491 /* Don't put anything that may fail after macvlan_common_newlink
492 * because we can't undo what it does. 492 * because we can't undo what it does.
493 */ 493 */
494 return macvlan_common_newlink(src_net, dev, tb, data); 494 err = macvlan_common_newlink(src_net, dev, tb, data);
495 if (err) {
496 netdev_rx_handler_unregister(dev);
497 return err;
498 }
499
500 return 0;
495} 501}
496 502
497static void macvtap_dellink(struct net_device *dev, 503static void macvtap_dellink(struct net_device *dev,
@@ -736,13 +742,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
736 742
737 if (zerocopy) 743 if (zerocopy)
738 err = zerocopy_sg_from_iter(skb, from); 744 err = zerocopy_sg_from_iter(skb, from);
739 else { 745 else
740 err = skb_copy_datagram_from_iter(skb, 0, from, len); 746 err = skb_copy_datagram_from_iter(skb, 0, from, len);
741 if (!err && m && m->msg_control) {
742 struct ubuf_info *uarg = m->msg_control;
743 uarg->callback(uarg, false);
744 }
745 }
746 747
747 if (err) 748 if (err)
748 goto err_kfree; 749 goto err_kfree;
@@ -773,7 +774,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
773 skb_shinfo(skb)->destructor_arg = m->msg_control; 774 skb_shinfo(skb)->destructor_arg = m->msg_control;
774 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 775 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
775 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 776 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
777 } else if (m && m->msg_control) {
778 struct ubuf_info *uarg = m->msg_control;
779 uarg->callback(uarg, false);
776 } 780 }
781
777 if (vlan) { 782 if (vlan) {
778 skb->dev = vlan->dev; 783 skb->dev = vlan->dev;
779 dev_queue_xmit(skb); 784 dev_queue_xmit(skb);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index c649c101bbab..eb5167210681 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register);
279void fixed_phy_unregister(struct phy_device *phy) 279void fixed_phy_unregister(struct phy_device *phy)
280{ 280{
281 phy_device_remove(phy); 281 phy_device_remove(phy);
282 282 of_node_put(phy->mdio.dev.of_node);
283 fixed_phy_del(phy->mdio.addr); 283 fixed_phy_del(phy->mdio.addr);
284} 284}
285EXPORT_SYMBOL_GPL(fixed_phy_unregister); 285EXPORT_SYMBOL_GPL(fixed_phy_unregister);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 081df68d2ce1..ea92d524d5a8 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -318,12 +318,12 @@ static int ksz8041_config_init(struct phy_device *phydev)
318 /* Limit supported and advertised modes in fiber mode */ 318 /* Limit supported and advertised modes in fiber mode */
319 if (of_property_read_bool(of_node, "micrel,fiber-mode")) { 319 if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
320 phydev->dev_flags |= MICREL_PHY_FXEN; 320 phydev->dev_flags |= MICREL_PHY_FXEN;
321 phydev->supported &= SUPPORTED_FIBRE | 321 phydev->supported &= SUPPORTED_100baseT_Full |
322 SUPPORTED_100baseT_Full |
323 SUPPORTED_100baseT_Half; 322 SUPPORTED_100baseT_Half;
324 phydev->advertising &= ADVERTISED_FIBRE | 323 phydev->supported |= SUPPORTED_FIBRE;
325 ADVERTISED_100baseT_Full | 324 phydev->advertising &= ADVERTISED_100baseT_Full |
326 ADVERTISED_100baseT_Half; 325 ADVERTISED_100baseT_Half;
326 phydev->advertising |= ADVERTISED_FIBRE;
327 phydev->autoneg = AUTONEG_DISABLE; 327 phydev->autoneg = AUTONEG_DISABLE;
328 } 328 }
329 329
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e977ba931878..1a4bf8acad78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
723 phydev = to_phy_device(d); 723 phydev = to_phy_device(d);
724 724
725 rc = phy_connect_direct(dev, phydev, handler, interface); 725 rc = phy_connect_direct(dev, phydev, handler, interface);
726 put_device(d);
726 if (rc) 727 if (rc)
727 return ERR_PTR(rc); 728 return ERR_PTR(rc);
728 729
@@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
953 phydev = to_phy_device(d); 954 phydev = to_phy_device(d);
954 955
955 rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); 956 rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
957 put_device(d);
956 if (rc) 958 if (rc)
957 return ERR_PTR(rc); 959 return ERR_PTR(rc);
958 960
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index aadd6e9f54ad..9cbe645e3d89 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -102,15 +102,19 @@ static int rtl8211f_config_init(struct phy_device *phydev)
102 if (ret < 0) 102 if (ret < 0)
103 return ret; 103 return ret;
104 104
105 if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { 105 phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08);
106 /* enable TXDLY */ 106 reg = phy_read(phydev, 0x11);
107 phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); 107
108 reg = phy_read(phydev, 0x11); 108 /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
109 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
110 phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
109 reg |= RTL8211F_TX_DELAY; 111 reg |= RTL8211F_TX_DELAY;
110 phy_write(phydev, 0x11, reg); 112 else
111 /* restore to default page 0 */ 113 reg &= ~RTL8211F_TX_DELAY;
112 phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); 114
113 } 115 phy_write(phydev, 0x11, reg);
116 /* restore to default page 0 */
117 phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
114 118
115 return 0; 119 return 0;
116} 120}
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2e37eb337d48..24b4a09468dd 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -62,6 +62,10 @@
62/* Vitesse Extended Page Access Register */ 62/* Vitesse Extended Page Access Register */
63#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f 63#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
64 64
65/* Vitesse VSC8601 Extended PHY Control Register 1 */
66#define MII_VSC8601_EPHY_CTL 0x17
67#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
68
65#define PHY_ID_VSC8234 0x000fc620 69#define PHY_ID_VSC8234 0x000fc620
66#define PHY_ID_VSC8244 0x000fc6c0 70#define PHY_ID_VSC8244 0x000fc6c0
67#define PHY_ID_VSC8514 0x00070670 71#define PHY_ID_VSC8514 0x00070670
@@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev)
111 return err; 115 return err;
112} 116}
113 117
118/* This adds a skew for both TX and RX clocks, so the skew should only be
119 * applied to "rgmii-id" interfaces. It may not work as expected
120 * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
121static int vsc8601_add_skew(struct phy_device *phydev)
122{
123 int ret;
124
125 ret = phy_read(phydev, MII_VSC8601_EPHY_CTL);
126 if (ret < 0)
127 return ret;
128
129 ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW;
130 return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret);
131}
132
133static int vsc8601_config_init(struct phy_device *phydev)
134{
135 int ret = 0;
136
137 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
138 ret = vsc8601_add_skew(phydev);
139
140 if (ret < 0)
141 return ret;
142
143 return genphy_config_init(phydev);
144}
145
114static int vsc824x_ack_interrupt(struct phy_device *phydev) 146static int vsc824x_ack_interrupt(struct phy_device *phydev)
115{ 147{
116 int err = 0; 148 int err = 0;
@@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = {
275 .phy_id_mask = 0x000ffff0, 307 .phy_id_mask = 0x000ffff0,
276 .features = PHY_GBIT_FEATURES, 308 .features = PHY_GBIT_FEATURES,
277 .flags = PHY_HAS_INTERRUPT, 309 .flags = PHY_HAS_INTERRUPT,
278 .config_init = &genphy_config_init, 310 .config_init = &vsc8601_config_init,
279 .config_aneg = &genphy_config_aneg, 311 .config_aneg = &genphy_config_aneg,
280 .read_status = &genphy_read_status, 312 .read_status = &genphy_read_status,
281 .ack_interrupt = &vsc824x_ack_interrupt, 313 .ack_interrupt = &vsc824x_ack_interrupt,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8093e39ae263..db6acecabeaa 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1246,13 +1246,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1246 1246
1247 if (zerocopy) 1247 if (zerocopy)
1248 err = zerocopy_sg_from_iter(skb, from); 1248 err = zerocopy_sg_from_iter(skb, from);
1249 else { 1249 else
1250 err = skb_copy_datagram_from_iter(skb, 0, from, len); 1250 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1251 if (!err && msg_control) {
1252 struct ubuf_info *uarg = msg_control;
1253 uarg->callback(uarg, false);
1254 }
1255 }
1256 1251
1257 if (err) { 1252 if (err) {
1258 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1253 this_cpu_inc(tun->pcpu_stats->rx_dropped);
@@ -1298,6 +1293,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1298 skb_shinfo(skb)->destructor_arg = msg_control; 1293 skb_shinfo(skb)->destructor_arg = msg_control;
1299 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1294 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1300 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1295 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1296 } else if (msg_control) {
1297 struct ubuf_info *uarg = msg_control;
1298 uarg->callback(uarg, false);
1301 } 1299 }
1302 1300
1303 skb_reset_network_header(skb); 1301 skb_reset_network_header(skb);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index cce24950a0ab..dc7b6392e75a 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -603,12 +603,12 @@ static void ax88772_suspend(struct usbnet *dev)
603 u16 medium; 603 u16 medium;
604 604
605 /* Stop MAC operation */ 605 /* Stop MAC operation */
606 medium = asix_read_medium_status(dev, 0); 606 medium = asix_read_medium_status(dev, 1);
607 medium &= ~AX_MEDIUM_RE; 607 medium &= ~AX_MEDIUM_RE;
608 asix_write_medium_mode(dev, medium, 0); 608 asix_write_medium_mode(dev, medium, 1);
609 609
610 netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n", 610 netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n",
611 asix_read_medium_status(dev, 0)); 611 asix_read_medium_status(dev, 1));
612 612
613 /* Preserve BMCR for restoring */ 613 /* Preserve BMCR for restoring */
614 priv->presvd_phy_bmcr = 614 priv->presvd_phy_bmcr =
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index e6338c16081a..8a6675d92b98 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = {
1656 .tx_fixup = ax88179_tx_fixup, 1656 .tx_fixup = ax88179_tx_fixup,
1657}; 1657};
1658 1658
1659static const struct driver_info cypress_GX3_info = {
1660 .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller",
1661 .bind = ax88179_bind,
1662 .unbind = ax88179_unbind,
1663 .status = ax88179_status,
1664 .link_reset = ax88179_link_reset,
1665 .reset = ax88179_reset,
1666 .stop = ax88179_stop,
1667 .flags = FLAG_ETHER | FLAG_FRAMING_AX,
1668 .rx_fixup = ax88179_rx_fixup,
1669 .tx_fixup = ax88179_tx_fixup,
1670};
1671
1659static const struct driver_info dlink_dub1312_info = { 1672static const struct driver_info dlink_dub1312_info = {
1660 .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", 1673 .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
1661 .bind = ax88179_bind, 1674 .bind = ax88179_bind,
@@ -1718,6 +1731,10 @@ static const struct usb_device_id products[] = {
1718 USB_DEVICE(0x0b95, 0x178a), 1731 USB_DEVICE(0x0b95, 0x178a),
1719 .driver_info = (unsigned long)&ax88178a_info, 1732 .driver_info = (unsigned long)&ax88178a_info,
1720}, { 1733}, {
1734 /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */
1735 USB_DEVICE(0x04b4, 0x3610),
1736 .driver_info = (unsigned long)&cypress_GX3_info,
1737}, {
1721 /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ 1738 /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
1722 USB_DEVICE(0x2001, 0x4a00), 1739 USB_DEVICE(0x2001, 0x4a00),
1723 .driver_info = (unsigned long)&dlink_dub1312_info, 1740 .driver_info = (unsigned long)&dlink_dub1312_info,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c47ec0a04c8e..dd623f674487 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -388,12 +388,6 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb)
388 case USB_CDC_NOTIFY_NETWORK_CONNECTION: 388 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
389 netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", 389 netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
390 event->wValue ? "on" : "off"); 390 event->wValue ? "on" : "off");
391
392 /* Work-around for devices with broken off-notifications */
393 if (event->wValue &&
394 !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state))
395 usbnet_link_change(dev, 0, 0);
396
397 usbnet_link_change(dev, !!event->wValue, 0); 391 usbnet_link_change(dev, !!event->wValue, 0);
398 break; 392 break;
399 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ 393 case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
@@ -466,6 +460,36 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
466 return 1; 460 return 1;
467} 461}
468 462
463/* Ensure correct link state
464 *
465 * Some devices (ZTE MF823/831/910) export two carrier on notifications when
466 * connected. This causes the link state to be incorrect. Work around this by
467 * always setting the state to off, then on.
468 */
469void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
470{
471 struct usb_cdc_notification *event;
472
473 if (urb->actual_length < sizeof(*event))
474 return;
475
476 event = urb->transfer_buffer;
477
478 if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) {
479 usbnet_cdc_status(dev, urb);
480 return;
481 }
482
483 netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
484 event->wValue ? "on" : "off");
485
486 if (event->wValue &&
487 netif_carrier_ok(dev->net))
488 netif_carrier_off(dev->net);
489
490 usbnet_link_change(dev, !!event->wValue, 0);
491}
492
469static const struct driver_info cdc_info = { 493static const struct driver_info cdc_info = {
470 .description = "CDC Ethernet Device", 494 .description = "CDC Ethernet Device",
471 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, 495 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -481,7 +505,7 @@ static const struct driver_info zte_cdc_info = {
481 .flags = FLAG_ETHER | FLAG_POINTTOPOINT, 505 .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
482 .bind = usbnet_cdc_zte_bind, 506 .bind = usbnet_cdc_zte_bind,
483 .unbind = usbnet_cdc_unbind, 507 .unbind = usbnet_cdc_unbind,
484 .status = usbnet_cdc_status, 508 .status = usbnet_cdc_zte_status,
485 .set_rx_mode = usbnet_cdc_update_filter, 509 .set_rx_mode = usbnet_cdc_update_filter,
486 .manage_power = usbnet_manage_power, 510 .manage_power = usbnet_manage_power,
487 .rx_fixup = usbnet_cdc_zte_rx_fixup, 511 .rx_fixup = usbnet_cdc_zte_rx_fixup,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3ff76c6db4f6..6fe1cdb0174f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -894,6 +894,7 @@ static const struct usb_device_id products[] = {
894 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 894 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
895 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 895 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
896 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 896 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
897 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
897 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 898 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
898 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 899 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
899 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 900 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 44d439f50961..efb84f092492 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
1730 u8 checksum = CHECKSUM_NONE; 1730 u8 checksum = CHECKSUM_NONE;
1731 u32 opts2, opts3; 1731 u32 opts2, opts3;
1732 1732
1733 if (tp->version == RTL_VER_01) 1733 if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
1734 goto return_result; 1734 goto return_result;
1735 1735
1736 opts2 = le32_to_cpu(rx_desc->opts2); 1736 opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
1745 checksum = CHECKSUM_NONE; 1745 checksum = CHECKSUM_NONE;
1746 else 1746 else
1747 checksum = CHECKSUM_UNNECESSARY; 1747 checksum = CHECKSUM_UNNECESSARY;
1748 } else if (RD_IPV6_CS) { 1748 } else if (opts2 & RD_IPV6_CS) {
1749 if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) 1749 if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
1750 checksum = CHECKSUM_UNNECESSARY; 1750 checksum = CHECKSUM_UNNECESSARY;
1751 else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) 1751 else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev)
3266 goto out; 3266 goto out;
3267 3267
3268 res = usb_autopm_get_interface(tp->intf); 3268 res = usb_autopm_get_interface(tp->intf);
3269 if (res < 0) { 3269 if (res < 0)
3270 free_all_mem(tp); 3270 goto out_free;
3271 goto out;
3272 }
3273 3271
3274 mutex_lock(&tp->control); 3272 mutex_lock(&tp->control);
3275 3273
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev)
3285 netif_device_detach(tp->netdev); 3283 netif_device_detach(tp->netdev);
3286 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", 3284 netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
3287 res); 3285 res);
3288 free_all_mem(tp); 3286 goto out_unlock;
3289 } else {
3290 napi_enable(&tp->napi);
3291 } 3287 }
3288 napi_enable(&tp->napi);
3292 3289
3293 mutex_unlock(&tp->control); 3290 mutex_unlock(&tp->control);
3294 3291
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev)
3297 tp->pm_notifier.notifier_call = rtl_notifier; 3294 tp->pm_notifier.notifier_call = rtl_notifier;
3298 register_pm_notifier(&tp->pm_notifier); 3295 register_pm_notifier(&tp->pm_notifier);
3299#endif 3296#endif
3297 return 0;
3300 3298
3299out_unlock:
3300 mutex_unlock(&tp->control);
3301 usb_autopm_put_interface(tp->intf);
3302out_free:
3303 free_all_mem(tp);
3301out: 3304out:
3302 return res; 3305 return res;
3303} 3306}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fad84f3f4109..7276d5a95bd0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
1497 netif_napi_del(&vi->rq[i].napi); 1497 netif_napi_del(&vi->rq[i].napi);
1498 } 1498 }
1499 1499
1500 /* We called napi_hash_del() before netif_napi_del(),
1501 * we need to respect an RCU grace period before freeing vi->rq
1502 */
1503 synchronize_net();
1504
1500 kfree(vi->rq); 1505 kfree(vi->rq);
1501 kfree(vi->sq); 1506 kfree(vi->sq);
1502} 1507}
@@ -2038,23 +2043,33 @@ static struct virtio_device_id id_table[] = {
2038 { 0 }, 2043 { 0 },
2039}; 2044};
2040 2045
2046#define VIRTNET_FEATURES \
2047 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
2048 VIRTIO_NET_F_MAC, \
2049 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
2050 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
2051 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
2052 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
2053 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
2054 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
2055 VIRTIO_NET_F_CTRL_MAC_ADDR, \
2056 VIRTIO_NET_F_MTU
2057
2041static unsigned int features[] = { 2058static unsigned int features[] = {
2042 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 2059 VIRTNET_FEATURES,
2043 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 2060};
2044 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 2061
2045 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 2062static unsigned int features_legacy[] = {
2046 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 2063 VIRTNET_FEATURES,
2047 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 2064 VIRTIO_NET_F_GSO,
2048 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
2049 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
2050 VIRTIO_NET_F_CTRL_MAC_ADDR,
2051 VIRTIO_F_ANY_LAYOUT, 2065 VIRTIO_F_ANY_LAYOUT,
2052 VIRTIO_NET_F_MTU,
2053}; 2066};
2054 2067
2055static struct virtio_driver virtio_net_driver = { 2068static struct virtio_driver virtio_net_driver = {
2056 .feature_table = features, 2069 .feature_table = features,
2057 .feature_table_size = ARRAY_SIZE(features), 2070 .feature_table_size = ARRAY_SIZE(features),
2071 .feature_table_legacy = features_legacy,
2072 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
2058 .driver.name = KBUILD_MODNAME, 2073 .driver.name = KBUILD_MODNAME,
2059 .driver.owner = THIS_MODULE, 2074 .driver.owner = THIS_MODULE,
2060 .id_table = id_table, 2075 .id_table = id_table,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f3c2fa3ab0d5..2ba01ca02c9c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -611,6 +611,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
611 struct vxlan_rdst *rd = NULL; 611 struct vxlan_rdst *rd = NULL;
612 struct vxlan_fdb *f; 612 struct vxlan_fdb *f;
613 int notify = 0; 613 int notify = 0;
614 int rc;
614 615
615 f = __vxlan_find_mac(vxlan, mac); 616 f = __vxlan_find_mac(vxlan, mac);
616 if (f) { 617 if (f) {
@@ -641,8 +642,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
641 if ((flags & NLM_F_APPEND) && 642 if ((flags & NLM_F_APPEND) &&
642 (is_multicast_ether_addr(f->eth_addr) || 643 (is_multicast_ether_addr(f->eth_addr) ||
643 is_zero_ether_addr(f->eth_addr))) { 644 is_zero_ether_addr(f->eth_addr))) {
644 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex, 645 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
645 &rd);
646 646
647 if (rc < 0) 647 if (rc < 0)
648 return rc; 648 return rc;
@@ -673,7 +673,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
673 INIT_LIST_HEAD(&f->remotes); 673 INIT_LIST_HEAD(&f->remotes);
674 memcpy(f->eth_addr, mac, ETH_ALEN); 674 memcpy(f->eth_addr, mac, ETH_ALEN);
675 675
676 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); 676 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
677 if (rc < 0) {
678 kfree(f);
679 return rc;
680 }
677 681
678 ++vxlan->addrcnt; 682 ++vxlan->addrcnt;
679 hlist_add_head_rcu(&f->hlist, 683 hlist_add_head_rcu(&f->hlist,
@@ -944,7 +948,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
944{ 948{
945 struct vxlan_dev *vxlan; 949 struct vxlan_dev *vxlan;
946 struct vxlan_sock *sock4; 950 struct vxlan_sock *sock4;
947 struct vxlan_sock *sock6 = NULL; 951#if IS_ENABLED(CONFIG_IPV6)
952 struct vxlan_sock *sock6;
953#endif
948 unsigned short family = dev->default_dst.remote_ip.sa.sa_family; 954 unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
949 955
950 sock4 = rtnl_dereference(dev->vn4_sock); 956 sock4 = rtnl_dereference(dev->vn4_sock);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b777e1b2f87a..78d9966a3957 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4516 /* store current 11d setting */ 4516 /* store current 11d setting */
4517 if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, 4517 if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY,
4518 &ifp->vif->is_11d)) { 4518 &ifp->vif->is_11d)) {
4519 supports_11d = false; 4519 is_11d = supports_11d = false;
4520 } else { 4520 } else {
4521 country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, 4521 country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail,
4522 settings->beacon.tail_len, 4522 settings->beacon.tail_len,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4fdc3dad3e85..b88e2048ae0b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
1087 ret = iwl_mvm_switch_to_d3(mvm); 1087 ret = iwl_mvm_switch_to_d3(mvm);
1088 if (ret) 1088 if (ret)
1089 return ret; 1089 return ret;
1090 } else {
1091 /* In theory, we wouldn't have to stop a running sched
1092 * scan in order to start another one (for
1093 * net-detect). But in practice this doesn't seem to
1094 * work properly, so stop any running sched_scan now.
1095 */
1096 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1097 if (ret)
1098 return ret;
1090 } 1099 }
1091 1100
1092 /* rfkill release can be either for wowlan or netdetect */ 1101 /* rfkill release can be either for wowlan or netdetect */
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1254 out: 1263 out:
1255 if (ret < 0) { 1264 if (ret < 0) {
1256 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 1265 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1257 ieee80211_restart_hw(mvm->hw); 1266 if (mvm->restart_fw > 0) {
1267 mvm->restart_fw--;
1268 ieee80211_restart_hw(mvm->hw);
1269 }
1258 iwl_mvm_free_nd(mvm); 1270 iwl_mvm_free_nd(mvm);
1259 } 1271 }
1260 out_noreset: 1272 out_noreset:
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
2088 iwl_mvm_update_changed_regdom(mvm); 2100 iwl_mvm_update_changed_regdom(mvm);
2089 2101
2090 if (mvm->net_detect) { 2102 if (mvm->net_detect) {
2103 /* If this is a non-unified image, we restart the FW,
2104 * so no need to stop the netdetect scan. If that
2105 * fails, continue and try to get the wake-up reasons,
2106 * but trigger a HW restart by keeping a failure code
2107 * in ret.
2108 */
2109 if (unified_image)
2110 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
2111 false);
2112
2091 iwl_mvm_query_netdetect_reasons(mvm, vif); 2113 iwl_mvm_query_netdetect_reasons(mvm, vif);
2092 /* has unlocked the mutex, so skip that */ 2114 /* has unlocked the mutex, so skip that */
2093 goto out; 2115 goto out;
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
2271static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) 2293static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2272{ 2294{
2273 struct iwl_mvm *mvm = inode->i_private; 2295 struct iwl_mvm *mvm = inode->i_private;
2274 int remaining_time = 10; 2296 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2297 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2275 2298
2276 mvm->d3_test_active = false; 2299 mvm->d3_test_active = false;
2277 2300
@@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2282 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2305 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2283 2306
2284 iwl_abort_notification_waits(&mvm->notif_wait); 2307 iwl_abort_notification_waits(&mvm->notif_wait);
2285 ieee80211_restart_hw(mvm->hw); 2308 if (!unified_image) {
2309 int remaining_time = 10;
2286 2310
2287 /* wait for restart and disconnect all interfaces */ 2311 ieee80211_restart_hw(mvm->hw);
2288 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2312
2289 remaining_time > 0) { 2313 /* wait for restart and disconnect all interfaces */
2290 remaining_time--; 2314 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2291 msleep(1000); 2315 remaining_time > 0) {
2292 } 2316 remaining_time--;
2317 msleep(1000);
2318 }
2293 2319
2294 if (remaining_time == 0) 2320 if (remaining_time == 0)
2295 IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); 2321 IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
2322 }
2296 2323
2297 ieee80211_iterate_active_interfaces_atomic( 2324 ieee80211_iterate_active_interfaces_atomic(
2298 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 2325 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 07da4efe8458..7b7d2a146e30 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
1529 .data = { &cmd, }, 1529 .data = { &cmd, },
1530 .len = { sizeof(cmd) }, 1530 .len = { sizeof(cmd) },
1531 }; 1531 };
1532 size_t delta, len; 1532 size_t delta;
1533 ssize_t ret; 1533 ssize_t ret, len;
1534 1534
1535 hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, 1535 hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
1536 DEBUG_GROUP, 0); 1536 DEBUG_GROUP, 0);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 318efd814037..1db1dc13e988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4121 struct iwl_mvm_internal_rxq_notif *notif, 4121 struct iwl_mvm_internal_rxq_notif *notif,
4122 u32 size) 4122 u32 size)
4123{ 4123{
4124 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
4125 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; 4124 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4126 int ret; 4125 int ret;
4127 4126
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4143 } 4142 }
4144 4143
4145 if (notif->sync) 4144 if (notif->sync)
4146 ret = wait_event_timeout(notif_waitq, 4145 ret = wait_event_timeout(mvm->rx_sync_waitq,
4147 atomic_read(&mvm->queue_sync_counter) == 0, 4146 atomic_read(&mvm->queue_sync_counter) == 0,
4148 HZ); 4147 HZ);
4149 WARN_ON_ONCE(!ret); 4148 WARN_ON_ONCE(!ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index d17cbf603f7c..c60703e0c246 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -937,6 +937,7 @@ struct iwl_mvm {
937 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ 937 /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
938 spinlock_t d0i3_tx_lock; 938 spinlock_t d0i3_tx_lock;
939 wait_queue_head_t d0i3_exit_waitq; 939 wait_queue_head_t d0i3_exit_waitq;
940 wait_queue_head_t rx_sync_waitq;
940 941
941 /* BT-Coex */ 942 /* BT-Coex */
942 struct iwl_bt_coex_profile_notif last_bt_notif; 943 struct iwl_bt_coex_profile_notif last_bt_notif;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 05fe6dd1a2c8..4d35deb628bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
619 spin_lock_init(&mvm->refs_lock); 619 spin_lock_init(&mvm->refs_lock);
620 skb_queue_head_init(&mvm->d0i3_tx); 620 skb_queue_head_init(&mvm->d0i3_tx);
621 init_waitqueue_head(&mvm->d0i3_exit_waitq); 621 init_waitqueue_head(&mvm->d0i3_exit_waitq);
622 init_waitqueue_head(&mvm->rx_sync_waitq);
622 623
623 atomic_set(&mvm->queue_sync_counter, 0); 624 atomic_set(&mvm->queue_sync_counter, 0);
624 625
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a57c6ef5bc14..6c802cee900c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
547 "Received expired RX queue sync message\n"); 547 "Received expired RX queue sync message\n");
548 return; 548 return;
549 } 549 }
550 atomic_dec(&mvm->queue_sync_counter); 550 if (!atomic_dec_return(&mvm->queue_sync_counter))
551 wake_up(&mvm->rx_sync_waitq);
551 } 552 }
552 553
553 switch (internal_notif->type) { 554 switch (internal_notif->type) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index f279fdd6eb44..fa9743205491 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1199 1199
1200static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) 1200static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1201{ 1201{
1202 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1203 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1204
1202 /* This looks a bit arbitrary, but the idea is that if we run 1205 /* This looks a bit arbitrary, but the idea is that if we run
1203 * out of possible simultaneous scans and the userspace is 1206 * out of possible simultaneous scans and the userspace is
1204 * trying to run a scan type that is already running, we 1207 * trying to run a scan type that is already running, we
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1225 return -EBUSY; 1228 return -EBUSY;
1226 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 1229 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
1227 case IWL_MVM_SCAN_NETDETECT: 1230 case IWL_MVM_SCAN_NETDETECT:
1228 /* No need to stop anything for net-detect since the 1231 /* For non-unified images, there's no need to stop
1229 * firmware is restarted anyway. This way, any sched 1232 * anything for net-detect since the firmware is
1230 * scans that were running will be restarted when we 1233 * restarted anyway. This way, any sched scans that
1231 * resume. 1234 * were running will be restarted when we resume.
1232 */ 1235 */
1233 return 0; 1236 if (!unified_image)
1237 return 0;
1238
1239 /* If this is a unified image and we ran out of scans,
1240 * we need to stop something. Prefer stopping regular
1241 * scans, because the results are useless at this
1242 * point, and we should be able to keep running
1243 * another scheduled scan while suspended.
1244 */
1245 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1246 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
1247 true);
1248 if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1249 return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
1250 true);
1251
1252 /* fall through, something is wrong if no scan was
1253 * running but we ran out of scans.
1254 */
1234 default: 1255 default:
1235 WARN_ON(1); 1256 WARN_ON(1);
1236 break; 1257 break;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 001be406a3d3..2f8134b2a504 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
541MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 541MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
542 542
543#ifdef CONFIG_ACPI 543#ifdef CONFIG_ACPI
544#define SPL_METHOD "SPLC" 544#define ACPI_SPLC_METHOD "SPLC"
545#define SPL_DOMAINTYPE_MODULE BIT(0) 545#define ACPI_SPLC_DOMAIN_WIFI (0x07)
546#define SPL_DOMAINTYPE_WIFI BIT(1)
547#define SPL_DOMAINTYPE_WIGIG BIT(2)
548#define SPL_DOMAINTYPE_RFEM BIT(3)
549 546
550static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) 547static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
551{ 548{
552 union acpi_object *limits, *domain_type, *power_limit; 549 union acpi_object *data_pkg, *dflt_pwr_limit;
553 550 int i;
554 if (splx->type != ACPI_TYPE_PACKAGE || 551
555 splx->package.count != 2 || 552 /* We need at least two elements, one for the revision and one
556 splx->package.elements[0].type != ACPI_TYPE_INTEGER || 553 * for the data itself. Also check that the revision is
557 splx->package.elements[0].integer.value != 0) { 554 * supported (currently only revision 0).
558 IWL_ERR(trans, "Unsupported splx structure\n"); 555 */
556 if (splc->type != ACPI_TYPE_PACKAGE ||
557 splc->package.count < 2 ||
558 splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
559 splc->package.elements[0].integer.value != 0) {
560 IWL_DEBUG_INFO(trans,
561 "Unsupported structure returned by the SPLC method. Ignoring.\n");
559 return 0; 562 return 0;
560 } 563 }
561 564
562 limits = &splx->package.elements[1]; 565 /* loop through all the packages to find the one for WiFi */
563 if (limits->type != ACPI_TYPE_PACKAGE || 566 for (i = 1; i < splc->package.count; i++) {
564 limits->package.count < 2 || 567 union acpi_object *domain;
565 limits->package.elements[0].type != ACPI_TYPE_INTEGER || 568
566 limits->package.elements[1].type != ACPI_TYPE_INTEGER) { 569 data_pkg = &splc->package.elements[i];
567 IWL_ERR(trans, "Invalid limits element\n"); 570
568 return 0; 571 /* Skip anything that is not a package with the right
572 * amount of elements (i.e. at least 2 integers).
573 */
574 if (data_pkg->type != ACPI_TYPE_PACKAGE ||
575 data_pkg->package.count < 2 ||
576 data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
577 data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
578 continue;
579
580 domain = &data_pkg->package.elements[0];
581 if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
582 break;
583
584 data_pkg = NULL;
569 } 585 }
570 586
571 domain_type = &limits->package.elements[0]; 587 if (!data_pkg) {
572 power_limit = &limits->package.elements[1]; 588 IWL_DEBUG_INFO(trans,
573 if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { 589 "No element for the WiFi domain returned by the SPLC method.\n");
574 IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
575 return 0; 590 return 0;
576 } 591 }
577 592
578 return power_limit->integer.value; 593 dflt_pwr_limit = &data_pkg->package.elements[1];
594 return dflt_pwr_limit->integer.value;
579} 595}
580 596
581static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) 597static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
582{ 598{
583 acpi_handle pxsx_handle; 599 acpi_handle pxsx_handle;
584 acpi_handle handle; 600 acpi_handle handle;
585 struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; 601 struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
586 acpi_status status; 602 acpi_status status;
587 603
588 pxsx_handle = ACPI_HANDLE(&pdev->dev); 604 pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
593 } 609 }
594 610
595 /* Get the method's handle */ 611 /* Get the method's handle */
596 status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); 612 status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
613 &handle);
597 if (ACPI_FAILURE(status)) { 614 if (ACPI_FAILURE(status)) {
598 IWL_DEBUG_INFO(trans, "SPL method not found\n"); 615 IWL_DEBUG_INFO(trans, "SPLC method not found\n");
599 return; 616 return;
600 } 617 }
601 618
602 /* Call SPLC with no arguments */ 619 /* Call SPLC with no arguments */
603 status = acpi_evaluate_object(handle, NULL, NULL, &splx); 620 status = acpi_evaluate_object(handle, NULL, NULL, &splc);
604 if (ACPI_FAILURE(status)) { 621 if (ACPI_FAILURE(status)) {
605 IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); 622 IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
606 return; 623 return;
607 } 624 }
608 625
609 trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); 626 trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
610 IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", 627 IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
611 trans->dflt_pwr_limit); 628 trans->dflt_pwr_limit);
612 kfree(splx.pointer); 629 kfree(splc.pointer);
613} 630}
614 631
615#else /* CONFIG_ACPI */ 632#else /* CONFIG_ACPI */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index e9a278b60dfd..5f840f16f40b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -592,6 +592,7 @@ error:
592static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 592static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
593 int slots_num, u32 txq_id) 593 int slots_num, u32 txq_id)
594{ 594{
595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
595 int ret; 596 int ret;
596 597
597 txq->need_update = false; 598 txq->need_update = false;
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
606 return ret; 607 return ret;
607 608
608 spin_lock_init(&txq->lock); 609 spin_lock_init(&txq->lock);
610
611 if (txq_id == trans_pcie->cmd_queue) {
612 static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
613
614 lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
615 }
616
609 __skb_queue_head_init(&txq->overflow_q); 617 __skb_queue_head_init(&txq->overflow_q);
610 618
611 /* 619 /*
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 431f13b4faf6..d3bad5779376 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
826 data->bcn_delta = do_div(delta, bcn_int); 826 data->bcn_delta = do_div(delta, bcn_int);
827 } else { 827 } else {
828 data->tsf_offset -= delta; 828 data->tsf_offset -= delta;
829 data->bcn_delta = -do_div(delta, bcn_int); 829 data->bcn_delta = -(s64)do_div(delta, bcn_int);
830 } 830 }
831} 831}
832 832
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 39ce76ad00bc..16241d21727b 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2222,8 +2222,9 @@ done:
2222 is_scanning_required = 1; 2222 is_scanning_required = 1;
2223 } else { 2223 } else {
2224 mwifiex_dbg(priv->adapter, MSG, 2224 mwifiex_dbg(priv->adapter, MSG,
2225 "info: trying to associate to '%s' bssid %pM\n", 2225 "info: trying to associate to '%.*s' bssid %pM\n",
2226 (char *)req_ssid.ssid, bss->bssid); 2226 req_ssid.ssid_len, (char *)req_ssid.ssid,
2227 bss->bssid);
2227 memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); 2228 memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
2228 break; 2229 break;
2229 } 2230 }
@@ -2283,8 +2284,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
2283 } 2284 }
2284 2285
2285 mwifiex_dbg(adapter, INFO, 2286 mwifiex_dbg(adapter, INFO,
2286 "info: Trying to associate to %s and bssid %pM\n", 2287 "info: Trying to associate to %.*s and bssid %pM\n",
2287 (char *)sme->ssid, sme->bssid); 2288 (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
2288 2289
2289 if (!mwifiex_stop_bg_scan(priv)) 2290 if (!mwifiex_stop_bg_scan(priv))
2290 cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); 2291 cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
@@ -2417,8 +2418,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
2417 } 2418 }
2418 2419
2419 mwifiex_dbg(priv->adapter, MSG, 2420 mwifiex_dbg(priv->adapter, MSG,
2420 "info: trying to join to %s and bssid %pM\n", 2421 "info: trying to join to %.*s and bssid %pM\n",
2421 (char *)params->ssid, params->bssid); 2422 params->ssid_len, (char *)params->ssid, params->bssid);
2422 2423
2423 mwifiex_set_ibss_params(priv, params); 2424 mwifiex_set_ibss_params(priv, params);
2424 2425
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e17879dd5d5a..bf2744e1e3db 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
304 queue->rx_skbs[id] = skb; 304 queue->rx_skbs[id] = skb;
305 305
306 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); 306 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
307 BUG_ON((signed short)ref < 0); 307 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
308 queue->grant_rx_ref[id] = ref; 308 queue->grant_rx_ref[id] = ref;
309 309
310 page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 310 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
428 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 428 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
429 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 429 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
431 BUG_ON((signed short)ref < 0); 431 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
432 432
433 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 433 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
434 gfn, GNTMAP_readonly); 434 gfn, GNTMAP_readonly);
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 0d5c29ae51de..7310a261c858 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
112 112
113module_param_named(xeon_b2b_usd_bar4_addr64, 113module_param_named(xeon_b2b_usd_bar4_addr64,
114 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); 114 xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
115MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 115MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
116 "XEON B2B USD BAR 4 64-bit address"); 116 "XEON B2B USD BAR 4 64-bit address");
117 117
118module_param_named(xeon_b2b_usd_bar4_addr32, 118module_param_named(xeon_b2b_usd_bar4_addr32,
119 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); 119 xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
120MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 120MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
121 "XEON B2B USD split-BAR 4 32-bit address"); 121 "XEON B2B USD split-BAR 4 32-bit address");
122 122
123module_param_named(xeon_b2b_usd_bar5_addr32, 123module_param_named(xeon_b2b_usd_bar5_addr32,
124 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); 124 xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
125MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, 125MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
126 "XEON B2B USD split-BAR 5 32-bit address"); 126 "XEON B2B USD split-BAR 5 32-bit address");
127 127
128module_param_named(xeon_b2b_dsd_bar2_addr64, 128module_param_named(xeon_b2b_dsd_bar2_addr64,
@@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
132 132
133module_param_named(xeon_b2b_dsd_bar4_addr64, 133module_param_named(xeon_b2b_dsd_bar4_addr64,
134 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); 134 xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
135MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 135MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
136 "XEON B2B DSD BAR 4 64-bit address"); 136 "XEON B2B DSD BAR 4 64-bit address");
137 137
138module_param_named(xeon_b2b_dsd_bar4_addr32, 138module_param_named(xeon_b2b_dsd_bar4_addr32,
139 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); 139 xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
140MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 140MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
141 "XEON B2B DSD split-BAR 4 32-bit address"); 141 "XEON B2B DSD split-BAR 4 32-bit address");
142 142
143module_param_named(xeon_b2b_dsd_bar5_addr32, 143module_param_named(xeon_b2b_dsd_bar5_addr32,
144 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); 144 xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
145MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, 145MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
146 "XEON B2B DSD split-BAR 5 32-bit address"); 146 "XEON B2B DSD split-BAR 5 32-bit address");
147 147
148#ifndef ioread64 148#ifndef ioread64
@@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
1755 XEON_B2B_MIN_SIZE); 1755 XEON_B2B_MIN_SIZE);
1756 if (!ndev->peer_mmio) 1756 if (!ndev->peer_mmio)
1757 return -EIO; 1757 return -EIO;
1758
1759 ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
1758 } 1760 }
1759 1761
1760 return 0; 1762 return 0;
@@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
2019 goto err_mmio; 2021 goto err_mmio;
2020 } 2022 }
2021 ndev->peer_mmio = ndev->self_mmio; 2023 ndev->peer_mmio = ndev->self_mmio;
2024 ndev->peer_addr = pci_resource_start(pdev, 0);
2022 2025
2023 return 0; 2026 return 0;
2024 2027
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 8601c10acf74..4eb8adb34508 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -257,7 +257,7 @@ enum {
257#define NTB_QP_DEF_NUM_ENTRIES 100 257#define NTB_QP_DEF_NUM_ENTRIES 100
258#define NTB_LINK_DOWN_TIMEOUT 10 258#define NTB_LINK_DOWN_TIMEOUT 10
259#define DMA_RETRIES 20 259#define DMA_RETRIES 20
260#define DMA_OUT_RESOURCE_TO 50 260#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
261 261
262static void ntb_transport_rxc_db(unsigned long data); 262static void ntb_transport_rxc_db(unsigned long data);
263static const struct ntb_ctx_ops ntb_transport_ops; 263static const struct ntb_ctx_ops ntb_transport_ops;
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 6a50f20bf1cd..e75d4fdc0866 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -72,7 +72,7 @@
72#define MAX_THREADS 32 72#define MAX_THREADS 32
73#define MAX_TEST_SIZE SZ_1M 73#define MAX_TEST_SIZE SZ_1M
74#define MAX_SRCS 32 74#define MAX_SRCS 32
75#define DMA_OUT_RESOURCE_TO 50 75#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)
76#define DMA_RETRIES 20 76#define DMA_RETRIES 20
77#define SZ_4G (1ULL << 32) 77#define SZ_4G (1ULL << 32)
78#define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ 78#define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */
@@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
589 return -ENOMEM; 589 return -ENOMEM;
590 590
591 if (mutex_is_locked(&perf->run_mutex)) { 591 if (mutex_is_locked(&perf->run_mutex)) {
592 out_off = snprintf(buf, 64, "running\n"); 592 out_off = scnprintf(buf, 64, "running\n");
593 goto read_from_buf; 593 goto read_from_buf;
594 } 594 }
595 595
@@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
600 break; 600 break;
601 601
602 if (pctx->status) { 602 if (pctx->status) {
603 out_off += snprintf(buf + out_off, 1024 - out_off, 603 out_off += scnprintf(buf + out_off, 1024 - out_off,
604 "%d: error %d\n", i, 604 "%d: error %d\n", i,
605 pctx->status); 605 pctx->status);
606 continue; 606 continue;
607 } 607 }
608 608
609 rate = div64_u64(pctx->copied, pctx->diff_us); 609 rate = div64_u64(pctx->copied, pctx->diff_us);
610 out_off += snprintf(buf + out_off, 1024 - out_off, 610 out_off += scnprintf(buf + out_off, 1024 - out_off,
611 "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", 611 "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
612 i, pctx->copied, pctx->diff_us, rate); 612 i, pctx->copied, pctx->diff_us, rate);
613 } 613 }
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c
index 7d311799fca1..435861189d97 100644
--- a/drivers/ntb/test/ntb_pingpong.c
+++ b/drivers/ntb/test/ntb_pingpong.c
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
88 88
89static unsigned long db_init = 0x7; 89static unsigned long db_init = 0x7;
90module_param(db_init, ulong, 0644); 90module_param(db_init, ulong, 0644);
91MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer"); 91MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
92 92
93struct pp_ctx { 93struct pp_ctx {
94 struct ntb_dev *ntb; 94 struct ntb_dev *ntb;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0248d0e21fee..5e52034ab010 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1242,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1242 1242
1243 result = nvme_enable_ctrl(&dev->ctrl, cap); 1243 result = nvme_enable_ctrl(&dev->ctrl, cap);
1244 if (result) 1244 if (result)
1245 goto free_nvmeq; 1245 return result;
1246 1246
1247 nvmeq->cq_vector = 0; 1247 nvmeq->cq_vector = 0;
1248 result = queue_request_irq(nvmeq); 1248 result = queue_request_irq(nvmeq);
1249 if (result) { 1249 if (result) {
1250 nvmeq->cq_vector = -1; 1250 nvmeq->cq_vector = -1;
1251 goto free_nvmeq; 1251 return result;
1252 } 1252 }
1253 1253
1254 return result; 1254 return result;
1255
1256 free_nvmeq:
1257 nvme_free_queues(dev, 0);
1258 return result;
1259} 1255}
1260 1256
1261static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1257static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
@@ -1317,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
1317 max = min(dev->max_qid, dev->queue_count - 1); 1313 max = min(dev->max_qid, dev->queue_count - 1);
1318 for (i = dev->online_queues; i <= max; i++) { 1314 for (i = dev->online_queues; i <= max; i++) {
1319 ret = nvme_create_queue(dev->queues[i], i); 1315 ret = nvme_create_queue(dev->queues[i], i);
1320 if (ret) { 1316 if (ret)
1321 nvme_free_queues(dev, i);
1322 break; 1317 break;
1323 }
1324 } 1318 }
1325 1319
1326 /* 1320 /*
@@ -1460,13 +1454,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
1460 result = queue_request_irq(adminq); 1454 result = queue_request_irq(adminq);
1461 if (result) { 1455 if (result) {
1462 adminq->cq_vector = -1; 1456 adminq->cq_vector = -1;
1463 goto free_queues; 1457 return result;
1464 } 1458 }
1465 return nvme_create_io_queues(dev); 1459 return nvme_create_io_queues(dev);
1466
1467 free_queues:
1468 nvme_free_queues(dev, 1);
1469 return result;
1470} 1460}
1471 1461
1472static void nvme_del_queue_end(struct request *req, int error) 1462static void nvme_del_queue_end(struct request *req, int error)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5a8388177959..3d25add36d91 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags {
83 NVME_RDMA_Q_CONNECTED = (1 << 0), 83 NVME_RDMA_Q_CONNECTED = (1 << 0),
84 NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), 84 NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
85 NVME_RDMA_Q_DELETING = (1 << 2), 85 NVME_RDMA_Q_DELETING = (1 << 2),
86 NVME_RDMA_Q_LIVE = (1 << 3),
86}; 87};
87 88
88struct nvme_rdma_queue { 89struct nvme_rdma_queue {
@@ -624,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
624 625
625 for (i = 1; i < ctrl->queue_count; i++) { 626 for (i = 1; i < ctrl->queue_count; i++) {
626 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); 627 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
627 if (ret) 628 if (ret) {
628 break; 629 dev_info(ctrl->ctrl.device,
630 "failed to connect i/o queue: %d\n", ret);
631 goto out_free_queues;
632 }
633 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
629 } 634 }
630 635
636 return 0;
637
638out_free_queues:
639 nvme_rdma_free_io_queues(ctrl);
631 return ret; 640 return ret;
632} 641}
633 642
@@ -712,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
712 if (ret) 721 if (ret)
713 goto stop_admin_q; 722 goto stop_admin_q;
714 723
724 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
725
715 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 726 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
716 if (ret) 727 if (ret)
717 goto stop_admin_q; 728 goto stop_admin_q;
@@ -761,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
761 772
762 nvme_stop_keep_alive(&ctrl->ctrl); 773 nvme_stop_keep_alive(&ctrl->ctrl);
763 774
764 for (i = 0; i < ctrl->queue_count; i++) 775 for (i = 0; i < ctrl->queue_count; i++) {
765 clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); 776 clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
777 clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
778 }
766 779
767 if (ctrl->queue_count > 1) 780 if (ctrl->queue_count > 1)
768 nvme_stop_queues(&ctrl->ctrl); 781 nvme_stop_queues(&ctrl->ctrl);
@@ -1378,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
1378 return BLK_EH_HANDLED; 1391 return BLK_EH_HANDLED;
1379} 1392}
1380 1393
1394/*
1395 * We cannot accept any other command until the Connect command has completed.
1396 */
1397static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
1398 struct request *rq)
1399{
1400 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
1401 struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
1402
1403 if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
1404 cmd->common.opcode != nvme_fabrics_command ||
1405 cmd->fabrics.fctype != nvme_fabrics_type_connect)
1406 return false;
1407 }
1408
1409 return true;
1410}
1411
1381static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, 1412static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1382 const struct blk_mq_queue_data *bd) 1413 const struct blk_mq_queue_data *bd)
1383{ 1414{
@@ -1394,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1394 1425
1395 WARN_ON_ONCE(rq->tag < 0); 1426 WARN_ON_ONCE(rq->tag < 0);
1396 1427
1428 if (!nvme_rdma_queue_is_ready(queue, rq))
1429 return BLK_MQ_RQ_QUEUE_BUSY;
1430
1397 dev = queue->device->dev; 1431 dev = queue->device->dev;
1398 ib_dma_sync_single_for_cpu(dev, sqe->dma, 1432 ib_dma_sync_single_for_cpu(dev, sqe->dma,
1399 sizeof(struct nvme_command), DMA_TO_DEVICE); 1433 sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -1544,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
1544 if (error) 1578 if (error)
1545 goto out_cleanup_queue; 1579 goto out_cleanup_queue;
1546 1580
1581 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
1582
1547 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); 1583 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
1548 if (error) { 1584 if (error) {
1549 dev_err(ctrl->ctrl.device, 1585 dev_err(ctrl->ctrl.device,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b4cacb6f0258..a21437a33adb 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work)
838 838
839void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) 839void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
840{ 840{
841 ctrl->csts |= NVME_CSTS_CFS; 841 mutex_lock(&ctrl->lock);
842 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); 842 if (!(ctrl->csts & NVME_CSTS_CFS)) {
843 schedule_work(&ctrl->fatal_err_work); 843 ctrl->csts |= NVME_CSTS_CFS;
844 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
845 schedule_work(&ctrl->fatal_err_work);
846 }
847 mutex_unlock(&ctrl->lock);
844} 848}
845EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); 849EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
846 850
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index f8d23999e0f2..005ef5d17a19 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -951,6 +951,7 @@ err_destroy_cq:
951 951
952static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) 952static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
953{ 953{
954 ib_drain_qp(queue->cm_id->qp);
954 rdma_destroy_qp(queue->cm_id); 955 rdma_destroy_qp(queue->cm_id);
955 ib_free_cq(queue->cq); 956 ib_free_cq(queue->cq);
956} 957}
@@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
1066 spin_lock_init(&queue->rsp_wr_wait_lock); 1067 spin_lock_init(&queue->rsp_wr_wait_lock);
1067 INIT_LIST_HEAD(&queue->free_rsps); 1068 INIT_LIST_HEAD(&queue->free_rsps);
1068 spin_lock_init(&queue->rsps_lock); 1069 spin_lock_init(&queue->rsps_lock);
1070 INIT_LIST_HEAD(&queue->queue_list);
1069 1071
1070 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); 1072 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
1071 if (queue->idx < 0) { 1073 if (queue->idx < 0) {
@@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
1244 1246
1245 if (disconnect) { 1247 if (disconnect) {
1246 rdma_disconnect(queue->cm_id); 1248 rdma_disconnect(queue->cm_id);
1247 ib_drain_qp(queue->cm_id->qp);
1248 schedule_work(&queue->release_work); 1249 schedule_work(&queue->release_work);
1249 } 1250 }
1250} 1251}
@@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
1269{ 1270{
1270 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); 1271 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);
1271 1272
1272 pr_err("failed to connect queue\n"); 1273 mutex_lock(&nvmet_rdma_queue_mutex);
1274 if (!list_empty(&queue->queue_list))
1275 list_del_init(&queue->queue_list);
1276 mutex_unlock(&nvmet_rdma_queue_mutex);
1277
1278 pr_err("failed to connect queue %d\n", queue->idx);
1273 schedule_work(&queue->release_work); 1279 schedule_work(&queue->release_work);
1274} 1280}
1275 1281
@@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
1352 case RDMA_CM_EVENT_ADDR_CHANGE: 1358 case RDMA_CM_EVENT_ADDR_CHANGE:
1353 case RDMA_CM_EVENT_DISCONNECTED: 1359 case RDMA_CM_EVENT_DISCONNECTED:
1354 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1360 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1355 nvmet_rdma_queue_disconnect(queue); 1361 /*
1362 * We might end up here when we already freed the qp
1363 * which means queue release sequence is in progress,
1364 * so don't get in the way...
1365 */
1366 if (queue)
1367 nvmet_rdma_queue_disconnect(queue);
1356 break; 1368 break;
1357 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1369 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1358 ret = nvmet_rdma_device_removal(cm_id, queue); 1370 ret = nvmet_rdma_device_removal(cm_id, queue);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index b470f7e3521d..262281bd68fa 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np)
292 mdiodev = to_mdio_device(d); 292 mdiodev = to_mdio_device(d);
293 if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) 293 if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
294 return to_phy_device(d); 294 return to_phy_device(d);
295 put_device(d);
295 } 296 }
296 297
297 return NULL; 298 return NULL;
@@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np)
456 status.link = 1; 457 status.link = 1;
457 status.duplex = of_property_read_bool(fixed_link_node, 458 status.duplex = of_property_read_bool(fixed_link_node,
458 "full-duplex"); 459 "full-duplex");
459 if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) 460 if (of_property_read_u32(fixed_link_node, "speed",
461 &status.speed)) {
462 of_node_put(fixed_link_node);
460 return -EINVAL; 463 return -EINVAL;
464 }
461 status.pause = of_property_read_bool(fixed_link_node, "pause"); 465 status.pause = of_property_read_bool(fixed_link_node, "pause");
462 status.asym_pause = of_property_read_bool(fixed_link_node, 466 status.asym_pause = of_property_read_bool(fixed_link_node,
463 "asym-pause"); 467 "asym-pause");
@@ -486,3 +490,18 @@ int of_phy_register_fixed_link(struct device_node *np)
486 return -ENODEV; 490 return -ENODEV;
487} 491}
488EXPORT_SYMBOL(of_phy_register_fixed_link); 492EXPORT_SYMBOL(of_phy_register_fixed_link);
493
494void of_phy_deregister_fixed_link(struct device_node *np)
495{
496 struct phy_device *phydev;
497
498 phydev = of_phy_find_device(np);
499 if (!phydev)
500 return;
501
502 fixed_phy_unregister(phydev);
503
504 put_device(&phydev->mdio.dev); /* of_phy_find_device() */
505 phy_device_free(phydev); /* fixed_phy_register() */
506}
507EXPORT_SYMBOL(of_phy_deregister_fixed_link);
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 8df6312ed300..1a02038c4640 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) 4 * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
5 * 5 *
6 * Authors: Joao Pinto <jpmpinto@gmail.com> 6 * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 55f453de562e..c7f3408e3148 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
29 return intel_mid_pci_set_power_state(pdev, state); 29 return intel_mid_pci_set_power_state(pdev, state);
30} 30}
31 31
32static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
33{
34 return intel_mid_pci_get_power_state(pdev);
35}
36
32static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) 37static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
33{ 38{
34 return PCI_D3hot; 39 return PCI_D3hot;
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev)
52static struct pci_platform_pm_ops mid_pci_platform_pm = { 57static struct pci_platform_pm_ops mid_pci_platform_pm = {
53 .is_manageable = mid_pci_power_manageable, 58 .is_manageable = mid_pci_power_manageable,
54 .set_state = mid_pci_set_power_state, 59 .set_state = mid_pci_set_power_state,
60 .get_state = mid_pci_get_power_state,
55 .choose_state = mid_pci_choose_state, 61 .choose_state = mid_pci_choose_state,
56 .sleep_wake = mid_pci_sleep_wake, 62 .sleep_wake = mid_pci_sleep_wake,
57 .run_wake = mid_pci_run_wake, 63 .run_wake = mid_pci_run_wake,
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index db553dc22c8e..2b6a59266689 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -307,20 +307,6 @@ out:
307 return 0; 307 return 0;
308} 308}
309 309
310static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
311{
312 while (1) {
313 if (!pci_is_pcie(dev))
314 break;
315 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
316 return dev;
317 if (!dev->bus->self)
318 break;
319 dev = dev->bus->self;
320 }
321 return NULL;
322}
323
324static int find_aer_device_iter(struct device *device, void *data) 310static int find_aer_device_iter(struct device *device, void *data)
325{ 311{
326 struct pcie_device **result = data; 312 struct pcie_device **result = data;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ab002671fa60..104c46d53121 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1439,6 +1439,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1439 dev_warn(&dev->dev, "PCI-X settings not supported\n"); 1439 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1440} 1440}
1441 1441
1442static bool pcie_root_rcb_set(struct pci_dev *dev)
1443{
1444 struct pci_dev *rp = pcie_find_root_port(dev);
1445 u16 lnkctl;
1446
1447 if (!rp)
1448 return false;
1449
1450 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1451 if (lnkctl & PCI_EXP_LNKCTL_RCB)
1452 return true;
1453
1454 return false;
1455}
1456
1442static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) 1457static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1443{ 1458{
1444 int pos; 1459 int pos;
@@ -1468,9 +1483,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1468 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); 1483 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1469 1484
1470 /* Initialize Link Control Register */ 1485 /* Initialize Link Control Register */
1471 if (pcie_cap_has_lnkctl(dev)) 1486 if (pcie_cap_has_lnkctl(dev)) {
1487
1488 /*
1489 * If the Root Port supports Read Completion Boundary of
1490 * 128, set RCB to 128. Otherwise, clear it.
1491 */
1492 hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
1493 hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
1494 if (pcie_root_rcb_set(dev))
1495 hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
1496
1472 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, 1497 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1473 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); 1498 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1499 }
1474 1500
1475 /* Find Advanced Error Reporting Enhanced Capability */ 1501 /* Find Advanced Error Reporting Enhanced Capability */
1476 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 1502 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 87e6334eab93..547ca7b3f098 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -459,8 +459,6 @@ static int twl4030_phy_power_off(struct phy *phy)
459 struct twl4030_usb *twl = phy_get_drvdata(phy); 459 struct twl4030_usb *twl = phy_get_drvdata(phy);
460 460
461 dev_dbg(twl->dev, "%s\n", __func__); 461 dev_dbg(twl->dev, "%s\n", __func__);
462 pm_runtime_mark_last_busy(twl->dev);
463 pm_runtime_put_autosuspend(twl->dev);
464 462
465 return 0; 463 return 0;
466} 464}
@@ -472,6 +470,8 @@ static int twl4030_phy_power_on(struct phy *phy)
472 dev_dbg(twl->dev, "%s\n", __func__); 470 dev_dbg(twl->dev, "%s\n", __func__);
473 pm_runtime_get_sync(twl->dev); 471 pm_runtime_get_sync(twl->dev);
474 schedule_delayed_work(&twl->id_workaround_work, HZ); 472 schedule_delayed_work(&twl->id_workaround_work, HZ);
473 pm_runtime_mark_last_busy(twl->dev);
474 pm_runtime_put_autosuspend(twl->dev);
475 475
476 return 0; 476 return 0;
477} 477}
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 381871b2bb46..9d5bd7d5c610 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -474,6 +474,7 @@ static int meson_pwm_probe(struct platform_device *pdev)
474 if (IS_ERR(meson->base)) 474 if (IS_ERR(meson->base))
475 return PTR_ERR(meson->base); 475 return PTR_ERR(meson->base);
476 476
477 spin_lock_init(&meson->lock);
477 meson->chip.dev = &pdev->dev; 478 meson->chip.dev = &pdev->dev;
478 meson->chip.ops = &meson_pwm_ops; 479 meson->chip.ops = &meson_pwm_ops;
479 meson->chip.base = -1; 480 meson->chip.base = -1;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 0296d8178ae2..a813239300c3 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -425,6 +425,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
425 if (test_bit(PWMF_EXPORTED, &pwm->flags)) 425 if (test_bit(PWMF_EXPORTED, &pwm->flags))
426 pwm_unexport_child(parent, pwm); 426 pwm_unexport_child(parent, pwm);
427 } 427 }
428
429 put_device(parent);
428} 430}
429 431
430static int __init pwm_sysfs_init(void) 432static int __init pwm_sysfs_init(void)
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 18a93d3e3f93..d36534965635 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = {
327 { .compatible = "alphascale,asm9260-rtc", }, 327 { .compatible = "alphascale,asm9260-rtc", },
328 {} 328 {}
329}; 329};
330MODULE_DEVICE_TABLE(of, asm9260_dt_ids);
330 331
331static struct platform_driver asm9260_rtc_driver = { 332static struct platform_driver asm9260_rtc_driver = {
332 .probe = asm9260_rtc_probe, 333 .probe = asm9260_rtc_probe,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index dd3d59806ffa..7030d7cd3861 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -776,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq)
776 spin_unlock_irq(&rtc_lock); 776 spin_unlock_irq(&rtc_lock);
777} 777}
778 778
779static void __exit cmos_do_remove(struct device *dev) 779static void cmos_do_remove(struct device *dev)
780{ 780{
781 struct cmos_rtc *cmos = dev_get_drvdata(dev); 781 struct cmos_rtc *cmos = dev_get_drvdata(dev);
782 struct resource *ports; 782 struct resource *ports;
@@ -996,8 +996,9 @@ static u32 rtc_handler(void *context)
996 struct cmos_rtc *cmos = dev_get_drvdata(dev); 996 struct cmos_rtc *cmos = dev_get_drvdata(dev);
997 unsigned char rtc_control = 0; 997 unsigned char rtc_control = 0;
998 unsigned char rtc_intr; 998 unsigned char rtc_intr;
999 unsigned long flags;
999 1000
1000 spin_lock_irq(&rtc_lock); 1001 spin_lock_irqsave(&rtc_lock, flags);
1001 if (cmos_rtc.suspend_ctrl) 1002 if (cmos_rtc.suspend_ctrl)
1002 rtc_control = CMOS_READ(RTC_CONTROL); 1003 rtc_control = CMOS_READ(RTC_CONTROL);
1003 if (rtc_control & RTC_AIE) { 1004 if (rtc_control & RTC_AIE) {
@@ -1006,7 +1007,7 @@ static u32 rtc_handler(void *context)
1006 rtc_intr = CMOS_READ(RTC_INTR_FLAGS); 1007 rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
1007 rtc_update_irq(cmos->rtc, 1, rtc_intr); 1008 rtc_update_irq(cmos->rtc, 1, rtc_intr);
1008 } 1009 }
1009 spin_unlock_irq(&rtc_lock); 1010 spin_unlock_irqrestore(&rtc_lock, flags);
1010 1011
1011 pm_wakeup_event(dev, 0); 1012 pm_wakeup_event(dev, 0);
1012 acpi_clear_event(ACPI_EVENT_RTC); 1013 acpi_clear_event(ACPI_EVENT_RTC);
@@ -1129,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
1129 pnp_irq(pnp, 0)); 1130 pnp_irq(pnp, 0));
1130} 1131}
1131 1132
1132static void __exit cmos_pnp_remove(struct pnp_dev *pnp) 1133static void cmos_pnp_remove(struct pnp_dev *pnp)
1133{ 1134{
1134 cmos_do_remove(&pnp->dev); 1135 cmos_do_remove(&pnp->dev);
1135} 1136}
@@ -1161,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = {
1161 .name = (char *) driver_name, 1162 .name = (char *) driver_name,
1162 .id_table = rtc_ids, 1163 .id_table = rtc_ids,
1163 .probe = cmos_pnp_probe, 1164 .probe = cmos_pnp_probe,
1164 .remove = __exit_p(cmos_pnp_remove), 1165 .remove = cmos_pnp_remove,
1165 .shutdown = cmos_pnp_shutdown, 1166 .shutdown = cmos_pnp_shutdown,
1166 1167
1167 /* flag ensures resume() gets called, and stops syslog spam */ 1168 /* flag ensures resume() gets called, and stops syslog spam */
@@ -1238,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev)
1238 return cmos_do_probe(&pdev->dev, resource, irq); 1239 return cmos_do_probe(&pdev->dev, resource, irq);
1239} 1240}
1240 1241
1241static int __exit cmos_platform_remove(struct platform_device *pdev) 1242static int cmos_platform_remove(struct platform_device *pdev)
1242{ 1243{
1243 cmos_do_remove(&pdev->dev); 1244 cmos_do_remove(&pdev->dev);
1244 return 0; 1245 return 0;
@@ -1263,7 +1264,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev)
1263MODULE_ALIAS("platform:rtc_cmos"); 1264MODULE_ALIAS("platform:rtc_cmos");
1264 1265
1265static struct platform_driver cmos_platform_driver = { 1266static struct platform_driver cmos_platform_driver = {
1266 .remove = __exit_p(cmos_platform_remove), 1267 .remove = cmos_platform_remove,
1267 .shutdown = cmos_platform_shutdown, 1268 .shutdown = cmos_platform_shutdown,
1268 .driver = { 1269 .driver = {
1269 .name = driver_name, 1270 .name = driver_name,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index b04ea9b5ae67..51e52446eacb 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -113,6 +113,7 @@
113/* OMAP_RTC_OSC_REG bit fields: */ 113/* OMAP_RTC_OSC_REG bit fields: */
114#define OMAP_RTC_OSC_32KCLK_EN BIT(6) 114#define OMAP_RTC_OSC_32KCLK_EN BIT(6)
115#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) 115#define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3)
116#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4)
116 117
117/* OMAP_RTC_IRQWAKEEN bit fields: */ 118/* OMAP_RTC_IRQWAKEEN bit fields: */
118#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) 119#define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1)
@@ -146,6 +147,7 @@ struct omap_rtc {
146 u8 interrupts_reg; 147 u8 interrupts_reg;
147 bool is_pmic_controller; 148 bool is_pmic_controller;
148 bool has_ext_clk; 149 bool has_ext_clk;
150 bool is_suspending;
149 const struct omap_rtc_device_type *type; 151 const struct omap_rtc_device_type *type;
150 struct pinctrl_dev *pctldev; 152 struct pinctrl_dev *pctldev;
151}; 153};
@@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev)
786 */ 788 */
787 if (rtc->has_ext_clk) { 789 if (rtc->has_ext_clk) {
788 reg = rtc_read(rtc, OMAP_RTC_OSC_REG); 790 reg = rtc_read(rtc, OMAP_RTC_OSC_REG);
789 rtc_write(rtc, OMAP_RTC_OSC_REG, 791 reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE;
790 reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); 792 reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC;
793 rtc_writel(rtc, OMAP_RTC_OSC_REG, reg);
791 } 794 }
792 795
793 rtc->type->lock(rtc); 796 rtc->type->lock(rtc);
@@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev)
898 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); 901 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0);
899 rtc->type->lock(rtc); 902 rtc->type->lock(rtc);
900 903
901 /* Disable the clock/module */ 904 rtc->is_suspending = true;
902 pm_runtime_put_sync(dev);
903 905
904 return 0; 906 return 0;
905} 907}
@@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev)
908{ 910{
909 struct omap_rtc *rtc = dev_get_drvdata(dev); 911 struct omap_rtc *rtc = dev_get_drvdata(dev);
910 912
911 /* Enable the clock/module so that we can access the registers */
912 pm_runtime_get_sync(dev);
913
914 rtc->type->unlock(rtc); 913 rtc->type->unlock(rtc);
915 if (device_may_wakeup(dev)) 914 if (device_may_wakeup(dev))
916 disable_irq_wake(rtc->irq_alarm); 915 disable_irq_wake(rtc->irq_alarm);
@@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev)
918 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); 917 rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg);
919 rtc->type->lock(rtc); 918 rtc->type->lock(rtc);
920 919
920 rtc->is_suspending = false;
921
921 return 0; 922 return 0;
922} 923}
923#endif 924#endif
924 925
925static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume); 926#ifdef CONFIG_PM
927static int omap_rtc_runtime_suspend(struct device *dev)
928{
929 struct omap_rtc *rtc = dev_get_drvdata(dev);
930
931 if (rtc->is_suspending && !rtc->has_ext_clk)
932 return -EBUSY;
933
934 return 0;
935}
936
937static int omap_rtc_runtime_resume(struct device *dev)
938{
939 return 0;
940}
941#endif
942
943static const struct dev_pm_ops omap_rtc_pm_ops = {
944 SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume)
945 SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend,
946 omap_rtc_runtime_resume, NULL)
947};
926 948
927static void omap_rtc_shutdown(struct platform_device *pdev) 949static void omap_rtc_shutdown(struct platform_device *pdev)
928{ 950{
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aebc4ddb3060..ac05317bba7f 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1083,7 +1083,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
1083 nonemb_cmd = &phba->boot_struct.nonemb_cmd; 1083 nonemb_cmd = &phba->boot_struct.nonemb_cmd;
1084 nonemb_cmd->size = sizeof(*resp); 1084 nonemb_cmd->size = sizeof(*resp);
1085 nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, 1085 nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
1086 sizeof(nonemb_cmd->size), 1086 nonemb_cmd->size,
1087 &nonemb_cmd->dma); 1087 &nonemb_cmd->dma);
1088 if (!nonemb_cmd->va) { 1088 if (!nonemb_cmd->va) {
1089 mutex_unlock(&ctrl->mbox_lock); 1089 mutex_unlock(&ctrl->mbox_lock);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index d007ec18179a..a1d6ab76a514 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2009,7 +2009,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2009 2009
2010static int hpsa_slave_alloc(struct scsi_device *sdev) 2010static int hpsa_slave_alloc(struct scsi_device *sdev)
2011{ 2011{
2012 struct hpsa_scsi_dev_t *sd; 2012 struct hpsa_scsi_dev_t *sd = NULL;
2013 unsigned long flags; 2013 unsigned long flags;
2014 struct ctlr_info *h; 2014 struct ctlr_info *h;
2015 2015
@@ -2026,7 +2026,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev)
2026 sd->target = sdev_id(sdev); 2026 sd->target = sdev_id(sdev);
2027 sd->lun = sdev->lun; 2027 sd->lun = sdev->lun;
2028 } 2028 }
2029 } else 2029 }
2030 if (!sd)
2030 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), 2031 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2031 sdev_id(sdev), sdev->lun); 2032 sdev_id(sdev), sdev->lun);
2032 2033
@@ -3840,6 +3841,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
3840 sizeof(this_device->vendor)); 3841 sizeof(this_device->vendor));
3841 memcpy(this_device->model, &inq_buff[16], 3842 memcpy(this_device->model, &inq_buff[16],
3842 sizeof(this_device->model)); 3843 sizeof(this_device->model));
3844 this_device->rev = inq_buff[2];
3843 memset(this_device->device_id, 0, 3845 memset(this_device->device_id, 0,
3844 sizeof(this_device->device_id)); 3846 sizeof(this_device->device_id));
3845 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, 3847 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
@@ -3929,10 +3931,14 @@ static void figure_bus_target_lun(struct ctlr_info *h,
3929 3931
3930 if (!is_logical_dev_addr_mode(lunaddrbytes)) { 3932 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3931 /* physical device, target and lun filled in later */ 3933 /* physical device, target and lun filled in later */
3932 if (is_hba_lunid(lunaddrbytes)) 3934 if (is_hba_lunid(lunaddrbytes)) {
3935 int bus = HPSA_HBA_BUS;
3936
3937 if (!device->rev)
3938 bus = HPSA_LEGACY_HBA_BUS;
3933 hpsa_set_bus_target_lun(device, 3939 hpsa_set_bus_target_lun(device,
3934 HPSA_HBA_BUS, 0, lunid & 0x3fff); 3940 bus, 0, lunid & 0x3fff);
3935 else 3941 } else
3936 /* defer target, lun assignment for physical devices */ 3942 /* defer target, lun assignment for physical devices */
3937 hpsa_set_bus_target_lun(device, 3943 hpsa_set_bus_target_lun(device,
3938 HPSA_PHYSICAL_DEVICE_BUS, -1, -1); 3944 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 82cdfad874f3..9ea162de80dc 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -69,6 +69,7 @@ struct hpsa_scsi_dev_t {
69 u64 sas_address; 69 u64 sas_address;
70 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ 70 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
71 unsigned char model[16]; /* bytes 16-31 of inquiry data */ 71 unsigned char model[16]; /* bytes 16-31 of inquiry data */
72 unsigned char rev; /* byte 2 of inquiry data */
72 unsigned char raid_level; /* from inquiry page 0xC1 */ 73 unsigned char raid_level; /* from inquiry page 0xC1 */
73 unsigned char volume_offline; /* discovered via TUR or VPD */ 74 unsigned char volume_offline; /* discovered via TUR or VPD */
74 u16 queue_depth; /* max queue_depth for this device */ 75 u16 queue_depth; /* max queue_depth for this device */
@@ -402,6 +403,7 @@ struct offline_device_entry {
402#define HPSA_RAID_VOLUME_BUS 1 403#define HPSA_RAID_VOLUME_BUS 1
403#define HPSA_EXTERNAL_RAID_VOLUME_BUS 2 404#define HPSA_EXTERNAL_RAID_VOLUME_BUS 2
404#define HPSA_HBA_BUS 0 405#define HPSA_HBA_BUS 0
406#define HPSA_LEGACY_HBA_BUS 3
405 407
406/* 408/*
407 Send the command to the hardware 409 Send the command to the hardware
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 04ce7cfb6d1b..50c71678a156 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -308,7 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
308 fc_stats = &lport->host_stats; 308 fc_stats = &lport->host_stats;
309 memset(fc_stats, 0, sizeof(struct fc_host_statistics)); 309 memset(fc_stats, 0, sizeof(struct fc_host_statistics));
310 310
311 fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ; 311 fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
312 312
313 for_each_possible_cpu(cpu) { 313 for_each_possible_cpu(cpu) {
314 struct fc_stats *stats; 314 struct fc_stats *stats;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8aa769a2d919..1c4744e78173 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3885,6 +3885,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
3885 } 3885 }
3886} 3886}
3887 3887
3888static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
3889{
3890 return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
3891}
3892
3888/** 3893/**
3889 * _scsih_flush_running_cmds - completing outstanding commands. 3894 * _scsih_flush_running_cmds - completing outstanding commands.
3890 * @ioc: per adapter object 3895 * @ioc: per adapter object
@@ -3906,6 +3911,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
3906 if (!scmd) 3911 if (!scmd)
3907 continue; 3912 continue;
3908 count++; 3913 count++;
3914 if (ata_12_16_cmd(scmd))
3915 scsi_internal_device_unblock(scmd->device,
3916 SDEV_RUNNING);
3909 mpt3sas_base_free_smid(ioc, smid); 3917 mpt3sas_base_free_smid(ioc, smid);
3910 scsi_dma_unmap(scmd); 3918 scsi_dma_unmap(scmd);
3911 if (ioc->pci_error_recovery) 3919 if (ioc->pci_error_recovery)
@@ -4010,8 +4018,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4010 SAM_STAT_CHECK_CONDITION; 4018 SAM_STAT_CHECK_CONDITION;
4011} 4019}
4012 4020
4013
4014
4015/** 4021/**
4016 * scsih_qcmd - main scsi request entry point 4022 * scsih_qcmd - main scsi request entry point
4017 * @scmd: pointer to scsi command object 4023 * @scmd: pointer to scsi command object
@@ -4038,6 +4044,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4038 if (ioc->logging_level & MPT_DEBUG_SCSI) 4044 if (ioc->logging_level & MPT_DEBUG_SCSI)
4039 scsi_print_command(scmd); 4045 scsi_print_command(scmd);
4040 4046
4047 /*
4048 * Lock the device for any subsequent command until command is
4049 * done.
4050 */
4051 if (ata_12_16_cmd(scmd))
4052 scsi_internal_device_block(scmd->device);
4053
4041 sas_device_priv_data = scmd->device->hostdata; 4054 sas_device_priv_data = scmd->device->hostdata;
4042 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { 4055 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4043 scmd->result = DID_NO_CONNECT << 16; 4056 scmd->result = DID_NO_CONNECT << 16;
@@ -4613,6 +4626,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4613 if (scmd == NULL) 4626 if (scmd == NULL)
4614 return 1; 4627 return 1;
4615 4628
4629 if (ata_12_16_cmd(scmd))
4630 scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
4631
4616 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4632 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4617 4633
4618 if (mpi_reply == NULL) { 4634 if (mpi_reply == NULL) {
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 86eb19902bac..c7cc8035eacb 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -791,8 +791,10 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
791 slot->slot_tag = tag; 791 slot->slot_tag = tag;
792 792
793 slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); 793 slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
794 if (!slot->buf) 794 if (!slot->buf) {
795 rc = -ENOMEM;
795 goto err_out_tag; 796 goto err_out_tag;
797 }
796 memset(slot->buf, 0, MVS_SLOT_BUF_SZ); 798 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
797 799
798 tei.task = task; 800 tei.task = task;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 567fa080e261..56d6142852a5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1456,15 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1456 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1456 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1457 sp = req->outstanding_cmds[cnt]; 1457 sp = req->outstanding_cmds[cnt];
1458 if (sp) { 1458 if (sp) {
1459 /* Get a reference to the sp and drop the lock. 1459 /* Don't abort commands in adapter during EEH
1460 * The reference ensures this sp->done() call 1460 * recovery as it's not accessible/responding.
1461 * - and not the call in qla2xxx_eh_abort() -
1462 * ends the SCSI command (with result 'res').
1463 */ 1461 */
1464 sp_get(sp); 1462 if (!ha->flags.eeh_busy) {
1465 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1463 /* Get a reference to the sp and drop the lock.
1466 qla2xxx_eh_abort(GET_CMD_SP(sp)); 1464 * The reference ensures this sp->done() call
1467 spin_lock_irqsave(&ha->hardware_lock, flags); 1465 * - and not the call in qla2xxx_eh_abort() -
1466 * ends the SCSI command (with result 'res').
1467 */
1468 sp_get(sp);
1469 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1470 qla2xxx_eh_abort(GET_CMD_SP(sp));
1471 spin_lock_irqsave(&ha->hardware_lock, flags);
1472 }
1468 req->outstanding_cmds[cnt] = NULL; 1473 req->outstanding_cmds[cnt] = NULL;
1469 sp->done(vha, sp, res); 1474 sp->done(vha, sp, res);
1470 } 1475 }
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 4377e87ee79c..892a0b058b99 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -356,8 +356,8 @@ struct qlogicpti {
356 356
357 /* The rest of the elements are unimportant for performance. */ 357 /* The rest of the elements are unimportant for performance. */
358 struct qlogicpti *next; 358 struct qlogicpti *next;
359 __u32 res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ 359 dma_addr_t res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/
360 __u32 req_dvma; /* Ptr to REQUEST bufs (DVMA) */ 360 dma_addr_t req_dvma; /* Ptr to REQUEST bufs (DVMA) */
361 u_char fware_majrev, fware_minrev, fware_micrev; 361 u_char fware_majrev, fware_minrev, fware_micrev;
362 struct Scsi_Host *qhost; 362 struct Scsi_Host *qhost;
363 int qpti_id; 363 int qpti_id;
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 7a223074df3d..afada655f861 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
669 .set_cur_state = powerclamp_set_cur_state, 669 .set_cur_state = powerclamp_set_cur_state,
670}; 670};
671 671
672static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
673 { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
674 {}
675};
676MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
677
672static int __init powerclamp_probe(void) 678static int __init powerclamp_probe(void)
673{ 679{
674 if (!boot_cpu_has(X86_FEATURE_MWAIT)) { 680
681 if (!x86_match_cpu(intel_powerclamp_ids)) {
675 pr_err("CPU does not support MWAIT"); 682 pr_err("CPU does not support MWAIT");
676 return -ENODEV; 683 return -ENODEV;
677 } 684 }
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 69426e644d17..3dbb4a21ab44 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
914 if (!ci) 914 if (!ci)
915 return -ENOMEM; 915 return -ENOMEM;
916 916
917 spin_lock_init(&ci->lock);
917 ci->dev = dev; 918 ci->dev = dev;
918 ci->platdata = dev_get_platdata(dev); 919 ci->platdata = dev_get_platdata(dev);
919 ci->imx28_write_fix = !!(ci->platdata->flags & 920 ci->imx28_write_fix = !!(ci->platdata->flags &
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 661f43fe0f9e..c9e80ad48fdc 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci)
1889 struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; 1889 struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
1890 int retval = 0; 1890 int retval = 0;
1891 1891
1892 spin_lock_init(&ci->lock);
1893
1894 ci->gadget.ops = &usb_gadget_ops; 1892 ci->gadget.ops = &usb_gadget_ops;
1895 ci->gadget.speed = USB_SPEED_UNKNOWN; 1893 ci->gadget.speed = USB_SPEED_UNKNOWN;
1896 ci->gadget.max_speed = USB_SPEED_HIGH; 1894 ci->gadget.max_speed = USB_SPEED_HIGH;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e40d47d47d82..17989b72cdae 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f,
3225 3225
3226 switch (creq->bRequestType & USB_RECIP_MASK) { 3226 switch (creq->bRequestType & USB_RECIP_MASK) {
3227 case USB_RECIP_INTERFACE: 3227 case USB_RECIP_INTERFACE:
3228 return ffs_func_revmap_intf(func, 3228 return (ffs_func_revmap_intf(func,
3229 le16_to_cpu(creq->wIndex) >= 0); 3229 le16_to_cpu(creq->wIndex)) >= 0);
3230 case USB_RECIP_ENDPOINT: 3230 case USB_RECIP_ENDPOINT:
3231 return ffs_func_revmap_ep(func, 3231 return (ffs_func_revmap_ep(func,
3232 le16_to_cpu(creq->wIndex) >= 0); 3232 le16_to_cpu(creq->wIndex)) >= 0);
3233 default: 3233 default:
3234 return (bool) (func->ffs->user_flags & 3234 return (bool) (func->ffs->user_flags &
3235 FUNCTIONFS_ALL_CTRL_RECIP); 3235 FUNCTIONFS_ALL_CTRL_RECIP);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index e01116e4c067..c3e172e15ec3 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -986,7 +986,7 @@ b_host:
986 } 986 }
987#endif 987#endif
988 988
989 schedule_work(&musb->irq_work); 989 schedule_delayed_work(&musb->irq_work, 0);
990 990
991 return handled; 991 return handled;
992} 992}
@@ -1855,14 +1855,23 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1855 MUSB_DEVCTL_HR; 1855 MUSB_DEVCTL_HR;
1856 switch (devctl & ~s) { 1856 switch (devctl & ~s) {
1857 case MUSB_QUIRK_B_INVALID_VBUS_91: 1857 case MUSB_QUIRK_B_INVALID_VBUS_91:
1858 if (!musb->session && !musb->quirk_invalid_vbus) { 1858 if (musb->quirk_retries--) {
1859 musb->quirk_invalid_vbus = true;
1860 musb_dbg(musb, 1859 musb_dbg(musb,
1861 "First invalid vbus, assume no session"); 1860 "Poll devctl on invalid vbus, assume no session");
1861 schedule_delayed_work(&musb->irq_work,
1862 msecs_to_jiffies(1000));
1863
1862 return; 1864 return;
1863 } 1865 }
1864 break;
1865 case MUSB_QUIRK_A_DISCONNECT_19: 1866 case MUSB_QUIRK_A_DISCONNECT_19:
1867 if (musb->quirk_retries--) {
1868 musb_dbg(musb,
1869 "Poll devctl on possible host mode disconnect");
1870 schedule_delayed_work(&musb->irq_work,
1871 msecs_to_jiffies(1000));
1872
1873 return;
1874 }
1866 if (!musb->session) 1875 if (!musb->session)
1867 break; 1876 break;
1868 musb_dbg(musb, "Allow PM on possible host mode disconnect"); 1877 musb_dbg(musb, "Allow PM on possible host mode disconnect");
@@ -1886,9 +1895,9 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1886 if (error < 0) 1895 if (error < 0)
1887 dev_err(musb->controller, "Could not enable: %i\n", 1896 dev_err(musb->controller, "Could not enable: %i\n",
1888 error); 1897 error);
1898 musb->quirk_retries = 3;
1889 } else { 1899 } else {
1890 musb_dbg(musb, "Allow PM with no session: %02x", devctl); 1900 musb_dbg(musb, "Allow PM with no session: %02x", devctl);
1891 musb->quirk_invalid_vbus = false;
1892 pm_runtime_mark_last_busy(musb->controller); 1901 pm_runtime_mark_last_busy(musb->controller);
1893 pm_runtime_put_autosuspend(musb->controller); 1902 pm_runtime_put_autosuspend(musb->controller);
1894 } 1903 }
@@ -1899,7 +1908,7 @@ static void musb_pm_runtime_check_session(struct musb *musb)
1899/* Only used to provide driver mode change events */ 1908/* Only used to provide driver mode change events */
1900static void musb_irq_work(struct work_struct *data) 1909static void musb_irq_work(struct work_struct *data)
1901{ 1910{
1902 struct musb *musb = container_of(data, struct musb, irq_work); 1911 struct musb *musb = container_of(data, struct musb, irq_work.work);
1903 1912
1904 musb_pm_runtime_check_session(musb); 1913 musb_pm_runtime_check_session(musb);
1905 1914
@@ -1969,6 +1978,7 @@ static struct musb *allocate_instance(struct device *dev,
1969 INIT_LIST_HEAD(&musb->control); 1978 INIT_LIST_HEAD(&musb->control);
1970 INIT_LIST_HEAD(&musb->in_bulk); 1979 INIT_LIST_HEAD(&musb->in_bulk);
1971 INIT_LIST_HEAD(&musb->out_bulk); 1980 INIT_LIST_HEAD(&musb->out_bulk);
1981 INIT_LIST_HEAD(&musb->pending_list);
1972 1982
1973 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1983 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1974 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; 1984 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
@@ -2018,6 +2028,84 @@ static void musb_free(struct musb *musb)
2018 musb_host_free(musb); 2028 musb_host_free(musb);
2019} 2029}
2020 2030
2031struct musb_pending_work {
2032 int (*callback)(struct musb *musb, void *data);
2033 void *data;
2034 struct list_head node;
2035};
2036
2037/*
2038 * Called from musb_runtime_resume(), musb_resume(), and
2039 * musb_queue_resume_work(). Callers must take musb->lock.
2040 */
2041static int musb_run_resume_work(struct musb *musb)
2042{
2043 struct musb_pending_work *w, *_w;
2044 unsigned long flags;
2045 int error = 0;
2046
2047 spin_lock_irqsave(&musb->list_lock, flags);
2048 list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
2049 if (w->callback) {
2050 error = w->callback(musb, w->data);
2051 if (error < 0) {
2052 dev_err(musb->controller,
2053 "resume callback %p failed: %i\n",
2054 w->callback, error);
2055 }
2056 }
2057 list_del(&w->node);
2058 devm_kfree(musb->controller, w);
2059 }
2060 spin_unlock_irqrestore(&musb->list_lock, flags);
2061
2062 return error;
2063}
2064
2065/*
2066 * Called to run work if device is active or else queue the work to happen
2067 * on resume. Caller must take musb->lock and must hold an RPM reference.
2068 *
2069 * Note that we cowardly refuse queuing work after musb PM runtime
2070 * resume is done calling musb_run_resume_work() and return -EINPROGRESS
2071 * instead.
2072 */
2073int musb_queue_resume_work(struct musb *musb,
2074 int (*callback)(struct musb *musb, void *data),
2075 void *data)
2076{
2077 struct musb_pending_work *w;
2078 unsigned long flags;
2079 int error;
2080
2081 if (WARN_ON(!callback))
2082 return -EINVAL;
2083
2084 if (pm_runtime_active(musb->controller))
2085 return callback(musb, data);
2086
2087 w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
2088 if (!w)
2089 return -ENOMEM;
2090
2091 w->callback = callback;
2092 w->data = data;
2093 spin_lock_irqsave(&musb->list_lock, flags);
2094 if (musb->is_runtime_suspended) {
2095 list_add_tail(&w->node, &musb->pending_list);
2096 error = 0;
2097 } else {
2098 dev_err(musb->controller, "could not add resume work %p\n",
2099 callback);
2100 devm_kfree(musb->controller, w);
2101 error = -EINPROGRESS;
2102 }
2103 spin_unlock_irqrestore(&musb->list_lock, flags);
2104
2105 return error;
2106}
2107EXPORT_SYMBOL_GPL(musb_queue_resume_work);
2108
2021static void musb_deassert_reset(struct work_struct *work) 2109static void musb_deassert_reset(struct work_struct *work)
2022{ 2110{
2023 struct musb *musb; 2111 struct musb *musb;
@@ -2065,6 +2153,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2065 } 2153 }
2066 2154
2067 spin_lock_init(&musb->lock); 2155 spin_lock_init(&musb->lock);
2156 spin_lock_init(&musb->list_lock);
2068 musb->board_set_power = plat->set_power; 2157 musb->board_set_power = plat->set_power;
2069 musb->min_power = plat->min_power; 2158 musb->min_power = plat->min_power;
2070 musb->ops = plat->platform_ops; 2159 musb->ops = plat->platform_ops;
@@ -2208,7 +2297,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2208 musb_generic_disable(musb); 2297 musb_generic_disable(musb);
2209 2298
2210 /* Init IRQ workqueue before request_irq */ 2299 /* Init IRQ workqueue before request_irq */
2211 INIT_WORK(&musb->irq_work, musb_irq_work); 2300 INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
2212 INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); 2301 INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
2213 INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); 2302 INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
2214 2303
@@ -2291,6 +2380,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2291 if (status) 2380 if (status)
2292 goto fail5; 2381 goto fail5;
2293 2382
2383 musb->is_initialized = 1;
2294 pm_runtime_mark_last_busy(musb->controller); 2384 pm_runtime_mark_last_busy(musb->controller);
2295 pm_runtime_put_autosuspend(musb->controller); 2385 pm_runtime_put_autosuspend(musb->controller);
2296 2386
@@ -2304,7 +2394,7 @@ fail4:
2304 musb_host_cleanup(musb); 2394 musb_host_cleanup(musb);
2305 2395
2306fail3: 2396fail3:
2307 cancel_work_sync(&musb->irq_work); 2397 cancel_delayed_work_sync(&musb->irq_work);
2308 cancel_delayed_work_sync(&musb->finish_resume_work); 2398 cancel_delayed_work_sync(&musb->finish_resume_work);
2309 cancel_delayed_work_sync(&musb->deassert_reset_work); 2399 cancel_delayed_work_sync(&musb->deassert_reset_work);
2310 if (musb->dma_controller) 2400 if (musb->dma_controller)
@@ -2371,7 +2461,7 @@ static int musb_remove(struct platform_device *pdev)
2371 */ 2461 */
2372 musb_exit_debugfs(musb); 2462 musb_exit_debugfs(musb);
2373 2463
2374 cancel_work_sync(&musb->irq_work); 2464 cancel_delayed_work_sync(&musb->irq_work);
2375 cancel_delayed_work_sync(&musb->finish_resume_work); 2465 cancel_delayed_work_sync(&musb->finish_resume_work);
2376 cancel_delayed_work_sync(&musb->deassert_reset_work); 2466 cancel_delayed_work_sync(&musb->deassert_reset_work);
2377 pm_runtime_get_sync(musb->controller); 2467 pm_runtime_get_sync(musb->controller);
@@ -2557,6 +2647,7 @@ static int musb_suspend(struct device *dev)
2557 2647
2558 musb_platform_disable(musb); 2648 musb_platform_disable(musb);
2559 musb_generic_disable(musb); 2649 musb_generic_disable(musb);
2650 WARN_ON(!list_empty(&musb->pending_list));
2560 2651
2561 spin_lock_irqsave(&musb->lock, flags); 2652 spin_lock_irqsave(&musb->lock, flags);
2562 2653
@@ -2578,9 +2669,11 @@ static int musb_suspend(struct device *dev)
2578 2669
2579static int musb_resume(struct device *dev) 2670static int musb_resume(struct device *dev)
2580{ 2671{
2581 struct musb *musb = dev_to_musb(dev); 2672 struct musb *musb = dev_to_musb(dev);
2582 u8 devctl; 2673 unsigned long flags;
2583 u8 mask; 2674 int error;
2675 u8 devctl;
2676 u8 mask;
2584 2677
2585 /* 2678 /*
2586 * For static cmos like DaVinci, register values were preserved 2679 * For static cmos like DaVinci, register values were preserved
@@ -2614,6 +2707,13 @@ static int musb_resume(struct device *dev)
2614 2707
2615 musb_start(musb); 2708 musb_start(musb);
2616 2709
2710 spin_lock_irqsave(&musb->lock, flags);
2711 error = musb_run_resume_work(musb);
2712 if (error)
2713 dev_err(musb->controller, "resume work failed with %i\n",
2714 error);
2715 spin_unlock_irqrestore(&musb->lock, flags);
2716
2617 return 0; 2717 return 0;
2618} 2718}
2619 2719
@@ -2622,14 +2722,16 @@ static int musb_runtime_suspend(struct device *dev)
2622 struct musb *musb = dev_to_musb(dev); 2722 struct musb *musb = dev_to_musb(dev);
2623 2723
2624 musb_save_context(musb); 2724 musb_save_context(musb);
2725 musb->is_runtime_suspended = 1;
2625 2726
2626 return 0; 2727 return 0;
2627} 2728}
2628 2729
2629static int musb_runtime_resume(struct device *dev) 2730static int musb_runtime_resume(struct device *dev)
2630{ 2731{
2631 struct musb *musb = dev_to_musb(dev); 2732 struct musb *musb = dev_to_musb(dev);
2632 static int first = 1; 2733 unsigned long flags;
2734 int error;
2633 2735
2634 /* 2736 /*
2635 * When pm_runtime_get_sync called for the first time in driver 2737 * When pm_runtime_get_sync called for the first time in driver
@@ -2640,9 +2742,10 @@ static int musb_runtime_resume(struct device *dev)
2640 * Also context restore without save does not make 2742 * Also context restore without save does not make
2641 * any sense 2743 * any sense
2642 */ 2744 */
2643 if (!first) 2745 if (!musb->is_initialized)
2644 musb_restore_context(musb); 2746 return 0;
2645 first = 0; 2747
2748 musb_restore_context(musb);
2646 2749
2647 if (musb->need_finish_resume) { 2750 if (musb->need_finish_resume) {
2648 musb->need_finish_resume = 0; 2751 musb->need_finish_resume = 0;
@@ -2650,6 +2753,14 @@ static int musb_runtime_resume(struct device *dev)
2650 msecs_to_jiffies(USB_RESUME_TIMEOUT)); 2753 msecs_to_jiffies(USB_RESUME_TIMEOUT));
2651 } 2754 }
2652 2755
2756 spin_lock_irqsave(&musb->lock, flags);
2757 error = musb_run_resume_work(musb);
2758 if (error)
2759 dev_err(musb->controller, "resume work failed with %i\n",
2760 error);
2761 musb->is_runtime_suspended = 0;
2762 spin_unlock_irqrestore(&musb->lock, flags);
2763
2653 return 0; 2764 return 0;
2654} 2765}
2655 2766
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 2cb88a498f8a..91817d77d59c 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -303,13 +303,14 @@ struct musb_context_registers {
303struct musb { 303struct musb {
304 /* device lock */ 304 /* device lock */
305 spinlock_t lock; 305 spinlock_t lock;
306 spinlock_t list_lock; /* resume work list lock */
306 307
307 struct musb_io io; 308 struct musb_io io;
308 const struct musb_platform_ops *ops; 309 const struct musb_platform_ops *ops;
309 struct musb_context_registers context; 310 struct musb_context_registers context;
310 311
311 irqreturn_t (*isr)(int, void *); 312 irqreturn_t (*isr)(int, void *);
312 struct work_struct irq_work; 313 struct delayed_work irq_work;
313 struct delayed_work deassert_reset_work; 314 struct delayed_work deassert_reset_work;
314 struct delayed_work finish_resume_work; 315 struct delayed_work finish_resume_work;
315 struct delayed_work gadget_work; 316 struct delayed_work gadget_work;
@@ -337,6 +338,7 @@ struct musb {
337 struct list_head control; /* of musb_qh */ 338 struct list_head control; /* of musb_qh */
338 struct list_head in_bulk; /* of musb_qh */ 339 struct list_head in_bulk; /* of musb_qh */
339 struct list_head out_bulk; /* of musb_qh */ 340 struct list_head out_bulk; /* of musb_qh */
341 struct list_head pending_list; /* pending work list */
340 342
341 struct timer_list otg_timer; 343 struct timer_list otg_timer;
342 struct notifier_block nb; 344 struct notifier_block nb;
@@ -379,12 +381,15 @@ struct musb {
379 381
380 int port_mode; /* MUSB_PORT_MODE_* */ 382 int port_mode; /* MUSB_PORT_MODE_* */
381 bool session; 383 bool session;
382 bool quirk_invalid_vbus; 384 unsigned long quirk_retries;
383 bool is_host; 385 bool is_host;
384 386
385 int a_wait_bcon; /* VBUS timeout in msecs */ 387 int a_wait_bcon; /* VBUS timeout in msecs */
386 unsigned long idle_timeout; /* Next timeout in jiffies */ 388 unsigned long idle_timeout; /* Next timeout in jiffies */
387 389
390 unsigned is_initialized:1;
391 unsigned is_runtime_suspended:1;
392
388 /* active means connected and not suspended */ 393 /* active means connected and not suspended */
389 unsigned is_active:1; 394 unsigned is_active:1;
390 395
@@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *);
540 545
541extern void musb_hnp_stop(struct musb *musb); 546extern void musb_hnp_stop(struct musb *musb);
542 547
548int musb_queue_resume_work(struct musb *musb,
549 int (*callback)(struct musb *musb, void *data),
550 void *data);
551
543static inline void musb_platform_set_vbus(struct musb *musb, int is_on) 552static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
544{ 553{
545 if (musb->ops->set_vbus) 554 if (musb->ops->set_vbus)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 0f17d2140db6..feae1561b9ab 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb)
185 musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); 185 musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
186 musb_writel(reg_base, wrp->epintr_clear, 186 musb_writel(reg_base, wrp->epintr_clear,
187 wrp->txep_bitmap | wrp->rxep_bitmap); 187 wrp->txep_bitmap | wrp->rxep_bitmap);
188 del_timer_sync(&glue->timer);
188 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 189 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
189} 190}
190 191
191static void otg_timer(unsigned long _musb) 192/* Caller must take musb->lock */
193static int dsps_check_status(struct musb *musb, void *unused)
192{ 194{
193 struct musb *musb = (void *)_musb;
194 void __iomem *mregs = musb->mregs; 195 void __iomem *mregs = musb->mregs;
195 struct device *dev = musb->controller; 196 struct device *dev = musb->controller;
196 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 197 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
197 const struct dsps_musb_wrapper *wrp = glue->wrp; 198 const struct dsps_musb_wrapper *wrp = glue->wrp;
198 u8 devctl; 199 u8 devctl;
199 unsigned long flags;
200 int skip_session = 0; 200 int skip_session = 0;
201 int err;
202
203 err = pm_runtime_get_sync(dev);
204 if (err < 0)
205 dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
206 201
207 /* 202 /*
208 * We poll because DSPS IP's won't expose several OTG-critical 203 * We poll because DSPS IP's won't expose several OTG-critical
@@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb)
212 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, 207 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
213 usb_otg_state_string(musb->xceiv->otg->state)); 208 usb_otg_state_string(musb->xceiv->otg->state));
214 209
215 spin_lock_irqsave(&musb->lock, flags);
216 switch (musb->xceiv->otg->state) { 210 switch (musb->xceiv->otg->state) {
217 case OTG_STATE_A_WAIT_VRISE: 211 case OTG_STATE_A_WAIT_VRISE:
218 mod_timer(&glue->timer, jiffies + 212 mod_timer(&glue->timer, jiffies +
@@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb)
245 default: 239 default:
246 break; 240 break;
247 } 241 }
248 spin_unlock_irqrestore(&musb->lock, flags);
249 242
243 return 0;
244}
245
246static void otg_timer(unsigned long _musb)
247{
248 struct musb *musb = (void *)_musb;
249 struct device *dev = musb->controller;
250 unsigned long flags;
251 int err;
252
253 err = pm_runtime_get(dev);
254 if ((err != -EINPROGRESS) && err < 0) {
255 dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
256 pm_runtime_put_noidle(dev);
257
258 return;
259 }
260
261 spin_lock_irqsave(&musb->lock, flags);
262 err = musb_queue_resume_work(musb, dsps_check_status, NULL);
263 if (err < 0)
264 dev_err(dev, "%s resume work: %i\n", __func__, err);
265 spin_unlock_irqrestore(&musb->lock, flags);
250 pm_runtime_mark_last_busy(dev); 266 pm_runtime_mark_last_busy(dev);
251 pm_runtime_put_autosuspend(dev); 267 pm_runtime_put_autosuspend(dev);
252} 268}
@@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev)
767 783
768 platform_set_drvdata(pdev, glue); 784 platform_set_drvdata(pdev, glue);
769 pm_runtime_enable(&pdev->dev); 785 pm_runtime_enable(&pdev->dev);
770 pm_runtime_use_autosuspend(&pdev->dev);
771 pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
772
773 ret = pm_runtime_get_sync(&pdev->dev);
774 if (ret < 0) {
775 dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
776 goto err2;
777 }
778
779 ret = dsps_create_musb_pdev(glue, pdev); 786 ret = dsps_create_musb_pdev(glue, pdev);
780 if (ret) 787 if (ret)
781 goto err3; 788 goto err;
782
783 pm_runtime_mark_last_busy(&pdev->dev);
784 pm_runtime_put_autosuspend(&pdev->dev);
785 789
786 return 0; 790 return 0;
787 791
788err3: 792err:
789 pm_runtime_put_sync(&pdev->dev);
790err2:
791 pm_runtime_dont_use_autosuspend(&pdev->dev);
792 pm_runtime_disable(&pdev->dev); 793 pm_runtime_disable(&pdev->dev);
793 return ret; 794 return ret;
794} 795}
@@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev)
799 800
800 platform_device_unregister(glue->musb); 801 platform_device_unregister(glue->musb);
801 802
802 /* disable usbss clocks */
803 pm_runtime_dont_use_autosuspend(&pdev->dev);
804 pm_runtime_put_sync(&pdev->dev);
805 pm_runtime_disable(&pdev->dev); 803 pm_runtime_disable(&pdev->dev);
806 804
807 return 0; 805 return 0;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 4042ea017985..a55173c9e564 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
1114 musb_ep->dma ? "dma, " : "", 1114 musb_ep->dma ? "dma, " : "",
1115 musb_ep->packet_sz); 1115 musb_ep->packet_sz);
1116 1116
1117 schedule_work(&musb->irq_work); 1117 schedule_delayed_work(&musb->irq_work, 0);
1118 1118
1119fail: 1119fail:
1120 spin_unlock_irqrestore(&musb->lock, flags); 1120 spin_unlock_irqrestore(&musb->lock, flags);
@@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
1158 musb_ep->desc = NULL; 1158 musb_ep->desc = NULL;
1159 musb_ep->end_point.desc = NULL; 1159 musb_ep->end_point.desc = NULL;
1160 1160
1161 schedule_work(&musb->irq_work); 1161 schedule_delayed_work(&musb->irq_work, 0);
1162 1162
1163 spin_unlock_irqrestore(&(musb->lock), flags); 1163 spin_unlock_irqrestore(&(musb->lock), flags);
1164 1164
@@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req)
1222 rxstate(musb, req); 1222 rxstate(musb, req);
1223} 1223}
1224 1224
1225static int musb_ep_restart_resume_work(struct musb *musb, void *data)
1226{
1227 struct musb_request *req = data;
1228
1229 musb_ep_restart(musb, req);
1230
1231 return 0;
1232}
1233
1225static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, 1234static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1226 gfp_t gfp_flags) 1235 gfp_t gfp_flags)
1227{ 1236{
1228 struct musb_ep *musb_ep; 1237 struct musb_ep *musb_ep;
1229 struct musb_request *request; 1238 struct musb_request *request;
1230 struct musb *musb; 1239 struct musb *musb;
1231 int status = 0; 1240 int status;
1232 unsigned long lockflags; 1241 unsigned long lockflags;
1233 1242
1234 if (!ep || !req) 1243 if (!ep || !req)
@@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1245 if (request->ep != musb_ep) 1254 if (request->ep != musb_ep)
1246 return -EINVAL; 1255 return -EINVAL;
1247 1256
1257 status = pm_runtime_get(musb->controller);
1258 if ((status != -EINPROGRESS) && status < 0) {
1259 dev_err(musb->controller,
1260 "pm runtime get failed in %s\n",
1261 __func__);
1262 pm_runtime_put_noidle(musb->controller);
1263
1264 return status;
1265 }
1266 status = 0;
1267
1248 trace_musb_req_enq(request); 1268 trace_musb_req_enq(request);
1249 1269
1250 /* request is mine now... */ 1270 /* request is mine now... */
@@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1255 1275
1256 map_dma_buffer(request, musb, musb_ep); 1276 map_dma_buffer(request, musb, musb_ep);
1257 1277
1258 pm_runtime_get_sync(musb->controller);
1259 spin_lock_irqsave(&musb->lock, lockflags); 1278 spin_lock_irqsave(&musb->lock, lockflags);
1260 1279
1261 /* don't queue if the ep is down */ 1280 /* don't queue if the ep is down */
@@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1271 list_add_tail(&request->list, &musb_ep->req_list); 1290 list_add_tail(&request->list, &musb_ep->req_list);
1272 1291
1273 /* it this is the head of the queue, start i/o ... */ 1292 /* it this is the head of the queue, start i/o ... */
1274 if (!musb_ep->busy && &request->list == musb_ep->req_list.next) 1293 if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
1275 musb_ep_restart(musb, request); 1294 status = musb_queue_resume_work(musb,
1295 musb_ep_restart_resume_work,
1296 request);
1297 if (status < 0)
1298 dev_err(musb->controller, "%s resume work: %i\n",
1299 __func__, status);
1300 }
1276 1301
1277unlock: 1302unlock:
1278 spin_unlock_irqrestore(&musb->lock, lockflags); 1303 spin_unlock_irqrestore(&musb->lock, lockflags);
@@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
1969 */ 1994 */
1970 1995
1971 /* Force check of devctl register for PM runtime */ 1996 /* Force check of devctl register for PM runtime */
1972 schedule_work(&musb->irq_work); 1997 schedule_delayed_work(&musb->irq_work, 0);
1973 1998
1974 pm_runtime_mark_last_busy(musb->controller); 1999 pm_runtime_mark_last_busy(musb->controller);
1975 pm_runtime_put_autosuspend(musb->controller); 2000 pm_runtime_put_autosuspend(musb->controller);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index cc1225485509..e8be8e39ab8f 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev)
513 } 513 }
514 514
515 pm_runtime_enable(glue->dev); 515 pm_runtime_enable(glue->dev);
516 pm_runtime_use_autosuspend(glue->dev);
517 pm_runtime_set_autosuspend_delay(glue->dev, 100);
518 516
519 ret = platform_device_add(musb); 517 ret = platform_device_add(musb);
520 if (ret) { 518 if (ret) {
521 dev_err(&pdev->dev, "failed to register musb device\n"); 519 dev_err(&pdev->dev, "failed to register musb device\n");
522 goto err2; 520 goto err3;
523 } 521 }
524 522
525 return 0; 523 return 0;
526 524
525err3:
526 pm_runtime_disable(glue->dev);
527
527err2: 528err2:
528 platform_device_put(musb); 529 platform_device_put(musb);
529 530
@@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev)
535{ 536{
536 struct omap2430_glue *glue = platform_get_drvdata(pdev); 537 struct omap2430_glue *glue = platform_get_drvdata(pdev);
537 538
538 pm_runtime_get_sync(glue->dev);
539 platform_device_unregister(glue->musb); 539 platform_device_unregister(glue->musb);
540 pm_runtime_put_sync(glue->dev);
541 pm_runtime_dont_use_autosuspend(glue->dev);
542 pm_runtime_disable(glue->dev); 540 pm_runtime_disable(glue->dev);
543 541
544 return 0; 542 return 0;
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index df7c9f46be54..e85cc8e4e7a9 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
724 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", 724 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
725 usb_otg_state_string(musb->xceiv->otg->state), otg_stat); 725 usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
726 idle_timeout = jiffies + (1 * HZ); 726 idle_timeout = jiffies + (1 * HZ);
727 schedule_work(&musb->irq_work); 727 schedule_delayed_work(&musb->irq_work, 0);
728 728
729 } else /* A-dev state machine */ { 729 } else /* A-dev state machine */ {
730 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", 730 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
@@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
814 break; 814 break;
815 } 815 }
816 } 816 }
817 schedule_work(&musb->irq_work); 817 schedule_delayed_work(&musb->irq_work, 0);
818 818
819 return idle_timeout; 819 return idle_timeout;
820} 820}
@@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
864 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); 864 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
865 if (reg & ~TUSB_PRCM_WNORCS) { 865 if (reg & ~TUSB_PRCM_WNORCS) {
866 musb->is_active = 1; 866 musb->is_active = 1;
867 schedule_work(&musb->irq_work); 867 schedule_delayed_work(&musb->irq_work, 0);
868 } 868 }
869 dev_dbg(musb->controller, "wake %sactive %02x\n", 869 dev_dbg(musb->controller, "wake %sactive %02x\n",
870 musb->is_active ? "" : "in", reg); 870 musb->is_active ? "" : "in", reg);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f61477bed3a8..243ac5ebe46a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = {
131 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 131 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
132 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 132 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
133 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 133 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
134 { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
134 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ 135 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
135 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ 136 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
136 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ 137 { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 0ff7f38d7800..6e9fc8bcc285 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = {
1012 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, 1012 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
1013 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, 1013 { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
1014 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, 1014 { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
1015 { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
1016 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1015 { } /* Terminating entry */ 1017 { } /* Terminating entry */
1016}; 1018};
1017 1019
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21011c0a4c64..48ee04c94a75 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -596,6 +596,12 @@
596#define STK541_PID 0x2109 /* Zigbee Controller */ 596#define STK541_PID 0x2109 /* Zigbee Controller */
597 597
598/* 598/*
599 * Texas Instruments
600 */
601#define TI_VID 0x0451
602#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
603
604/*
599 * Blackfin gnICE JTAG 605 * Blackfin gnICE JTAG
600 * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice 606 * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
601 */ 607 */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index ffd086733421..1a59f335b063 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
954 954
955 /* COMMAND STAGE */ 955 /* COMMAND STAGE */
956 /* let's send the command via the control pipe */ 956 /* let's send the command via the control pipe */
957 /*
958 * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
959 * Stack may be vmallocated. So no DMA for us. Make a copy.
960 */
961 memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
957 result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, 962 result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
958 US_CBI_ADSC, 963 US_CBI_ADSC,
959 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, 964 USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
960 us->ifnum, srb->cmnd, srb->cmd_len); 965 us->ifnum, us->iobuf, srb->cmd_len);
961 966
962 /* check the return code for the command */ 967 /* check the return code for the command */
963 usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", 968 usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
index 19ad8645d93c..e5d9bfc1703a 100644
--- a/drivers/video/fbdev/amba-clcd-versatile.c
+++ b/drivers/video/fbdev/amba-clcd-versatile.c
@@ -526,8 +526,8 @@ int versatile_clcd_init_panel(struct clcd_fb *fb,
526 np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, 526 np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
527 &clcd_id); 527 &clcd_id);
528 if (!np) { 528 if (!np) {
529 dev_err(dev, "no Versatile syscon node\n"); 529 /* Vexpress does not have this */
530 return -ENODEV; 530 return 0;
531 } 531 }
532 versatile_clcd_type = (enum versatile_clcd)clcd_id->data; 532 versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
533 533
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index fdd3228e0678..3eb58cb51e56 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -155,6 +155,7 @@ config TANGOX_WATCHDOG
155config WDAT_WDT 155config WDAT_WDT
156 tristate "ACPI Watchdog Action Table (WDAT)" 156 tristate "ACPI Watchdog Action Table (WDAT)"
157 depends on ACPI 157 depends on ACPI
158 select WATCHDOG_CORE
158 select ACPI_WATCHDOG 159 select ACPI_WATCHDOG
159 help 160 help
160 This driver adds support for systems with ACPI Watchdog Action 161 This driver adds support for systems with ACPI Watchdog Action
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 8347c90cf483..5eb04129f938 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -808,7 +808,11 @@ calc_seckey(struct cifs_ses *ses)
808 struct crypto_skcipher *tfm_arc4; 808 struct crypto_skcipher *tfm_arc4;
809 struct scatterlist sgin, sgout; 809 struct scatterlist sgin, sgout;
810 struct skcipher_request *req; 810 struct skcipher_request *req;
811 unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ 811 unsigned char *sec_key;
812
813 sec_key = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL);
814 if (sec_key == NULL)
815 return -ENOMEM;
812 816
813 get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); 817 get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
814 818
@@ -816,7 +820,7 @@ calc_seckey(struct cifs_ses *ses)
816 if (IS_ERR(tfm_arc4)) { 820 if (IS_ERR(tfm_arc4)) {
817 rc = PTR_ERR(tfm_arc4); 821 rc = PTR_ERR(tfm_arc4);
818 cifs_dbg(VFS, "could not allocate crypto API arc4\n"); 822 cifs_dbg(VFS, "could not allocate crypto API arc4\n");
819 return rc; 823 goto out;
820 } 824 }
821 825
822 rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response, 826 rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response,
@@ -854,7 +858,8 @@ calc_seckey(struct cifs_ses *ses)
854 858
855out_free_cipher: 859out_free_cipher:
856 crypto_free_skcipher(tfm_arc4); 860 crypto_free_skcipher(tfm_arc4);
857 861out:
862 kfree(sec_key);
858 return rc; 863 return rc;
859} 864}
860 865
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3f3185febc58..e3fed9249a04 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -3427,6 +3427,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
3427 __u16 rc = 0; 3427 __u16 rc = 0;
3428 struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data; 3428 struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
3429 struct posix_acl_xattr_header *local_acl = (void *)pACL; 3429 struct posix_acl_xattr_header *local_acl = (void *)pACL;
3430 struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
3430 int count; 3431 int count;
3431 int i; 3432 int i;
3432 3433
@@ -3453,8 +3454,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
3453 return 0; 3454 return 0;
3454 } 3455 }
3455 for (i = 0; i < count; i++) { 3456 for (i = 0; i < count; i++) {
3456 rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], 3457 rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]);
3457 (struct posix_acl_xattr_entry *)(local_acl + 1));
3458 if (rc != 0) { 3458 if (rc != 0) {
3459 /* ACE not converted */ 3459 /* ACE not converted */
3460 break; 3460 break;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index aab5227979e2..4547aeddd12b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -412,6 +412,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
412 } 412 }
413 } while (server->tcpStatus == CifsNeedReconnect); 413 } while (server->tcpStatus == CifsNeedReconnect);
414 414
415 if (server->tcpStatus == CifsNeedNegotiate)
416 mod_delayed_work(cifsiod_wq, &server->echo, 0);
417
415 return rc; 418 return rc;
416} 419}
417 420
@@ -421,17 +424,25 @@ cifs_echo_request(struct work_struct *work)
421 int rc; 424 int rc;
422 struct TCP_Server_Info *server = container_of(work, 425 struct TCP_Server_Info *server = container_of(work,
423 struct TCP_Server_Info, echo.work); 426 struct TCP_Server_Info, echo.work);
424 unsigned long echo_interval = server->echo_interval; 427 unsigned long echo_interval;
428
429 /*
430 * If we need to renegotiate, set echo interval to zero to
431 * immediately call echo service where we can renegotiate.
432 */
433 if (server->tcpStatus == CifsNeedNegotiate)
434 echo_interval = 0;
435 else
436 echo_interval = server->echo_interval;
425 437
426 /* 438 /*
427 * We cannot send an echo if it is disabled or until the 439 * We cannot send an echo if it is disabled.
428 * NEGOTIATE_PROTOCOL request is done, which is indicated by 440 * Also, no need to ping if we got a response recently.
429 * server->ops->need_neg() == true. Also, no need to ping if
430 * we got a response recently.
431 */ 441 */
432 442
433 if (server->tcpStatus == CifsNeedReconnect || 443 if (server->tcpStatus == CifsNeedReconnect ||
434 server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || 444 server->tcpStatus == CifsExiting ||
445 server->tcpStatus == CifsNew ||
435 (server->ops->can_echo && !server->ops->can_echo(server)) || 446 (server->ops->can_echo && !server->ops->can_echo(server)) ||
436 time_before(jiffies, server->lstrp + echo_interval - HZ)) 447 time_before(jiffies, server->lstrp + echo_interval - HZ))
437 goto requeue_echo; 448 goto requeue_echo;
@@ -442,7 +453,7 @@ cifs_echo_request(struct work_struct *work)
442 server->hostname); 453 server->hostname);
443 454
444requeue_echo: 455requeue_echo:
445 queue_delayed_work(cifsiod_wq, &server->echo, echo_interval); 456 queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
446} 457}
447 458
448static bool 459static bool
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 9a28133ac3b8..9b774f4b50c8 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -39,65 +39,54 @@ static void fname_crypt_complete(struct crypto_async_request *req, int res)
39static int fname_encrypt(struct inode *inode, 39static int fname_encrypt(struct inode *inode,
40 const struct qstr *iname, struct fscrypt_str *oname) 40 const struct qstr *iname, struct fscrypt_str *oname)
41{ 41{
42 u32 ciphertext_len;
43 struct skcipher_request *req = NULL; 42 struct skcipher_request *req = NULL;
44 DECLARE_FS_COMPLETION_RESULT(ecr); 43 DECLARE_FS_COMPLETION_RESULT(ecr);
45 struct fscrypt_info *ci = inode->i_crypt_info; 44 struct fscrypt_info *ci = inode->i_crypt_info;
46 struct crypto_skcipher *tfm = ci->ci_ctfm; 45 struct crypto_skcipher *tfm = ci->ci_ctfm;
47 int res = 0; 46 int res = 0;
48 char iv[FS_CRYPTO_BLOCK_SIZE]; 47 char iv[FS_CRYPTO_BLOCK_SIZE];
49 struct scatterlist src_sg, dst_sg; 48 struct scatterlist sg;
50 int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); 49 int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
51 char *workbuf, buf[32], *alloc_buf = NULL; 50 unsigned int lim;
52 unsigned lim; 51 unsigned int cryptlen;
53 52
54 lim = inode->i_sb->s_cop->max_namelen(inode); 53 lim = inode->i_sb->s_cop->max_namelen(inode);
55 if (iname->len <= 0 || iname->len > lim) 54 if (iname->len <= 0 || iname->len > lim)
56 return -EIO; 55 return -EIO;
57 56
58 ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE); 57 /*
59 ciphertext_len = round_up(ciphertext_len, padding); 58 * Copy the filename to the output buffer for encrypting in-place and
60 ciphertext_len = min(ciphertext_len, lim); 59 * pad it with the needed number of NUL bytes.
60 */
61 cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
62 cryptlen = round_up(cryptlen, padding);
63 cryptlen = min(cryptlen, lim);
64 memcpy(oname->name, iname->name, iname->len);
65 memset(oname->name + iname->len, 0, cryptlen - iname->len);
61 66
62 if (ciphertext_len <= sizeof(buf)) { 67 /* Initialize the IV */
63 workbuf = buf; 68 memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
64 } else {
65 alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
66 if (!alloc_buf)
67 return -ENOMEM;
68 workbuf = alloc_buf;
69 }
70 69
71 /* Allocate request */ 70 /* Set up the encryption request */
72 req = skcipher_request_alloc(tfm, GFP_NOFS); 71 req = skcipher_request_alloc(tfm, GFP_NOFS);
73 if (!req) { 72 if (!req) {
74 printk_ratelimited(KERN_ERR 73 printk_ratelimited(KERN_ERR
75 "%s: crypto_request_alloc() failed\n", __func__); 74 "%s: skcipher_request_alloc() failed\n", __func__);
76 kfree(alloc_buf);
77 return -ENOMEM; 75 return -ENOMEM;
78 } 76 }
79 skcipher_request_set_callback(req, 77 skcipher_request_set_callback(req,
80 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 78 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
81 fname_crypt_complete, &ecr); 79 fname_crypt_complete, &ecr);
80 sg_init_one(&sg, oname->name, cryptlen);
81 skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
82 82
83 /* Copy the input */ 83 /* Do the encryption */
84 memcpy(workbuf, iname->name, iname->len);
85 if (iname->len < ciphertext_len)
86 memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
87
88 /* Initialize IV */
89 memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
90
91 /* Create encryption request */
92 sg_init_one(&src_sg, workbuf, ciphertext_len);
93 sg_init_one(&dst_sg, oname->name, ciphertext_len);
94 skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
95 res = crypto_skcipher_encrypt(req); 84 res = crypto_skcipher_encrypt(req);
96 if (res == -EINPROGRESS || res == -EBUSY) { 85 if (res == -EINPROGRESS || res == -EBUSY) {
86 /* Request is being completed asynchronously; wait for it */
97 wait_for_completion(&ecr.completion); 87 wait_for_completion(&ecr.completion);
98 res = ecr.res; 88 res = ecr.res;
99 } 89 }
100 kfree(alloc_buf);
101 skcipher_request_free(req); 90 skcipher_request_free(req);
102 if (res < 0) { 91 if (res < 0) {
103 printk_ratelimited(KERN_ERR 92 printk_ratelimited(KERN_ERR
@@ -105,7 +94,7 @@ static int fname_encrypt(struct inode *inode,
105 return res; 94 return res;
106 } 95 }
107 96
108 oname->len = ciphertext_len; 97 oname->len = cryptlen;
109 return 0; 98 return 0;
110} 99}
111 100
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 82f0285f5d08..67fb6d8876d0 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -185,7 +185,7 @@ int get_crypt_info(struct inode *inode)
185 struct crypto_skcipher *ctfm; 185 struct crypto_skcipher *ctfm;
186 const char *cipher_str; 186 const char *cipher_str;
187 int keysize; 187 int keysize;
188 u8 raw_key[FS_MAX_KEY_SIZE]; 188 u8 *raw_key = NULL;
189 int res; 189 int res;
190 190
191 res = fscrypt_initialize(); 191 res = fscrypt_initialize();
@@ -238,6 +238,15 @@ retry:
238 if (res) 238 if (res)
239 goto out; 239 goto out;
240 240
241 /*
242 * This cannot be a stack buffer because it is passed to the scatterlist
243 * crypto API as part of key derivation.
244 */
245 res = -ENOMEM;
246 raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
247 if (!raw_key)
248 goto out;
249
241 if (fscrypt_dummy_context_enabled(inode)) { 250 if (fscrypt_dummy_context_enabled(inode)) {
242 memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); 251 memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
243 goto got_key; 252 goto got_key;
@@ -276,7 +285,8 @@ got_key:
276 if (res) 285 if (res)
277 goto out; 286 goto out;
278 287
279 memzero_explicit(raw_key, sizeof(raw_key)); 288 kzfree(raw_key);
289 raw_key = NULL;
280 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { 290 if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
281 put_crypt_info(crypt_info); 291 put_crypt_info(crypt_info);
282 goto retry; 292 goto retry;
@@ -287,7 +297,7 @@ out:
287 if (res == -ENOKEY) 297 if (res == -ENOKEY)
288 res = 0; 298 res = 0;
289 put_crypt_info(crypt_info); 299 put_crypt_info(crypt_info);
290 memzero_explicit(raw_key, sizeof(raw_key)); 300 kzfree(raw_key);
291 return res; 301 return res;
292} 302}
293 303
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 282a51b07c57..a8a750f59621 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -235,6 +235,7 @@ struct ext4_io_submit {
235#define EXT4_MAX_BLOCK_SIZE 65536 235#define EXT4_MAX_BLOCK_SIZE 65536
236#define EXT4_MIN_BLOCK_LOG_SIZE 10 236#define EXT4_MIN_BLOCK_LOG_SIZE 10
237#define EXT4_MAX_BLOCK_LOG_SIZE 16 237#define EXT4_MAX_BLOCK_LOG_SIZE 16
238#define EXT4_MAX_CLUSTER_LOG_SIZE 30
238#ifdef __KERNEL__ 239#ifdef __KERNEL__
239# define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) 240# define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize)
240#else 241#else
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 20da99da0a34..52b0530c5d65 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3565,7 +3565,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3565 if (blocksize < EXT4_MIN_BLOCK_SIZE || 3565 if (blocksize < EXT4_MIN_BLOCK_SIZE ||
3566 blocksize > EXT4_MAX_BLOCK_SIZE) { 3566 blocksize > EXT4_MAX_BLOCK_SIZE) {
3567 ext4_msg(sb, KERN_ERR, 3567 ext4_msg(sb, KERN_ERR,
3568 "Unsupported filesystem blocksize %d", blocksize); 3568 "Unsupported filesystem blocksize %d (%d log_block_size)",
3569 blocksize, le32_to_cpu(es->s_log_block_size));
3570 goto failed_mount;
3571 }
3572 if (le32_to_cpu(es->s_log_block_size) >
3573 (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3574 ext4_msg(sb, KERN_ERR,
3575 "Invalid log block size: %u",
3576 le32_to_cpu(es->s_log_block_size));
3569 goto failed_mount; 3577 goto failed_mount;
3570 } 3578 }
3571 3579
@@ -3697,6 +3705,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3697 "block size (%d)", clustersize, blocksize); 3705 "block size (%d)", clustersize, blocksize);
3698 goto failed_mount; 3706 goto failed_mount;
3699 } 3707 }
3708 if (le32_to_cpu(es->s_log_cluster_size) >
3709 (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
3710 ext4_msg(sb, KERN_ERR,
3711 "Invalid log cluster size: %u",
3712 le32_to_cpu(es->s_log_cluster_size));
3713 goto failed_mount;
3714 }
3700 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - 3715 sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
3701 le32_to_cpu(es->s_log_block_size); 3716 le32_to_cpu(es->s_log_block_size);
3702 sbi->s_clusters_per_group = 3717 sbi->s_clusters_per_group =
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 6a4d0e5418a1..b3ebe512d64c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -286,6 +286,11 @@ const struct dentry_operations fuse_dentry_operations = {
286 .d_release = fuse_dentry_release, 286 .d_release = fuse_dentry_release,
287}; 287};
288 288
289const struct dentry_operations fuse_root_dentry_operations = {
290 .d_init = fuse_dentry_init,
291 .d_release = fuse_dentry_release,
292};
293
289int fuse_valid_type(int m) 294int fuse_valid_type(int m)
290{ 295{
291 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || 296 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index abc66a6237fd..2401c5dabb2a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1985{ 1985{
1986 struct inode *inode = page->mapping->host; 1986 struct inode *inode = page->mapping->host;
1987 1987
1988 /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
1989 if (!copied)
1990 goto unlock;
1991
1988 if (!PageUptodate(page)) { 1992 if (!PageUptodate(page)) {
1989 /* Zero any unwritten bytes at the end of the page */ 1993 /* Zero any unwritten bytes at the end of the page */
1990 size_t endoff = (pos + copied) & ~PAGE_MASK; 1994 size_t endoff = (pos + copied) & ~PAGE_MASK;
@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
1995 1999
1996 fuse_write_update_size(inode, pos + copied); 2000 fuse_write_update_size(inode, pos + copied);
1997 set_page_dirty(page); 2001 set_page_dirty(page);
2002
2003unlock:
1998 unlock_page(page); 2004 unlock_page(page);
1999 put_page(page); 2005 put_page(page);
2000 2006
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0dfbb136e59a..91307940c8ac 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -692,6 +692,7 @@ static inline u64 get_node_id(struct inode *inode)
692extern const struct file_operations fuse_dev_operations; 692extern const struct file_operations fuse_dev_operations;
693 693
694extern const struct dentry_operations fuse_dentry_operations; 694extern const struct dentry_operations fuse_dentry_operations;
695extern const struct dentry_operations fuse_root_dentry_operations;
695 696
696/** 697/**
697 * Inode to nodeid comparison. 698 * Inode to nodeid comparison.
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 17141099f2e7..6fe6a88ecb4a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1131,10 +1131,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1131 1131
1132 err = -ENOMEM; 1132 err = -ENOMEM;
1133 root = fuse_get_root_inode(sb, d.rootmode); 1133 root = fuse_get_root_inode(sb, d.rootmode);
1134 sb->s_d_op = &fuse_root_dentry_operations;
1134 root_dentry = d_make_root(root); 1135 root_dentry = d_make_root(root);
1135 if (!root_dentry) 1136 if (!root_dentry)
1136 goto err_dev_free; 1137 goto err_dev_free;
1137 /* only now - we want root dentry with NULL ->d_op */ 1138 /* Root dentry doesn't have .d_revalidate */
1138 sb->s_d_op = &fuse_dentry_operations; 1139 sb->s_d_op = &fuse_dentry_operations;
1139 1140
1140 init_req = fuse_request_alloc(0); 1141 init_req = fuse_request_alloc(0);
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 98b3eb7d8eaf..0ec137310320 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -377,9 +377,9 @@ repeat:
377 { 377 {
378 int p; 378 int p;
379 for (p = 0; p < rr->u.ER.len_id; p++) 379 for (p = 0; p < rr->u.ER.len_id; p++)
380 printk("%c", rr->u.ER.data[p]); 380 printk(KERN_CONT "%c", rr->u.ER.data[p]);
381 } 381 }
382 printk("\n"); 382 printk(KERN_CONT "\n");
383 break; 383 break;
384 case SIG('P', 'X'): 384 case SIG('P', 'X'):
385 inode->i_mode = isonum_733(rr->u.PX.mode); 385 inode->i_mode = isonum_733(rr->u.PX.mode);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 532d8e242d4d..484bebc20bca 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
197 } 197 }
198 198
199 ret = -EPROTONOSUPPORT; 199 ret = -EPROTONOSUPPORT;
200 if (minorversion == 0) 200 if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
201 ret = nfs4_callback_up_net(serv, net); 201 ret = nfs4_callback_up_net(serv, net);
202 else if (xprt->ops->bc_up) 202 else if (xprt->ops->bc_up)
203 ret = xprt->ops->bc_up(serv, net); 203 ret = xprt->ops->bc_up(serv, net);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9b3a82abab07..1452177c822d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
542 return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0; 542 return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
543} 543}
544 544
545static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state,
546 const nfs4_stateid *stateid)
547{
548 return test_bit(NFS_OPEN_STATE, &state->flags) &&
549 nfs4_stateid_match_other(&state->open_stateid, stateid);
550}
551
545#else 552#else
546 553
547#define nfs4_close_state(a, b) do { } while (0) 554#define nfs4_close_state(a, b) do { } while (0)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7897826d7c51..241da19b7da4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1451} 1451}
1452 1452
1453static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1453static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1454 nfs4_stateid *arg_stateid,
1455 nfs4_stateid *stateid, fmode_t fmode) 1454 nfs4_stateid *stateid, fmode_t fmode)
1456{ 1455{
1457 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1456 clear_bit(NFS_O_RDWR_STATE, &state->flags);
@@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1469 } 1468 }
1470 if (stateid == NULL) 1469 if (stateid == NULL)
1471 return; 1470 return;
1472 /* Handle races with OPEN */ 1471 /* Handle OPEN+OPEN_DOWNGRADE races */
1473 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1472 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1474 (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1473 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1475 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1476 nfs_resync_open_stateid_locked(state); 1474 nfs_resync_open_stateid_locked(state);
1477 return; 1475 return;
1478 } 1476 }
@@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state,
1486 nfs4_stateid *stateid, fmode_t fmode) 1484 nfs4_stateid *stateid, fmode_t fmode)
1487{ 1485{
1488 write_seqlock(&state->seqlock); 1486 write_seqlock(&state->seqlock);
1489 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1487 /* Ignore, if the CLOSE argment doesn't match the current stateid */
1488 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1489 nfs_clear_open_stateid_locked(state, stateid, fmode);
1490 write_sequnlock(&state->seqlock); 1490 write_sequnlock(&state->seqlock);
1491 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1491 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1492 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1492 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
@@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2564static int nfs41_check_expired_locks(struct nfs4_state *state) 2564static int nfs41_check_expired_locks(struct nfs4_state *state)
2565{ 2565{
2566 int status, ret = NFS_OK; 2566 int status, ret = NFS_OK;
2567 struct nfs4_lock_state *lsp; 2567 struct nfs4_lock_state *lsp, *prev = NULL;
2568 struct nfs_server *server = NFS_SERVER(state->inode); 2568 struct nfs_server *server = NFS_SERVER(state->inode);
2569 2569
2570 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2570 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2571 goto out; 2571 goto out;
2572
2573 spin_lock(&state->state_lock);
2572 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 2574 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2573 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 2575 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2574 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 2576 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
2575 2577
2578 atomic_inc(&lsp->ls_count);
2579 spin_unlock(&state->state_lock);
2580
2581 nfs4_put_lock_state(prev);
2582 prev = lsp;
2583
2576 status = nfs41_test_and_free_expired_stateid(server, 2584 status = nfs41_test_and_free_expired_stateid(server,
2577 &lsp->ls_stateid, 2585 &lsp->ls_stateid,
2578 cred); 2586 cred);
@@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
2585 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2593 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2586 } else if (status != NFS_OK) { 2594 } else if (status != NFS_OK) {
2587 ret = status; 2595 ret = status;
2588 break; 2596 nfs4_put_lock_state(prev);
2597 goto out;
2589 } 2598 }
2599 spin_lock(&state->state_lock);
2590 } 2600 }
2591 }; 2601 }
2602 spin_unlock(&state->state_lock);
2603 nfs4_put_lock_state(prev);
2592out: 2604out:
2593 return ret; 2605 return ret;
2594} 2606}
@@ -3122,7 +3134,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
3122 } else if (is_rdwr) 3134 } else if (is_rdwr)
3123 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3135 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3124 3136
3125 if (!nfs4_valid_open_stateid(state)) 3137 if (!nfs4_valid_open_stateid(state) ||
3138 test_bit(NFS_OPEN_STATE, &state->flags) == 0)
3126 call_close = 0; 3139 call_close = 0;
3127 spin_unlock(&state->owner->so_lock); 3140 spin_unlock(&state->owner->so_lock);
3128 3141
@@ -5569,6 +5582,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5569 switch (task->tk_status) { 5582 switch (task->tk_status) {
5570 case 0: 5583 case 0:
5571 renew_lease(data->res.server, data->timestamp); 5584 renew_lease(data->res.server, data->timestamp);
5585 break;
5572 case -NFS4ERR_ADMIN_REVOKED: 5586 case -NFS4ERR_ADMIN_REVOKED:
5573 case -NFS4ERR_DELEG_REVOKED: 5587 case -NFS4ERR_DELEG_REVOKED:
5574 case -NFS4ERR_EXPIRED: 5588 case -NFS4ERR_EXPIRED:
@@ -5579,8 +5593,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5579 case -NFS4ERR_OLD_STATEID: 5593 case -NFS4ERR_OLD_STATEID:
5580 case -NFS4ERR_STALE_STATEID: 5594 case -NFS4ERR_STALE_STATEID:
5581 task->tk_status = 0; 5595 task->tk_status = 0;
5582 if (data->roc)
5583 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5584 break; 5596 break;
5585 default: 5597 default:
5586 if (nfs4_async_handle_error(task, data->res.server, 5598 if (nfs4_async_handle_error(task, data->res.server,
@@ -5590,6 +5602,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5590 } 5602 }
5591 } 5603 }
5592 data->rpc_status = task->tk_status; 5604 data->rpc_status = task->tk_status;
5605 if (data->roc && data->rpc_status == 0)
5606 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5593} 5607}
5594 5608
5595static void nfs4_delegreturn_release(void *calldata) 5609static void nfs4_delegreturn_release(void *calldata)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5f4281ec5f72..0959c9661662 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1547,6 +1547,7 @@ restart:
1547 ssleep(1); 1547 ssleep(1);
1548 case -NFS4ERR_ADMIN_REVOKED: 1548 case -NFS4ERR_ADMIN_REVOKED:
1549 case -NFS4ERR_STALE_STATEID: 1549 case -NFS4ERR_STALE_STATEID:
1550 case -NFS4ERR_OLD_STATEID:
1550 case -NFS4ERR_BAD_STATEID: 1551 case -NFS4ERR_BAD_STATEID:
1551 case -NFS4ERR_RECLAIM_BAD: 1552 case -NFS4ERR_RECLAIM_BAD:
1552 case -NFS4ERR_RECLAIM_CONFLICT: 1553 case -NFS4ERR_RECLAIM_CONFLICT:
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index d484068ca716..38887cc5577f 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -114,6 +114,7 @@ static const struct seq_operations help_debug_ops = {
114}; 114};
115 115
116const struct file_operations debug_help_fops = { 116const struct file_operations debug_help_fops = {
117 .owner = THIS_MODULE,
117 .open = orangefs_debug_help_open, 118 .open = orangefs_debug_help_open,
118 .read = seq_read, 119 .read = seq_read,
119 .release = seq_release, 120 .release = seq_release,
@@ -121,6 +122,7 @@ const struct file_operations debug_help_fops = {
121}; 122};
122 123
123static const struct file_operations kernel_debug_fops = { 124static const struct file_operations kernel_debug_fops = {
125 .owner = THIS_MODULE,
124 .open = orangefs_debug_open, 126 .open = orangefs_debug_open,
125 .read = orangefs_debug_read, 127 .read = orangefs_debug_read,
126 .write = orangefs_debug_write, 128 .write = orangefs_debug_write,
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index edd46a0e951d..0e100856c7b8 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -328,11 +328,11 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
328 if (!real) 328 if (!real)
329 goto bug; 329 goto bug;
330 330
331 /* Handle recursion */
332 real = d_real(real, inode, open_flags);
333
331 if (!inode || inode == d_inode(real)) 334 if (!inode || inode == d_inode(real))
332 return real; 335 return real;
333
334 /* Handle recursion */
335 return d_real(real, inode, open_flags);
336bug: 336bug:
337 WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry, 337 WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
338 inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0); 338 inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
diff --git a/fs/splice.c b/fs/splice.c
index dcaf185a5731..5a7750bd2eea 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -408,7 +408,8 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
408 if (res <= 0) 408 if (res <= 0)
409 return -ENOMEM; 409 return -ENOMEM;
410 410
411 nr_pages = res / PAGE_SIZE; 411 BUG_ON(dummy);
412 nr_pages = DIV_ROUND_UP(res, PAGE_SIZE);
412 413
413 vec = __vec; 414 vec = __vec;
414 if (nr_pages > PIPE_DEF_BUFFERS) { 415 if (nr_pages > PIPE_DEF_BUFFERS) {
diff --git a/fs/xattr.c b/fs/xattr.c
index 3368659c471e..2d13b4e62fae 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -170,7 +170,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
170 const void *value, size_t size, int flags) 170 const void *value, size_t size, int flags)
171{ 171{
172 struct inode *inode = dentry->d_inode; 172 struct inode *inode = dentry->d_inode;
173 int error = -EOPNOTSUPP; 173 int error = -EAGAIN;
174 int issec = !strncmp(name, XATTR_SECURITY_PREFIX, 174 int issec = !strncmp(name, XATTR_SECURITY_PREFIX,
175 XATTR_SECURITY_PREFIX_LEN); 175 XATTR_SECURITY_PREFIX_LEN);
176 176
@@ -183,15 +183,21 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
183 security_inode_post_setxattr(dentry, name, value, 183 security_inode_post_setxattr(dentry, name, value,
184 size, flags); 184 size, flags);
185 } 185 }
186 } else if (issec) { 186 } else {
187 const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
188
189 if (unlikely(is_bad_inode(inode))) 187 if (unlikely(is_bad_inode(inode)))
190 return -EIO; 188 return -EIO;
191 error = security_inode_setsecurity(inode, suffix, value, 189 }
192 size, flags); 190 if (error == -EAGAIN) {
193 if (!error) 191 error = -EOPNOTSUPP;
194 fsnotify_xattr(dentry); 192
193 if (issec) {
194 const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
195
196 error = security_inode_setsecurity(inode, suffix, value,
197 size, flags);
198 if (!error)
199 fsnotify_xattr(dentry);
200 }
195 } 201 }
196 202
197 return error; 203 return error;
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 1b949e08015c..c19700e2a2fe 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -230,72 +230,62 @@ struct acpi_table_facs {
230/* Fields common to all versions of the FADT */ 230/* Fields common to all versions of the FADT */
231 231
232struct acpi_table_fadt { 232struct acpi_table_fadt {
233 struct acpi_table_header header; /* [V1] Common ACPI table header */ 233 struct acpi_table_header header; /* Common ACPI table header */
234 u32 facs; /* [V1] 32-bit physical address of FACS */ 234 u32 facs; /* 32-bit physical address of FACS */
235 u32 dsdt; /* [V1] 32-bit physical address of DSDT */ 235 u32 dsdt; /* 32-bit physical address of DSDT */
236 u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ 236 u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */
237 u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */ 237 u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */
238 u16 sci_interrupt; /* [V1] System vector of SCI interrupt */ 238 u16 sci_interrupt; /* System vector of SCI interrupt */
239 u32 smi_command; /* [V1] 32-bit Port address of SMI command port */ 239 u32 smi_command; /* 32-bit Port address of SMI command port */
240 u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */ 240 u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */
241 u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */ 241 u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */
242 u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */ 242 u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */
243 u8 pstate_control; /* [V1] Processor performance state control */ 243 u8 pstate_control; /* Processor performance state control */
244 u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */ 244 u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */
245 u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */ 245 u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */
246 u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */ 246 u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */
247 u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */ 247 u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */
248 u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */ 248 u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */
249 u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ 249 u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */
250 u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */ 250 u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */
251 u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */ 251 u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */
252 u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */ 252 u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */
253 u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */ 253 u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */
254 u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */ 254 u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */
255 u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */ 255 u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */
256 u8 gpe0_block_length; /* [V1] Byte Length of ports at gpe0_block */ 256 u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */
257 u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */ 257 u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */
258 u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */ 258 u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */
259 u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */ 259 u8 cst_control; /* Support for the _CST object and C-States change notification */
260 u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */ 260 u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */
261 u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */ 261 u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */
262 u16 flush_size; /* [V1] Processor memory cache line width, in bytes */ 262 u16 flush_size; /* Processor memory cache line width, in bytes */
263 u16 flush_stride; /* [V1] Number of flush strides that need to be read */ 263 u16 flush_stride; /* Number of flush strides that need to be read */
264 u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */ 264 u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */
265 u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */ 265 u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */
266 u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */ 266 u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
267 u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */ 267 u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
268 u8 century; /* [V1] Index to century in RTC CMOS RAM */ 268 u8 century; /* Index to century in RTC CMOS RAM */
269 u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */ 269 u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */
270 u8 reserved; /* [V1] Reserved, must be zero */ 270 u8 reserved; /* Reserved, must be zero */
271 u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */ 271 u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
272 /* End of Version 1 FADT fields (ACPI 1.0) */ 272 struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
273 273 u8 reset_value; /* Value to write to the reset_register port to reset the system */
274 struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */ 274 u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */
275 u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */ 275 u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */
276 u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ 276 u64 Xfacs; /* 64-bit physical address of FACS */
277 u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */ 277 u64 Xdsdt; /* 64-bit physical address of DSDT */
278 u64 Xfacs; /* [V3] 64-bit physical address of FACS */ 278 struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */
279 u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */ 279 struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */
280 struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */ 280 struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */
281 struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */ 281 struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */
282 struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */ 282 struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */
283 struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */ 283 struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
284 struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */ 284 struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */
285 struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ 285 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
286 struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */ 286 struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */
287 struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */ 287 struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */
288 /* End of Version 3 FADT fields (ACPI 2.0) */ 288 u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */
289
290 struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */
291 /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */
292
293 struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */
294 /* End of Version 5 FADT fields (ACPI 5.0) */
295
296 u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */
297 /* End of Version 6 FADT fields (ACPI 6.0) */
298
299}; 289};
300 290
301/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ 291/* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */
@@ -311,8 +301,8 @@ struct acpi_table_fadt {
311 301
312/* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ 302/* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */
313 303
314#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */ 304#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */
315#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */ 305#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */
316 306
317/* Masks for FADT flags */ 307/* Masks for FADT flags */
318 308
@@ -409,34 +399,20 @@ struct acpi_table_desc {
409 * match the expected length. In other words, the length of the 399 * match the expected length. In other words, the length of the
410 * FADT is the bottom line as to what the version really is. 400 * FADT is the bottom line as to what the version really is.
411 * 401 *
412 * NOTE: There is no officialy released V2 of the FADT. This 402 * For reference, the values below are as follows:
413 * version was used only for prototyping and testing during the 403 * FADT V1 size: 0x074
414 * 32-bit to 64-bit transition. V3 was the first official 64-bit 404 * FADT V2 size: 0x084
415 * version of the FADT. 405 * FADT V3 size: 0x0F4
416 * 406 * FADT V4 size: 0x0F4
417 * Update this list of defines when a new version of the FADT is 407 * FADT V5 size: 0x10C
418 * added to the ACPI specification. Note that the FADT version is 408 * FADT V6 size: 0x114
419 * only incremented when new fields are appended to the existing
420 * version. Therefore, the FADT version is competely independent
421 * from the version of the ACPI specification where it is
422 * defined.
423 *
424 * For reference, the various FADT lengths are as follows:
425 * FADT V1 size: 0x074 ACPI 1.0
426 * FADT V3 size: 0x0F4 ACPI 2.0
427 * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0
428 * FADT V5 size: 0x10C ACPI 5.0
429 * FADT V6 size: 0x114 ACPI 6.0
430 */ 409 */
431#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */ 410#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
432#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */ 411#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1)
433#define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */ 412#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control))
434#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */ 413#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id))
435#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */ 414#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt))
436
437/* Update these when new FADT versions are added */
438 415
439#define ACPI_FADT_MAX_VERSION 6
440#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" 416#define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)"
441 417
442#endif /* __ACTBL_H__ */ 418#endif /* __ACTBL_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index a5d98d171866..e861a24f06f2 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -191,6 +191,9 @@
191#ifndef __init 191#ifndef __init
192#define __init 192#define __init
193#endif 193#endif
194#ifndef __iomem
195#define __iomem
196#endif
194 197
195/* Host-dependent types and defines for user-space ACPICA */ 198/* Host-dependent types and defines for user-space ACPICA */
196 199
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 63554e9f6e0c..59a3b2f58c22 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -54,6 +54,7 @@ KSYM(__kstrtab_\name):
54KSYM(__kcrctab_\name): 54KSYM(__kcrctab_\name):
55 __put KSYM(__crc_\name) 55 __put KSYM(__crc_\name)
56 .weak KSYM(__crc_\name) 56 .weak KSYM(__crc_\name)
57 .set KSYM(__crc_\name), 0
57 .previous 58 .previous
58#endif 59#endif
59#endif 60#endif
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7035b997aaa5..6aaf425cebc3 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -14,7 +14,7 @@
14 * are obviously wrong for any sort of memory access. 14 * are obviously wrong for any sort of memory access.
15 */ 15 */
16#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) 16#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
17#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) 17#define BPF_REGISTER_MIN_RANGE -1
18 18
19struct bpf_reg_state { 19struct bpf_reg_state {
20 enum bpf_reg_type type; 20 enum bpf_reg_type type;
@@ -22,7 +22,8 @@ struct bpf_reg_state {
22 * Used to determine if any memory access using this register will 22 * Used to determine if any memory access using this register will
23 * result in a bad access. 23 * result in a bad access.
24 */ 24 */
25 u64 min_value, max_value; 25 s64 min_value;
26 u64 max_value;
26 union { 27 union {
27 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ 28 /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
28 s64 imm; 29 s64 imm;
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 432f5c97e18f..928e5ca0caee 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -263,7 +263,9 @@
263#endif 263#endif
264#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ 264#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
265 265
266#if GCC_VERSION >= 50000 266#if GCC_VERSION >= 70000
267#define KASAN_ABI_VERSION 5
268#elif GCC_VERSION >= 50000
267#define KASAN_ABI_VERSION 4 269#define KASAN_ABI_VERSION 4
268#elif GCC_VERSION >= 40902 270#elif GCC_VERSION >= 40902
269#define KASAN_ABI_VERSION 3 271#define KASAN_ABI_VERSION 3
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9b9f65d99873..e35e6de633b9 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
22 unsigned char *vec); 22 unsigned char *vec);
23extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 23extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
24 unsigned long new_addr, unsigned long old_end, 24 unsigned long new_addr, unsigned long old_end,
25 pmd_t *old_pmd, pmd_t *new_pmd); 25 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
26extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, 26extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
27 unsigned long addr, pgprot_t newprot, 27 unsigned long addr, pgprot_t newprot,
28 int prot_numa); 28 int prot_numa);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2d9b650047a5..d49e26c6cdc7 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -429,6 +429,7 @@ struct intel_iommu {
429 struct page_req_dsc *prq; 429 struct page_req_dsc *prq;
430 unsigned char prq_name[16]; /* Name for PRQ interrupt */ 430 unsigned char prq_name[16]; /* Name for PRQ interrupt */
431 struct idr pasid_idr; 431 struct idr pasid_idr;
432 u32 pasid_max;
432#endif 433#endif
433 struct q_inval *qi; /* Queued invalidation info */ 434 struct q_inval *qi; /* Queued invalidation info */
434 u32 *iommu_state; /* Store iommu states between suspend and resume.*/ 435 u32 *iommu_state; /* Store iommu states between suspend and resume.*/
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ca1ad9ebbc92..a0649973ee5b 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -149,7 +149,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
149{ 149{
150#if defined(CONFIG_NET_L3_MASTER_DEV) 150#if defined(CONFIG_NET_L3_MASTER_DEV)
151 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 151 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
152 ipv6_l3mdev_skb(IP6CB(skb)->flags)) 152 skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
153 return true; 153 return true;
154#endif 154#endif
155 return false; 155 return false;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3be7abd6e722..c9f379689dd0 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,7 +476,6 @@ enum {
476enum { 476enum {
477 MLX4_INTERFACE_STATE_UP = 1 << 0, 477 MLX4_INTERFACE_STATE_UP = 1 << 0,
478 MLX4_INTERFACE_STATE_DELETION = 1 << 1, 478 MLX4_INTERFACE_STATE_DELETION = 1 << 1,
479 MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2,
480}; 479};
481 480
482#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ 481#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 91ee3643ccc8..e16a2a980ea8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1619,7 +1619,7 @@ enum netdev_priv_flags {
1619 * @dcbnl_ops: Data Center Bridging netlink ops 1619 * @dcbnl_ops: Data Center Bridging netlink ops
1620 * @num_tc: Number of traffic classes in the net device 1620 * @num_tc: Number of traffic classes in the net device
1621 * @tc_to_txq: XXX: need comments on this one 1621 * @tc_to_txq: XXX: need comments on this one
1622 * @prio_tc_map XXX: need comments on this one 1622 * @prio_tc_map: XXX: need comments on this one
1623 * 1623 *
1624 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp 1624 * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1625 * 1625 *
@@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
3354bool is_skb_forwardable(const struct net_device *dev, 3354bool is_skb_forwardable(const struct net_device *dev,
3355 const struct sk_buff *skb); 3355 const struct sk_buff *skb);
3356 3356
3357static __always_inline int ____dev_forward_skb(struct net_device *dev,
3358 struct sk_buff *skb)
3359{
3360 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
3361 unlikely(!is_skb_forwardable(dev, skb))) {
3362 atomic_long_inc(&dev->rx_dropped);
3363 kfree_skb(skb);
3364 return NET_RX_DROP;
3365 }
3366
3367 skb_scrub_packet(skb, true);
3368 skb->priority = 0;
3369 return 0;
3370}
3371
3357void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); 3372void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
3358 3373
3359extern int netdev_budget; 3374extern int netdev_budget;
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 2ab233661ae5..a58cca8bcb29 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -29,6 +29,7 @@ struct phy_device *of_phy_attach(struct net_device *dev,
29extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 29extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
30extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); 30extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
31extern int of_phy_register_fixed_link(struct device_node *np); 31extern int of_phy_register_fixed_link(struct device_node *np);
32extern void of_phy_deregister_fixed_link(struct device_node *np);
32extern bool of_phy_is_fixed_link(struct device_node *np); 33extern bool of_phy_is_fixed_link(struct device_node *np);
33 34
34#else /* CONFIG_OF */ 35#else /* CONFIG_OF */
@@ -83,6 +84,9 @@ static inline int of_phy_register_fixed_link(struct device_node *np)
83{ 84{
84 return -ENOSYS; 85 return -ENOSYS;
85} 86}
87static inline void of_phy_deregister_fixed_link(struct device_node *np)
88{
89}
86static inline bool of_phy_is_fixed_link(struct device_node *np) 90static inline bool of_phy_is_fixed_link(struct device_node *np)
87{ 91{
88 return false; 92 return false;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index dd15d39e1985..7dbe9148b2f8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -374,16 +374,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
374} 374}
375 375
376/* 376/*
377 * Get the offset in PAGE_SIZE. 377 * Get index of the page with in radix-tree
378 * (TODO: hugepage should have ->index in PAGE_SIZE) 378 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
379 */ 379 */
380static inline pgoff_t page_to_pgoff(struct page *page) 380static inline pgoff_t page_to_index(struct page *page)
381{ 381{
382 pgoff_t pgoff; 382 pgoff_t pgoff;
383 383
384 if (unlikely(PageHeadHuge(page)))
385 return page->index << compound_order(page);
386
387 if (likely(!PageTransTail(page))) 384 if (likely(!PageTransTail(page)))
388 return page->index; 385 return page->index;
389 386
@@ -397,6 +394,18 @@ static inline pgoff_t page_to_pgoff(struct page *page)
397} 394}
398 395
399/* 396/*
397 * Get the offset in PAGE_SIZE.
398 * (TODO: hugepage should have ->index in PAGE_SIZE)
399 */
400static inline pgoff_t page_to_pgoff(struct page *page)
401{
402 if (unlikely(PageHeadHuge(page)))
403 return page->index << compound_order(page);
404
405 return page_to_index(page);
406}
407
408/*
400 * Return byte-offset into filesystem object for page. 409 * Return byte-offset into filesystem object for page.
401 */ 410 */
402static inline loff_t page_offset(struct page *page) 411static inline loff_t page_offset(struct page *page)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0e49f70dbd9b..a38772a85588 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1928,6 +1928,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
1928 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; 1928 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
1929} 1929}
1930 1930
1931static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
1932{
1933 while (1) {
1934 if (!pci_is_pcie(dev))
1935 break;
1936 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
1937 return dev;
1938 if (!dev->bus->self)
1939 break;
1940 dev = dev->bus->self;
1941 }
1942 return NULL;
1943}
1944
1931void pci_request_acs(void); 1945void pci_request_acs(void);
1932bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 1946bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
1933bool pci_acs_path_enabled(struct pci_dev *start, 1947bool pci_acs_path_enabled(struct pci_dev *start,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 348f51b0ec92..e9c009dc3a4a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2567,6 +2567,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p);
2567extern void sched_autogroup_detach(struct task_struct *p); 2567extern void sched_autogroup_detach(struct task_struct *p);
2568extern void sched_autogroup_fork(struct signal_struct *sig); 2568extern void sched_autogroup_fork(struct signal_struct *sig);
2569extern void sched_autogroup_exit(struct signal_struct *sig); 2569extern void sched_autogroup_exit(struct signal_struct *sig);
2570extern void sched_autogroup_exit_task(struct task_struct *p);
2570#ifdef CONFIG_PROC_FS 2571#ifdef CONFIG_PROC_FS
2571extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); 2572extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2572extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); 2573extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
@@ -2576,6 +2577,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2576static inline void sched_autogroup_detach(struct task_struct *p) { } 2577static inline void sched_autogroup_detach(struct task_struct *p) { }
2577static inline void sched_autogroup_fork(struct signal_struct *sig) { } 2578static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2578static inline void sched_autogroup_exit(struct signal_struct *sig) { } 2579static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2580static inline void sched_autogroup_exit_task(struct task_struct *p) { }
2579#endif 2581#endif
2580 2582
2581extern int yield_to(struct task_struct *p, bool preempt); 2583extern int yield_to(struct task_struct *p, bool preempt);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ab02a457da1f..e5d193440374 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -25,6 +25,7 @@ struct svc_xprt_ops {
25 void (*xpo_detach)(struct svc_xprt *); 25 void (*xpo_detach)(struct svc_xprt *);
26 void (*xpo_free)(struct svc_xprt *); 26 void (*xpo_free)(struct svc_xprt *);
27 int (*xpo_secure_port)(struct svc_rqst *); 27 int (*xpo_secure_port)(struct svc_rqst *);
28 void (*xpo_kill_temp_xprt)(struct svc_xprt *);
28}; 29};
29 30
30struct svc_xprt_class { 31struct svc_xprt_class {
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index f00bf667ec33..554671c81f4a 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1018,7 +1018,7 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
1018} 1018}
1019 1019
1020struct hci_dev *hci_dev_get(int index); 1020struct hci_dev *hci_dev_get(int index);
1021struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src); 1021struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type);
1022 1022
1023struct hci_dev *hci_alloc_dev(void); 1023struct hci_dev *hci_alloc_dev(void);
1024void hci_free_dev(struct hci_dev *hdev); 1024void hci_free_dev(struct hci_dev *hdev);
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index d15214d673b2..2a1abbf8da74 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
68 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); 68 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
69 69
70 __skb_queue_head_init(&cell->napi_skbs); 70 __skb_queue_head_init(&cell->napi_skbs);
71
72 set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
73
71 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); 74 netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
72 napi_enable(&cell->napi); 75 napi_enable(&cell->napi);
73 } 76 }
diff --git a/include/net/ip.h b/include/net/ip.h
index 5413883ac47f..d3a107850a41 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -47,8 +47,7 @@ struct inet_skb_parm {
47#define IPSKB_REROUTED BIT(4) 47#define IPSKB_REROUTED BIT(4)
48#define IPSKB_DOREDIRECT BIT(5) 48#define IPSKB_DOREDIRECT BIT(5)
49#define IPSKB_FRAG_PMTU BIT(6) 49#define IPSKB_FRAG_PMTU BIT(6)
50#define IPSKB_FRAG_SEGS BIT(7) 50#define IPSKB_L3SLAVE BIT(7)
51#define IPSKB_L3SLAVE BIT(8)
52 51
53 u16 frag_max_size; 52 u16 frag_max_size;
54}; 53};
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 20ed9699fcd4..1b1cf33cbfb0 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
146{ 146{
147 int pkt_len, err; 147 int pkt_len, err;
148 148
149 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
149 pkt_len = skb->len - skb_inner_network_offset(skb); 150 pkt_len = skb->len - skb_inner_network_offset(skb);
150 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); 151 err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
151 if (unlikely(net_xmit_eval(err))) 152 if (unlikely(net_xmit_eval(err)))
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index b9314b48e39f..f390c3bb05c5 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -243,6 +243,7 @@ int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
243 struct netlink_callback *cb); 243 struct netlink_callback *cb);
244int fib_table_flush(struct net *net, struct fib_table *table); 244int fib_table_flush(struct net *net, struct fib_table *table);
245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
246void fib_table_flush_external(struct fib_table *table);
246void fib_free_table(struct fib_table *tb); 247void fib_free_table(struct fib_table *tb);
247 248
248#ifndef CONFIG_IP_MULTIPLE_TABLES 249#ifndef CONFIG_IP_MULTIPLE_TABLES
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8fed1cd78658..f11ca837361b 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -970,6 +970,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
970int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, 970int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
971 char __user *optval, int __user *optlen); 971 char __user *optval, int __user *optlen);
972 972
973int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
974 int addr_len);
973int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); 975int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
974int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, 976int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
975 int addr_len); 977 int addr_len);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index fc4f757107df..0940598c002f 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -170,7 +170,7 @@ static inline struct net *copy_net_ns(unsigned long flags,
170extern struct list_head net_namespace_list; 170extern struct list_head net_namespace_list;
171 171
172struct net *get_net_ns_by_pid(pid_t pid); 172struct net *get_net_ns_by_pid(pid_t pid);
173struct net *get_net_ns_by_fd(int pid); 173struct net *get_net_ns_by_fd(int fd);
174 174
175#ifdef CONFIG_SYSCTL 175#ifdef CONFIG_SYSCTL
176void ipx_register_sysctl(void); 176void ipx_register_sysctl(void);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 50418052a520..d9d52c020a70 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -100,6 +100,9 @@ struct nf_conn {
100 100
101 possible_net_t ct_net; 101 possible_net_t ct_net;
102 102
103#if IS_ENABLED(CONFIG_NF_NAT)
104 struct rhlist_head nat_bysource;
105#endif
103 /* all members below initialized via memset */ 106 /* all members below initialized via memset */
104 u8 __nfct_init_offset[0]; 107 u8 __nfct_init_offset[0];
105 108
@@ -117,9 +120,6 @@ struct nf_conn {
117 /* Extensions */ 120 /* Extensions */
118 struct nf_ct_ext *ext; 121 struct nf_ct_ext *ext;
119 122
120#if IS_ENABLED(CONFIG_NF_NAT)
121 struct rhash_head nat_bysource;
122#endif
123 /* Storage reserved for other modules, must be the last member */ 123 /* Storage reserved for other modules, must be the last member */
124 union nf_conntrack_proto proto; 124 union nf_conntrack_proto proto;
125}; 125};
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 498814626e28..1723a67c0b0a 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
30 if (net->ct.labels_used == 0) 30 if (net->ct.labels_used == 0)
31 return NULL; 31 return NULL;
32 32
33 return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, 33 return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC);
34 sizeof(struct nf_conn_labels), GFP_ATOMIC);
35#else 34#else
36 return NULL; 35 return NULL;
37#endif 36#endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 5031e072567b..b02af0bf5777 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
145 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; 145 return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
146} 146}
147 147
148unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); 148int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
149unsigned int nft_parse_register(const struct nlattr *attr); 149unsigned int nft_parse_register(const struct nlattr *attr);
150int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); 150int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
151 151
@@ -313,7 +313,7 @@ void nft_unregister_set(struct nft_set_ops *ops);
313 * @size: maximum set size 313 * @size: maximum set size
314 * @nelems: number of elements 314 * @nelems: number of elements
315 * @ndeact: number of deactivated elements queued for removal 315 * @ndeact: number of deactivated elements queued for removal
316 * @timeout: default timeout value in msecs 316 * @timeout: default timeout value in jiffies
317 * @gc_int: garbage collection interval in msecs 317 * @gc_int: garbage collection interval in msecs
318 * @policy: set parameterization (see enum nft_set_policies) 318 * @policy: set parameterization (see enum nft_set_policies)
319 * @udlen: user data length 319 * @udlen: user data length
@@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set,
542 const struct nft_set_ext_tmpl *tmpl, 542 const struct nft_set_ext_tmpl *tmpl,
543 const u32 *key, const u32 *data, 543 const u32 *key, const u32 *data,
544 u64 timeout, gfp_t gfp); 544 u64 timeout, gfp_t gfp);
545void nft_set_elem_destroy(const struct nft_set *set, void *elem); 545void nft_set_elem_destroy(const struct nft_set *set, void *elem,
546 bool destroy_expr);
546 547
547/** 548/**
548 * struct nft_set_gc_batch_head - nf_tables set garbage collection batch 549 * struct nft_set_gc_batch_head - nf_tables set garbage collection batch
@@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
693{ 694{
694 int err; 695 int err;
695 696
696 __module_get(src->ops->type->owner);
697 if (src->ops->clone) { 697 if (src->ops->clone) {
698 dst->ops = src->ops; 698 dst->ops = src->ops;
699 err = src->ops->clone(dst, src); 699 err = src->ops->clone(dst, src);
@@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
702 } else { 702 } else {
703 memcpy(dst, src, src->ops->size); 703 memcpy(dst, src, src->ops->size);
704 } 704 }
705
706 __module_get(src->ops->type->owner);
705 return 0; 707 return 0;
706} 708}
707 709
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 87a7f42e7639..31acc3f4f132 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *);
152struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, 152struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
153 struct sctphdr *, struct sctp_association **, 153 struct sctphdr *, struct sctp_association **,
154 struct sctp_transport **); 154 struct sctp_transport **);
155void sctp_err_finish(struct sock *, struct sctp_association *); 155void sctp_err_finish(struct sock *, struct sctp_transport *);
156void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, 156void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
157 struct sctp_transport *t, __u32 pmtu); 157 struct sctp_transport *t, __u32 pmtu);
158void sctp_icmp_redirect(struct sock *, struct sctp_transport *, 158void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
diff --git a/include/net/sock.h b/include/net/sock.h
index 73c6b008f1b7..92b269709b9a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1596,11 +1596,11 @@ static inline void sock_put(struct sock *sk)
1596void sock_gen_put(struct sock *sk); 1596void sock_gen_put(struct sock *sk);
1597 1597
1598int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, 1598int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1599 unsigned int trim_cap); 1599 unsigned int trim_cap, bool refcounted);
1600static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, 1600static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1601 const int nested) 1601 const int nested)
1602{ 1602{
1603 return __sk_receive_skb(sk, skb, nested, 1); 1603 return __sk_receive_skb(sk, skb, nested, 1, true);
1604} 1604}
1605 1605
1606static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) 1606static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5b82d4d94834..123979fe12bf 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
805{ 805{
806#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 806#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
807 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 807 if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
808 ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) 808 skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
809 return true; 809 return true;
810#endif 810#endif
811 return false; 811 return false;
@@ -1220,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
1220 1220
1221bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); 1221bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1222bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); 1222bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1223int tcp_filter(struct sock *sk, struct sk_buff *skb);
1223 1224
1224#undef STATE_TRACE 1225#undef STATE_TRACE
1225 1226
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
index 5cd4d4d2dd1d..9c9c6ad55f14 100644
--- a/include/uapi/linux/atm_zatm.h
+++ b/include/uapi/linux/atm_zatm.h
@@ -14,7 +14,6 @@
14 14
15#include <linux/atmapi.h> 15#include <linux/atmapi.h>
16#include <linux/atmioc.h> 16#include <linux/atmioc.h>
17#include <linux/time.h>
18 17
19#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) 18#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc)
20 /* get pool statistics */ 19 /* get pool statistics */
diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h
index a6c35e1a89ad..05865edaefda 100644
--- a/include/uapi/linux/bpqether.h
+++ b/include/uapi/linux/bpqether.h
@@ -5,9 +5,7 @@
5 * Defines for the BPQETHER pseudo device driver 5 * Defines for the BPQETHER pseudo device driver
6 */ 6 */
7 7
8#ifndef __LINUX_IF_ETHER_H
9#include <linux/if_ether.h> 8#include <linux/if_ether.h>
10#endif
11 9
12#define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ 10#define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */
13#define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) 11#define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1)
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index d6d071fc3c56..3af60ee69053 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -640,7 +640,7 @@
640 * Control a data application associated with the currently viewed channel, 640 * Control a data application associated with the currently viewed channel,
641 * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.) 641 * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
642 */ 642 */
643#define KEY_DATA 0x275 643#define KEY_DATA 0x277
644 644
645#define BTN_TRIGGER_HAPPY 0x2c0 645#define BTN_TRIGGER_HAPPY 0x2c0
646#define BTN_TRIGGER_HAPPY1 0x2c0 646#define BTN_TRIGGER_HAPPY1 0x2c0
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 300ef255d1e0..4ee67cb99143 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -972,12 +972,19 @@ struct kvm_irqfd {
972 __u8 pad[16]; 972 __u8 pad[16];
973}; 973};
974 974
975/* For KVM_CAP_ADJUST_CLOCK */
976
977/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
978#define KVM_CLOCK_TSC_STABLE 2
979
975struct kvm_clock_data { 980struct kvm_clock_data {
976 __u64 clock; 981 __u64 clock;
977 __u32 flags; 982 __u32 flags;
978 __u32 pad[9]; 983 __u32 pad[9];
979}; 984};
980 985
986/* For KVM_CAP_SW_TLB */
987
981#define KVM_MMU_FSL_BOOKE_NOHV 0 988#define KVM_MMU_FSL_BOOKE_NOHV 0
982#define KVM_MMU_FSL_BOOKE_HV 1 989#define KVM_MMU_FSL_BOOKE_HV 1
983 990
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index e3969bd939e4..9611c7b6c18f 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -11,3 +11,4 @@ header-y += tc_vlan.h
11header-y += tc_bpf.h 11header-y += tc_bpf.h
12header-y += tc_connmark.h 12header-y += tc_connmark.h
13header-y += tc_ife.h 13header-y += tc_ife.h
14header-y += tc_tunnel_key.h
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 8a09b32e07d6..dd4104c9aa12 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -272,7 +272,7 @@ int __init rd_load_image(char *from)
272 sys_write(out_fd, buf, BLOCK_SIZE); 272 sys_write(out_fd, buf, BLOCK_SIZE);
273#if !defined(CONFIG_S390) 273#if !defined(CONFIG_S390)
274 if (!(i % 16)) { 274 if (!(i % 16)) {
275 printk("%c\b", rotator[rotate & 0x3]); 275 pr_cont("%c\b", rotator[rotate & 0x3]);
276 rotate++; 276 rotate++;
277 } 277 }
278#endif 278#endif
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 570eeca7bdfa..ad1bc67aff1b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab)
687 687
688 hlist_for_each_entry_safe(l, n, head, hash_node) { 688 hlist_for_each_entry_safe(l, n, head, hash_node) {
689 hlist_del_rcu(&l->hash_node); 689 hlist_del_rcu(&l->hash_node);
690 htab_elem_free(htab, l); 690 if (l->state != HTAB_EXTRA_ELEM_USED)
691 htab_elem_free(htab, l);
691 } 692 }
692 } 693 }
693} 694}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 228f962447a5..237f3d6a7ddc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr)
194 194
195 err = bpf_map_charge_memlock(map); 195 err = bpf_map_charge_memlock(map);
196 if (err) 196 if (err)
197 goto free_map; 197 goto free_map_nouncharge;
198 198
199 err = bpf_map_new_fd(map); 199 err = bpf_map_new_fd(map);
200 if (err < 0) 200 if (err < 0)
@@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr)
204 return err; 204 return err;
205 205
206free_map: 206free_map:
207 bpf_map_uncharge_memlock(map);
208free_map_nouncharge:
207 map->ops->map_free(map); 209 map->ops->map_free(map);
208 return err; 210 return err;
209} 211}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 99a7e5b388f2..8199821f54cf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state)
216 reg->map_ptr->key_size, 216 reg->map_ptr->key_size,
217 reg->map_ptr->value_size); 217 reg->map_ptr->value_size);
218 if (reg->min_value != BPF_REGISTER_MIN_RANGE) 218 if (reg->min_value != BPF_REGISTER_MIN_RANGE)
219 verbose(",min_value=%llu", 219 verbose(",min_value=%lld",
220 (unsigned long long)reg->min_value); 220 (long long)reg->min_value);
221 if (reg->max_value != BPF_REGISTER_MAX_RANGE) 221 if (reg->max_value != BPF_REGISTER_MAX_RANGE)
222 verbose(",max_value=%llu", 222 verbose(",max_value=%llu",
223 (unsigned long long)reg->max_value); 223 (unsigned long long)reg->max_value);
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
758 * index'es we need to make sure that whatever we use 758 * index'es we need to make sure that whatever we use
759 * will have a set floor within our range. 759 * will have a set floor within our range.
760 */ 760 */
761 if ((s64)reg->min_value < 0) { 761 if (reg->min_value < 0) {
762 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 762 verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
763 regno); 763 regno);
764 return -EACCES; 764 return -EACCES;
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
1468{ 1468{
1469 if (reg->max_value > BPF_REGISTER_MAX_RANGE) 1469 if (reg->max_value > BPF_REGISTER_MAX_RANGE)
1470 reg->max_value = BPF_REGISTER_MAX_RANGE; 1470 reg->max_value = BPF_REGISTER_MAX_RANGE;
1471 if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) 1471 if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
1472 reg->min_value > BPF_REGISTER_MAX_RANGE)
1472 reg->min_value = BPF_REGISTER_MIN_RANGE; 1473 reg->min_value = BPF_REGISTER_MIN_RANGE;
1473} 1474}
1474 1475
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1476 struct bpf_insn *insn) 1477 struct bpf_insn *insn)
1477{ 1478{
1478 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; 1479 struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
1479 u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; 1480 s64 min_val = BPF_REGISTER_MIN_RANGE;
1481 u64 max_val = BPF_REGISTER_MAX_RANGE;
1480 bool min_set = false, max_set = false; 1482 bool min_set = false, max_set = false;
1481 u8 opcode = BPF_OP(insn->code); 1483 u8 opcode = BPF_OP(insn->code);
1482 1484
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1512 return; 1514 return;
1513 } 1515 }
1514 1516
1517 /* If one of our values was at the end of our ranges then we can't just
1518 * do our normal operations to the register, we need to set the values
1519 * to the min/max since they are undefined.
1520 */
1521 if (min_val == BPF_REGISTER_MIN_RANGE)
1522 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1523 if (max_val == BPF_REGISTER_MAX_RANGE)
1524 dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1525
1515 switch (opcode) { 1526 switch (opcode) {
1516 case BPF_ADD: 1527 case BPF_ADD:
1517 dst_reg->min_value += min_val; 1528 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1518 dst_reg->max_value += max_val; 1529 dst_reg->min_value += min_val;
1530 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1531 dst_reg->max_value += max_val;
1519 break; 1532 break;
1520 case BPF_SUB: 1533 case BPF_SUB:
1521 dst_reg->min_value -= min_val; 1534 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1522 dst_reg->max_value -= max_val; 1535 dst_reg->min_value -= min_val;
1536 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1537 dst_reg->max_value -= max_val;
1523 break; 1538 break;
1524 case BPF_MUL: 1539 case BPF_MUL:
1525 dst_reg->min_value *= min_val; 1540 if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1526 dst_reg->max_value *= max_val; 1541 dst_reg->min_value *= min_val;
1542 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1543 dst_reg->max_value *= max_val;
1527 break; 1544 break;
1528 case BPF_AND: 1545 case BPF_AND:
1529 /* & is special since it could end up with 0 bits set. */ 1546 /* Disallow AND'ing of negative numbers, ain't nobody got time
1530 dst_reg->min_value &= min_val; 1547 * for that. Otherwise the minimum is 0 and the max is the max
1548 * value we could AND against.
1549 */
1550 if (min_val < 0)
1551 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1552 else
1553 dst_reg->min_value = 0;
1531 dst_reg->max_value = max_val; 1554 dst_reg->max_value = max_val;
1532 break; 1555 break;
1533 case BPF_LSH: 1556 case BPF_LSH:
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1537 */ 1560 */
1538 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1561 if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
1539 dst_reg->min_value = BPF_REGISTER_MIN_RANGE; 1562 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1540 else 1563 else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1541 dst_reg->min_value <<= min_val; 1564 dst_reg->min_value <<= min_val;
1542 1565
1543 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) 1566 if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
1544 dst_reg->max_value = BPF_REGISTER_MAX_RANGE; 1567 dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1545 else 1568 else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1546 dst_reg->max_value <<= max_val; 1569 dst_reg->max_value <<= max_val;
1547 break; 1570 break;
1548 case BPF_RSH: 1571 case BPF_RSH:
1549 dst_reg->min_value >>= min_val; 1572 /* RSH by a negative number is undefined, and the BPF_RSH is an
1550 dst_reg->max_value >>= max_val; 1573 * unsigned shift, so make the appropriate casts.
1551 break;
1552 case BPF_MOD:
1553 /* % is special since it is an unsigned modulus, so the floor
1554 * will always be 0.
1555 */ 1574 */
1556 dst_reg->min_value = 0; 1575 if (min_val < 0 || dst_reg->min_value < 0)
1557 dst_reg->max_value = max_val - 1; 1576 dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1577 else
1578 dst_reg->min_value =
1579 (u64)(dst_reg->min_value) >> min_val;
1580 if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1581 dst_reg->max_value >>= max_val;
1558 break; 1582 break;
1559 default: 1583 default:
1560 reset_reg_range_values(regs, insn->dst_reg); 1584 reset_reg_range_values(regs, insn->dst_reg);
@@ -2430,6 +2454,7 @@ static bool states_equal(struct bpf_verifier_env *env,
2430 struct bpf_verifier_state *old, 2454 struct bpf_verifier_state *old,
2431 struct bpf_verifier_state *cur) 2455 struct bpf_verifier_state *cur)
2432{ 2456{
2457 bool varlen_map_access = env->varlen_map_value_access;
2433 struct bpf_reg_state *rold, *rcur; 2458 struct bpf_reg_state *rold, *rcur;
2434 int i; 2459 int i;
2435 2460
@@ -2443,12 +2468,17 @@ static bool states_equal(struct bpf_verifier_env *env,
2443 /* If the ranges were not the same, but everything else was and 2468 /* If the ranges were not the same, but everything else was and
2444 * we didn't do a variable access into a map then we are a-ok. 2469 * we didn't do a variable access into a map then we are a-ok.
2445 */ 2470 */
2446 if (!env->varlen_map_value_access && 2471 if (!varlen_map_access &&
2447 rold->type == rcur->type && rold->imm == rcur->imm) 2472 rold->type == rcur->type && rold->imm == rcur->imm)
2448 continue; 2473 continue;
2449 2474
2475 /* If we didn't map access then again we don't care about the
2476 * mismatched range values and it's ok if our old type was
2477 * UNKNOWN and we didn't go to a NOT_INIT'ed reg.
2478 */
2450 if (rold->type == NOT_INIT || 2479 if (rold->type == NOT_INIT ||
2451 (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) 2480 (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
2481 rcur->type != NOT_INIT))
2452 continue; 2482 continue;
2453 2483
2454 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2484 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0e292132efac..6ee1febdf6ff 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event,
902 * this will always be called from the right CPU. 902 * this will always be called from the right CPU.
903 */ 903 */
904 cpuctx = __get_cpu_context(ctx); 904 cpuctx = __get_cpu_context(ctx);
905
906 /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
907 if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
908 /*
909 * We are removing the last cpu event in this context.
910 * If that event is not active in this cpu, cpuctx->cgrp
911 * should've been cleared by perf_cgroup_switch.
912 */
913 WARN_ON_ONCE(!add && cpuctx->cgrp);
914 return;
915 }
905 cpuctx->cgrp = add ? event->cgrp : NULL; 916 cpuctx->cgrp = add ? event->cgrp : NULL;
906} 917}
907 918
@@ -8018,6 +8029,7 @@ restart:
8018 * if <size> is not specified, the range is treated as a single address. 8029 * if <size> is not specified, the range is treated as a single address.
8019 */ 8030 */
8020enum { 8031enum {
8032 IF_ACT_NONE = -1,
8021 IF_ACT_FILTER, 8033 IF_ACT_FILTER,
8022 IF_ACT_START, 8034 IF_ACT_START,
8023 IF_ACT_STOP, 8035 IF_ACT_STOP,
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = {
8041 { IF_SRC_KERNEL, "%u/%u" }, 8053 { IF_SRC_KERNEL, "%u/%u" },
8042 { IF_SRC_FILEADDR, "%u@%s" }, 8054 { IF_SRC_FILEADDR, "%u@%s" },
8043 { IF_SRC_KERNELADDR, "%u" }, 8055 { IF_SRC_KERNELADDR, "%u" },
8056 { IF_ACT_NONE, NULL },
8044}; 8057};
8045 8058
8046/* 8059/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 9d68c45ebbe3..3076f3089919 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code)
836 */ 836 */
837 perf_event_exit_task(tsk); 837 perf_event_exit_task(tsk);
838 838
839 sched_autogroup_exit_task(tsk);
839 cgroup_exit(tsk); 840 cgroup_exit(tsk);
840 841
841 /* 842 /*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9c4d30483264..6b669593e7eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1341 1341
1342 } else if (new->flags & IRQF_TRIGGER_MASK) { 1342 } else if (new->flags & IRQF_TRIGGER_MASK) {
1343 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; 1343 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1344 unsigned int omsk = irq_settings_get_trigger_mask(desc); 1344 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1345 1345
1346 if (nmsk != omsk) 1346 if (nmsk != omsk)
1347 /* hope the handler works with current trigger mode */ 1347 /* hope the handler works with current trigger mode */
1348 pr_warn("irq %d uses trigger mode %u; requested %u\n", 1348 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1349 irq, nmsk, omsk); 1349 irq, omsk, nmsk);
1350 } 1350 }
1351 1351
1352 *old_ptr = new; 1352 *old_ptr = new;
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 51c4b24b6328..c2b88490d857 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -46,6 +46,14 @@ enum {
46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) 46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
47 47
48/* 48/*
49 * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
50 * .data and .bss to fit in required 32MB limit for the kernel. With
51 * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
52 * So, reduce the static allocations for lockdeps related structures so that
53 * everything fits in current required size limit.
54 */
55#ifdef CONFIG_PROVE_LOCKING_SMALL
56/*
49 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 57 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
50 * we track. 58 * we track.
51 * 59 *
@@ -54,18 +62,24 @@ enum {
54 * table (if it's not there yet), and we check it for lock order 62 * table (if it's not there yet), and we check it for lock order
55 * conflicts and deadlocks. 63 * conflicts and deadlocks.
56 */ 64 */
65#define MAX_LOCKDEP_ENTRIES 16384UL
66#define MAX_LOCKDEP_CHAINS_BITS 15
67#define MAX_STACK_TRACE_ENTRIES 262144UL
68#else
57#define MAX_LOCKDEP_ENTRIES 32768UL 69#define MAX_LOCKDEP_ENTRIES 32768UL
58 70
59#define MAX_LOCKDEP_CHAINS_BITS 16 71#define MAX_LOCKDEP_CHAINS_BITS 16
60#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
61
62#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
63 72
64/* 73/*
65 * Stack-trace: tightly packed array of stack backtrace 74 * Stack-trace: tightly packed array of stack backtrace
66 * addresses. Protected by the hash_lock. 75 * addresses. Protected by the hash_lock.
67 */ 76 */
68#define MAX_STACK_TRACE_ENTRIES 524288UL 77#define MAX_STACK_TRACE_ENTRIES 524288UL
78#endif
79
80#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
81
82#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
69 83
70extern struct list_head all_lock_classes; 84extern struct list_head all_lock_classes;
71extern struct lock_chain lock_chains[]; 85extern struct lock_chain lock_chains[];
diff --git a/kernel/module.c b/kernel/module.c
index f57dd63186e6..0e54d5bf0097 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1301,8 +1301,9 @@ static int check_version(Elf_Shdr *sechdrs,
1301 goto bad_version; 1301 goto bad_version;
1302 } 1302 }
1303 1303
1304 pr_warn("%s: no symbol version for %s\n", mod->name, symname); 1304 /* Broken toolchain. Warn once, then let it go.. */
1305 return 0; 1305 pr_warn_once("%s: no symbol version for %s\n", mod->name, symname);
1306 return 1;
1306 1307
1307bad_version: 1308bad_version:
1308 pr_warn("%s: disagrees about version of symbol %s\n", 1309 pr_warn("%s: disagrees about version of symbol %s\n",
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5028f4fd504a..f7a55e9ff2f7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -783,8 +783,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
783 return ret; 783 return ret;
784} 784}
785 785
786static void cont_flush(void);
787
788static ssize_t devkmsg_read(struct file *file, char __user *buf, 786static ssize_t devkmsg_read(struct file *file, char __user *buf,
789 size_t count, loff_t *ppos) 787 size_t count, loff_t *ppos)
790{ 788{
@@ -800,7 +798,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
800 if (ret) 798 if (ret)
801 return ret; 799 return ret;
802 raw_spin_lock_irq(&logbuf_lock); 800 raw_spin_lock_irq(&logbuf_lock);
803 cont_flush();
804 while (user->seq == log_next_seq) { 801 while (user->seq == log_next_seq) {
805 if (file->f_flags & O_NONBLOCK) { 802 if (file->f_flags & O_NONBLOCK) {
806 ret = -EAGAIN; 803 ret = -EAGAIN;
@@ -863,7 +860,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
863 return -ESPIPE; 860 return -ESPIPE;
864 861
865 raw_spin_lock_irq(&logbuf_lock); 862 raw_spin_lock_irq(&logbuf_lock);
866 cont_flush();
867 switch (whence) { 863 switch (whence) {
868 case SEEK_SET: 864 case SEEK_SET:
869 /* the first record */ 865 /* the first record */
@@ -902,7 +898,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
902 poll_wait(file, &log_wait, wait); 898 poll_wait(file, &log_wait, wait);
903 899
904 raw_spin_lock_irq(&logbuf_lock); 900 raw_spin_lock_irq(&logbuf_lock);
905 cont_flush();
906 if (user->seq < log_next_seq) { 901 if (user->seq < log_next_seq) {
907 /* return error when data has vanished underneath us */ 902 /* return error when data has vanished underneath us */
908 if (user->seq < log_first_seq) 903 if (user->seq < log_first_seq)
@@ -1289,7 +1284,6 @@ static int syslog_print(char __user *buf, int size)
1289 size_t skip; 1284 size_t skip;
1290 1285
1291 raw_spin_lock_irq(&logbuf_lock); 1286 raw_spin_lock_irq(&logbuf_lock);
1292 cont_flush();
1293 if (syslog_seq < log_first_seq) { 1287 if (syslog_seq < log_first_seq) {
1294 /* messages are gone, move to first one */ 1288 /* messages are gone, move to first one */
1295 syslog_seq = log_first_seq; 1289 syslog_seq = log_first_seq;
@@ -1349,7 +1343,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
1349 return -ENOMEM; 1343 return -ENOMEM;
1350 1344
1351 raw_spin_lock_irq(&logbuf_lock); 1345 raw_spin_lock_irq(&logbuf_lock);
1352 cont_flush();
1353 if (buf) { 1346 if (buf) {
1354 u64 next_seq; 1347 u64 next_seq;
1355 u64 seq; 1348 u64 seq;
@@ -1511,7 +1504,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
1511 /* Number of chars in the log buffer */ 1504 /* Number of chars in the log buffer */
1512 case SYSLOG_ACTION_SIZE_UNREAD: 1505 case SYSLOG_ACTION_SIZE_UNREAD:
1513 raw_spin_lock_irq(&logbuf_lock); 1506 raw_spin_lock_irq(&logbuf_lock);
1514 cont_flush();
1515 if (syslog_seq < log_first_seq) { 1507 if (syslog_seq < log_first_seq) {
1516 /* messages are gone, move to first one */ 1508 /* messages are gone, move to first one */
1517 syslog_seq = log_first_seq; 1509 syslog_seq = log_first_seq;
@@ -3028,7 +3020,6 @@ void kmsg_dump(enum kmsg_dump_reason reason)
3028 dumper->active = true; 3020 dumper->active = true;
3029 3021
3030 raw_spin_lock_irqsave(&logbuf_lock, flags); 3022 raw_spin_lock_irqsave(&logbuf_lock, flags);
3031 cont_flush();
3032 dumper->cur_seq = clear_seq; 3023 dumper->cur_seq = clear_seq;
3033 dumper->cur_idx = clear_idx; 3024 dumper->cur_idx = clear_idx;
3034 dumper->next_seq = log_next_seq; 3025 dumper->next_seq = log_next_seq;
@@ -3119,7 +3110,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
3119 bool ret; 3110 bool ret;
3120 3111
3121 raw_spin_lock_irqsave(&logbuf_lock, flags); 3112 raw_spin_lock_irqsave(&logbuf_lock, flags);
3122 cont_flush();
3123 ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); 3113 ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
3124 raw_spin_unlock_irqrestore(&logbuf_lock, flags); 3114 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
3125 3115
@@ -3162,7 +3152,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
3162 goto out; 3152 goto out;
3163 3153
3164 raw_spin_lock_irqsave(&logbuf_lock, flags); 3154 raw_spin_lock_irqsave(&logbuf_lock, flags);
3165 cont_flush();
3166 if (dumper->cur_seq < log_first_seq) { 3155 if (dumper->cur_seq < log_first_seq) {
3167 /* messages are gone, move to first available one */ 3156 /* messages are gone, move to first available one */
3168 dumper->cur_seq = log_first_seq; 3157 dumper->cur_seq = log_first_seq;
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index a5d966cb8891..f1c8fd566246 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
111{ 111{
112 if (tg != &root_task_group) 112 if (tg != &root_task_group)
113 return false; 113 return false;
114
115 /* 114 /*
116 * We can only assume the task group can't go away on us if 115 * If we race with autogroup_move_group() the caller can use the old
117 * autogroup_move_group() can see us on ->thread_group list. 116 * value of signal->autogroup but in this case sched_move_task() will
117 * be called again before autogroup_kref_put().
118 *
119 * However, there is no way sched_autogroup_exit_task() could tell us
120 * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
118 */ 121 */
119 if (p->flags & PF_EXITING) 122 if (p->flags & PF_EXITING)
120 return false; 123 return false;
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
122 return true; 125 return true;
123} 126}
124 127
128void sched_autogroup_exit_task(struct task_struct *p)
129{
130 /*
131 * We are going to call exit_notify() and autogroup_move_group() can't
132 * see this thread after that: we can no longer use signal->autogroup.
133 * See the PF_EXITING check in task_wants_autogroup().
134 */
135 sched_move_task(p);
136}
137
125static void 138static void
126autogroup_move_group(struct task_struct *p, struct autogroup *ag) 139autogroup_move_group(struct task_struct *p, struct autogroup *ag)
127{ 140{
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
138 } 151 }
139 152
140 p->signal->autogroup = autogroup_kref_get(ag); 153 p->signal->autogroup = autogroup_kref_get(ag);
141 154 /*
142 if (!READ_ONCE(sysctl_sched_autogroup_enabled)) 155 * We can't avoid sched_move_task() after we changed signal->autogroup,
143 goto out; 156 * this process can already run with task_group() == prev->tg or we can
144 157 * race with cgroup code which can read autogroup = prev under rq->lock.
158 * In the latter case for_each_thread() can not miss a migrating thread,
159 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
160 * can't be removed from thread list, we hold ->siglock.
161 *
162 * If an exiting thread was already removed from thread list we rely on
163 * sched_autogroup_exit_task().
164 */
145 for_each_thread(p, t) 165 for_each_thread(p, t)
146 sched_move_task(t); 166 sched_move_task(t);
147out: 167
148 unlock_task_sighand(p, &flags); 168 unlock_task_sighand(p, &flags);
149 autogroup_kref_put(prev); 169 autogroup_kref_put(prev);
150} 170}
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index b3f05ee20d18..cbb387a265db 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
54 [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, 54 [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
55 [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; 55 [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
56 56
57static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = { 57/*
58 * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
59 * Make sure they are always aligned.
60 */
61static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
58 [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, 62 [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
59}; 63};
60 64
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2050a7652a86..da87b3cba5b3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1862,6 +1862,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1862 1862
1863 /* Update rec->flags */ 1863 /* Update rec->flags */
1864 do_for_each_ftrace_rec(pg, rec) { 1864 do_for_each_ftrace_rec(pg, rec) {
1865
1866 if (rec->flags & FTRACE_FL_DISABLED)
1867 continue;
1868
1865 /* We need to update only differences of filter_hash */ 1869 /* We need to update only differences of filter_hash */
1866 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 1870 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1867 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); 1871 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -1884,6 +1888,10 @@ rollback:
1884 1888
1885 /* Roll back what we did above */ 1889 /* Roll back what we did above */
1886 do_for_each_ftrace_rec(pg, rec) { 1890 do_for_each_ftrace_rec(pg, rec) {
1891
1892 if (rec->flags & FTRACE_FL_DISABLED)
1893 continue;
1894
1887 if (rec == end) 1895 if (rec == end)
1888 goto err_out; 1896 goto err_out;
1889 1897
@@ -2397,6 +2405,10 @@ void __weak ftrace_replace_code(int enable)
2397 return; 2405 return;
2398 2406
2399 do_for_each_ftrace_rec(pg, rec) { 2407 do_for_each_ftrace_rec(pg, rec) {
2408
2409 if (rec->flags & FTRACE_FL_DISABLED)
2410 continue;
2411
2400 failed = __ftrace_replace_code(rec, enable); 2412 failed = __ftrace_replace_code(rec, enable);
2401 if (failed) { 2413 if (failed) {
2402 ftrace_bug(failed, rec); 2414 ftrace_bug(failed, rec);
@@ -2763,7 +2775,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2763 struct dyn_ftrace *rec; 2775 struct dyn_ftrace *rec;
2764 2776
2765 do_for_each_ftrace_rec(pg, rec) { 2777 do_for_each_ftrace_rec(pg, rec) {
2766 if (FTRACE_WARN_ON_ONCE(rec->flags)) 2778 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2767 pr_warn(" %pS flags:%lx\n", 2779 pr_warn(" %pS flags:%lx\n",
2768 (void *)rec->ip, rec->flags); 2780 (void *)rec->ip, rec->flags);
2769 } while_for_each_ftrace_rec(); 2781 } while_for_each_ftrace_rec();
@@ -3598,6 +3610,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3598 goto out_unlock; 3610 goto out_unlock;
3599 3611
3600 do_for_each_ftrace_rec(pg, rec) { 3612 do_for_each_ftrace_rec(pg, rec) {
3613
3614 if (rec->flags & FTRACE_FL_DISABLED)
3615 continue;
3616
3601 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3617 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3602 ret = enter_record(hash, rec, clear_filter); 3618 ret = enter_record(hash, rec, clear_filter);
3603 if (ret < 0) { 3619 if (ret < 0) {
@@ -3793,6 +3809,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3793 3809
3794 do_for_each_ftrace_rec(pg, rec) { 3810 do_for_each_ftrace_rec(pg, rec) {
3795 3811
3812 if (rec->flags & FTRACE_FL_DISABLED)
3813 continue;
3814
3796 if (!ftrace_match_record(rec, &func_g, NULL, 0)) 3815 if (!ftrace_match_record(rec, &func_g, NULL, 0))
3797 continue; 3816 continue;
3798 3817
@@ -4685,6 +4704,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4685 4704
4686 do_for_each_ftrace_rec(pg, rec) { 4705 do_for_each_ftrace_rec(pg, rec) {
4687 4706
4707 if (rec->flags & FTRACE_FL_DISABLED)
4708 continue;
4709
4688 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 4710 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
4689 /* if it is in the array */ 4711 /* if it is in the array */
4690 exists = false; 4712 exists = false;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b01e547d4d04..a6c8db1d62f6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1085,6 +1085,9 @@ config PROVE_LOCKING
1085 1085
1086 For more details, see Documentation/locking/lockdep-design.txt. 1086 For more details, see Documentation/locking/lockdep-design.txt.
1087 1087
1088config PROVE_LOCKING_SMALL
1089 bool
1090
1088config LOCKDEP 1091config LOCKDEP
1089 bool 1092 bool
1090 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1093 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a8e12601eb37..056052dc8e91 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -362,6 +362,7 @@ void debug_object_init(void *addr, struct debug_obj_descr *descr)
362 362
363 __debug_object_init(addr, descr, 0); 363 __debug_object_init(addr, descr, 0);
364} 364}
365EXPORT_SYMBOL_GPL(debug_object_init);
365 366
366/** 367/**
367 * debug_object_init_on_stack - debug checks when an object on stack is 368 * debug_object_init_on_stack - debug checks when an object on stack is
@@ -376,6 +377,7 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
376 377
377 __debug_object_init(addr, descr, 1); 378 __debug_object_init(addr, descr, 1);
378} 379}
380EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
379 381
380/** 382/**
381 * debug_object_activate - debug checks when an object is activated 383 * debug_object_activate - debug checks when an object is activated
@@ -449,6 +451,7 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
449 } 451 }
450 return 0; 452 return 0;
451} 453}
454EXPORT_SYMBOL_GPL(debug_object_activate);
452 455
453/** 456/**
454 * debug_object_deactivate - debug checks when an object is deactivated 457 * debug_object_deactivate - debug checks when an object is deactivated
@@ -496,6 +499,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
496 499
497 raw_spin_unlock_irqrestore(&db->lock, flags); 500 raw_spin_unlock_irqrestore(&db->lock, flags);
498} 501}
502EXPORT_SYMBOL_GPL(debug_object_deactivate);
499 503
500/** 504/**
501 * debug_object_destroy - debug checks when an object is destroyed 505 * debug_object_destroy - debug checks when an object is destroyed
@@ -542,6 +546,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
542out_unlock: 546out_unlock:
543 raw_spin_unlock_irqrestore(&db->lock, flags); 547 raw_spin_unlock_irqrestore(&db->lock, flags);
544} 548}
549EXPORT_SYMBOL_GPL(debug_object_destroy);
545 550
546/** 551/**
547 * debug_object_free - debug checks when an object is freed 552 * debug_object_free - debug checks when an object is freed
@@ -582,6 +587,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
582out_unlock: 587out_unlock:
583 raw_spin_unlock_irqrestore(&db->lock, flags); 588 raw_spin_unlock_irqrestore(&db->lock, flags);
584} 589}
590EXPORT_SYMBOL_GPL(debug_object_free);
585 591
586/** 592/**
587 * debug_object_assert_init - debug checks when object should be init-ed 593 * debug_object_assert_init - debug checks when object should be init-ed
@@ -626,6 +632,7 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
626 632
627 raw_spin_unlock_irqrestore(&db->lock, flags); 633 raw_spin_unlock_irqrestore(&db->lock, flags);
628} 634}
635EXPORT_SYMBOL_GPL(debug_object_assert_init);
629 636
630/** 637/**
631 * debug_object_active_state - debug checks object usage state machine 638 * debug_object_active_state - debug checks object usage state machine
@@ -673,6 +680,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
673 680
674 raw_spin_unlock_irqrestore(&db->lock, flags); 681 raw_spin_unlock_irqrestore(&db->lock, flags);
675} 682}
683EXPORT_SYMBOL_GPL(debug_object_active_state);
676 684
677#ifdef CONFIG_DEBUG_OBJECTS_FREE 685#ifdef CONFIG_DEBUG_OBJECTS_FREE
678static void __debug_check_no_obj_freed(const void *address, unsigned long size) 686static void __debug_check_no_obj_freed(const void *address, unsigned long size)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f0c7f1481bae..f2bd21b93dfc 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -683,10 +683,11 @@ static void pipe_advance(struct iov_iter *i, size_t size)
683 struct pipe_inode_info *pipe = i->pipe; 683 struct pipe_inode_info *pipe = i->pipe;
684 struct pipe_buffer *buf; 684 struct pipe_buffer *buf;
685 int idx = i->idx; 685 int idx = i->idx;
686 size_t off = i->iov_offset; 686 size_t off = i->iov_offset, orig_sz;
687 687
688 if (unlikely(i->count < size)) 688 if (unlikely(i->count < size))
689 size = i->count; 689 size = i->count;
690 orig_sz = size;
690 691
691 if (size) { 692 if (size) {
692 if (off) /* make it relative to the beginning of buffer */ 693 if (off) /* make it relative to the beginning of buffer */
@@ -713,6 +714,7 @@ static void pipe_advance(struct iov_iter *i, size_t size)
713 pipe->nrbufs--; 714 pipe->nrbufs--;
714 } 715 }
715 } 716 }
717 i->count -= orig_sz;
716} 718}
717 719
718void iov_iter_advance(struct iov_iter *i, size_t size) 720void iov_iter_advance(struct iov_iter *i, size_t size)
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 5464c8744ea9..e24388a863a7 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
64 if (!esize) { 64 if (!esize) {
65 /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 65 /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
66 * depending on if MOD equals 1. */ 66 * depending on if MOD equals 1. */
67 rp[0] = 1;
68 res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; 67 res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1;
68 if (res->nlimbs) {
69 if (mpi_resize(res, 1) < 0)
70 goto enomem;
71 rp = res->d;
72 rp[0] = 1;
73 }
69 res->sign = 0; 74 res->sign = 0;
70 goto leave; 75 goto leave;
71 } 76 }
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 5e51872b3fc1..fbdf87920093 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,11 @@
20#include <linux/uaccess.h> 20#include <linux/uaccess.h>
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23/*
24 * Note: test functions are marked noinline so that their names appear in
25 * reports.
26 */
27
23static noinline void __init kmalloc_oob_right(void) 28static noinline void __init kmalloc_oob_right(void)
24{ 29{
25 char *ptr; 30 char *ptr;
@@ -411,6 +416,29 @@ static noinline void __init copy_user_test(void)
411 kfree(kmem); 416 kfree(kmem);
412} 417}
413 418
419static noinline void __init use_after_scope_test(void)
420{
421 volatile char *volatile p;
422
423 pr_info("use-after-scope on int\n");
424 {
425 int local = 0;
426
427 p = (char *)&local;
428 }
429 p[0] = 1;
430 p[3] = 1;
431
432 pr_info("use-after-scope on array\n");
433 {
434 char local[1024] = {0};
435
436 p = local;
437 }
438 p[0] = 1;
439 p[1023] = 1;
440}
441
414static int __init kmalloc_tests_init(void) 442static int __init kmalloc_tests_init(void)
415{ 443{
416 kmalloc_oob_right(); 444 kmalloc_oob_right();
@@ -436,6 +464,7 @@ static int __init kmalloc_tests_init(void)
436 kasan_global_oob(); 464 kasan_global_oob();
437 ksize_unpoisons_memory(); 465 ksize_unpoisons_memory();
438 copy_user_test(); 466 copy_user_test();
467 use_after_scope_test();
439 return -EAGAIN; 468 return -EAGAIN;
440} 469}
441 470
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index cdcd25cb30fe..d4a6e4001512 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1426,11 +1426,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1426 1426
1427bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, 1427bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1428 unsigned long new_addr, unsigned long old_end, 1428 unsigned long new_addr, unsigned long old_end,
1429 pmd_t *old_pmd, pmd_t *new_pmd) 1429 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
1430{ 1430{
1431 spinlock_t *old_ptl, *new_ptl; 1431 spinlock_t *old_ptl, *new_ptl;
1432 pmd_t pmd; 1432 pmd_t pmd;
1433 struct mm_struct *mm = vma->vm_mm; 1433 struct mm_struct *mm = vma->vm_mm;
1434 bool force_flush = false;
1434 1435
1435 if ((old_addr & ~HPAGE_PMD_MASK) || 1436 if ((old_addr & ~HPAGE_PMD_MASK) ||
1436 (new_addr & ~HPAGE_PMD_MASK) || 1437 (new_addr & ~HPAGE_PMD_MASK) ||
@@ -1456,6 +1457,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1456 if (new_ptl != old_ptl) 1457 if (new_ptl != old_ptl)
1457 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 1458 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1458 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); 1459 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1460 if (pmd_present(pmd) && pmd_dirty(pmd))
1461 force_flush = true;
1459 VM_BUG_ON(!pmd_none(*new_pmd)); 1462 VM_BUG_ON(!pmd_none(*new_pmd));
1460 1463
1461 if (pmd_move_must_withdraw(new_ptl, old_ptl) && 1464 if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
@@ -1467,6 +1470,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1467 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); 1470 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1468 if (new_ptl != old_ptl) 1471 if (new_ptl != old_ptl)
1469 spin_unlock(new_ptl); 1472 spin_unlock(new_ptl);
1473 if (force_flush)
1474 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1475 else
1476 *need_flush = true;
1470 spin_unlock(old_ptl); 1477 spin_unlock(old_ptl);
1471 return true; 1478 return true;
1472 } 1479 }
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 70c009741aab..0e9505f66ec1 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -764,6 +764,25 @@ EXPORT_SYMBOL(__asan_storeN_noabort);
764void __asan_handle_no_return(void) {} 764void __asan_handle_no_return(void) {}
765EXPORT_SYMBOL(__asan_handle_no_return); 765EXPORT_SYMBOL(__asan_handle_no_return);
766 766
767/* Emitted by compiler to poison large objects when they go out of scope. */
768void __asan_poison_stack_memory(const void *addr, size_t size)
769{
770 /*
771 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
772 * by redzones, so we simply round up size to simplify logic.
773 */
774 kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
775 KASAN_USE_AFTER_SCOPE);
776}
777EXPORT_SYMBOL(__asan_poison_stack_memory);
778
779/* Emitted by compiler to unpoison large objects when they go into scope. */
780void __asan_unpoison_stack_memory(const void *addr, size_t size)
781{
782 kasan_unpoison_shadow(addr, size);
783}
784EXPORT_SYMBOL(__asan_unpoison_stack_memory);
785
767#ifdef CONFIG_MEMORY_HOTPLUG 786#ifdef CONFIG_MEMORY_HOTPLUG
768static int kasan_mem_notifier(struct notifier_block *nb, 787static int kasan_mem_notifier(struct notifier_block *nb,
769 unsigned long action, void *data) 788 unsigned long action, void *data)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index e5c2181fee6f..1c260e6b3b3c 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -21,6 +21,7 @@
21#define KASAN_STACK_MID 0xF2 21#define KASAN_STACK_MID 0xF2
22#define KASAN_STACK_RIGHT 0xF3 22#define KASAN_STACK_RIGHT 0xF3
23#define KASAN_STACK_PARTIAL 0xF4 23#define KASAN_STACK_PARTIAL 0xF4
24#define KASAN_USE_AFTER_SCOPE 0xF8
24 25
25/* Don't break randconfig/all*config builds */ 26/* Don't break randconfig/all*config builds */
26#ifndef KASAN_ABI_VERSION 27#ifndef KASAN_ABI_VERSION
@@ -53,6 +54,9 @@ struct kasan_global {
53#if KASAN_ABI_VERSION >= 4 54#if KASAN_ABI_VERSION >= 4
54 struct kasan_source_location *location; 55 struct kasan_source_location *location;
55#endif 56#endif
57#if KASAN_ABI_VERSION >= 5
58 char *odr_indicator;
59#endif
56}; 60};
57 61
58/** 62/**
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 24c1211fe9d5..073325aedc68 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -90,6 +90,9 @@ static void print_error_description(struct kasan_access_info *info)
90 case KASAN_KMALLOC_FREE: 90 case KASAN_KMALLOC_FREE:
91 bug_type = "use-after-free"; 91 bug_type = "use-after-free";
92 break; 92 break;
93 case KASAN_USE_AFTER_SCOPE:
94 bug_type = "use-after-scope";
95 break;
93 } 96 }
94 97
95 pr_err("BUG: KASAN: %s in %pS at addr %p\n", 98 pr_err("BUG: KASAN: %s in %pS at addr %p\n",
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 728d7790dc2d..87e1a7ca3846 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -103,6 +103,7 @@ static struct khugepaged_scan khugepaged_scan = {
103 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), 103 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
104}; 104};
105 105
106#ifdef CONFIG_SYSFS
106static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, 107static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
107 struct kobj_attribute *attr, 108 struct kobj_attribute *attr,
108 char *buf) 109 char *buf)
@@ -295,6 +296,7 @@ struct attribute_group khugepaged_attr_group = {
295 .attrs = khugepaged_attr, 296 .attrs = khugepaged_attr,
296 .name = "khugepaged", 297 .name = "khugepaged",
297}; 298};
299#endif /* CONFIG_SYSFS */
298 300
299#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) 301#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
300 302
diff --git a/mm/mlock.c b/mm/mlock.c
index 145a4258ddbc..cdbed8aaa426 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page)
190 */ 190 */
191 spin_lock_irq(zone_lru_lock(zone)); 191 spin_lock_irq(zone_lru_lock(zone));
192 192
193 nr_pages = hpage_nr_pages(page); 193 if (!TestClearPageMlocked(page)) {
194 if (!TestClearPageMlocked(page)) 194 /* Potentially, PTE-mapped THP: do not skip the rest PTEs */
195 nr_pages = 1;
195 goto unlock_out; 196 goto unlock_out;
197 }
196 198
199 nr_pages = hpage_nr_pages(page);
197 __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); 200 __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
198 201
199 if (__munlock_isolate_lru_page(page, true)) { 202 if (__munlock_isolate_lru_page(page, true)) {
diff --git a/mm/mremap.c b/mm/mremap.c
index da22ad2a5678..30d7d2482eea 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,11 +104,13 @@ static pte_t move_soft_dirty_pte(pte_t pte)
104static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, 104static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
105 unsigned long old_addr, unsigned long old_end, 105 unsigned long old_addr, unsigned long old_end,
106 struct vm_area_struct *new_vma, pmd_t *new_pmd, 106 struct vm_area_struct *new_vma, pmd_t *new_pmd,
107 unsigned long new_addr, bool need_rmap_locks) 107 unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
108{ 108{
109 struct mm_struct *mm = vma->vm_mm; 109 struct mm_struct *mm = vma->vm_mm;
110 pte_t *old_pte, *new_pte, pte; 110 pte_t *old_pte, *new_pte, pte;
111 spinlock_t *old_ptl, *new_ptl; 111 spinlock_t *old_ptl, *new_ptl;
112 bool force_flush = false;
113 unsigned long len = old_end - old_addr;
112 114
113 /* 115 /*
114 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma 116 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -146,7 +148,19 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
146 new_pte++, new_addr += PAGE_SIZE) { 148 new_pte++, new_addr += PAGE_SIZE) {
147 if (pte_none(*old_pte)) 149 if (pte_none(*old_pte))
148 continue; 150 continue;
151
149 pte = ptep_get_and_clear(mm, old_addr, old_pte); 152 pte = ptep_get_and_clear(mm, old_addr, old_pte);
153 /*
154 * If we are remapping a dirty PTE, make sure
155 * to flush TLB before we drop the PTL for the
156 * old PTE or we may race with page_mkclean().
157 *
158 * This check has to be done after we removed the
159 * old PTE from page tables or another thread may
160 * dirty it after the check and before the removal.
161 */
162 if (pte_present(pte) && pte_dirty(pte))
163 force_flush = true;
150 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 164 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
151 pte = move_soft_dirty_pte(pte); 165 pte = move_soft_dirty_pte(pte);
152 set_pte_at(mm, new_addr, new_pte, pte); 166 set_pte_at(mm, new_addr, new_pte, pte);
@@ -156,6 +170,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
156 if (new_ptl != old_ptl) 170 if (new_ptl != old_ptl)
157 spin_unlock(new_ptl); 171 spin_unlock(new_ptl);
158 pte_unmap(new_pte - 1); 172 pte_unmap(new_pte - 1);
173 if (force_flush)
174 flush_tlb_range(vma, old_end - len, old_end);
175 else
176 *need_flush = true;
159 pte_unmap_unlock(old_pte - 1, old_ptl); 177 pte_unmap_unlock(old_pte - 1, old_ptl);
160 if (need_rmap_locks) 178 if (need_rmap_locks)
161 drop_rmap_locks(vma); 179 drop_rmap_locks(vma);
@@ -201,13 +219,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
201 if (need_rmap_locks) 219 if (need_rmap_locks)
202 take_rmap_locks(vma); 220 take_rmap_locks(vma);
203 moved = move_huge_pmd(vma, old_addr, new_addr, 221 moved = move_huge_pmd(vma, old_addr, new_addr,
204 old_end, old_pmd, new_pmd); 222 old_end, old_pmd, new_pmd,
223 &need_flush);
205 if (need_rmap_locks) 224 if (need_rmap_locks)
206 drop_rmap_locks(vma); 225 drop_rmap_locks(vma);
207 if (moved) { 226 if (moved)
208 need_flush = true;
209 continue; 227 continue;
210 }
211 } 228 }
212 split_huge_pmd(vma, old_pmd, old_addr); 229 split_huge_pmd(vma, old_pmd, old_addr);
213 if (pmd_trans_unstable(old_pmd)) 230 if (pmd_trans_unstable(old_pmd))
@@ -220,11 +237,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
220 extent = next - new_addr; 237 extent = next - new_addr;
221 if (extent > LATENCY_LIMIT) 238 if (extent > LATENCY_LIMIT)
222 extent = LATENCY_LIMIT; 239 extent = LATENCY_LIMIT;
223 move_ptes(vma, old_pmd, old_addr, old_addr + extent, 240 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
224 new_vma, new_pmd, new_addr, need_rmap_locks); 241 new_pmd, new_addr, need_rmap_locks, &need_flush);
225 need_flush = true;
226 } 242 }
227 if (likely(need_flush)) 243 if (need_flush)
228 flush_tlb_range(vma, old_end-len, old_addr); 244 flush_tlb_range(vma, old_end-len, old_addr);
229 245
230 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 246 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
diff --git a/mm/truncate.c b/mm/truncate.c
index a01cce450a26..8d8c62d89e6d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -283,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
283 283
284 if (!trylock_page(page)) 284 if (!trylock_page(page))
285 continue; 285 continue;
286 WARN_ON(page_to_pgoff(page) != index); 286 WARN_ON(page_to_index(page) != index);
287 if (PageWriteback(page)) { 287 if (PageWriteback(page)) {
288 unlock_page(page); 288 unlock_page(page);
289 continue; 289 continue;
@@ -371,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
371 } 371 }
372 372
373 lock_page(page); 373 lock_page(page);
374 WARN_ON(page_to_pgoff(page) != index); 374 WARN_ON(page_to_index(page) != index);
375 wait_on_page_writeback(page); 375 wait_on_page_writeback(page);
376 truncate_inode_page(mapping, page); 376 truncate_inode_page(mapping, page);
377 unlock_page(page); 377 unlock_page(page);
@@ -492,7 +492,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
492 if (!trylock_page(page)) 492 if (!trylock_page(page))
493 continue; 493 continue;
494 494
495 WARN_ON(page_to_pgoff(page) != index); 495 WARN_ON(page_to_index(page) != index);
496 496
497 /* Middle of THP: skip */ 497 /* Middle of THP: skip */
498 if (PageTransTail(page)) { 498 if (PageTransTail(page)) {
@@ -612,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
612 } 612 }
613 613
614 lock_page(page); 614 lock_page(page);
615 WARN_ON(page_to_pgoff(page) != index); 615 WARN_ON(page_to_index(page) != index);
616 if (page->mapping != mapping) { 616 if (page->mapping != mapping) {
617 unlock_page(page); 617 unlock_page(page);
618 continue; 618 continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 76fda2268148..d75cdf360730 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2354,6 +2354,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
2354 } 2354 }
2355 } 2355 }
2356 2356
2357 cond_resched();
2358
2357 if (nr_reclaimed < nr_to_reclaim || scan_adjusted) 2359 if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
2358 continue; 2360 continue;
2359 2361
diff --git a/mm/workingset.c b/mm/workingset.c
index 617475f529f4..fb1f9183d89a 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -348,7 +348,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
348 shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); 348 shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
349 local_irq_enable(); 349 local_irq_enable();
350 350
351 if (memcg_kmem_enabled()) { 351 if (sc->memcg) {
352 pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid, 352 pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
353 LRU_ALL_FILE); 353 LRU_ALL_FILE);
354 } else { 354 } else {
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index e034afbd1bb0..08ce36147c4c 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -652,6 +652,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
652 batadv_softif_destroy_sysfs(hard_iface->soft_iface); 652 batadv_softif_destroy_sysfs(hard_iface->soft_iface);
653 } 653 }
654 654
655 hard_iface->soft_iface = NULL;
655 batadv_hardif_put(hard_iface); 656 batadv_hardif_put(hard_iface);
656 657
657out: 658out:
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 2333777f919d..8af1611b8ab2 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg)
837 primary_if = batadv_primary_if_get_selected(bat_priv); 837 primary_if = batadv_primary_if_get_selected(bat_priv);
838 if (unlikely(!primary_if)) { 838 if (unlikely(!primary_if)) {
839 err = BATADV_TP_REASON_DST_UNREACHABLE; 839 err = BATADV_TP_REASON_DST_UNREACHABLE;
840 tp_vars->reason = err;
840 goto out; 841 goto out;
841 } 842 }
842 843
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index d020299baba4..1904a93f47d5 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -1090,7 +1090,6 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
1090{ 1090{
1091 struct hci_conn *hcon; 1091 struct hci_conn *hcon;
1092 struct hci_dev *hdev; 1092 struct hci_dev *hdev;
1093 bdaddr_t *src = BDADDR_ANY;
1094 int n; 1093 int n;
1095 1094
1096 n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", 1095 n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
@@ -1101,7 +1100,8 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
1101 if (n < 7) 1100 if (n < 7)
1102 return -EINVAL; 1101 return -EINVAL;
1103 1102
1104 hdev = hci_get_route(addr, src); 1103 /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
1104 hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
1105 if (!hdev) 1105 if (!hdev)
1106 return -ENOENT; 1106 return -ENOENT;
1107 1107
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3809617aa98d..dc59eae54717 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -613,7 +613,7 @@ int hci_conn_del(struct hci_conn *conn)
613 return 0; 613 return 0;
614} 614}
615 615
616struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) 616struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
617{ 617{
618 int use_src = bacmp(src, BDADDR_ANY); 618 int use_src = bacmp(src, BDADDR_ANY);
619 struct hci_dev *hdev = NULL, *d; 619 struct hci_dev *hdev = NULL, *d;
@@ -634,7 +634,29 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
634 */ 634 */
635 635
636 if (use_src) { 636 if (use_src) {
637 if (!bacmp(&d->bdaddr, src)) { 637 bdaddr_t id_addr;
638 u8 id_addr_type;
639
640 if (src_type == BDADDR_BREDR) {
641 if (!lmp_bredr_capable(d))
642 continue;
643 bacpy(&id_addr, &d->bdaddr);
644 id_addr_type = BDADDR_BREDR;
645 } else {
646 if (!lmp_le_capable(d))
647 continue;
648
649 hci_copy_identity_address(d, &id_addr,
650 &id_addr_type);
651
652 /* Convert from HCI to three-value type */
653 if (id_addr_type == ADDR_LE_DEV_PUBLIC)
654 id_addr_type = BDADDR_LE_PUBLIC;
655 else
656 id_addr_type = BDADDR_LE_RANDOM;
657 }
658
659 if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
638 hdev = d; break; 660 hdev = d; break;
639 } 661 }
640 } else { 662 } else {
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index d4cad29b033f..577f1c01454a 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7060,7 +7060,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7060 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, 7060 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7061 dst_type, __le16_to_cpu(psm)); 7061 dst_type, __le16_to_cpu(psm));
7062 7062
7063 hdev = hci_get_route(dst, &chan->src); 7063 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7064 if (!hdev) 7064 if (!hdev)
7065 return -EHOSTUNREACH; 7065 return -EHOSTUNREACH;
7066 7066
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 8e385a0ae60e..2f2cb5e27cdd 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -178,7 +178,7 @@ static void rfcomm_reparent_device(struct rfcomm_dev *dev)
178 struct hci_dev *hdev; 178 struct hci_dev *hdev;
179 struct hci_conn *conn; 179 struct hci_conn *conn;
180 180
181 hdev = hci_get_route(&dev->dst, &dev->src); 181 hdev = hci_get_route(&dev->dst, &dev->src, BDADDR_BREDR);
182 if (!hdev) 182 if (!hdev)
183 return; 183 return;
184 184
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f52bcbf2e58c..3125ce670c2f 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -219,7 +219,7 @@ static int sco_connect(struct sock *sk)
219 219
220 BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); 220 BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
221 221
222 hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src); 222 hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
223 if (!hdev) 223 if (!hdev)
224 return -EHOSTUNREACH; 224 return -EHOSTUNREACH;
225 225
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8e999ffdf28b..436a7537e6a9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -77,7 +77,7 @@
77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ 77 (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) 78 (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
79 79
80#define CAN_BCM_VERSION "20160617" 80#define CAN_BCM_VERSION "20161123"
81 81
82MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); 82MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
83MODULE_LICENSE("Dual BSD/GPL"); 83MODULE_LICENSE("Dual BSD/GPL");
@@ -109,8 +109,9 @@ struct bcm_op {
109 u32 count; 109 u32 count;
110 u32 nframes; 110 u32 nframes;
111 u32 currframe; 111 u32 currframe;
112 struct canfd_frame *frames; 112 /* void pointers to arrays of struct can[fd]_frame */
113 struct canfd_frame *last_frames; 113 void *frames;
114 void *last_frames;
114 struct canfd_frame sframe; 115 struct canfd_frame sframe;
115 struct canfd_frame last_sframe; 116 struct canfd_frame last_sframe;
116 struct sock *sk; 117 struct sock *sk;
@@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
681 682
682 if (op->flags & RX_FILTER_ID) { 683 if (op->flags & RX_FILTER_ID) {
683 /* the easiest case */ 684 /* the easiest case */
684 bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); 685 bcm_rx_update_and_send(op, op->last_frames, rxframe);
685 goto rx_starttimer; 686 goto rx_starttimer;
686 } 687 }
687 688
@@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1068 1069
1069 if (msg_head->nframes) { 1070 if (msg_head->nframes) {
1070 /* update CAN frames content */ 1071 /* update CAN frames content */
1071 err = memcpy_from_msg((u8 *)op->frames, msg, 1072 err = memcpy_from_msg(op->frames, msg,
1072 msg_head->nframes * op->cfsiz); 1073 msg_head->nframes * op->cfsiz);
1073 if (err < 0) 1074 if (err < 0)
1074 return err; 1075 return err;
@@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1118 } 1119 }
1119 1120
1120 if (msg_head->nframes) { 1121 if (msg_head->nframes) {
1121 err = memcpy_from_msg((u8 *)op->frames, msg, 1122 err = memcpy_from_msg(op->frames, msg,
1122 msg_head->nframes * op->cfsiz); 1123 msg_head->nframes * op->cfsiz);
1123 if (err < 0) { 1124 if (err < 0) {
1124 if (op->frames != &op->sframe) 1125 if (op->frames != &op->sframe)
@@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1163 /* check flags */ 1164 /* check flags */
1164 1165
1165 if (op->flags & RX_RTR_FRAME) { 1166 if (op->flags & RX_RTR_FRAME) {
1167 struct canfd_frame *frame0 = op->frames;
1166 1168
1167 /* no timers in RTR-mode */ 1169 /* no timers in RTR-mode */
1168 hrtimer_cancel(&op->thrtimer); 1170 hrtimer_cancel(&op->thrtimer);
@@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1174 * prevent a full-load-loopback-test ... ;-] 1176 * prevent a full-load-loopback-test ... ;-]
1175 */ 1177 */
1176 if ((op->flags & TX_CP_CAN_ID) || 1178 if ((op->flags & TX_CP_CAN_ID) ||
1177 (op->frames[0].can_id == op->can_id)) 1179 (frame0->can_id == op->can_id))
1178 op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG; 1180 frame0->can_id = op->can_id & ~CAN_RTR_FLAG;
1179 1181
1180 } else { 1182 } else {
1181 if (op->flags & SETTIMER) { 1183 if (op->flags & SETTIMER) {
@@ -1549,24 +1551,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1549 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; 1551 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1550 struct sock *sk = sock->sk; 1552 struct sock *sk = sock->sk;
1551 struct bcm_sock *bo = bcm_sk(sk); 1553 struct bcm_sock *bo = bcm_sk(sk);
1554 int ret = 0;
1552 1555
1553 if (len < sizeof(*addr)) 1556 if (len < sizeof(*addr))
1554 return -EINVAL; 1557 return -EINVAL;
1555 1558
1556 if (bo->bound) 1559 lock_sock(sk);
1557 return -EISCONN; 1560
1561 if (bo->bound) {
1562 ret = -EISCONN;
1563 goto fail;
1564 }
1558 1565
1559 /* bind a device to this socket */ 1566 /* bind a device to this socket */
1560 if (addr->can_ifindex) { 1567 if (addr->can_ifindex) {
1561 struct net_device *dev; 1568 struct net_device *dev;
1562 1569
1563 dev = dev_get_by_index(&init_net, addr->can_ifindex); 1570 dev = dev_get_by_index(&init_net, addr->can_ifindex);
1564 if (!dev) 1571 if (!dev) {
1565 return -ENODEV; 1572 ret = -ENODEV;
1566 1573 goto fail;
1574 }
1567 if (dev->type != ARPHRD_CAN) { 1575 if (dev->type != ARPHRD_CAN) {
1568 dev_put(dev); 1576 dev_put(dev);
1569 return -ENODEV; 1577 ret = -ENODEV;
1578 goto fail;
1570 } 1579 }
1571 1580
1572 bo->ifindex = dev->ifindex; 1581 bo->ifindex = dev->ifindex;
@@ -1577,17 +1586,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1577 bo->ifindex = 0; 1586 bo->ifindex = 0;
1578 } 1587 }
1579 1588
1580 bo->bound = 1;
1581
1582 if (proc_dir) { 1589 if (proc_dir) {
1583 /* unique socket address as filename */ 1590 /* unique socket address as filename */
1584 sprintf(bo->procname, "%lu", sock_i_ino(sk)); 1591 sprintf(bo->procname, "%lu", sock_i_ino(sk));
1585 bo->bcm_proc_read = proc_create_data(bo->procname, 0644, 1592 bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1586 proc_dir, 1593 proc_dir,
1587 &bcm_proc_fops, sk); 1594 &bcm_proc_fops, sk);
1595 if (!bo->bcm_proc_read) {
1596 ret = -ENOMEM;
1597 goto fail;
1598 }
1588 } 1599 }
1589 1600
1590 return 0; 1601 bo->bound = 1;
1602
1603fail:
1604 release_sock(sk);
1605
1606 return ret;
1591} 1607}
1592 1608
1593static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 1609static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/net/core/dev.c b/net/core/dev.c
index 820bac239738..6666b28b6815 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
1766 1766
1767int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1767int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1768{ 1768{
1769 if (skb_orphan_frags(skb, GFP_ATOMIC) || 1769 int ret = ____dev_forward_skb(dev, skb);
1770 unlikely(!is_skb_forwardable(dev, skb))) {
1771 atomic_long_inc(&dev->rx_dropped);
1772 kfree_skb(skb);
1773 return NET_RX_DROP;
1774 }
1775 1770
1776 skb_scrub_packet(skb, true); 1771 if (likely(!ret)) {
1777 skb->priority = 0; 1772 skb->protocol = eth_type_trans(skb, dev);
1778 skb->protocol = eth_type_trans(skb, dev); 1773 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1779 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 1774 }
1780 1775
1781 return 0; 1776 return ret;
1782} 1777}
1783EXPORT_SYMBOL_GPL(__dev_forward_skb); 1778EXPORT_SYMBOL_GPL(__dev_forward_skb);
1784 1779
@@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb)
2484 goto out; 2479 goto out;
2485 } 2480 }
2486 2481
2487 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2482 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2488out_set_summed: 2483out_set_summed:
2489 skb->ip_summed = CHECKSUM_NONE; 2484 skb->ip_summed = CHECKSUM_NONE;
2490out: 2485out:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 977489820eb9..047a1752ece1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2479,6 +2479,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
2479 case ETHTOOL_GET_TS_INFO: 2479 case ETHTOOL_GET_TS_INFO:
2480 case ETHTOOL_GEEE: 2480 case ETHTOOL_GEEE:
2481 case ETHTOOL_GTUNABLE: 2481 case ETHTOOL_GTUNABLE:
2482 case ETHTOOL_GLINKSETTINGS:
2482 break; 2483 break;
2483 default: 2484 default:
2484 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2485 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
diff --git a/net/core/filter.c b/net/core/filter.c
index 00351cdf7d0c..b391209838ef 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1628 return dev_forward_skb(dev, skb); 1628 return dev_forward_skb(dev, skb);
1629} 1629}
1630 1630
1631static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
1632 struct sk_buff *skb)
1633{
1634 int ret = ____dev_forward_skb(dev, skb);
1635
1636 if (likely(!ret)) {
1637 skb->dev = dev;
1638 ret = netif_rx(skb);
1639 }
1640
1641 return ret;
1642}
1643
1631static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) 1644static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1632{ 1645{
1633 int ret; 1646 int ret;
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1647 return ret; 1660 return ret;
1648} 1661}
1649 1662
1663static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
1664 u32 flags)
1665{
1666 /* skb->mac_len is not set on normal egress */
1667 unsigned int mlen = skb->network_header - skb->mac_header;
1668
1669 __skb_pull(skb, mlen);
1670
1671 /* At ingress, the mac header has already been pulled once.
1672 * At egress, skb_pospull_rcsum has to be done in case that
1673 * the skb is originated from ingress (i.e. a forwarded skb)
1674 * to ensure that rcsum starts at net header.
1675 */
1676 if (!skb_at_tc_ingress(skb))
1677 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1678 skb_pop_mac_header(skb);
1679 skb_reset_mac_len(skb);
1680 return flags & BPF_F_INGRESS ?
1681 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
1682}
1683
1684static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
1685 u32 flags)
1686{
1687 bpf_push_mac_rcsum(skb);
1688 return flags & BPF_F_INGRESS ?
1689 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1690}
1691
1692static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
1693 u32 flags)
1694{
1695 switch (dev->type) {
1696 case ARPHRD_TUNNEL:
1697 case ARPHRD_TUNNEL6:
1698 case ARPHRD_SIT:
1699 case ARPHRD_IPGRE:
1700 case ARPHRD_VOID:
1701 case ARPHRD_NONE:
1702 return __bpf_redirect_no_mac(skb, dev, flags);
1703 default:
1704 return __bpf_redirect_common(skb, dev, flags);
1705 }
1706}
1707
1650BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) 1708BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
1651{ 1709{
1652 struct net_device *dev; 1710 struct net_device *dev;
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
1675 return -ENOMEM; 1733 return -ENOMEM;
1676 } 1734 }
1677 1735
1678 bpf_push_mac_rcsum(clone); 1736 return __bpf_redirect(clone, dev, flags);
1679
1680 return flags & BPF_F_INGRESS ?
1681 __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
1682} 1737}
1683 1738
1684static const struct bpf_func_proto bpf_clone_redirect_proto = { 1739static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb)
1722 return -EINVAL; 1777 return -EINVAL;
1723 } 1778 }
1724 1779
1725 bpf_push_mac_rcsum(skb); 1780 return __bpf_redirect(skb, dev, ri->flags);
1726
1727 return ri->flags & BPF_F_INGRESS ?
1728 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
1729} 1781}
1730 1782
1731static const struct bpf_func_proto bpf_redirect_proto = { 1783static const struct bpf_func_proto bpf_redirect_proto = {
diff --git a/net/core/flow.c b/net/core/flow.c
index 3937b1b68d5b..18e8893d4be5 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -95,7 +95,6 @@ static void flow_cache_gc_task(struct work_struct *work)
95 list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) { 95 list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
96 flow_entry_kill(fce, xfrm); 96 flow_entry_kill(fce, xfrm);
97 atomic_dec(&xfrm->flow_cache_gc_count); 97 atomic_dec(&xfrm->flow_cache_gc_count);
98 WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
99 } 98 }
100} 99}
101 100
@@ -236,9 +235,8 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
236 if (fcp->hash_count > fc->high_watermark) 235 if (fcp->hash_count > fc->high_watermark)
237 flow_cache_shrink(fc, fcp); 236 flow_cache_shrink(fc, fcp);
238 237
239 if (fcp->hash_count > 2 * fc->high_watermark || 238 if (atomic_read(&net->xfrm.flow_cache_gc_count) >
240 atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) { 239 2 * num_online_cpus() * fc->high_watermark) {
241 atomic_inc(&net->xfrm.flow_cache_genid);
242 flo = ERR_PTR(-ENOBUFS); 240 flo = ERR_PTR(-ENOBUFS);
243 goto ret_object; 241 goto ret_object;
244 } 242 }
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ab193e5def07..c6d8207ffa7e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
122 struct flow_dissector_key_keyid *key_keyid; 122 struct flow_dissector_key_keyid *key_keyid;
123 bool skip_vlan = false; 123 bool skip_vlan = false;
124 u8 ip_proto = 0; 124 u8 ip_proto = 0;
125 bool ret = false; 125 bool ret;
126 126
127 if (!data) { 127 if (!data) {
128 data = skb->data; 128 data = skb->data;
@@ -549,12 +549,17 @@ ip_proto_again:
549out_good: 549out_good:
550 ret = true; 550 ret = true;
551 551
552out_bad: 552 key_control->thoff = (u16)nhoff;
553out:
553 key_basic->n_proto = proto; 554 key_basic->n_proto = proto;
554 key_basic->ip_proto = ip_proto; 555 key_basic->ip_proto = ip_proto;
555 key_control->thoff = (u16)nhoff;
556 556
557 return ret; 557 return ret;
558
559out_bad:
560 ret = false;
561 key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
562 goto out;
558} 563}
559EXPORT_SYMBOL(__skb_flow_dissect); 564EXPORT_SYMBOL(__skb_flow_dissect);
560 565
@@ -1008,4 +1013,4 @@ static int __init init_default_flow_dissectors(void)
1008 return 0; 1013 return 0;
1009} 1014}
1010 1015
1011late_initcall_sync(init_default_flow_dissectors); 1016core_initcall(init_default_flow_dissectors);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index f61c0e02a413..7001da910c6b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -219,6 +219,8 @@ int peernet2id_alloc(struct net *net, struct net *peer)
219 bool alloc; 219 bool alloc;
220 int id; 220 int id;
221 221
222 if (atomic_read(&net->count) == 0)
223 return NETNSA_NSID_NOT_ASSIGNED;
222 spin_lock_irqsave(&net->nsid_lock, flags); 224 spin_lock_irqsave(&net->nsid_lock, flags);
223 alloc = atomic_read(&peer->count) == 0 ? false : true; 225 alloc = atomic_read(&peer->count) == 0 ? false : true;
224 id = __peernet2id_alloc(net, peer, &alloc); 226 id = __peernet2id_alloc(net, peer, &alloc);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index fb7348f13501..a6196cf844f6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype)
275 275
276 rtnl_msg_handlers[protocol][msgindex].doit = NULL; 276 rtnl_msg_handlers[protocol][msgindex].doit = NULL;
277 rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; 277 rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
278 rtnl_msg_handlers[protocol][msgindex].calcit = NULL;
278 279
279 return 0; 280 return 0;
280} 281}
@@ -839,18 +840,20 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
839 if (dev->dev.parent && dev_is_pci(dev->dev.parent) && 840 if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
840 (ext_filter_mask & RTEXT_FILTER_VF)) { 841 (ext_filter_mask & RTEXT_FILTER_VF)) {
841 int num_vfs = dev_num_vf(dev->dev.parent); 842 int num_vfs = dev_num_vf(dev->dev.parent);
842 size_t size = nla_total_size(sizeof(struct nlattr)); 843 size_t size = nla_total_size(0);
843 size += nla_total_size(num_vfs * sizeof(struct nlattr));
844 size += num_vfs * 844 size += num_vfs *
845 (nla_total_size(sizeof(struct ifla_vf_mac)) + 845 (nla_total_size(0) +
846 nla_total_size(MAX_VLAN_LIST_LEN * 846 nla_total_size(sizeof(struct ifla_vf_mac)) +
847 sizeof(struct nlattr)) + 847 nla_total_size(sizeof(struct ifla_vf_vlan)) +
848 nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
848 nla_total_size(MAX_VLAN_LIST_LEN * 849 nla_total_size(MAX_VLAN_LIST_LEN *
849 sizeof(struct ifla_vf_vlan_info)) + 850 sizeof(struct ifla_vf_vlan_info)) +
850 nla_total_size(sizeof(struct ifla_vf_spoofchk)) + 851 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
852 nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
851 nla_total_size(sizeof(struct ifla_vf_rate)) + 853 nla_total_size(sizeof(struct ifla_vf_rate)) +
852 nla_total_size(sizeof(struct ifla_vf_link_state)) + 854 nla_total_size(sizeof(struct ifla_vf_link_state)) +
853 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + 855 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
856 nla_total_size(0) + /* nest IFLA_VF_STATS */
854 /* IFLA_VF_STATS_RX_PACKETS */ 857 /* IFLA_VF_STATS_RX_PACKETS */
855 nla_total_size_64bit(sizeof(__u64)) + 858 nla_total_size_64bit(sizeof(__u64)) +
856 /* IFLA_VF_STATS_TX_PACKETS */ 859 /* IFLA_VF_STATS_TX_PACKETS */
@@ -898,7 +901,8 @@ static size_t rtnl_port_size(const struct net_device *dev,
898 901
899static size_t rtnl_xdp_size(const struct net_device *dev) 902static size_t rtnl_xdp_size(const struct net_device *dev)
900{ 903{
901 size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */ 904 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
905 nla_total_size(1); /* XDP_ATTACHED */
902 906
903 if (!dev->netdev_ops->ndo_xdp) 907 if (!dev->netdev_ops->ndo_xdp)
904 return 0; 908 return 0;
@@ -927,8 +931,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
927 + nla_total_size(4) /* IFLA_PROMISCUITY */ 931 + nla_total_size(4) /* IFLA_PROMISCUITY */
928 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ 932 + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
929 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ 933 + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
930 + nla_total_size(4) /* IFLA_MAX_GSO_SEGS */ 934 + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
931 + nla_total_size(4) /* IFLA_MAX_GSO_SIZE */ 935 + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
932 + nla_total_size(1) /* IFLA_OPERSTATE */ 936 + nla_total_size(1) /* IFLA_OPERSTATE */
933 + nla_total_size(1) /* IFLA_LINKMODE */ 937 + nla_total_size(1) /* IFLA_LINKMODE */
934 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 938 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
@@ -1605,7 +1609,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1605 head = &net->dev_index_head[h]; 1609 head = &net->dev_index_head[h];
1606 hlist_for_each_entry(dev, head, index_hlist) { 1610 hlist_for_each_entry(dev, head, index_hlist) {
1607 if (link_dump_filtered(dev, master_idx, kind_ops)) 1611 if (link_dump_filtered(dev, master_idx, kind_ops))
1608 continue; 1612 goto cont;
1609 if (idx < s_idx) 1613 if (idx < s_idx)
1610 goto cont; 1614 goto cont;
1611 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1615 err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -2733,7 +2737,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
2733 ext_filter_mask)); 2737 ext_filter_mask));
2734 } 2738 }
2735 2739
2736 return min_ifinfo_dump_size; 2740 return nlmsg_total_size(min_ifinfo_dump_size);
2737} 2741}
2738 2742
2739static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) 2743static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
@@ -2848,7 +2852,10 @@ nla_put_failure:
2848 2852
2849static inline size_t rtnl_fdb_nlmsg_size(void) 2853static inline size_t rtnl_fdb_nlmsg_size(void)
2850{ 2854{
2851 return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); 2855 return NLMSG_ALIGN(sizeof(struct ndmsg)) +
2856 nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
2857 nla_total_size(sizeof(u16)) + /* NDA_VLAN */
2858 0;
2852} 2859}
2853 2860
2854static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, 2861static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
diff --git a/net/core/sock.c b/net/core/sock.c
index c73e28fc9c2a..00a074dbfe9b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
453EXPORT_SYMBOL(sock_queue_rcv_skb); 453EXPORT_SYMBOL(sock_queue_rcv_skb);
454 454
455int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, 455int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
456 const int nested, unsigned int trim_cap) 456 const int nested, unsigned int trim_cap, bool refcounted)
457{ 457{
458 int rc = NET_RX_SUCCESS; 458 int rc = NET_RX_SUCCESS;
459 459
@@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
487 487
488 bh_unlock_sock(sk); 488 bh_unlock_sock(sk);
489out: 489out:
490 sock_put(sk); 490 if (refcounted)
491 sock_put(sk);
491 return rc; 492 return rc;
492discard_and_relse: 493discard_and_relse:
493 kfree_skb(skb); 494 kfree_skb(skb);
@@ -714,7 +715,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
714 val = min_t(u32, val, sysctl_wmem_max); 715 val = min_t(u32, val, sysctl_wmem_max);
715set_sndbuf: 716set_sndbuf:
716 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 717 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
717 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); 718 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
718 /* Wake up sending tasks if we upped the value. */ 719 /* Wake up sending tasks if we upped the value. */
719 sk->sk_write_space(sk); 720 sk->sk_write_space(sk);
720 break; 721 break;
@@ -750,7 +751,7 @@ set_rcvbuf:
750 * returning the value we actually used in getsockopt 751 * returning the value we actually used in getsockopt
751 * is the most desirable behavior. 752 * is the most desirable behavior.
752 */ 753 */
753 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); 754 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
754 break; 755 break;
755 756
756 case SO_RCVBUFFORCE: 757 case SO_RCVBUFFORCE:
@@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1543 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 1544 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1544 1545
1545 newsk->sk_err = 0; 1546 newsk->sk_err = 0;
1547 newsk->sk_err_soft = 0;
1546 newsk->sk_priority = 0; 1548 newsk->sk_priority = 0;
1547 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1549 newsk->sk_incoming_cpu = raw_smp_processor_id();
1548 atomic64_set(&newsk->sk_cookie, 0); 1550 atomic64_set(&newsk->sk_cookie, 0);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 345a3aeb8c7e..edbe59d203ef 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
235{ 235{
236 const struct iphdr *iph = (struct iphdr *)skb->data; 236 const struct iphdr *iph = (struct iphdr *)skb->data;
237 const u8 offset = iph->ihl << 2; 237 const u8 offset = iph->ihl << 2;
238 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 238 const struct dccp_hdr *dh;
239 struct dccp_sock *dp; 239 struct dccp_sock *dp;
240 struct inet_sock *inet; 240 struct inet_sock *inet;
241 const int type = icmp_hdr(skb)->type; 241 const int type = icmp_hdr(skb)->type;
@@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
245 int err; 245 int err;
246 struct net *net = dev_net(skb->dev); 246 struct net *net = dev_net(skb->dev);
247 247
248 if (skb->len < offset + sizeof(*dh) || 248 /* Only need dccph_dport & dccph_sport which are the first
249 skb->len < offset + __dccp_basic_hdr_len(dh)) { 249 * 4 bytes in dccp header.
250 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); 250 * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
251 return; 251 */
252 } 252 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
253 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
254 dh = (struct dccp_hdr *)(skb->data + offset);
253 255
254 sk = __inet_lookup_established(net, &dccp_hashinfo, 256 sk = __inet_lookup_established(net, &dccp_hashinfo,
255 iph->daddr, dh->dccph_dport, 257 iph->daddr, dh->dccph_dport,
@@ -698,6 +700,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
698{ 700{
699 const struct dccp_hdr *dh; 701 const struct dccp_hdr *dh;
700 unsigned int cscov; 702 unsigned int cscov;
703 u8 dccph_doff;
701 704
702 if (skb->pkt_type != PACKET_HOST) 705 if (skb->pkt_type != PACKET_HOST)
703 return 1; 706 return 1;
@@ -719,18 +722,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
719 /* 722 /*
720 * If P.Data Offset is too small for packet type, drop packet and return 723 * If P.Data Offset is too small for packet type, drop packet and return
721 */ 724 */
722 if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { 725 dccph_doff = dh->dccph_doff;
723 DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); 726 if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
727 DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
724 return 1; 728 return 1;
725 } 729 }
726 /* 730 /*
727 * If P.Data Offset is too too large for packet, drop packet and return 731 * If P.Data Offset is too too large for packet, drop packet and return
728 */ 732 */
729 if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { 733 if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
730 DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); 734 DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
731 return 1; 735 return 1;
732 } 736 }
733 737 dh = dccp_hdr(skb);
734 /* 738 /*
735 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet 739 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
736 * has short sequence numbers), drop packet and return 740 * has short sequence numbers), drop packet and return
@@ -868,7 +872,7 @@ lookup:
868 goto discard_and_relse; 872 goto discard_and_relse;
869 nf_reset(skb); 873 nf_reset(skb);
870 874
871 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); 875 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
872 876
873no_dccp_socket: 877no_dccp_socket:
874 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 878 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3828f94b234c..715e5d1dc107 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
70 u8 type, u8 code, int offset, __be32 info) 70 u8 type, u8 code, int offset, __be32 info)
71{ 71{
72 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; 72 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
73 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 73 const struct dccp_hdr *dh;
74 struct dccp_sock *dp; 74 struct dccp_sock *dp;
75 struct ipv6_pinfo *np; 75 struct ipv6_pinfo *np;
76 struct sock *sk; 76 struct sock *sk;
@@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
78 __u64 seq; 78 __u64 seq;
79 struct net *net = dev_net(skb->dev); 79 struct net *net = dev_net(skb->dev);
80 80
81 if (skb->len < offset + sizeof(*dh) || 81 /* Only need dccph_dport & dccph_sport which are the first
82 skb->len < offset + __dccp_basic_hdr_len(dh)) { 82 * 4 bytes in dccp header.
83 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), 83 * Our caller (icmpv6_notify()) already pulled 8 bytes for us.
84 ICMP6_MIB_INERRORS); 84 */
85 return; 85 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
86 } 86 BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
87 dh = (struct dccp_hdr *)(skb->data + offset);
87 88
88 sk = __inet6_lookup_established(net, &dccp_hashinfo, 89 sk = __inet6_lookup_established(net, &dccp_hashinfo,
89 &hdr->daddr, dh->dccph_dport, 90 &hdr->daddr, dh->dccph_dport,
@@ -738,7 +739,8 @@ lookup:
738 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 739 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
739 goto discard_and_relse; 740 goto discard_and_relse;
740 741
741 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; 742 return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
743 refcounted) ? -1 : 0;
742 744
743no_dccp_socket: 745no_dccp_socket:
744 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 746 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
@@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
956 .getsockopt = ipv6_getsockopt, 958 .getsockopt = ipv6_getsockopt,
957 .addr2sockaddr = inet6_csk_addr2sockaddr, 959 .addr2sockaddr = inet6_csk_addr2sockaddr,
958 .sockaddr_len = sizeof(struct sockaddr_in6), 960 .sockaddr_len = sizeof(struct sockaddr_in6),
961 .bind_conflict = inet6_csk_bind_conflict,
959#ifdef CONFIG_COMPAT 962#ifdef CONFIG_COMPAT
960 .compat_setsockopt = compat_ipv6_setsockopt, 963 .compat_setsockopt = compat_ipv6_setsockopt,
961 .compat_getsockopt = compat_ipv6_getsockopt, 964 .compat_getsockopt = compat_ipv6_getsockopt,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 41e65804ddf5..9fe25bf63296 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout)
1009 __kfree_skb(skb); 1009 __kfree_skb(skb);
1010 } 1010 }
1011 1011
1012 /* If socket has been already reset kill it. */
1013 if (sk->sk_state == DCCP_CLOSED)
1014 goto adjudge_to_death;
1015
1012 if (data_was_unread) { 1016 if (data_was_unread) {
1013 /* Unread data was tossed, send an appropriate Reset Code */ 1017 /* Unread data was tossed, send an appropriate Reset Code */
1014 DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); 1018 DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index a6902c1e2f28..7899919cd9f0 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -233,6 +233,8 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
233 genphy_read_status(phydev); 233 genphy_read_status(phydev);
234 if (ds->ops->adjust_link) 234 if (ds->ops->adjust_link)
235 ds->ops->adjust_link(ds, port, phydev); 235 ds->ops->adjust_link(ds, port, phydev);
236
237 put_device(&phydev->mdio.dev);
236 } 238 }
237 239
238 return 0; 240 return 0;
@@ -504,15 +506,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
504 506
505void dsa_cpu_dsa_destroy(struct device_node *port_dn) 507void dsa_cpu_dsa_destroy(struct device_node *port_dn)
506{ 508{
507 struct phy_device *phydev; 509 if (of_phy_is_fixed_link(port_dn))
508 510 of_phy_deregister_fixed_link(port_dn);
509 if (of_phy_is_fixed_link(port_dn)) {
510 phydev = of_phy_find_device(port_dn);
511 if (phydev) {
512 phy_device_free(phydev);
513 fixed_phy_unregister(phydev);
514 }
515 }
516} 511}
517 512
518static void dsa_switch_destroy(struct dsa_switch *ds) 513static void dsa_switch_destroy(struct dsa_switch *ds)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index f8a7d9aab437..5fff951a0a49 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -28,8 +28,10 @@ static struct dsa_switch_tree *dsa_get_dst(u32 tree)
28 struct dsa_switch_tree *dst; 28 struct dsa_switch_tree *dst;
29 29
30 list_for_each_entry(dst, &dsa_switch_trees, list) 30 list_for_each_entry(dst, &dsa_switch_trees, list)
31 if (dst->tree == tree) 31 if (dst->tree == tree) {
32 kref_get(&dst->refcount);
32 return dst; 33 return dst;
34 }
33 return NULL; 35 return NULL;
34} 36}
35 37
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6b1282c006b1..30e2e21d7619 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1125,7 +1125,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
1125 p->phy_interface = mode; 1125 p->phy_interface = mode;
1126 1126
1127 phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); 1127 phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
1128 if (of_phy_is_fixed_link(port_dn)) { 1128 if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
1129 /* In the case of a fixed PHY, the DT node associated 1129 /* In the case of a fixed PHY, the DT node associated
1130 * to the fixed PHY is the Port DT node 1130 * to the fixed PHY is the Port DT node
1131 */ 1131 */
@@ -1135,7 +1135,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
1135 return ret; 1135 return ret;
1136 } 1136 }
1137 phy_is_fixed = true; 1137 phy_is_fixed = true;
1138 phy_dn = port_dn; 1138 phy_dn = of_node_get(port_dn);
1139 } 1139 }
1140 1140
1141 if (ds->ops->get_phy_flags) 1141 if (ds->ops->get_phy_flags)
@@ -1154,6 +1154,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
1154 ret = dsa_slave_phy_connect(p, slave_dev, phy_id); 1154 ret = dsa_slave_phy_connect(p, slave_dev, phy_id);
1155 if (ret) { 1155 if (ret) {
1156 netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret); 1156 netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret);
1157 of_node_put(phy_dn);
1157 return ret; 1158 return ret;
1158 } 1159 }
1159 } else { 1160 } else {
@@ -1162,6 +1163,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
1162 phy_flags, 1163 phy_flags,
1163 p->phy_interface); 1164 p->phy_interface);
1164 } 1165 }
1166
1167 of_node_put(phy_dn);
1165 } 1168 }
1166 1169
1167 if (p->phy && phy_is_fixed) 1170 if (p->phy && phy_is_fixed)
@@ -1174,6 +1177,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
1174 ret = dsa_slave_phy_connect(p, slave_dev, p->port); 1177 ret = dsa_slave_phy_connect(p, slave_dev, p->port);
1175 if (ret) { 1178 if (ret) {
1176 netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret); 1179 netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
1180 if (phy_is_fixed)
1181 of_phy_deregister_fixed_link(port_dn);
1177 return ret; 1182 return ret;
1178 } 1183 }
1179 } 1184 }
@@ -1289,10 +1294,18 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
1289void dsa_slave_destroy(struct net_device *slave_dev) 1294void dsa_slave_destroy(struct net_device *slave_dev)
1290{ 1295{
1291 struct dsa_slave_priv *p = netdev_priv(slave_dev); 1296 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1297 struct dsa_switch *ds = p->parent;
1298 struct device_node *port_dn;
1299
1300 port_dn = ds->ports[p->port].dn;
1292 1301
1293 netif_carrier_off(slave_dev); 1302 netif_carrier_off(slave_dev);
1294 if (p->phy) 1303 if (p->phy) {
1295 phy_disconnect(p->phy); 1304 phy_disconnect(p->phy);
1305
1306 if (of_phy_is_fixed_link(port_dn))
1307 of_phy_deregister_fixed_link(port_dn);
1308 }
1296 unregister_netdev(slave_dev); 1309 unregister_netdev(slave_dev);
1297 free_netdev(slave_dev); 1310 free_netdev(slave_dev);
1298} 1311}
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 300b06888fdf..b54b3ca939db 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -715,6 +715,7 @@ config DEFAULT_TCP_CONG
715 default "reno" if DEFAULT_RENO 715 default "reno" if DEFAULT_RENO
716 default "dctcp" if DEFAULT_DCTCP 716 default "dctcp" if DEFAULT_DCTCP
717 default "cdg" if DEFAULT_CDG 717 default "cdg" if DEFAULT_CDG
718 default "bbr" if DEFAULT_BBR
718 default "cubic" 719 default "cubic"
719 720
720config TCP_MD5SIG 721config TCP_MD5SIG
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9648c97e541f..215143246e4b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect);
533 533
534static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) 534static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
535{ 535{
536 DEFINE_WAIT(wait); 536 DEFINE_WAIT_FUNC(wait, woken_wake_function);
537 537
538 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 538 add_wait_queue(sk_sleep(sk), &wait);
539 sk->sk_write_pending += writebias; 539 sk->sk_write_pending += writebias;
540 540
541 /* Basic assumption: if someone sets sk->sk_err, he _must_ 541 /* Basic assumption: if someone sets sk->sk_err, he _must_
@@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
545 */ 545 */
546 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 546 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
547 release_sock(sk); 547 release_sock(sk);
548 timeo = schedule_timeout(timeo); 548 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
549 lock_sock(sk); 549 lock_sock(sk);
550 if (signal_pending(current) || !timeo) 550 if (signal_pending(current) || !timeo)
551 break; 551 break;
552 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
553 } 552 }
554 finish_wait(sk_sleep(sk), &wait); 553 remove_wait_queue(sk_sleep(sk), &wait);
555 sk->sk_write_pending -= writebias; 554 sk->sk_write_pending -= writebias;
556 return timeo; 555 return timeo;
557} 556}
@@ -1234,7 +1233,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1234 fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); 1233 fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
1235 1234
1236 /* fixed ID is invalid if DF bit is not set */ 1235 /* fixed ID is invalid if DF bit is not set */
1237 if (fixedid && !(iph->frag_off & htons(IP_DF))) 1236 if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
1238 goto out; 1237 goto out;
1239 } 1238 }
1240 1239
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d95631d09248..20fb25e3027b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
476 esph = (void *)skb_push(skb, 4); 476 esph = (void *)skb_push(skb, 4);
477 *seqhi = esph->spi; 477 *seqhi = esph->spi;
478 esph->spi = esph->seq_no; 478 esph->spi = esph->seq_no;
479 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); 479 esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
480 aead_request_set_callback(req, 0, esp_input_done_esn, skb); 480 aead_request_set_callback(req, 0, esp_input_done_esn, skb);
481 } 481 }
482 482
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c3b80478226e..161fc0f0d752 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -151,7 +151,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old,
151 151
152int fib_unmerge(struct net *net) 152int fib_unmerge(struct net *net)
153{ 153{
154 struct fib_table *old, *new; 154 struct fib_table *old, *new, *main_table;
155 155
156 /* attempt to fetch local table if it has been allocated */ 156 /* attempt to fetch local table if it has been allocated */
157 old = fib_get_table(net, RT_TABLE_LOCAL); 157 old = fib_get_table(net, RT_TABLE_LOCAL);
@@ -162,11 +162,21 @@ int fib_unmerge(struct net *net)
162 if (!new) 162 if (!new)
163 return -ENOMEM; 163 return -ENOMEM;
164 164
165 /* table is already unmerged */
166 if (new == old)
167 return 0;
168
165 /* replace merged table with clean table */ 169 /* replace merged table with clean table */
166 if (new != old) { 170 fib_replace_table(net, old, new);
167 fib_replace_table(net, old, new); 171 fib_free_table(old);
168 fib_free_table(old); 172
169 } 173 /* attempt to fetch main table if it has been allocated */
174 main_table = fib_get_table(net, RT_TABLE_MAIN);
175 if (!main_table)
176 return 0;
177
178 /* flush local entries from main table */
179 fib_table_flush_external(main_table);
170 180
171 return 0; 181 return 0;
172} 182}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 31cef3602585..026f309c51e9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1743,8 +1743,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
1743 local_l = fib_find_node(lt, &local_tp, l->key); 1743 local_l = fib_find_node(lt, &local_tp, l->key);
1744 1744
1745 if (fib_insert_alias(lt, local_tp, local_l, new_fa, 1745 if (fib_insert_alias(lt, local_tp, local_l, new_fa,
1746 NULL, l->key)) 1746 NULL, l->key)) {
1747 kmem_cache_free(fn_alias_kmem, new_fa);
1747 goto out; 1748 goto out;
1749 }
1748 } 1750 }
1749 1751
1750 /* stop loop if key wrapped back to 0 */ 1752 /* stop loop if key wrapped back to 0 */
@@ -1760,6 +1762,71 @@ out:
1760 return NULL; 1762 return NULL;
1761} 1763}
1762 1764
1765/* Caller must hold RTNL */
1766void fib_table_flush_external(struct fib_table *tb)
1767{
1768 struct trie *t = (struct trie *)tb->tb_data;
1769 struct key_vector *pn = t->kv;
1770 unsigned long cindex = 1;
1771 struct hlist_node *tmp;
1772 struct fib_alias *fa;
1773
1774 /* walk trie in reverse order */
1775 for (;;) {
1776 unsigned char slen = 0;
1777 struct key_vector *n;
1778
1779 if (!(cindex--)) {
1780 t_key pkey = pn->key;
1781
1782 /* cannot resize the trie vector */
1783 if (IS_TRIE(pn))
1784 break;
1785
1786 /* resize completed node */
1787 pn = resize(t, pn);
1788 cindex = get_index(pkey, pn);
1789
1790 continue;
1791 }
1792
1793 /* grab the next available node */
1794 n = get_child(pn, cindex);
1795 if (!n)
1796 continue;
1797
1798 if (IS_TNODE(n)) {
1799 /* record pn and cindex for leaf walking */
1800 pn = n;
1801 cindex = 1ul << n->bits;
1802
1803 continue;
1804 }
1805
1806 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1807 /* if alias was cloned to local then we just
1808 * need to remove the local copy from main
1809 */
1810 if (tb->tb_id != fa->tb_id) {
1811 hlist_del_rcu(&fa->fa_list);
1812 alias_free_mem_rcu(fa);
1813 continue;
1814 }
1815
1816 /* record local slen */
1817 slen = fa->fa_slen;
1818 }
1819
1820 /* update leaf slen */
1821 n->slen = slen;
1822
1823 if (hlist_empty(&n->leaf)) {
1824 put_child_root(pn, n->key, NULL);
1825 node_free(n);
1826 }
1827 }
1828}
1829
1763/* Caller must hold RTNL. */ 1830/* Caller must hold RTNL. */
1764int fib_table_flush(struct net *net, struct fib_table *tb) 1831int fib_table_flush(struct net *net, struct fib_table *tb)
1765{ 1832{
@@ -2413,22 +2480,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2413 struct key_vector *l, **tp = &iter->tnode; 2480 struct key_vector *l, **tp = &iter->tnode;
2414 t_key key; 2481 t_key key;
2415 2482
2416 /* use cache location of next-to-find key */ 2483 /* use cached location of previously found key */
2417 if (iter->pos > 0 && pos >= iter->pos) { 2484 if (iter->pos > 0 && pos >= iter->pos) {
2418 pos -= iter->pos;
2419 key = iter->key; 2485 key = iter->key;
2420 } else { 2486 } else {
2421 iter->pos = 0; 2487 iter->pos = 1;
2422 key = 0; 2488 key = 0;
2423 } 2489 }
2424 2490
2425 while ((l = leaf_walk_rcu(tp, key)) != NULL) { 2491 pos -= iter->pos;
2492
2493 while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
2426 key = l->key + 1; 2494 key = l->key + 1;
2427 iter->pos++; 2495 iter->pos++;
2428
2429 if (--pos <= 0)
2430 break;
2431
2432 l = NULL; 2496 l = NULL;
2433 2497
2434 /* handle unlikely case of a key wrap */ 2498 /* handle unlikely case of a key wrap */
@@ -2437,7 +2501,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2437 } 2501 }
2438 2502
2439 if (l) 2503 if (l)
2440 iter->key = key; /* remember it */ 2504 iter->key = l->key; /* remember it */
2441 else 2505 else
2442 iter->pos = 0; /* forget it */ 2506 iter->pos = 0; /* forget it */
2443 2507
@@ -2465,7 +2529,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2465 return fib_route_get_idx(iter, *pos); 2529 return fib_route_get_idx(iter, *pos);
2466 2530
2467 iter->pos = 0; 2531 iter->pos = 0;
2468 iter->key = 0; 2532 iter->key = KEY_MAX;
2469 2533
2470 return SEQ_START_TOKEN; 2534 return SEQ_START_TOKEN;
2471} 2535}
@@ -2474,7 +2538,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2474{ 2538{
2475 struct fib_route_iter *iter = seq->private; 2539 struct fib_route_iter *iter = seq->private;
2476 struct key_vector *l = NULL; 2540 struct key_vector *l = NULL;
2477 t_key key = iter->key; 2541 t_key key = iter->key + 1;
2478 2542
2479 ++*pos; 2543 ++*pos;
2480 2544
@@ -2483,7 +2547,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2483 l = leaf_walk_rcu(&iter->tnode, key); 2547 l = leaf_walk_rcu(&iter->tnode, key);
2484 2548
2485 if (l) { 2549 if (l) {
2486 iter->key = l->key + 1; 2550 iter->key = l->key;
2487 iter->pos++; 2551 iter->pos++;
2488 } else { 2552 } else {
2489 iter->pos = 0; 2553 iter->pos = 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 38abe70e595f..48734ee6293f 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
477 fl4->flowi4_proto = IPPROTO_ICMP; 477 fl4->flowi4_proto = IPPROTO_ICMP;
478 fl4->fl4_icmp_type = type; 478 fl4->fl4_icmp_type = type;
479 fl4->fl4_icmp_code = code; 479 fl4->fl4_icmp_code = code;
480 fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); 480 fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
481 481
482 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); 482 security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
483 rt = __ip_route_output_key_hash(net, fl4, 483 rt = __ip_route_output_key_hash(net, fl4,
@@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
502 if (err) 502 if (err)
503 goto relookup_failed; 503 goto relookup_failed;
504 504
505 if (inet_addr_type_dev_table(net, skb_in->dev, 505 if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
506 fl4_dec.saddr) == RTN_LOCAL) { 506 fl4_dec.saddr) == RTN_LOCAL) {
507 rt2 = __ip_route_output_key(net, &fl4_dec); 507 rt2 = __ip_route_output_key(net, &fl4_dec);
508 if (IS_ERR(rt2)) 508 if (IS_ERR(rt2))
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 606cc3e85d2b..15db786d50ed 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -162,7 +162,7 @@ static int unsolicited_report_interval(struct in_device *in_dev)
162} 162}
163 163
164static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); 164static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
165static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); 165static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
166static void igmpv3_clear_delrec(struct in_device *in_dev); 166static void igmpv3_clear_delrec(struct in_device *in_dev);
167static int sf_setstate(struct ip_mc_list *pmc); 167static int sf_setstate(struct ip_mc_list *pmc);
168static void sf_markstate(struct ip_mc_list *pmc); 168static void sf_markstate(struct ip_mc_list *pmc);
@@ -1130,10 +1130,15 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1130 spin_unlock_bh(&in_dev->mc_tomb_lock); 1130 spin_unlock_bh(&in_dev->mc_tomb_lock);
1131} 1131}
1132 1132
1133static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) 1133/*
1134 * restore ip_mc_list deleted records
1135 */
1136static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1134{ 1137{
1135 struct ip_mc_list *pmc, *pmc_prev; 1138 struct ip_mc_list *pmc, *pmc_prev;
1136 struct ip_sf_list *psf, *psf_next; 1139 struct ip_sf_list *psf;
1140 struct net *net = dev_net(in_dev->dev);
1141 __be32 multiaddr = im->multiaddr;
1137 1142
1138 spin_lock_bh(&in_dev->mc_tomb_lock); 1143 spin_lock_bh(&in_dev->mc_tomb_lock);
1139 pmc_prev = NULL; 1144 pmc_prev = NULL;
@@ -1149,16 +1154,26 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
1149 in_dev->mc_tomb = pmc->next; 1154 in_dev->mc_tomb = pmc->next;
1150 } 1155 }
1151 spin_unlock_bh(&in_dev->mc_tomb_lock); 1156 spin_unlock_bh(&in_dev->mc_tomb_lock);
1157
1158 spin_lock_bh(&im->lock);
1152 if (pmc) { 1159 if (pmc) {
1153 for (psf = pmc->tomb; psf; psf = psf_next) { 1160 im->interface = pmc->interface;
1154 psf_next = psf->sf_next; 1161 im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
1155 kfree(psf); 1162 im->sfmode = pmc->sfmode;
1163 if (pmc->sfmode == MCAST_INCLUDE) {
1164 im->tomb = pmc->tomb;
1165 im->sources = pmc->sources;
1166 for (psf = im->sources; psf; psf = psf->sf_next)
1167 psf->sf_crcount = im->crcount;
1156 } 1168 }
1157 in_dev_put(pmc->interface); 1169 in_dev_put(pmc->interface);
1158 kfree(pmc);
1159 } 1170 }
1171 spin_unlock_bh(&im->lock);
1160} 1172}
1161 1173
1174/*
1175 * flush ip_mc_list deleted records
1176 */
1162static void igmpv3_clear_delrec(struct in_device *in_dev) 1177static void igmpv3_clear_delrec(struct in_device *in_dev)
1163{ 1178{
1164 struct ip_mc_list *pmc, *nextpmc; 1179 struct ip_mc_list *pmc, *nextpmc;
@@ -1366,7 +1381,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1366 ip_mc_hash_add(in_dev, im); 1381 ip_mc_hash_add(in_dev, im);
1367 1382
1368#ifdef CONFIG_IP_MULTICAST 1383#ifdef CONFIG_IP_MULTICAST
1369 igmpv3_del_delrec(in_dev, im->multiaddr); 1384 igmpv3_del_delrec(in_dev, im);
1370#endif 1385#endif
1371 igmp_group_added(im); 1386 igmp_group_added(im);
1372 if (!in_dev->dead) 1387 if (!in_dev->dead)
@@ -1626,8 +1641,12 @@ void ip_mc_remap(struct in_device *in_dev)
1626 1641
1627 ASSERT_RTNL(); 1642 ASSERT_RTNL();
1628 1643
1629 for_each_pmc_rtnl(in_dev, pmc) 1644 for_each_pmc_rtnl(in_dev, pmc) {
1645#ifdef CONFIG_IP_MULTICAST
1646 igmpv3_del_delrec(in_dev, pmc);
1647#endif
1630 igmp_group_added(pmc); 1648 igmp_group_added(pmc);
1649 }
1631} 1650}
1632 1651
1633/* Device going down */ 1652/* Device going down */
@@ -1648,7 +1667,6 @@ void ip_mc_down(struct in_device *in_dev)
1648 in_dev->mr_gq_running = 0; 1667 in_dev->mr_gq_running = 0;
1649 if (del_timer(&in_dev->mr_gq_timer)) 1668 if (del_timer(&in_dev->mr_gq_timer))
1650 __in_dev_put(in_dev); 1669 __in_dev_put(in_dev);
1651 igmpv3_clear_delrec(in_dev);
1652#endif 1670#endif
1653 1671
1654 ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); 1672 ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
@@ -1688,8 +1706,12 @@ void ip_mc_up(struct in_device *in_dev)
1688#endif 1706#endif
1689 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); 1707 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1690 1708
1691 for_each_pmc_rtnl(in_dev, pmc) 1709 for_each_pmc_rtnl(in_dev, pmc) {
1710#ifdef CONFIG_IP_MULTICAST
1711 igmpv3_del_delrec(in_dev, pmc);
1712#endif
1692 igmp_group_added(pmc); 1713 igmp_group_added(pmc);
1714 }
1693} 1715}
1694 1716
1695/* 1717/*
@@ -1704,13 +1726,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1704 1726
1705 /* Deactivate timers */ 1727 /* Deactivate timers */
1706 ip_mc_down(in_dev); 1728 ip_mc_down(in_dev);
1729#ifdef CONFIG_IP_MULTICAST
1730 igmpv3_clear_delrec(in_dev);
1731#endif
1707 1732
1708 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { 1733 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1709 in_dev->mc_list = i->next_rcu; 1734 in_dev->mc_list = i->next_rcu;
1710 in_dev->mc_count--; 1735 in_dev->mc_count--;
1711
1712 /* We've dropped the groups in ip_mc_down already */
1713 ip_mc_clear_src(i);
1714 ip_ma_put(i); 1736 ip_ma_put(i);
1715 } 1737 }
1716} 1738}
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 8b4ffd216839..9f0a7b96646f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb)
117 if (opt->is_strictroute && rt->rt_uses_gateway) 117 if (opt->is_strictroute && rt->rt_uses_gateway)
118 goto sr_failed; 118 goto sr_failed;
119 119
120 IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; 120 IPCB(skb)->flags |= IPSKB_FORWARDED;
121 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); 121 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
122 if (ip_exceeds_mtu(skb, mtu)) { 122 if (ip_exceeds_mtu(skb, mtu)) {
123 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); 123 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 03e7f7310423..877bdb02e887 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -107,6 +107,8 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
107 if (unlikely(!skb)) 107 if (unlikely(!skb))
108 return 0; 108 return 0;
109 109
110 skb->protocol = htons(ETH_P_IP);
111
110 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, 112 return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
111 net, sk, skb, NULL, skb_dst(skb)->dev, 113 net, sk, skb, NULL, skb_dst(skb)->dev,
112 dst_output); 114 dst_output);
@@ -239,19 +241,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
239 struct sk_buff *segs; 241 struct sk_buff *segs;
240 int ret = 0; 242 int ret = 0;
241 243
242 /* common case: fragmentation of segments is not allowed, 244 /* common case: seglen is <= mtu
243 * or seglen is <= mtu
244 */ 245 */
245 if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) || 246 if (skb_gso_validate_mtu(skb, mtu))
246 skb_gso_validate_mtu(skb, mtu))
247 return ip_finish_output2(net, sk, skb); 247 return ip_finish_output2(net, sk, skb);
248 248
249 /* Slowpath - GSO segment length is exceeding the dst MTU. 249 /* Slowpath - GSO segment length exceeds the egress MTU.
250 * 250 *
251 * This can happen in two cases: 251 * This can happen in several cases:
252 * 1) TCP GRO packet, DF bit not set 252 * - Forwarding of a TCP GRO skb, when DF flag is not set.
253 * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly 253 * - Forwarding of an skb that arrived on a virtualization interface
254 * from host network stack. 254 * (virtio-net/vhost/tap) with TSO/GSO size set by other network
255 * stack.
256 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
257 * interface with a smaller MTU.
258 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
259 * bridged to a NETIF_F_TSO tunnel stacked over an interface with an
260 * insufficent MTU.
255 */ 261 */
256 features = netif_skb_features(skb); 262 features = netif_skb_features(skb);
257 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); 263 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
@@ -1579,7 +1585,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1579 } 1585 }
1580 1586
1581 oif = arg->bound_dev_if; 1587 oif = arg->bound_dev_if;
1582 oif = oif ? : skb->skb_iif; 1588 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1589 oif = skb->skb_iif;
1583 1590
1584 flowi4_init_output(&fl4, oif, 1591 flowi4_init_output(&fl4, oif,
1585 IP4_REPLY_MARK(net, skb->mark), 1592 IP4_REPLY_MARK(net, skb->mark),
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 777bc1883870..fed3d29f9eb3 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
63 int pkt_len = skb->len - skb_inner_network_offset(skb); 63 int pkt_len = skb->len - skb_inner_network_offset(skb);
64 struct net *net = dev_net(rt->dst.dev); 64 struct net *net = dev_net(rt->dst.dev);
65 struct net_device *dev = skb->dev; 65 struct net_device *dev = skb->dev;
66 int skb_iif = skb->skb_iif;
67 struct iphdr *iph; 66 struct iphdr *iph;
68 int err; 67 int err;
69 68
@@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
73 skb_dst_set(skb, &rt->dst); 72 skb_dst_set(skb, &rt->dst);
74 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 73 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
75 74
76 if (skb_iif && !(df & htons(IP_DF))) {
77 /* Arrived from an ingress interface, got encapsulated, with
78 * fragmentation of encapulating frames allowed.
79 * If skb is gso, the resulting encapsulated network segments
80 * may exceed dst mtu.
81 * Allow IP Fragmentation of segments.
82 */
83 IPCB(skb)->flags |= IPSKB_FRAG_SEGS;
84 }
85
86 /* Push down and install the IP header. */ 75 /* Push down and install the IP header. */
87 skb_push(skb, sizeof(struct iphdr)); 76 skb_push(skb, sizeof(struct iphdr));
88 skb_reset_network_header(skb); 77 skb_reset_network_header(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5f006e13de56..27089f5ebbb1 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1749 vif->dev->stats.tx_bytes += skb->len; 1749 vif->dev->stats.tx_bytes += skb->len;
1750 } 1750 }
1751 1751
1752 IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; 1752 IPCB(skb)->flags |= IPSKB_FORWARDED;
1753 1753
1754 /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally 1754 /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1755 * not only before forwarding, but after forwarding on all output 1755 * not only before forwarding, but after forwarding on all output
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c3776ff6749f..b3cc1335adbc 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -24,10 +24,11 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
24 struct flowi4 fl4 = {}; 24 struct flowi4 fl4 = {};
25 __be32 saddr = iph->saddr; 25 __be32 saddr = iph->saddr;
26 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 26 __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
27 struct net_device *dev = skb_dst(skb)->dev;
27 unsigned int hh_len; 28 unsigned int hh_len;
28 29
29 if (addr_type == RTN_UNSPEC) 30 if (addr_type == RTN_UNSPEC)
30 addr_type = inet_addr_type(net, saddr); 31 addr_type = inet_addr_type_dev_table(net, dev, saddr);
31 if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) 32 if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
32 flags |= FLOWI_FLAG_ANYSRC; 33 flags |= FLOWI_FLAG_ANYSRC;
33 else 34 else
@@ -40,6 +41,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
40 fl4.saddr = saddr; 41 fl4.saddr = saddr;
41 fl4.flowi4_tos = RT_TOS(iph->tos); 42 fl4.flowi4_tos = RT_TOS(iph->tos);
42 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 43 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
44 if (!fl4.flowi4_oif)
45 fl4.flowi4_oif = l3mdev_master_ifindex(dev);
43 fl4.flowi4_mark = skb->mark; 46 fl4.flowi4_mark = skb->mark;
44 fl4.flowi4_flags = flags; 47 fl4.flowi4_flags = flags;
45 rt = ip_route_output_key(net, &fl4); 48 rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b31df597fd37..697538464e6e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1201,8 +1201,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
1201 1201
1202 newinfo->number = compatr->num_entries; 1202 newinfo->number = compatr->num_entries;
1203 for (i = 0; i < NF_ARP_NUMHOOKS; i++) { 1203 for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
1204 newinfo->hook_entry[i] = info->hook_entry[i]; 1204 newinfo->hook_entry[i] = compatr->hook_entry[i];
1205 newinfo->underflow[i] = info->underflow[i]; 1205 newinfo->underflow[i] = compatr->underflow[i];
1206 } 1206 }
1207 entry1 = newinfo->entries; 1207 entry1 = newinfo->entries;
1208 pos = entry1; 1208 pos = entry1;
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index bf855e64fc45..0c01a270bf9f 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr,
28 struct in_addr gw = { 28 struct in_addr gw = {
29 .s_addr = (__force __be32)regs->data[priv->sreg_addr], 29 .s_addr = (__force __be32)regs->data[priv->sreg_addr],
30 }; 30 };
31 int oif = regs->data[priv->sreg_dev]; 31 int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
32 32
33 nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); 33 nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif);
34} 34}
@@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
59{ 59{
60 struct nft_dup_ipv4 *priv = nft_expr_priv(expr); 60 struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
61 61
62 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || 62 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
63 goto nla_put_failure;
64 if (priv->sreg_dev &&
63 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) 65 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
64 goto nla_put_failure; 66 goto nla_put_failure;
65 67
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 62d4d90c1389..2a57566e6e91 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -753,7 +753,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
753 goto reject_redirect; 753 goto reject_redirect;
754 } 754 }
755 755
756 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); 756 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
757 if (!n)
758 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
757 if (!IS_ERR(n)) { 759 if (!IS_ERR(n)) {
758 if (!(n->nud_state & NUD_VALID)) { 760 if (!(n->nud_state & NUD_VALID)) {
759 neigh_event_send(n, NULL); 761 neigh_event_send(n, NULL);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3251fe71f39f..814af89c1bd3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1164,7 +1164,7 @@ restart:
1164 1164
1165 err = -EPIPE; 1165 err = -EPIPE;
1166 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1166 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1167 goto out_err; 1167 goto do_error;
1168 1168
1169 sg = !!(sk->sk_route_caps & NETIF_F_SG); 1169 sg = !!(sk->sk_route_caps & NETIF_F_SG);
1170 1170
@@ -1241,7 +1241,7 @@ new_segment:
1241 1241
1242 if (!skb_can_coalesce(skb, i, pfrag->page, 1242 if (!skb_can_coalesce(skb, i, pfrag->page,
1243 pfrag->offset)) { 1243 pfrag->offset)) {
1244 if (i == sysctl_max_skb_frags || !sg) { 1244 if (i >= sysctl_max_skb_frags || !sg) {
1245 tcp_mark_push(tp, skb); 1245 tcp_mark_push(tp, skb);
1246 goto new_segment; 1246 goto new_segment;
1247 } 1247 }
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1294af4e0127..f9038d6b109e 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -200,8 +200,10 @@ static void tcp_reinit_congestion_control(struct sock *sk,
200 icsk->icsk_ca_ops = ca; 200 icsk->icsk_ca_ops = ca;
201 icsk->icsk_ca_setsockopt = 1; 201 icsk->icsk_ca_setsockopt = 1;
202 202
203 if (sk->sk_state != TCP_CLOSE) 203 if (sk->sk_state != TCP_CLOSE) {
204 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
204 tcp_init_congestion_control(sk); 205 tcp_init_congestion_control(sk);
206 }
205} 207}
206 208
207/* Manage refcounts on socket close. */ 209/* Manage refcounts on socket close. */
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 10d728b6804c..ab37c6775630 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -56,6 +56,7 @@ struct dctcp {
56 u32 next_seq; 56 u32 next_seq;
57 u32 ce_state; 57 u32 ce_state;
58 u32 delayed_ack_reserved; 58 u32 delayed_ack_reserved;
59 u32 loss_cwnd;
59}; 60};
60 61
61static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ 62static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
@@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk)
96 ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); 97 ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
97 98
98 ca->delayed_ack_reserved = 0; 99 ca->delayed_ack_reserved = 0;
100 ca->loss_cwnd = 0;
99 ca->ce_state = 0; 101 ca->ce_state = 0;
100 102
101 dctcp_reset(tp, ca); 103 dctcp_reset(tp, ca);
@@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk)
111 113
112static u32 dctcp_ssthresh(struct sock *sk) 114static u32 dctcp_ssthresh(struct sock *sk)
113{ 115{
114 const struct dctcp *ca = inet_csk_ca(sk); 116 struct dctcp *ca = inet_csk_ca(sk);
115 struct tcp_sock *tp = tcp_sk(sk); 117 struct tcp_sock *tp = tcp_sk(sk);
116 118
119 ca->loss_cwnd = tp->snd_cwnd;
117 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); 120 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
118} 121}
119 122
@@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
308 return 0; 311 return 0;
309} 312}
310 313
314static u32 dctcp_cwnd_undo(struct sock *sk)
315{
316 const struct dctcp *ca = inet_csk_ca(sk);
317
318 return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
319}
320
311static struct tcp_congestion_ops dctcp __read_mostly = { 321static struct tcp_congestion_ops dctcp __read_mostly = {
312 .init = dctcp_init, 322 .init = dctcp_init,
313 .in_ack_event = dctcp_update_alpha, 323 .in_ack_event = dctcp_update_alpha,
314 .cwnd_event = dctcp_cwnd_event, 324 .cwnd_event = dctcp_cwnd_event,
315 .ssthresh = dctcp_ssthresh, 325 .ssthresh = dctcp_ssthresh,
316 .cong_avoid = tcp_reno_cong_avoid, 326 .cong_avoid = tcp_reno_cong_avoid,
327 .undo_cwnd = dctcp_cwnd_undo,
317 .set_state = dctcp_state, 328 .set_state = dctcp_state,
318 .get_info = dctcp_get_info, 329 .get_info = dctcp_get_info,
319 .flags = TCP_CONG_NEEDS_ECN, 330 .flags = TCP_CONG_NEEDS_ECN,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 61b7be303eec..2259114c7242 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1564,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1564} 1564}
1565EXPORT_SYMBOL(tcp_add_backlog); 1565EXPORT_SYMBOL(tcp_add_backlog);
1566 1566
1567int tcp_filter(struct sock *sk, struct sk_buff *skb)
1568{
1569 struct tcphdr *th = (struct tcphdr *)skb->data;
1570 unsigned int eaten = skb->len;
1571 int err;
1572
1573 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1574 if (!err) {
1575 eaten -= skb->len;
1576 TCP_SKB_CB(skb)->end_seq -= eaten;
1577 }
1578 return err;
1579}
1580EXPORT_SYMBOL(tcp_filter);
1581
1567/* 1582/*
1568 * From tcp_input.c 1583 * From tcp_input.c
1569 */ 1584 */
@@ -1676,8 +1691,10 @@ process:
1676 1691
1677 nf_reset(skb); 1692 nf_reset(skb);
1678 1693
1679 if (sk_filter(sk, skb)) 1694 if (tcp_filter(sk, skb))
1680 goto discard_and_relse; 1695 goto discard_and_relse;
1696 th = (const struct tcphdr *)skb->data;
1697 iph = ip_hdr(skb);
1681 1698
1682 skb->dev = NULL; 1699 skb->dev = NULL;
1683 1700
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d123d68f4d1d..5bab6c3f7a2f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1455,7 +1455,7 @@ static void udp_v4_rehash(struct sock *sk)
1455 udp_lib_rehash(sk, new_hash); 1455 udp_lib_rehash(sk, new_hash);
1456} 1456}
1457 1457
1458static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1458int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1459{ 1459{
1460 int rc; 1460 int rc;
1461 1461
@@ -1652,10 +1652,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
1652 1652
1653 if (use_hash2) { 1653 if (use_hash2) {
1654 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & 1654 hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
1655 udp_table.mask; 1655 udptable->mask;
1656 hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; 1656 hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
1657start_lookup: 1657start_lookup:
1658 hslot = &udp_table.hash2[hash2]; 1658 hslot = &udptable->hash2[hash2];
1659 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 1659 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
1660 } 1660 }
1661 1661
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 7e0fe4bdd967..feb50a16398d 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
25 int flags, int *addr_len); 25 int flags, int *addr_len);
26int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 26int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
27 int flags); 27 int flags);
28int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 28int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
29void udp_destroy_sock(struct sock *sk); 29void udp_destroy_sock(struct sock *sk);
30 30
31#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index af817158d830..ff450c2aad9b 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -50,7 +50,7 @@ struct proto udplite_prot = {
50 .sendmsg = udp_sendmsg, 50 .sendmsg = udp_sendmsg,
51 .recvmsg = udp_recvmsg, 51 .recvmsg = udp_recvmsg,
52 .sendpage = udp_sendpage, 52 .sendpage = udp_sendpage,
53 .backlog_rcv = udp_queue_rcv_skb, 53 .backlog_rcv = __udp_queue_rcv_skb,
54 .hash = udp_lib_hash, 54 .hash = udp_lib_hash,
55 .unhash = udp_lib_unhash, 55 .unhash = udp_lib_unhash,
56 .get_port = udp_v4_get_port, 56 .get_port = udp_v4_get_port,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 060dd9922018..4bc5ba3ae452 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -183,7 +183,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
183 183
184static void addrconf_dad_start(struct inet6_ifaddr *ifp); 184static void addrconf_dad_start(struct inet6_ifaddr *ifp);
185static void addrconf_dad_work(struct work_struct *w); 185static void addrconf_dad_work(struct work_struct *w);
186static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 186static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
187static void addrconf_dad_run(struct inet6_dev *idev); 187static void addrconf_dad_run(struct inet6_dev *idev);
188static void addrconf_rs_timer(unsigned long data); 188static void addrconf_rs_timer(unsigned long data);
189static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); 189static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -2898,6 +2898,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2898 spin_lock_bh(&ifp->lock); 2898 spin_lock_bh(&ifp->lock);
2899 ifp->flags &= ~IFA_F_TENTATIVE; 2899 ifp->flags &= ~IFA_F_TENTATIVE;
2900 spin_unlock_bh(&ifp->lock); 2900 spin_unlock_bh(&ifp->lock);
2901 rt_genid_bump_ipv6(dev_net(idev->dev));
2901 ipv6_ifa_notify(RTM_NEWADDR, ifp); 2902 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2902 in6_ifa_put(ifp); 2903 in6_ifa_put(ifp);
2903 } 2904 }
@@ -3740,7 +3741,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3740{ 3741{
3741 struct inet6_dev *idev = ifp->idev; 3742 struct inet6_dev *idev = ifp->idev;
3742 struct net_device *dev = idev->dev; 3743 struct net_device *dev = idev->dev;
3743 bool notify = false; 3744 bool bump_id, notify = false;
3744 3745
3745 addrconf_join_solict(dev, &ifp->addr); 3746 addrconf_join_solict(dev, &ifp->addr);
3746 3747
@@ -3755,11 +3756,12 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3755 idev->cnf.accept_dad < 1 || 3756 idev->cnf.accept_dad < 1 ||
3756 !(ifp->flags&IFA_F_TENTATIVE) || 3757 !(ifp->flags&IFA_F_TENTATIVE) ||
3757 ifp->flags & IFA_F_NODAD) { 3758 ifp->flags & IFA_F_NODAD) {
3759 bump_id = ifp->flags & IFA_F_TENTATIVE;
3758 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 3760 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3759 spin_unlock(&ifp->lock); 3761 spin_unlock(&ifp->lock);
3760 read_unlock_bh(&idev->lock); 3762 read_unlock_bh(&idev->lock);
3761 3763
3762 addrconf_dad_completed(ifp); 3764 addrconf_dad_completed(ifp, bump_id);
3763 return; 3765 return;
3764 } 3766 }
3765 3767
@@ -3819,8 +3821,8 @@ static void addrconf_dad_work(struct work_struct *w)
3819 struct inet6_ifaddr, 3821 struct inet6_ifaddr,
3820 dad_work); 3822 dad_work);
3821 struct inet6_dev *idev = ifp->idev; 3823 struct inet6_dev *idev = ifp->idev;
3824 bool bump_id, disable_ipv6 = false;
3822 struct in6_addr mcaddr; 3825 struct in6_addr mcaddr;
3823 bool disable_ipv6 = false;
3824 3826
3825 enum { 3827 enum {
3826 DAD_PROCESS, 3828 DAD_PROCESS,
@@ -3890,11 +3892,12 @@ static void addrconf_dad_work(struct work_struct *w)
3890 * DAD was successful 3892 * DAD was successful
3891 */ 3893 */
3892 3894
3895 bump_id = ifp->flags & IFA_F_TENTATIVE;
3893 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 3896 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3894 spin_unlock(&ifp->lock); 3897 spin_unlock(&ifp->lock);
3895 write_unlock_bh(&idev->lock); 3898 write_unlock_bh(&idev->lock);
3896 3899
3897 addrconf_dad_completed(ifp); 3900 addrconf_dad_completed(ifp, bump_id);
3898 3901
3899 goto out; 3902 goto out;
3900 } 3903 }
@@ -3931,7 +3934,7 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
3931 return true; 3934 return true;
3932} 3935}
3933 3936
3934static void addrconf_dad_completed(struct inet6_ifaddr *ifp) 3937static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
3935{ 3938{
3936 struct net_device *dev = ifp->idev->dev; 3939 struct net_device *dev = ifp->idev->dev;
3937 struct in6_addr lladdr; 3940 struct in6_addr lladdr;
@@ -3983,6 +3986,9 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
3983 spin_unlock(&ifp->lock); 3986 spin_unlock(&ifp->lock);
3984 write_unlock_bh(&ifp->idev->lock); 3987 write_unlock_bh(&ifp->idev->lock);
3985 } 3988 }
3989
3990 if (bump_id)
3991 rt_genid_bump_ipv6(dev_net(dev));
3986} 3992}
3987 3993
3988static void addrconf_dad_run(struct inet6_dev *idev) 3994static void addrconf_dad_run(struct inet6_dev *idev)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 37874e2f30ed..ccf40550c475 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -139,7 +139,8 @@ void ip6_datagram_release_cb(struct sock *sk)
139} 139}
140EXPORT_SYMBOL_GPL(ip6_datagram_release_cb); 140EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
141 141
142static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 142int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
143 int addr_len)
143{ 144{
144 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 145 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
145 struct inet_sock *inet = inet_sk(sk); 146 struct inet_sock *inet = inet_sk(sk);
@@ -252,6 +253,7 @@ ipv4_connected:
252out: 253out:
253 return err; 254 return err;
254} 255}
256EXPORT_SYMBOL_GPL(__ip6_datagram_connect);
255 257
256int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 258int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
257{ 259{
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 060a60b2f8a6..111ba55fd512 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
418 esph = (void *)skb_push(skb, 4); 418 esph = (void *)skb_push(skb, 4);
419 *seqhi = esph->spi; 419 *seqhi = esph->spi;
420 esph->spi = esph->seq_no; 420 esph->spi = esph->seq_no;
421 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); 421 esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
422 aead_request_set_callback(req, 0, esp_input_done_esn, skb); 422 aead_request_set_callback(req, 0, esp_input_done_esn, skb);
423 } 423 }
424 424
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index bd59c343d35f..2772004ba5a1 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -447,8 +447,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
447 447
448 if (__ipv6_addr_needs_scope_id(addr_type)) 448 if (__ipv6_addr_needs_scope_id(addr_type))
449 iif = skb->dev->ifindex; 449 iif = skb->dev->ifindex;
450 else 450 else {
451 iif = l3mdev_master_ifindex(skb->dev); 451 dst = skb_dst(skb);
452 iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
453 }
452 454
453 /* 455 /*
454 * Must not send error if the source does not uniquely 456 * Must not send error if the source does not uniquely
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1fcf61f1cbc3..89c59e656f44 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -99,7 +99,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
99 segs = ops->callbacks.gso_segment(skb, features); 99 segs = ops->callbacks.gso_segment(skb, features);
100 } 100 }
101 101
102 if (IS_ERR(segs)) 102 if (IS_ERR_OR_NULL(segs))
103 goto out; 103 goto out;
104 104
105 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); 105 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6001e781164e..59eb4ed99ce8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1366,7 +1366,7 @@ emsgsize:
1366 if (((length > mtu) || 1366 if (((length > mtu) ||
1367 (skb && skb_is_gso(skb))) && 1367 (skb && skb_is_gso(skb))) &&
1368 (sk->sk_protocol == IPPROTO_UDP) && 1368 (sk->sk_protocol == IPPROTO_UDP) &&
1369 (rt->dst.dev->features & NETIF_F_UFO) && 1369 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
1370 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { 1370 (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1371 err = ip6_ufo_append_data(sk, queue, getfrag, from, length, 1371 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1372 hh_len, fragheaderlen, exthdrlen, 1372 hh_len, fragheaderlen, exthdrlen,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 87784560dc46..d76674efe523 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1034,6 +1034,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1034 int mtu; 1034 int mtu;
1035 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; 1035 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1036 unsigned int max_headroom = psh_hlen; 1036 unsigned int max_headroom = psh_hlen;
1037 bool use_cache = false;
1037 u8 hop_limit; 1038 u8 hop_limit;
1038 int err = -1; 1039 int err = -1;
1039 1040
@@ -1066,7 +1067,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1066 1067
1067 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); 1068 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1068 neigh_release(neigh); 1069 neigh_release(neigh);
1069 } else if (!fl6->flowi6_mark) 1070 } else if (!(t->parms.flags &
1071 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1072 /* enable the cache only only if the routing decision does
1073 * not depend on the current inner header value
1074 */
1075 use_cache = true;
1076 }
1077
1078 if (use_cache)
1070 dst = dst_cache_get(&t->dst_cache); 1079 dst = dst_cache_get(&t->dst_cache);
1071 1080
1072 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) 1081 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
@@ -1150,7 +1159,7 @@ route_lookup:
1150 if (t->encap.type != TUNNEL_ENCAP_NONE) 1159 if (t->encap.type != TUNNEL_ENCAP_NONE)
1151 goto tx_err_dst_release; 1160 goto tx_err_dst_release;
1152 } else { 1161 } else {
1153 if (!fl6->flowi6_mark && ndst) 1162 if (use_cache && ndst)
1154 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); 1163 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1155 } 1164 }
1156 skb_dst_set(skb, dst); 1165 skb_dst_set(skb, dst);
@@ -1172,7 +1181,6 @@ route_lookup:
1172 if (err) 1181 if (err)
1173 return err; 1182 return err;
1174 1183
1175 skb->protocol = htons(ETH_P_IPV6);
1176 skb_push(skb, sizeof(struct ipv6hdr)); 1184 skb_push(skb, sizeof(struct ipv6hdr));
1177 skb_reset_network_header(skb); 1185 skb_reset_network_header(skb);
1178 ipv6h = ipv6_hdr(skb); 1186 ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index a7520528ecd2..b283f293ee4a 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
88 88
89 uh->len = htons(skb->len); 89 uh->len = htons(skb->len);
90 90
91 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
92 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
93 | IPSKB_REROUTED);
94 skb_dst_set(skb, dst); 91 skb_dst_set(skb, dst);
95 92
96 udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); 93 udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 8a02ca8a11af..c299c1e2bbf0 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1138,6 +1138,33 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
1138 .priority = 100, 1138 .priority = 100,
1139}; 1139};
1140 1140
1141static bool is_vti6_tunnel(const struct net_device *dev)
1142{
1143 return dev->netdev_ops == &vti6_netdev_ops;
1144}
1145
1146static int vti6_device_event(struct notifier_block *unused,
1147 unsigned long event, void *ptr)
1148{
1149 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1150 struct ip6_tnl *t = netdev_priv(dev);
1151
1152 if (!is_vti6_tunnel(dev))
1153 return NOTIFY_DONE;
1154
1155 switch (event) {
1156 case NETDEV_DOWN:
1157 if (!net_eq(t->net, dev_net(dev)))
1158 xfrm_garbage_collect(t->net);
1159 break;
1160 }
1161 return NOTIFY_DONE;
1162}
1163
1164static struct notifier_block vti6_notifier_block __read_mostly = {
1165 .notifier_call = vti6_device_event,
1166};
1167
1141/** 1168/**
1142 * vti6_tunnel_init - register protocol and reserve needed resources 1169 * vti6_tunnel_init - register protocol and reserve needed resources
1143 * 1170 *
@@ -1148,6 +1175,8 @@ static int __init vti6_tunnel_init(void)
1148 const char *msg; 1175 const char *msg;
1149 int err; 1176 int err;
1150 1177
1178 register_netdevice_notifier(&vti6_notifier_block);
1179
1151 msg = "tunnel device"; 1180 msg = "tunnel device";
1152 err = register_pernet_device(&vti6_net_ops); 1181 err = register_pernet_device(&vti6_net_ops);
1153 if (err < 0) 1182 if (err < 0)
@@ -1180,6 +1209,7 @@ xfrm_proto_ah_failed:
1180xfrm_proto_esp_failed: 1209xfrm_proto_esp_failed:
1181 unregister_pernet_device(&vti6_net_ops); 1210 unregister_pernet_device(&vti6_net_ops);
1182pernet_dev_failed: 1211pernet_dev_failed:
1212 unregister_netdevice_notifier(&vti6_notifier_block);
1183 pr_err("vti6 init: failed to register %s\n", msg); 1213 pr_err("vti6 init: failed to register %s\n", msg);
1184 return err; 1214 return err;
1185} 1215}
@@ -1194,6 +1224,7 @@ static void __exit vti6_tunnel_cleanup(void)
1194 xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); 1224 xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
1195 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); 1225 xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
1196 unregister_pernet_device(&vti6_net_ops); 1226 unregister_pernet_device(&vti6_net_ops);
1227 unregister_netdevice_notifier(&vti6_notifier_block);
1197} 1228}
1198 1229
1199module_init(vti6_tunnel_init); 1230module_init(vti6_tunnel_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e4347aeb2e65..9948b5ce52da 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -576,11 +576,11 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
576 /* Jumbo payload inhibits frag. header */ 576 /* Jumbo payload inhibits frag. header */
577 if (ipv6_hdr(skb)->payload_len == 0) { 577 if (ipv6_hdr(skb)->payload_len == 0) {
578 pr_debug("payload len = 0\n"); 578 pr_debug("payload len = 0\n");
579 return -EINVAL; 579 return 0;
580 } 580 }
581 581
582 if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) 582 if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
583 return -EINVAL; 583 return 0;
584 584
585 if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) 585 if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
586 return -ENOMEM; 586 return -ENOMEM;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index f7aab5ab93a5..f06b0471f39f 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -69,7 +69,7 @@ static unsigned int ipv6_defrag(void *priv,
69 if (err == -EINPROGRESS) 69 if (err == -EINPROGRESS)
70 return NF_STOLEN; 70 return NF_STOLEN;
71 71
72 return NF_ACCEPT; 72 return err == 0 ? NF_ACCEPT : NF_DROP;
73} 73}
74 74
75static struct nf_hook_ops ipv6_defrag_ops[] = { 75static struct nf_hook_ops ipv6_defrag_ops[] = {
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index a5400223fd74..10090400c72f 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -156,6 +156,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
156 fl6.daddr = oip6h->saddr; 156 fl6.daddr = oip6h->saddr;
157 fl6.fl6_sport = otcph->dest; 157 fl6.fl6_sport = otcph->dest;
158 fl6.fl6_dport = otcph->source; 158 fl6.fl6_dport = otcph->source;
159 fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
159 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); 160 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
160 dst = ip6_route_output(net, NULL, &fl6); 161 dst = ip6_route_output(net, NULL, &fl6);
161 if (dst->error) { 162 if (dst->error) {
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 8bfd470cbe72..831f86e1ec08 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr,
26{ 26{
27 struct nft_dup_ipv6 *priv = nft_expr_priv(expr); 27 struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
28 struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr]; 28 struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
29 int oif = regs->data[priv->sreg_dev]; 29 int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1;
30 30
31 nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); 31 nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif);
32} 32}
@@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
57{ 57{
58 struct nft_dup_ipv6 *priv = nft_expr_priv(expr); 58 struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
59 59
60 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || 60 if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr))
61 goto nla_put_failure;
62 if (priv->sreg_dev &&
61 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) 63 nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
62 goto nla_put_failure; 64 goto nla_put_failure;
63 65
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 7cca8ac66fe9..cd4252346a32 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -155,6 +155,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
155 if (unlikely(!skb)) 155 if (unlikely(!skb))
156 return 0; 156 return 0;
157 157
158 skb->protocol = htons(ETH_P_IPV6);
159
158 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, 160 return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
159 net, sk, skb, NULL, skb_dst(skb)->dev, 161 net, sk, skb, NULL, skb_dst(skb)->dev,
160 dst_output); 162 dst_output);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 947ed1ded026..1b57e11e6e0d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1364,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
1364 if (rt6->rt6i_flags & RTF_LOCAL) 1364 if (rt6->rt6i_flags & RTF_LOCAL)
1365 return; 1365 return;
1366 1366
1367 if (dst_metric_locked(dst, RTAX_MTU))
1368 return;
1369
1367 dst_confirm(dst); 1370 dst_confirm(dst);
1368 mtu = max_t(u32, mtu, IPV6_MIN_MTU); 1371 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
1369 if (mtu >= dst_mtu(dst)) 1372 if (mtu >= dst_mtu(dst))
@@ -2758,6 +2761,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2758 PMTU discouvery. 2761 PMTU discouvery.
2759 */ 2762 */
2760 if (rt->dst.dev == arg->dev && 2763 if (rt->dst.dev == arg->dev &&
2764 dst_metric_raw(&rt->dst, RTAX_MTU) &&
2761 !dst_metric_locked(&rt->dst, RTAX_MTU)) { 2765 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2762 if (rt->rt6i_flags & RTF_CACHE) { 2766 if (rt->rt6i_flags & RTF_CACHE) {
2763 /* For RTF_CACHE with rt6i_pmtu == 0 2767 /* For RTF_CACHE with rt6i_pmtu == 0
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5a27ab4eab39..b9f1fee9a886 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -818,8 +818,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
818 fl6.flowi6_proto = IPPROTO_TCP; 818 fl6.flowi6_proto = IPPROTO_TCP;
819 if (rt6_need_strict(&fl6.daddr) && !oif) 819 if (rt6_need_strict(&fl6.daddr) && !oif)
820 fl6.flowi6_oif = tcp_v6_iif(skb); 820 fl6.flowi6_oif = tcp_v6_iif(skb);
821 else 821 else {
822 fl6.flowi6_oif = oif ? : skb->skb_iif; 822 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
823 oif = skb->skb_iif;
824
825 fl6.flowi6_oif = oif;
826 }
823 827
824 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); 828 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
825 fl6.fl6_dport = t1->dest; 829 fl6.fl6_dport = t1->dest;
@@ -1225,7 +1229,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1225 if (skb->protocol == htons(ETH_P_IP)) 1229 if (skb->protocol == htons(ETH_P_IP))
1226 return tcp_v4_do_rcv(sk, skb); 1230 return tcp_v4_do_rcv(sk, skb);
1227 1231
1228 if (sk_filter(sk, skb)) 1232 if (tcp_filter(sk, skb))
1229 goto discard; 1233 goto discard;
1230 1234
1231 /* 1235 /*
@@ -1453,8 +1457,10 @@ process:
1453 if (tcp_v6_inbound_md5_hash(sk, skb)) 1457 if (tcp_v6_inbound_md5_hash(sk, skb))
1454 goto discard_and_relse; 1458 goto discard_and_relse;
1455 1459
1456 if (sk_filter(sk, skb)) 1460 if (tcp_filter(sk, skb))
1457 goto discard_and_relse; 1461 goto discard_and_relse;
1462 th = (const struct tcphdr *)skb->data;
1463 hdr = ipv6_hdr(skb);
1458 1464
1459 skb->dev = NULL; 1465 skb->dev = NULL;
1460 1466
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b2ef061e6836..e4a8000d59ad 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -514,7 +514,7 @@ out:
514 return; 514 return;
515} 515}
516 516
517static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 517int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
518{ 518{
519 int rc; 519 int rc;
520 520
@@ -706,10 +706,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
706 706
707 if (use_hash2) { 707 if (use_hash2) {
708 hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & 708 hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
709 udp_table.mask; 709 udptable->mask;
710 hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; 710 hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
711start_lookup: 711start_lookup:
712 hslot = &udp_table.hash2[hash2]; 712 hslot = &udptable->hash2[hash2];
713 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); 713 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
714 } 714 }
715 715
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index f6eb1ab34f4b..e78bdc76dcc3 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
26int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 26int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
27int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 27int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
28 int flags, int *addr_len); 28 int flags, int *addr_len);
29int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 29int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
30void udpv6_destroy_sock(struct sock *sk); 30void udpv6_destroy_sock(struct sock *sk);
31 31
32#ifdef CONFIG_PROC_FS 32#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 47d0d2b87106..2f5101a12283 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -45,7 +45,7 @@ struct proto udplitev6_prot = {
45 .getsockopt = udpv6_getsockopt, 45 .getsockopt = udpv6_getsockopt,
46 .sendmsg = udpv6_sendmsg, 46 .sendmsg = udpv6_sendmsg,
47 .recvmsg = udpv6_recvmsg, 47 .recvmsg = udpv6_recvmsg,
48 .backlog_rcv = udpv6_queue_rcv_skb, 48 .backlog_rcv = __udpv6_queue_rcv_skb,
49 .hash = udp_lib_hash, 49 .hash = udp_lib_hash,
50 .unhash = udp_lib_unhash, 50 .unhash = udp_lib_unhash,
51 .get_port = udp_v6_get_port, 51 .get_port = udp_v6_get_port,
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index fce25afb652a..8938b6ba57a0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -61,7 +61,8 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
61 if ((l2tp->conn_id == tunnel_id) && 61 if ((l2tp->conn_id == tunnel_id) &&
62 net_eq(sock_net(sk), net) && 62 net_eq(sock_net(sk), net) &&
63 !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && 63 !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
64 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) 64 (!sk->sk_bound_dev_if || !dif ||
65 sk->sk_bound_dev_if == dif))
65 goto found; 66 goto found;
66 } 67 }
67 68
@@ -182,15 +183,17 @@ pass_up:
182 struct iphdr *iph = (struct iphdr *) skb_network_header(skb); 183 struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
183 184
184 read_lock_bh(&l2tp_ip_lock); 185 read_lock_bh(&l2tp_ip_lock);
185 sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); 186 sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
187 tunnel_id);
188 if (!sk) {
189 read_unlock_bh(&l2tp_ip_lock);
190 goto discard;
191 }
192
193 sock_hold(sk);
186 read_unlock_bh(&l2tp_ip_lock); 194 read_unlock_bh(&l2tp_ip_lock);
187 } 195 }
188 196
189 if (sk == NULL)
190 goto discard;
191
192 sock_hold(sk);
193
194 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 197 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
195 goto discard_put; 198 goto discard_put;
196 199
@@ -251,22 +254,17 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
251 int ret; 254 int ret;
252 int chk_addr_ret; 255 int chk_addr_ret;
253 256
254 if (!sock_flag(sk, SOCK_ZAPPED))
255 return -EINVAL;
256 if (addr_len < sizeof(struct sockaddr_l2tpip)) 257 if (addr_len < sizeof(struct sockaddr_l2tpip))
257 return -EINVAL; 258 return -EINVAL;
258 if (addr->l2tp_family != AF_INET) 259 if (addr->l2tp_family != AF_INET)
259 return -EINVAL; 260 return -EINVAL;
260 261
261 ret = -EADDRINUSE; 262 lock_sock(sk);
262 read_lock_bh(&l2tp_ip_lock);
263 if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
264 sk->sk_bound_dev_if, addr->l2tp_conn_id))
265 goto out_in_use;
266 263
267 read_unlock_bh(&l2tp_ip_lock); 264 ret = -EINVAL;
265 if (!sock_flag(sk, SOCK_ZAPPED))
266 goto out;
268 267
269 lock_sock(sk);
270 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) 268 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
271 goto out; 269 goto out;
272 270
@@ -280,14 +278,22 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
280 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 278 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
281 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) 279 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
282 inet->inet_saddr = 0; /* Use device */ 280 inet->inet_saddr = 0; /* Use device */
283 sk_dst_reset(sk);
284 281
282 write_lock_bh(&l2tp_ip_lock);
283 if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
284 sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
285 write_unlock_bh(&l2tp_ip_lock);
286 ret = -EADDRINUSE;
287 goto out;
288 }
289
290 sk_dst_reset(sk);
285 l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; 291 l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
286 292
287 write_lock_bh(&l2tp_ip_lock);
288 sk_add_bind_node(sk, &l2tp_ip_bind_table); 293 sk_add_bind_node(sk, &l2tp_ip_bind_table);
289 sk_del_node_init(sk); 294 sk_del_node_init(sk);
290 write_unlock_bh(&l2tp_ip_lock); 295 write_unlock_bh(&l2tp_ip_lock);
296
291 ret = 0; 297 ret = 0;
292 sock_reset_flag(sk, SOCK_ZAPPED); 298 sock_reset_flag(sk, SOCK_ZAPPED);
293 299
@@ -295,11 +301,6 @@ out:
295 release_sock(sk); 301 release_sock(sk);
296 302
297 return ret; 303 return ret;
298
299out_in_use:
300 read_unlock_bh(&l2tp_ip_lock);
301
302 return ret;
303} 304}
304 305
305static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 306static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -307,21 +308,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
307 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; 308 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
308 int rc; 309 int rc;
309 310
310 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
311 return -EINVAL;
312
313 if (addr_len < sizeof(*lsa)) 311 if (addr_len < sizeof(*lsa))
314 return -EINVAL; 312 return -EINVAL;
315 313
316 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 314 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
317 return -EINVAL; 315 return -EINVAL;
318 316
319 rc = ip4_datagram_connect(sk, uaddr, addr_len);
320 if (rc < 0)
321 return rc;
322
323 lock_sock(sk); 317 lock_sock(sk);
324 318
319 /* Must bind first - autobinding does not work */
320 if (sock_flag(sk, SOCK_ZAPPED)) {
321 rc = -EINVAL;
322 goto out_sk;
323 }
324
325 rc = __ip4_datagram_connect(sk, uaddr, addr_len);
326 if (rc < 0)
327 goto out_sk;
328
325 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 329 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
326 330
327 write_lock_bh(&l2tp_ip_lock); 331 write_lock_bh(&l2tp_ip_lock);
@@ -329,7 +333,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
329 sk_add_bind_node(sk, &l2tp_ip_bind_table); 333 sk_add_bind_node(sk, &l2tp_ip_bind_table);
330 write_unlock_bh(&l2tp_ip_lock); 334 write_unlock_bh(&l2tp_ip_lock);
331 335
336out_sk:
332 release_sock(sk); 337 release_sock(sk);
338
333 return rc; 339 return rc;
334} 340}
335 341
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index ad3468c32b53..aa821cb639e5 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -72,8 +72,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
72 72
73 if ((l2tp->conn_id == tunnel_id) && 73 if ((l2tp->conn_id == tunnel_id) &&
74 net_eq(sock_net(sk), net) && 74 net_eq(sock_net(sk), net) &&
75 !(addr && ipv6_addr_equal(addr, laddr)) && 75 (!addr || ipv6_addr_equal(addr, laddr)) &&
76 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) 76 (!sk->sk_bound_dev_if || !dif ||
77 sk->sk_bound_dev_if == dif))
77 goto found; 78 goto found;
78 } 79 }
79 80
@@ -196,16 +197,17 @@ pass_up:
196 struct ipv6hdr *iph = ipv6_hdr(skb); 197 struct ipv6hdr *iph = ipv6_hdr(skb);
197 198
198 read_lock_bh(&l2tp_ip6_lock); 199 read_lock_bh(&l2tp_ip6_lock);
199 sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, 200 sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
200 0, tunnel_id); 201 tunnel_id);
202 if (!sk) {
203 read_unlock_bh(&l2tp_ip6_lock);
204 goto discard;
205 }
206
207 sock_hold(sk);
201 read_unlock_bh(&l2tp_ip6_lock); 208 read_unlock_bh(&l2tp_ip6_lock);
202 } 209 }
203 210
204 if (sk == NULL)
205 goto discard;
206
207 sock_hold(sk);
208
209 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 211 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
210 goto discard_put; 212 goto discard_put;
211 213
@@ -266,11 +268,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
266 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; 268 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
267 struct net *net = sock_net(sk); 269 struct net *net = sock_net(sk);
268 __be32 v4addr = 0; 270 __be32 v4addr = 0;
271 int bound_dev_if;
269 int addr_type; 272 int addr_type;
270 int err; 273 int err;
271 274
272 if (!sock_flag(sk, SOCK_ZAPPED))
273 return -EINVAL;
274 if (addr->l2tp_family != AF_INET6) 275 if (addr->l2tp_family != AF_INET6)
275 return -EINVAL; 276 return -EINVAL;
276 if (addr_len < sizeof(*addr)) 277 if (addr_len < sizeof(*addr))
@@ -286,41 +287,34 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
286 if (addr_type & IPV6_ADDR_MULTICAST) 287 if (addr_type & IPV6_ADDR_MULTICAST)
287 return -EADDRNOTAVAIL; 288 return -EADDRNOTAVAIL;
288 289
289 err = -EADDRINUSE;
290 read_lock_bh(&l2tp_ip6_lock);
291 if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
292 sk->sk_bound_dev_if, addr->l2tp_conn_id))
293 goto out_in_use;
294 read_unlock_bh(&l2tp_ip6_lock);
295
296 lock_sock(sk); 290 lock_sock(sk);
297 291
298 err = -EINVAL; 292 err = -EINVAL;
293 if (!sock_flag(sk, SOCK_ZAPPED))
294 goto out_unlock;
295
299 if (sk->sk_state != TCP_CLOSE) 296 if (sk->sk_state != TCP_CLOSE)
300 goto out_unlock; 297 goto out_unlock;
301 298
299 bound_dev_if = sk->sk_bound_dev_if;
300
302 /* Check if the address belongs to the host. */ 301 /* Check if the address belongs to the host. */
303 rcu_read_lock(); 302 rcu_read_lock();
304 if (addr_type != IPV6_ADDR_ANY) { 303 if (addr_type != IPV6_ADDR_ANY) {
305 struct net_device *dev = NULL; 304 struct net_device *dev = NULL;
306 305
307 if (addr_type & IPV6_ADDR_LINKLOCAL) { 306 if (addr_type & IPV6_ADDR_LINKLOCAL) {
308 if (addr_len >= sizeof(struct sockaddr_in6) && 307 if (addr->l2tp_scope_id)
309 addr->l2tp_scope_id) { 308 bound_dev_if = addr->l2tp_scope_id;
310 /* Override any existing binding, if another
311 * one is supplied by user.
312 */
313 sk->sk_bound_dev_if = addr->l2tp_scope_id;
314 }
315 309
316 /* Binding to link-local address requires an 310 /* Binding to link-local address requires an
317 interface */ 311 * interface.
318 if (!sk->sk_bound_dev_if) 312 */
313 if (!bound_dev_if)
319 goto out_unlock_rcu; 314 goto out_unlock_rcu;
320 315
321 err = -ENODEV; 316 err = -ENODEV;
322 dev = dev_get_by_index_rcu(sock_net(sk), 317 dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
323 sk->sk_bound_dev_if);
324 if (!dev) 318 if (!dev)
325 goto out_unlock_rcu; 319 goto out_unlock_rcu;
326 } 320 }
@@ -335,13 +329,22 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
335 } 329 }
336 rcu_read_unlock(); 330 rcu_read_unlock();
337 331
338 inet->inet_rcv_saddr = inet->inet_saddr = v4addr; 332 write_lock_bh(&l2tp_ip6_lock);
333 if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
334 addr->l2tp_conn_id)) {
335 write_unlock_bh(&l2tp_ip6_lock);
336 err = -EADDRINUSE;
337 goto out_unlock;
338 }
339
340 inet->inet_saddr = v4addr;
341 inet->inet_rcv_saddr = v4addr;
342 sk->sk_bound_dev_if = bound_dev_if;
339 sk->sk_v6_rcv_saddr = addr->l2tp_addr; 343 sk->sk_v6_rcv_saddr = addr->l2tp_addr;
340 np->saddr = addr->l2tp_addr; 344 np->saddr = addr->l2tp_addr;
341 345
342 l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; 346 l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
343 347
344 write_lock_bh(&l2tp_ip6_lock);
345 sk_add_bind_node(sk, &l2tp_ip6_bind_table); 348 sk_add_bind_node(sk, &l2tp_ip6_bind_table);
346 sk_del_node_init(sk); 349 sk_del_node_init(sk);
347 write_unlock_bh(&l2tp_ip6_lock); 350 write_unlock_bh(&l2tp_ip6_lock);
@@ -354,10 +357,7 @@ out_unlock_rcu:
354 rcu_read_unlock(); 357 rcu_read_unlock();
355out_unlock: 358out_unlock:
356 release_sock(sk); 359 release_sock(sk);
357 return err;
358 360
359out_in_use:
360 read_unlock_bh(&l2tp_ip6_lock);
361 return err; 361 return err;
362} 362}
363 363
@@ -370,9 +370,6 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
370 int addr_type; 370 int addr_type;
371 int rc; 371 int rc;
372 372
373 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
374 return -EINVAL;
375
376 if (addr_len < sizeof(*lsa)) 373 if (addr_len < sizeof(*lsa))
377 return -EINVAL; 374 return -EINVAL;
378 375
@@ -389,10 +386,18 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
389 return -EINVAL; 386 return -EINVAL;
390 } 387 }
391 388
392 rc = ip6_datagram_connect(sk, uaddr, addr_len);
393
394 lock_sock(sk); 389 lock_sock(sk);
395 390
391 /* Must bind first - autobinding does not work */
392 if (sock_flag(sk, SOCK_ZAPPED)) {
393 rc = -EINVAL;
394 goto out_sk;
395 }
396
397 rc = __ip6_datagram_connect(sk, uaddr, addr_len);
398 if (rc < 0)
399 goto out_sk;
400
396 l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 401 l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
397 402
398 write_lock_bh(&l2tp_ip6_lock); 403 write_lock_bh(&l2tp_ip6_lock);
@@ -400,6 +405,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
400 sk_add_bind_node(sk, &l2tp_ip6_bind_table); 405 sk_add_bind_node(sk, &l2tp_ip6_bind_table);
401 write_unlock_bh(&l2tp_ip6_lock); 406 write_unlock_bh(&l2tp_ip6_lock);
402 407
408out_sk:
403 release_sock(sk); 409 release_sock(sk);
404 410
405 return rc; 411 return rc;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 78e9ecbc96e6..8e05032689f0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
688 } 688 }
689 689
690 /* No need to do anything if the driver does all */ 690 /* No need to do anything if the driver does all */
691 if (!local->ops->set_tim) 691 if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
692 return; 692 return;
693 693
694 if (sta->dead) 694 if (sta->dead)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1c56abc49627..bd5f4be89435 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1501,7 +1501,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
1501 struct sta_info *sta, 1501 struct sta_info *sta,
1502 struct sk_buff *skb) 1502 struct sk_buff *skb)
1503{ 1503{
1504 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1505 struct fq *fq = &local->fq; 1504 struct fq *fq = &local->fq;
1506 struct ieee80211_vif *vif; 1505 struct ieee80211_vif *vif;
1507 struct txq_info *txqi; 1506 struct txq_info *txqi;
@@ -1526,8 +1525,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
1526 if (!txqi) 1525 if (!txqi)
1527 return false; 1526 return false;
1528 1527
1529 info->control.vif = vif;
1530
1531 spin_lock_bh(&fq->lock); 1528 spin_lock_bh(&fq->lock);
1532 ieee80211_txq_enqueue(local, txqi, skb); 1529 ieee80211_txq_enqueue(local, txqi, skb);
1533 spin_unlock_bh(&fq->lock); 1530 spin_unlock_bh(&fq->lock);
@@ -3213,7 +3210,6 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
3213 3210
3214 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { 3211 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
3215 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 3212 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3216 *ieee80211_get_qos_ctl(hdr) = tid;
3217 hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); 3213 hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
3218 } else { 3214 } else {
3219 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; 3215 info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
@@ -3338,6 +3334,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
3338 (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); 3334 (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
3339 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; 3335 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
3340 3336
3337 if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
3338 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
3339 *ieee80211_get_qos_ctl(hdr) = tid;
3340 }
3341
3341 __skb_queue_head_init(&tx.skbs); 3342 __skb_queue_head_init(&tx.skbs);
3342 3343
3343 tx.flags = IEEE80211_TX_UNICAST; 3344 tx.flags = IEEE80211_TX_UNICAST;
@@ -3426,6 +3427,11 @@ begin:
3426 goto begin; 3427 goto begin;
3427 } 3428 }
3428 3429
3430 if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
3431 info->flags |= IEEE80211_TX_CTL_AMPDU;
3432 else
3433 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
3434
3429 if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { 3435 if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
3430 struct sta_info *sta = container_of(txq->sta, struct sta_info, 3436 struct sta_info *sta = container_of(txq->sta, struct sta_info,
3431 sta); 3437 sta);
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index ee715764a828..6832bf6ab69f 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -270,6 +270,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
270 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); 270 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
271 } 271 }
272 272
273 /*
274 * This is a workaround for VHT-enabled STAs which break the spec
275 * and have the VHT-MCS Rx map filled in with value 3 for all eight
276 * spacial streams, an example is AR9462.
277 *
278 * As per spec, in section 22.1.1 Introduction to the VHT PHY
279 * A VHT STA shall support at least single spactial stream VHT-MCSs
280 * 0 to 7 (transmit and receive) in all supported channel widths.
281 */
282 if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
283 vht_cap->vht_supported = false;
284 sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n",
285 sta->addr);
286 return;
287 }
288
273 /* finally set up the bandwidth */ 289 /* finally set up the bandwidth */
274 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 290 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
275 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 291 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c3c809b2e712..a6e44ef2ec9a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = {
2845 .hdrsize = 0, 2845 .hdrsize = 0,
2846 .name = IPVS_GENL_NAME, 2846 .name = IPVS_GENL_NAME,
2847 .version = IPVS_GENL_VERSION, 2847 .version = IPVS_GENL_VERSION,
2848 .maxattr = IPVS_CMD_MAX, 2848 .maxattr = IPVS_CMD_ATTR_MAX,
2849 .netnsok = true, /* Make ipvsadm to work on netns */ 2849 .netnsok = true, /* Make ipvsadm to work on netns */
2850}; 2850};
2851 2851
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 1b07578bedf3..9350530c16c1 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -283,6 +283,7 @@ struct ip_vs_sync_buff {
283 */ 283 */
284static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) 284static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
285{ 285{
286 memset(ho, 0, sizeof(*ho));
286 ho->init_seq = get_unaligned_be32(&no->init_seq); 287 ho->init_seq = get_unaligned_be32(&no->init_seq);
287 ho->delta = get_unaligned_be32(&no->delta); 288 ho->delta = get_unaligned_be32(&no->delta);
288 ho->previous_delta = get_unaligned_be32(&no->previous_delta); 289 ho->previous_delta = get_unaligned_be32(&no->previous_delta);
@@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa
917 kfree(param->pe_data); 918 kfree(param->pe_data);
918 } 919 }
919 920
920 if (opt) 921 if (opt) {
921 memcpy(&cp->in_seq, opt, sizeof(*opt)); 922 cp->in_seq = opt->in_seq;
923 cp->out_seq = opt->out_seq;
924 }
922 atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); 925 atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
923 cp->state = state; 926 cp->state = state;
924 cp->old_state = cp->state; 927 cp->old_state = cp->state;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index df2f5a3901df..0f87e5d21be7 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -76,6 +76,7 @@ struct conntrack_gc_work {
76 struct delayed_work dwork; 76 struct delayed_work dwork;
77 u32 last_bucket; 77 u32 last_bucket;
78 bool exiting; 78 bool exiting;
79 long next_gc_run;
79}; 80};
80 81
81static __read_mostly struct kmem_cache *nf_conntrack_cachep; 82static __read_mostly struct kmem_cache *nf_conntrack_cachep;
@@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
83static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); 84static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
84static __read_mostly bool nf_conntrack_locks_all; 85static __read_mostly bool nf_conntrack_locks_all;
85 86
87/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
86#define GC_MAX_BUCKETS_DIV 64u 88#define GC_MAX_BUCKETS_DIV 64u
87#define GC_MAX_BUCKETS 8192u 89/* upper bound of scan intervals */
88#define GC_INTERVAL (5 * HZ) 90#define GC_INTERVAL_MAX (2 * HZ)
91/* maximum conntracks to evict per gc run */
89#define GC_MAX_EVICTS 256u 92#define GC_MAX_EVICTS 256u
90 93
91static struct conntrack_gc_work conntrack_gc_work; 94static struct conntrack_gc_work conntrack_gc_work;
@@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
936static void gc_worker(struct work_struct *work) 939static void gc_worker(struct work_struct *work)
937{ 940{
938 unsigned int i, goal, buckets = 0, expired_count = 0; 941 unsigned int i, goal, buckets = 0, expired_count = 0;
939 unsigned long next_run = GC_INTERVAL;
940 unsigned int ratio, scanned = 0;
941 struct conntrack_gc_work *gc_work; 942 struct conntrack_gc_work *gc_work;
943 unsigned int ratio, scanned = 0;
944 unsigned long next_run;
942 945
943 gc_work = container_of(work, struct conntrack_gc_work, dwork.work); 946 gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
944 947
945 goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS); 948 goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
946 i = gc_work->last_bucket; 949 i = gc_work->last_bucket;
947 950
948 do { 951 do {
@@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work)
982 if (gc_work->exiting) 985 if (gc_work->exiting)
983 return; 986 return;
984 987
988 /*
989 * Eviction will normally happen from the packet path, and not
990 * from this gc worker.
991 *
992 * This worker is only here to reap expired entries when system went
993 * idle after a busy period.
994 *
995 * The heuristics below are supposed to balance conflicting goals:
996 *
997 * 1. Minimize time until we notice a stale entry
998 * 2. Maximize scan intervals to not waste cycles
999 *
1000 * Normally, expired_count will be 0, this increases the next_run time
1001 * to priorize 2) above.
1002 *
1003 * As soon as a timed-out entry is found, move towards 1) and increase
1004 * the scan frequency.
1005 * In case we have lots of evictions next scan is done immediately.
1006 */
985 ratio = scanned ? expired_count * 100 / scanned : 0; 1007 ratio = scanned ? expired_count * 100 / scanned : 0;
986 if (ratio >= 90 || expired_count == GC_MAX_EVICTS) 1008 if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
1009 gc_work->next_gc_run = 0;
987 next_run = 0; 1010 next_run = 0;
1011 } else if (expired_count) {
1012 gc_work->next_gc_run /= 2U;
1013 next_run = msecs_to_jiffies(1);
1014 } else {
1015 if (gc_work->next_gc_run < GC_INTERVAL_MAX)
1016 gc_work->next_gc_run += msecs_to_jiffies(1);
1017
1018 next_run = gc_work->next_gc_run;
1019 }
988 1020
989 gc_work->last_bucket = i; 1021 gc_work->last_bucket = i;
990 schedule_delayed_work(&gc_work->dwork, next_run); 1022 queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
991} 1023}
992 1024
993static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) 1025static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
994{ 1026{
995 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); 1027 INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
1028 gc_work->next_gc_run = GC_INTERVAL_MAX;
996 gc_work->exiting = false; 1029 gc_work->exiting = false;
997} 1030}
998 1031
@@ -1885,7 +1918,7 @@ int nf_conntrack_init_start(void)
1885 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); 1918 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1886 1919
1887 conntrack_gc_work_init(&conntrack_gc_work); 1920 conntrack_gc_work_init(&conntrack_gc_work);
1888 schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL); 1921 queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
1889 1922
1890 return 0; 1923 return 0;
1891 1924
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 336e21559e01..7341adf7059d 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
138 138
139 for (i = 0; i < nf_ct_helper_hsize; i++) { 139 for (i = 0; i < nf_ct_helper_hsize; i++) {
140 hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { 140 hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) {
141 if (!strcmp(h->name, name) && 141 if (strcmp(h->name, name))
142 h->tuple.src.l3num == l3num && 142 continue;
143 h->tuple.dst.protonum == protonum) 143
144 if (h->tuple.src.l3num != NFPROTO_UNSPEC &&
145 h->tuple.src.l3num != l3num)
146 continue;
147
148 if (h->tuple.dst.protonum == protonum)
144 return h; 149 return h;
145 } 150 }
146 } 151 }
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 621b81c7bddc..c3fc14e021ec 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
1436 handler = &sip_handlers[i]; 1436 handler = &sip_handlers[i];
1437 if (handler->request == NULL) 1437 if (handler->request == NULL)
1438 continue; 1438 continue;
1439 if (*datalen < handler->len || 1439 if (*datalen < handler->len + 2 ||
1440 strncasecmp(*dptr, handler->method, handler->len)) 1440 strncasecmp(*dptr, handler->method, handler->len))
1441 continue; 1441 continue;
1442 if ((*dptr)[handler->len] != ' ' ||
1443 !isalpha((*dptr)[handler->len+1]))
1444 continue;
1442 1445
1443 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, 1446 if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
1444 &matchoff, &matchlen) <= 0) { 1447 &matchoff, &matchlen) <= 0) {
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index bbb8f3df79f7..5b9c884a452e 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -42,7 +42,7 @@ struct nf_nat_conn_key {
42 const struct nf_conntrack_zone *zone; 42 const struct nf_conntrack_zone *zone;
43}; 43};
44 44
45static struct rhashtable nf_nat_bysource_table; 45static struct rhltable nf_nat_bysource_table;
46 46
47inline const struct nf_nat_l3proto * 47inline const struct nf_nat_l3proto *
48__nf_nat_l3proto_find(u8 family) 48__nf_nat_l3proto_find(u8 family)
@@ -193,9 +193,12 @@ static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
193 const struct nf_nat_conn_key *key = arg->key; 193 const struct nf_nat_conn_key *key = arg->key;
194 const struct nf_conn *ct = obj; 194 const struct nf_conn *ct = obj;
195 195
196 return same_src(ct, key->tuple) && 196 if (!same_src(ct, key->tuple) ||
197 net_eq(nf_ct_net(ct), key->net) && 197 !net_eq(nf_ct_net(ct), key->net) ||
198 nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL); 198 !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
199 return 1;
200
201 return 0;
199} 202}
200 203
201static struct rhashtable_params nf_nat_bysource_params = { 204static struct rhashtable_params nf_nat_bysource_params = {
@@ -204,7 +207,6 @@ static struct rhashtable_params nf_nat_bysource_params = {
204 .obj_cmpfn = nf_nat_bysource_cmp, 207 .obj_cmpfn = nf_nat_bysource_cmp,
205 .nelem_hint = 256, 208 .nelem_hint = 256,
206 .min_size = 1024, 209 .min_size = 1024,
207 .nulls_base = (1U << RHT_BASE_SHIFT),
208}; 210};
209 211
210/* Only called for SRC manip */ 212/* Only called for SRC manip */
@@ -223,12 +225,15 @@ find_appropriate_src(struct net *net,
223 .tuple = tuple, 225 .tuple = tuple,
224 .zone = zone 226 .zone = zone
225 }; 227 };
228 struct rhlist_head *hl;
226 229
227 ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key, 230 hl = rhltable_lookup(&nf_nat_bysource_table, &key,
228 nf_nat_bysource_params); 231 nf_nat_bysource_params);
229 if (!ct) 232 if (!hl)
230 return 0; 233 return 0;
231 234
235 ct = container_of(hl, typeof(*ct), nat_bysource);
236
232 nf_ct_invert_tuplepr(result, 237 nf_ct_invert_tuplepr(result,
233 &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 238 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
234 result->dst = tuple->dst; 239 result->dst = tuple->dst;
@@ -446,11 +451,17 @@ nf_nat_setup_info(struct nf_conn *ct,
446 } 451 }
447 452
448 if (maniptype == NF_NAT_MANIP_SRC) { 453 if (maniptype == NF_NAT_MANIP_SRC) {
454 struct nf_nat_conn_key key = {
455 .net = nf_ct_net(ct),
456 .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
457 .zone = nf_ct_zone(ct),
458 };
449 int err; 459 int err;
450 460
451 err = rhashtable_insert_fast(&nf_nat_bysource_table, 461 err = rhltable_insert_key(&nf_nat_bysource_table,
452 &ct->nat_bysource, 462 &key,
453 nf_nat_bysource_params); 463 &ct->nat_bysource,
464 nf_nat_bysource_params);
454 if (err) 465 if (err)
455 return NF_DROP; 466 return NF_DROP;
456 } 467 }
@@ -567,8 +578,8 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
567 * will delete entry from already-freed table. 578 * will delete entry from already-freed table.
568 */ 579 */
569 ct->status &= ~IPS_NAT_DONE_MASK; 580 ct->status &= ~IPS_NAT_DONE_MASK;
570 rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource, 581 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
571 nf_nat_bysource_params); 582 nf_nat_bysource_params);
572 583
573 /* don't delete conntrack. Although that would make things a lot 584 /* don't delete conntrack. Although that would make things a lot
574 * simpler, we'd end up flushing all conntracks on nat rmmod. 585 * simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -698,8 +709,8 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
698 if (!nat) 709 if (!nat)
699 return; 710 return;
700 711
701 rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource, 712 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
702 nf_nat_bysource_params); 713 nf_nat_bysource_params);
703} 714}
704 715
705static struct nf_ct_ext_type nat_extend __read_mostly = { 716static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -834,13 +845,13 @@ static int __init nf_nat_init(void)
834{ 845{
835 int ret; 846 int ret;
836 847
837 ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params); 848 ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
838 if (ret) 849 if (ret)
839 return ret; 850 return ret;
840 851
841 ret = nf_ct_extend_register(&nat_extend); 852 ret = nf_ct_extend_register(&nat_extend);
842 if (ret < 0) { 853 if (ret < 0) {
843 rhashtable_destroy(&nf_nat_bysource_table); 854 rhltable_destroy(&nf_nat_bysource_table);
844 printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); 855 printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
845 return ret; 856 return ret;
846 } 857 }
@@ -864,7 +875,7 @@ static int __init nf_nat_init(void)
864 return 0; 875 return 0;
865 876
866 cleanup_extend: 877 cleanup_extend:
867 rhashtable_destroy(&nf_nat_bysource_table); 878 rhltable_destroy(&nf_nat_bysource_table);
868 nf_ct_extend_unregister(&nat_extend); 879 nf_ct_extend_unregister(&nat_extend);
869 return ret; 880 return ret;
870} 881}
@@ -883,7 +894,7 @@ static void __exit nf_nat_cleanup(void)
883 for (i = 0; i < NFPROTO_NUMPROTO; i++) 894 for (i = 0; i < NFPROTO_NUMPROTO; i++)
884 kfree(nf_nat_l4protos[i]); 895 kfree(nf_nat_l4protos[i]);
885 896
886 rhashtable_destroy(&nf_nat_bysource_table); 897 rhltable_destroy(&nf_nat_bysource_table);
887} 898}
888 899
889MODULE_LICENSE("GPL"); 900MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 24db22257586..e5194f6f906c 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2570,7 +2570,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
2570 } 2570 }
2571 2571
2572 if (set->timeout && 2572 if (set->timeout &&
2573 nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout), 2573 nla_put_be64(skb, NFTA_SET_TIMEOUT,
2574 cpu_to_be64(jiffies_to_msecs(set->timeout)),
2574 NFTA_SET_PAD)) 2575 NFTA_SET_PAD))
2575 goto nla_put_failure; 2576 goto nla_put_failure;
2576 if (set->gc_int && 2577 if (set->gc_int &&
@@ -2859,7 +2860,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
2859 if (nla[NFTA_SET_TIMEOUT] != NULL) { 2860 if (nla[NFTA_SET_TIMEOUT] != NULL) {
2860 if (!(flags & NFT_SET_TIMEOUT)) 2861 if (!(flags & NFT_SET_TIMEOUT))
2861 return -EINVAL; 2862 return -EINVAL;
2862 timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT])); 2863 timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
2864 nla[NFTA_SET_TIMEOUT])));
2863 } 2865 }
2864 gc_int = 0; 2866 gc_int = 0;
2865 if (nla[NFTA_SET_GC_INTERVAL] != NULL) { 2867 if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
@@ -2956,12 +2958,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
2956 2958
2957 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); 2959 err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
2958 if (err < 0) 2960 if (err < 0)
2959 goto err2; 2961 goto err3;
2960 2962
2961 list_add_tail_rcu(&set->list, &table->sets); 2963 list_add_tail_rcu(&set->list, &table->sets);
2962 table->use++; 2964 table->use++;
2963 return 0; 2965 return 0;
2964 2966
2967err3:
2968 ops->destroy(set);
2965err2: 2969err2:
2966 kfree(set); 2970 kfree(set);
2967err1: 2971err1:
@@ -3176,7 +3180,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
3176 3180
3177 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) && 3181 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
3178 nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, 3182 nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
3179 cpu_to_be64(*nft_set_ext_timeout(ext)), 3183 cpu_to_be64(jiffies_to_msecs(
3184 *nft_set_ext_timeout(ext))),
3180 NFTA_SET_ELEM_PAD)) 3185 NFTA_SET_ELEM_PAD))
3181 goto nla_put_failure; 3186 goto nla_put_failure;
3182 3187
@@ -3445,21 +3450,22 @@ void *nft_set_elem_init(const struct nft_set *set,
3445 memcpy(nft_set_ext_data(ext), data, set->dlen); 3450 memcpy(nft_set_ext_data(ext), data, set->dlen);
3446 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) 3451 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
3447 *nft_set_ext_expiration(ext) = 3452 *nft_set_ext_expiration(ext) =
3448 jiffies + msecs_to_jiffies(timeout); 3453 jiffies + timeout;
3449 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) 3454 if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
3450 *nft_set_ext_timeout(ext) = timeout; 3455 *nft_set_ext_timeout(ext) = timeout;
3451 3456
3452 return elem; 3457 return elem;
3453} 3458}
3454 3459
3455void nft_set_elem_destroy(const struct nft_set *set, void *elem) 3460void nft_set_elem_destroy(const struct nft_set *set, void *elem,
3461 bool destroy_expr)
3456{ 3462{
3457 struct nft_set_ext *ext = nft_set_elem_ext(set, elem); 3463 struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
3458 3464
3459 nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); 3465 nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE);
3460 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) 3466 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3461 nft_data_uninit(nft_set_ext_data(ext), set->dtype); 3467 nft_data_uninit(nft_set_ext_data(ext), set->dtype);
3462 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) 3468 if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
3463 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); 3469 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
3464 3470
3465 kfree(elem); 3471 kfree(elem);
@@ -3532,7 +3538,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3532 if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) { 3538 if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
3533 if (!(set->flags & NFT_SET_TIMEOUT)) 3539 if (!(set->flags & NFT_SET_TIMEOUT))
3534 return -EINVAL; 3540 return -EINVAL;
3535 timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT])); 3541 timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
3542 nla[NFTA_SET_ELEM_TIMEOUT])));
3536 } else if (set->flags & NFT_SET_TIMEOUT) { 3543 } else if (set->flags & NFT_SET_TIMEOUT) {
3537 timeout = set->timeout; 3544 timeout = set->timeout;
3538 } 3545 }
@@ -3565,6 +3572,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
3565 dreg = nft_type_to_reg(set->dtype); 3572 dreg = nft_type_to_reg(set->dtype);
3566 list_for_each_entry(binding, &set->bindings, list) { 3573 list_for_each_entry(binding, &set->bindings, list) {
3567 struct nft_ctx bind_ctx = { 3574 struct nft_ctx bind_ctx = {
3575 .net = ctx->net,
3568 .afi = ctx->afi, 3576 .afi = ctx->afi,
3569 .table = ctx->table, 3577 .table = ctx->table,
3570 .chain = (struct nft_chain *)binding->chain, 3578 .chain = (struct nft_chain *)binding->chain,
@@ -3812,7 +3820,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu)
3812 3820
3813 gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); 3821 gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu);
3814 for (i = 0; i < gcb->head.cnt; i++) 3822 for (i = 0; i < gcb->head.cnt; i++)
3815 nft_set_elem_destroy(gcb->head.set, gcb->elems[i]); 3823 nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true);
3816 kfree(gcb); 3824 kfree(gcb);
3817} 3825}
3818EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); 3826EXPORT_SYMBOL_GPL(nft_set_gc_batch_release);
@@ -4030,7 +4038,7 @@ static void nf_tables_commit_release(struct nft_trans *trans)
4030 break; 4038 break;
4031 case NFT_MSG_DELSETELEM: 4039 case NFT_MSG_DELSETELEM:
4032 nft_set_elem_destroy(nft_trans_elem_set(trans), 4040 nft_set_elem_destroy(nft_trans_elem_set(trans),
4033 nft_trans_elem(trans).priv); 4041 nft_trans_elem(trans).priv, true);
4034 break; 4042 break;
4035 } 4043 }
4036 kfree(trans); 4044 kfree(trans);
@@ -4171,7 +4179,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
4171 break; 4179 break;
4172 case NFT_MSG_NEWSETELEM: 4180 case NFT_MSG_NEWSETELEM:
4173 nft_set_elem_destroy(nft_trans_elem_set(trans), 4181 nft_set_elem_destroy(nft_trans_elem_set(trans),
4174 nft_trans_elem(trans).priv); 4182 nft_trans_elem(trans).priv, true);
4175 break; 4183 break;
4176 } 4184 }
4177 kfree(trans); 4185 kfree(trans);
@@ -4421,7 +4429,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
4421 * Otherwise a 0 is returned and the attribute value is stored in the 4429 * Otherwise a 0 is returned and the attribute value is stored in the
4422 * destination variable. 4430 * destination variable.
4423 */ 4431 */
4424unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) 4432int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
4425{ 4433{
4426 u32 val; 4434 u32 val;
4427 4435
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 517f08767a3c..31ca94793aa9 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr,
44 &regs->data[priv->sreg_key], 44 &regs->data[priv->sreg_key],
45 &regs->data[priv->sreg_data], 45 &regs->data[priv->sreg_data],
46 timeout, GFP_ATOMIC); 46 timeout, GFP_ATOMIC);
47 if (elem == NULL) { 47 if (elem == NULL)
48 if (set->size) 48 goto err1;
49 atomic_dec(&set->nelems);
50 return NULL;
51 }
52 49
53 ext = nft_set_elem_ext(set, elem); 50 ext = nft_set_elem_ext(set, elem);
54 if (priv->expr != NULL && 51 if (priv->expr != NULL &&
55 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) 52 nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
56 return NULL; 53 goto err2;
57 54
58 return elem; 55 return elem;
56
57err2:
58 nft_set_elem_destroy(set, elem, false);
59err1:
60 if (set->size)
61 atomic_dec(&set->nelems);
62 return NULL;
59} 63}
60 64
61static void nft_dynset_eval(const struct nft_expr *expr, 65static void nft_dynset_eval(const struct nft_expr *expr,
@@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
139 return PTR_ERR(set); 143 return PTR_ERR(set);
140 } 144 }
141 145
146 if (set->ops->update == NULL)
147 return -EOPNOTSUPP;
148
142 if (set->flags & NFT_SET_CONSTANT) 149 if (set->flags & NFT_SET_CONSTANT)
143 return -EBUSY; 150 return -EBUSY;
144 151
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index baf694de3935..d5447a22275c 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -53,6 +53,7 @@ static int nft_hash_init(const struct nft_ctx *ctx,
53{ 53{
54 struct nft_hash *priv = nft_expr_priv(expr); 54 struct nft_hash *priv = nft_expr_priv(expr);
55 u32 len; 55 u32 len;
56 int err;
56 57
57 if (!tb[NFTA_HASH_SREG] || 58 if (!tb[NFTA_HASH_SREG] ||
58 !tb[NFTA_HASH_DREG] || 59 !tb[NFTA_HASH_DREG] ||
@@ -67,8 +68,10 @@ static int nft_hash_init(const struct nft_ctx *ctx,
67 priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]); 68 priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
68 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); 69 priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
69 70
70 len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN])); 71 err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
71 if (len == 0 || len > U8_MAX) 72 if (err < 0)
73 return err;
74 if (len == 0)
72 return -ERANGE; 75 return -ERANGE;
73 76
74 priv->len = len; 77 priv->len = len;
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index fbc88009ca2e..8f0aaaea1376 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -59,6 +59,12 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
59 int err; 59 int err;
60 u32 op; 60 u32 op;
61 61
62 if (!tb[NFTA_RANGE_SREG] ||
63 !tb[NFTA_RANGE_OP] ||
64 !tb[NFTA_RANGE_FROM_DATA] ||
65 !tb[NFTA_RANGE_TO_DATA])
66 return -EINVAL;
67
62 err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from), 68 err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
63 &desc_from, tb[NFTA_RANGE_FROM_DATA]); 69 &desc_from, tb[NFTA_RANGE_FROM_DATA]);
64 if (err < 0) 70 if (err < 0)
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 3794cb2fc788..a3dface3e6e6 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
98 const struct nft_set_ext **ext) 98 const struct nft_set_ext **ext)
99{ 99{
100 struct nft_hash *priv = nft_set_priv(set); 100 struct nft_hash *priv = nft_set_priv(set);
101 struct nft_hash_elem *he; 101 struct nft_hash_elem *he, *prev;
102 struct nft_hash_cmp_arg arg = { 102 struct nft_hash_cmp_arg arg = {
103 .genmask = NFT_GENMASK_ANY, 103 .genmask = NFT_GENMASK_ANY,
104 .set = set, 104 .set = set,
@@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key,
112 he = new(set, expr, regs); 112 he = new(set, expr, regs);
113 if (he == NULL) 113 if (he == NULL)
114 goto err1; 114 goto err1;
115 if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node, 115
116 nft_hash_params)) 116 prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node,
117 nft_hash_params);
118 if (IS_ERR(prev))
117 goto err2; 119 goto err2;
120
121 /* Another cpu may race to insert the element with the same key */
122 if (prev) {
123 nft_set_elem_destroy(set, he, true);
124 he = prev;
125 }
126
118out: 127out:
119 *ext = &he->ext; 128 *ext = &he->ext;
120 return true; 129 return true;
121 130
122err2: 131err2:
123 nft_set_elem_destroy(set, he); 132 nft_set_elem_destroy(set, he, true);
124err1: 133err1:
125 return false; 134 return false;
126} 135}
@@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set,
332 341
333static void nft_hash_elem_destroy(void *ptr, void *arg) 342static void nft_hash_elem_destroy(void *ptr, void *arg)
334{ 343{
335 nft_set_elem_destroy((const struct nft_set *)arg, ptr); 344 nft_set_elem_destroy((const struct nft_set *)arg, ptr, true);
336} 345}
337 346
338static void nft_hash_destroy(const struct nft_set *set) 347static void nft_hash_destroy(const struct nft_set *set)
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 38b5bda242f8..36493a7cae88 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
266 while ((node = priv->root.rb_node) != NULL) { 266 while ((node = priv->root.rb_node) != NULL) {
267 rb_erase(node, &priv->root); 267 rb_erase(node, &priv->root);
268 rbe = rb_entry(node, struct nft_rbtree_elem, node); 268 rbe = rb_entry(node, struct nft_rbtree_elem, node);
269 nft_set_elem_destroy(set, rbe); 269 nft_set_elem_destroy(set, rbe, true);
270 } 270 }
271} 271}
272 272
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 69f78e96fdb4..b83e158e116a 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
44 u_int32_t newmark; 44 u_int32_t newmark;
45 45
46 ct = nf_ct_get(skb, &ctinfo); 46 ct = nf_ct_get(skb, &ctinfo);
47 if (ct == NULL) 47 if (ct == NULL || nf_ct_is_untracked(ct))
48 return XT_CONTINUE; 48 return XT_CONTINUE;
49 49
50 switch (info->mode) { 50 switch (info->mode) {
@@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par)
97 const struct nf_conn *ct; 97 const struct nf_conn *ct;
98 98
99 ct = nf_ct_get(skb, &ctinfo); 99 ct = nf_ct_get(skb, &ctinfo);
100 if (ct == NULL) 100 if (ct == NULL || nf_ct_is_untracked(ct))
101 return false; 101 return false;
102 102
103 return ((ct->mark & info->mask) == info->mark) ^ info->invert; 103 return ((ct->mark & info->mask) == info->mark) ^ info->invert;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 62bea4591054..602e5ebe9db3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -322,14 +322,11 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
322 sk_mem_charge(sk, skb->truesize); 322 sk_mem_charge(sk, skb->truesize);
323} 323}
324 324
325static void netlink_sock_destruct(struct sock *sk) 325static void __netlink_sock_destruct(struct sock *sk)
326{ 326{
327 struct netlink_sock *nlk = nlk_sk(sk); 327 struct netlink_sock *nlk = nlk_sk(sk);
328 328
329 if (nlk->cb_running) { 329 if (nlk->cb_running) {
330 if (nlk->cb.done)
331 nlk->cb.done(&nlk->cb);
332
333 module_put(nlk->cb.module); 330 module_put(nlk->cb.module);
334 kfree_skb(nlk->cb.skb); 331 kfree_skb(nlk->cb.skb);
335 } 332 }
@@ -346,6 +343,28 @@ static void netlink_sock_destruct(struct sock *sk)
346 WARN_ON(nlk_sk(sk)->groups); 343 WARN_ON(nlk_sk(sk)->groups);
347} 344}
348 345
346static void netlink_sock_destruct_work(struct work_struct *work)
347{
348 struct netlink_sock *nlk = container_of(work, struct netlink_sock,
349 work);
350
351 nlk->cb.done(&nlk->cb);
352 __netlink_sock_destruct(&nlk->sk);
353}
354
355static void netlink_sock_destruct(struct sock *sk)
356{
357 struct netlink_sock *nlk = nlk_sk(sk);
358
359 if (nlk->cb_running && nlk->cb.done) {
360 INIT_WORK(&nlk->work, netlink_sock_destruct_work);
361 schedule_work(&nlk->work);
362 return;
363 }
364
365 __netlink_sock_destruct(sk);
366}
367
349/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on 368/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
350 * SMP. Look, when several writers sleep and reader wakes them up, all but one 369 * SMP. Look, when several writers sleep and reader wakes them up, all but one
351 * immediately hit write lock and grab all the cpus. Exclusive sleep solves 370 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 3cfd6cc60504..4fdb38318977 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/rhashtable.h> 4#include <linux/rhashtable.h>
5#include <linux/atomic.h> 5#include <linux/atomic.h>
6#include <linux/workqueue.h>
6#include <net/sock.h> 7#include <net/sock.h>
7 8
8#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) 9#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -33,6 +34,7 @@ struct netlink_sock {
33 34
34 struct rhash_head node; 35 struct rhash_head node;
35 struct rcu_head rcu; 36 struct rcu_head rcu;
37 struct work_struct work;
36}; 38};
37 39
38static inline struct netlink_sock *nlk_sk(struct sock *sk) 40static inline struct netlink_sock *nlk_sk(struct sock *sk)
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index b2f0e986a6f4..a5546249fb10 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
178 } 178 }
179 cb->args[1] = i; 179 cb->args[1] = i;
180 } else { 180 } else {
181 if (req->sdiag_protocol >= MAX_LINKS) { 181 if (req->sdiag_protocol >= MAX_LINKS)
182 read_unlock(&nl_table_lock);
183 rcu_read_unlock();
184 return -ENOENT; 182 return -ENOENT;
185 }
186 183
187 err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); 184 err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
188 } 185 }
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 23cc12639ba7..49c28e8ef01b 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -404,7 +404,7 @@ int __genl_register_family(struct genl_family *family)
404 404
405 err = genl_validate_assign_mc_groups(family); 405 err = genl_validate_assign_mc_groups(family);
406 if (err) 406 if (err)
407 goto errout_locked; 407 goto errout_free;
408 408
409 list_add_tail(&family->family_list, genl_family_chain(family->id)); 409 list_add_tail(&family->family_list, genl_family_chain(family->id));
410 genl_unlock_all(); 410 genl_unlock_all();
@@ -417,6 +417,8 @@ int __genl_register_family(struct genl_family *family)
417 417
418 return 0; 418 return 0;
419 419
420errout_free:
421 kfree(family->attrbuf);
420errout_locked: 422errout_locked:
421 genl_unlock_all(); 423 genl_unlock_all();
422errout: 424errout:
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 31045ef44a82..fecefa2dc94e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -370,8 +370,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
370 skb_orphan(skb); 370 skb_orphan(skb);
371 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); 371 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
372 err = nf_ct_frag6_gather(net, skb, user); 372 err = nf_ct_frag6_gather(net, skb, user);
373 if (err) 373 if (err) {
374 if (err != -EINPROGRESS)
375 kfree_skb(skb);
374 return err; 376 return err;
377 }
375 378
376 key->ip.proto = ipv6_hdr(skb)->nexthdr; 379 key->ip.proto = ipv6_hdr(skb)->nexthdr;
377 ovs_cb.mru = IP6CB(skb)->frag_max_size; 380 ovs_cb.mru = IP6CB(skb)->frag_max_size;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d2238b204691..dd2332390c45 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
3648 3648
3649 if (optlen != sizeof(val)) 3649 if (optlen != sizeof(val))
3650 return -EINVAL; 3650 return -EINVAL;
3651 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3652 return -EBUSY;
3653 if (copy_from_user(&val, optval, sizeof(val))) 3651 if (copy_from_user(&val, optval, sizeof(val)))
3654 return -EFAULT; 3652 return -EFAULT;
3655 switch (val) { 3653 switch (val) {
3656 case TPACKET_V1: 3654 case TPACKET_V1:
3657 case TPACKET_V2: 3655 case TPACKET_V2:
3658 case TPACKET_V3: 3656 case TPACKET_V3:
3659 po->tp_version = val; 3657 break;
3660 return 0;
3661 default: 3658 default:
3662 return -EINVAL; 3659 return -EINVAL;
3663 } 3660 }
3661 lock_sock(sk);
3662 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3663 ret = -EBUSY;
3664 } else {
3665 po->tp_version = val;
3666 ret = 0;
3667 }
3668 release_sock(sk);
3669 return ret;
3664 } 3670 }
3665 case PACKET_RESERVE: 3671 case PACKET_RESERVE:
3666 { 3672 {
@@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4164 /* Added to avoid minimal code churn */ 4170 /* Added to avoid minimal code churn */
4165 struct tpacket_req *req = &req_u->req; 4171 struct tpacket_req *req = &req_u->req;
4166 4172
4173 lock_sock(sk);
4167 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ 4174 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4168 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { 4175 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4169 net_warn_ratelimited("Tx-ring is not supported.\n"); 4176 net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4245,7 +4252,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4245 goto out; 4252 goto out;
4246 } 4253 }
4247 4254
4248 lock_sock(sk);
4249 4255
4250 /* Detach socket from network */ 4256 /* Detach socket from network */
4251 spin_lock(&po->bind_lock); 4257 spin_lock(&po->bind_lock);
@@ -4294,11 +4300,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4294 if (!tx_ring) 4300 if (!tx_ring)
4295 prb_shutdown_retire_blk_timer(po, rb_queue); 4301 prb_shutdown_retire_blk_timer(po, rb_queue);
4296 } 4302 }
4297 release_sock(sk);
4298 4303
4299 if (pg_vec) 4304 if (pg_vec)
4300 free_pg_vec(pg_vec, order, req->tp_block_nr); 4305 free_pg_vec(pg_vec, order, req->tp_block_nr);
4301out: 4306out:
4307 release_sock(sk);
4302 return err; 4308 return err;
4303} 4309}
4304 4310
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index fcddacc92e01..20e2923dc827 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -659,6 +659,8 @@ out_recv:
659out_pernet: 659out_pernet:
660 unregister_pernet_subsys(&rds_tcp_net_ops); 660 unregister_pernet_subsys(&rds_tcp_net_ops);
661out_slab: 661out_slab:
662 if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
663 pr_warn("could not unregister rds_tcp_dev_notifier\n");
662 kmem_cache_destroy(rds_tcp_conn_slab); 664 kmem_cache_destroy(rds_tcp_conn_slab);
663out: 665out:
664 return ret; 666 return ret;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b54d56d4959b..cf9b2fe8eac6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -108,6 +108,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind)
108 kfree(keys); 108 kfree(keys);
109} 109}
110 110
111static bool offset_valid(struct sk_buff *skb, int offset)
112{
113 if (offset > 0 && offset > skb->len)
114 return false;
115
116 if (offset < 0 && -offset > skb_headroom(skb))
117 return false;
118
119 return true;
120}
121
111static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, 122static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
112 struct tcf_result *res) 123 struct tcf_result *res)
113{ 124{
@@ -134,6 +145,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
134 if (tkey->offmask) { 145 if (tkey->offmask) {
135 char *d, _d; 146 char *d, _d;
136 147
148 if (!offset_valid(skb, off + tkey->at)) {
149 pr_info("tc filter pedit 'at' offset %d out of bounds\n",
150 off + tkey->at);
151 goto bad;
152 }
137 d = skb_header_pointer(skb, off + tkey->at, 1, 153 d = skb_header_pointer(skb, off + tkey->at, 1,
138 &_d); 154 &_d);
139 if (!d) 155 if (!d)
@@ -146,10 +162,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
146 " offset must be on 32 bit boundaries\n"); 162 " offset must be on 32 bit boundaries\n");
147 goto bad; 163 goto bad;
148 } 164 }
149 if (offset > 0 && offset > skb->len) { 165
150 pr_info("tc filter pedit" 166 if (!offset_valid(skb, off + offset)) {
151 " offset %d can't exceed pkt length %d\n", 167 pr_info("tc filter pedit offset %d out of bounds\n",
152 offset, skb->len); 168 offset);
153 goto bad; 169 goto bad;
154 } 170 }
155 171
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2b2a7974e4bb..b05d4a2155b0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -430,7 +430,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
430 if (!skb) 430 if (!skb)
431 return -ENOBUFS; 431 return -ENOBUFS;
432 432
433 if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) { 433 if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
434 n->nlmsg_flags, event) <= 0) {
434 kfree_skb(skb); 435 kfree_skb(skb);
435 return -EINVAL; 436 return -EINVAL;
436 } 437 }
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index eb219b78cd49..5877f6061b57 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
62 struct basic_head *head = rtnl_dereference(tp->root); 62 struct basic_head *head = rtnl_dereference(tp->root);
63 struct basic_filter *f; 63 struct basic_filter *f;
64 64
65 if (head == NULL)
66 return 0UL;
67
68 list_for_each_entry(f, &head->flist, link) { 65 list_for_each_entry(f, &head->flist, link) {
69 if (f->handle == handle) { 66 if (f->handle == handle) {
70 l = (unsigned long) f; 67 l = (unsigned long) f;
@@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force)
109 tcf_unbind_filter(tp, &f->res); 106 tcf_unbind_filter(tp, &f->res);
110 call_rcu(&f->rcu, basic_delete_filter); 107 call_rcu(&f->rcu, basic_delete_filter);
111 } 108 }
112 RCU_INIT_POINTER(tp->root, NULL);
113 kfree_rcu(head, rcu); 109 kfree_rcu(head, rcu);
114 return true; 110 return true;
115} 111}
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index bb1d5a487081..0a47ba5e6109 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -292,7 +292,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
292 call_rcu(&prog->rcu, __cls_bpf_delete_prog); 292 call_rcu(&prog->rcu, __cls_bpf_delete_prog);
293 } 293 }
294 294
295 RCU_INIT_POINTER(tp->root, NULL);
296 kfree_rcu(head, rcu); 295 kfree_rcu(head, rcu);
297 return true; 296 return true;
298} 297}
@@ -303,9 +302,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
303 struct cls_bpf_prog *prog; 302 struct cls_bpf_prog *prog;
304 unsigned long ret = 0UL; 303 unsigned long ret = 0UL;
305 304
306 if (head == NULL)
307 return 0UL;
308
309 list_for_each_entry(prog, &head->plist, link) { 305 list_for_each_entry(prog, &head->plist, link) {
310 if (prog->handle == handle) { 306 if (prog->handle == handle) {
311 ret = (unsigned long) prog; 307 ret = (unsigned long) prog;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 85233c470035..c1f20077837f 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -137,11 +137,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
137 137
138 if (!force) 138 if (!force)
139 return false; 139 return false;
140 140 /* Head can still be NULL due to cls_cgroup_init(). */
141 if (head) { 141 if (head)
142 RCU_INIT_POINTER(tp->root, NULL);
143 call_rcu(&head->rcu, cls_cgroup_destroy_rcu); 142 call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
144 } 143
145 return true; 144 return true;
146} 145}
147 146
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e39672394c7b..6575aba87630 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -596,7 +596,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force)
596 list_del_rcu(&f->list); 596 list_del_rcu(&f->list);
597 call_rcu(&f->rcu, flow_destroy_filter); 597 call_rcu(&f->rcu, flow_destroy_filter);
598 } 598 }
599 RCU_INIT_POINTER(tp->root, NULL);
600 kfree_rcu(head, rcu); 599 kfree_rcu(head, rcu);
601 return true; 600 return true;
602} 601}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f6f40fba599b..904442421db3 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/rhashtable.h> 15#include <linux/rhashtable.h>
16#include <linux/workqueue.h>
16 17
17#include <linux/if_ether.h> 18#include <linux/if_ether.h>
18#include <linux/in6.h> 19#include <linux/in6.h>
@@ -64,7 +65,10 @@ struct cls_fl_head {
64 bool mask_assigned; 65 bool mask_assigned;
65 struct list_head filters; 66 struct list_head filters;
66 struct rhashtable_params ht_params; 67 struct rhashtable_params ht_params;
67 struct rcu_head rcu; 68 union {
69 struct work_struct work;
70 struct rcu_head rcu;
71 };
68}; 72};
69 73
70struct cls_fl_filter { 74struct cls_fl_filter {
@@ -269,6 +273,24 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
269 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc); 273 dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
270} 274}
271 275
276static void fl_destroy_sleepable(struct work_struct *work)
277{
278 struct cls_fl_head *head = container_of(work, struct cls_fl_head,
279 work);
280 if (head->mask_assigned)
281 rhashtable_destroy(&head->ht);
282 kfree(head);
283 module_put(THIS_MODULE);
284}
285
286static void fl_destroy_rcu(struct rcu_head *rcu)
287{
288 struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
289
290 INIT_WORK(&head->work, fl_destroy_sleepable);
291 schedule_work(&head->work);
292}
293
272static bool fl_destroy(struct tcf_proto *tp, bool force) 294static bool fl_destroy(struct tcf_proto *tp, bool force)
273{ 295{
274 struct cls_fl_head *head = rtnl_dereference(tp->root); 296 struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -282,10 +304,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
282 list_del_rcu(&f->list); 304 list_del_rcu(&f->list);
283 call_rcu(&f->rcu, fl_destroy_filter); 305 call_rcu(&f->rcu, fl_destroy_filter);
284 } 306 }
285 RCU_INIT_POINTER(tp->root, NULL); 307
286 if (head->mask_assigned) 308 __module_get(THIS_MODULE);
287 rhashtable_destroy(&head->ht); 309 call_rcu(&head->rcu, fl_destroy_rcu);
288 kfree_rcu(head, rcu);
289 return true; 310 return true;
290} 311}
291 312
@@ -711,8 +732,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
711 goto errout; 732 goto errout;
712 733
713 if (fold) { 734 if (fold) {
714 rhashtable_remove_fast(&head->ht, &fold->ht_node, 735 if (!tc_skip_sw(fold->flags))
715 head->ht_params); 736 rhashtable_remove_fast(&head->ht, &fold->ht_node,
737 head->ht_params);
716 fl_hw_destroy_filter(tp, (unsigned long)fold); 738 fl_hw_destroy_filter(tp, (unsigned long)fold);
717 } 739 }
718 740
@@ -739,8 +761,9 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg)
739 struct cls_fl_head *head = rtnl_dereference(tp->root); 761 struct cls_fl_head *head = rtnl_dereference(tp->root);
740 struct cls_fl_filter *f = (struct cls_fl_filter *) arg; 762 struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
741 763
742 rhashtable_remove_fast(&head->ht, &f->ht_node, 764 if (!tc_skip_sw(f->flags))
743 head->ht_params); 765 rhashtable_remove_fast(&head->ht, &f->ht_node,
766 head->ht_params);
744 list_del_rcu(&f->list); 767 list_del_rcu(&f->list);
745 fl_hw_destroy_filter(tp, (unsigned long)f); 768 fl_hw_destroy_filter(tp, (unsigned long)f);
746 tcf_unbind_filter(tp, &f->res); 769 tcf_unbind_filter(tp, &f->res);
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 25927b6c4436..f935429bd5ef 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -114,7 +114,6 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
114 114
115 call_rcu(&f->rcu, mall_destroy_filter); 115 call_rcu(&f->rcu, mall_destroy_filter);
116 } 116 }
117 RCU_INIT_POINTER(tp->root, NULL);
118 kfree_rcu(head, rcu); 117 kfree_rcu(head, rcu);
119 return true; 118 return true;
120} 119}
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 4f05a19fb073..322438fb3ffc 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
152 return -1; 152 return -1;
153 nhptr = ip_hdr(skb); 153 nhptr = ip_hdr(skb);
154#endif 154#endif
155 155 if (unlikely(!head))
156 return -1;
156restart: 157restart:
157 158
158#if RSVP_DST_LEN == 4 159#if RSVP_DST_LEN == 4
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 96144bdf30db..0751245a6ace 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -543,7 +543,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
543 walker.fn = tcindex_destroy_element; 543 walker.fn = tcindex_destroy_element;
544 tcindex_walk(tp, &walker); 544 tcindex_walk(tp, &walker);
545 545
546 RCU_INIT_POINTER(tp->root, NULL);
547 call_rcu(&p->rcu, __tcindex_destroy); 546 call_rcu(&p->rcu, __tcindex_destroy);
548 return true; 547 return true;
549} 548}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a2ea1d1cc06a..a01a56ec8b8c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb)
181 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB 181 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
182 */ 182 */
183 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { 183 if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
184 if (asoc) { 184 if (transport) {
185 sctp_association_put(asoc); 185 sctp_transport_put(transport);
186 asoc = NULL; 186 asoc = NULL;
187 transport = NULL;
187 } else { 188 } else {
188 sctp_endpoint_put(ep); 189 sctp_endpoint_put(ep);
189 ep = NULL; 190 ep = NULL;
@@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb)
269 bh_unlock_sock(sk); 270 bh_unlock_sock(sk);
270 271
271 /* Release the asoc/ep ref we took in the lookup calls. */ 272 /* Release the asoc/ep ref we took in the lookup calls. */
272 if (asoc) 273 if (transport)
273 sctp_association_put(asoc); 274 sctp_transport_put(transport);
274 else 275 else
275 sctp_endpoint_put(ep); 276 sctp_endpoint_put(ep);
276 277
@@ -283,8 +284,8 @@ discard_it:
283 284
284discard_release: 285discard_release:
285 /* Release the asoc/ep ref we took in the lookup calls. */ 286 /* Release the asoc/ep ref we took in the lookup calls. */
286 if (asoc) 287 if (transport)
287 sctp_association_put(asoc); 288 sctp_transport_put(transport);
288 else 289 else
289 sctp_endpoint_put(ep); 290 sctp_endpoint_put(ep);
290 291
@@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
300{ 301{
301 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 302 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
302 struct sctp_inq *inqueue = &chunk->rcvr->inqueue; 303 struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
304 struct sctp_transport *t = chunk->transport;
303 struct sctp_ep_common *rcvr = NULL; 305 struct sctp_ep_common *rcvr = NULL;
304 int backloged = 0; 306 int backloged = 0;
305 307
@@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
351done: 353done:
352 /* Release the refs we took in sctp_add_backlog */ 354 /* Release the refs we took in sctp_add_backlog */
353 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 355 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
354 sctp_association_put(sctp_assoc(rcvr)); 356 sctp_transport_put(t);
355 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 357 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
356 sctp_endpoint_put(sctp_ep(rcvr)); 358 sctp_endpoint_put(sctp_ep(rcvr));
357 else 359 else
@@ -363,6 +365,7 @@ done:
363static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) 365static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
364{ 366{
365 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; 367 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
368 struct sctp_transport *t = chunk->transport;
366 struct sctp_ep_common *rcvr = chunk->rcvr; 369 struct sctp_ep_common *rcvr = chunk->rcvr;
367 int ret; 370 int ret;
368 371
@@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
373 * from us 376 * from us
374 */ 377 */
375 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) 378 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
376 sctp_association_hold(sctp_assoc(rcvr)); 379 sctp_transport_hold(t);
377 else if (SCTP_EP_TYPE_SOCKET == rcvr->type) 380 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
378 sctp_endpoint_hold(sctp_ep(rcvr)); 381 sctp_endpoint_hold(sctp_ep(rcvr));
379 else 382 else
@@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
537 return sk; 540 return sk;
538 541
539out: 542out:
540 sctp_association_put(asoc); 543 sctp_transport_put(transport);
541 return NULL; 544 return NULL;
542} 545}
543 546
544/* Common cleanup code for icmp/icmpv6 error handler. */ 547/* Common cleanup code for icmp/icmpv6 error handler. */
545void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) 548void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
546{ 549{
547 bh_unlock_sock(sk); 550 bh_unlock_sock(sk);
548 sctp_association_put(asoc); 551 sctp_transport_put(t);
549} 552}
550 553
551/* 554/*
@@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
641 } 644 }
642 645
643out_unlock: 646out_unlock:
644 sctp_err_finish(sk, asoc); 647 sctp_err_finish(sk, transport);
645} 648}
646 649
647/* 650/*
@@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association(
952 goto out; 955 goto out;
953 956
954 asoc = t->asoc; 957 asoc = t->asoc;
955 sctp_association_hold(asoc);
956 *pt = t; 958 *pt = t;
957 959
958 sctp_transport_put(t);
959
960out: 960out:
961 return asoc; 961 return asoc;
962} 962}
@@ -986,7 +986,7 @@ int sctp_has_association(struct net *net,
986 struct sctp_transport *transport; 986 struct sctp_transport *transport;
987 987
988 if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { 988 if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
989 sctp_association_put(asoc); 989 sctp_transport_put(transport);
990 return 1; 990 return 1;
991 } 991 }
992 992
@@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
1021 struct sctphdr *sh = sctp_hdr(skb); 1021 struct sctphdr *sh = sctp_hdr(skb);
1022 union sctp_params params; 1022 union sctp_params params;
1023 sctp_init_chunk_t *init; 1023 sctp_init_chunk_t *init;
1024 struct sctp_transport *transport;
1025 struct sctp_af *af; 1024 struct sctp_af *af;
1026 1025
1027 /* 1026 /*
@@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
1052 1051
1053 af->from_addr_param(paddr, params.addr, sh->source, 0); 1052 af->from_addr_param(paddr, params.addr, sh->source, 0);
1054 1053
1055 asoc = __sctp_lookup_association(net, laddr, paddr, &transport); 1054 asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1056 if (asoc) 1055 if (asoc)
1057 return asoc; 1056 return asoc;
1058 } 1057 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f473779e8b1c..176af3080a2b 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
198 } 198 }
199 199
200out_unlock: 200out_unlock:
201 sctp_err_finish(sk, asoc); 201 sctp_err_finish(sk, transport);
202out: 202out:
203 if (likely(idev != NULL)) 203 if (likely(idev != NULL))
204 in6_dev_put(idev); 204 in6_dev_put(idev);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9fbb6feb8c27..f23ad913dc7a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk,
1214 1214
1215 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); 1215 timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
1216 1216
1217 err = sctp_wait_for_connect(asoc, &timeo); 1217 if (assoc_id)
1218 if ((err == 0 || err == -EINPROGRESS) && assoc_id)
1219 *assoc_id = asoc->assoc_id; 1218 *assoc_id = asoc->assoc_id;
1219 err = sctp_wait_for_connect(asoc, &timeo);
1220 /* Note: the asoc may be freed after the return of
1221 * sctp_wait_for_connect.
1222 */
1220 1223
1221 /* Don't free association on exit. */ 1224 /* Don't free association on exit. */
1222 asoc = NULL; 1225 asoc = NULL;
@@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how)
4282{ 4285{
4283 struct net *net = sock_net(sk); 4286 struct net *net = sock_net(sk);
4284 struct sctp_endpoint *ep; 4287 struct sctp_endpoint *ep;
4285 struct sctp_association *asoc;
4286 4288
4287 if (!sctp_style(sk, TCP)) 4289 if (!sctp_style(sk, TCP))
4288 return; 4290 return;
4289 4291
4290 if (how & SEND_SHUTDOWN) { 4292 ep = sctp_sk(sk)->ep;
4293 if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
4294 struct sctp_association *asoc;
4295
4291 sk->sk_state = SCTP_SS_CLOSING; 4296 sk->sk_state = SCTP_SS_CLOSING;
4292 ep = sctp_sk(sk)->ep; 4297 asoc = list_entry(ep->asocs.next,
4293 if (!list_empty(&ep->asocs)) { 4298 struct sctp_association, asocs);
4294 asoc = list_entry(ep->asocs.next, 4299 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4295 struct sctp_association, asocs);
4296 sctp_primitive_SHUTDOWN(net, asoc, NULL);
4297 }
4298 } 4300 }
4299} 4301}
4300 4302
@@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
4480 if (!transport || !sctp_transport_hold(transport)) 4482 if (!transport || !sctp_transport_hold(transport))
4481 goto out; 4483 goto out;
4482 4484
4483 sctp_association_hold(transport->asoc);
4484 sctp_transport_put(transport);
4485
4486 rcu_read_unlock(); 4485 rcu_read_unlock();
4487 err = cb(transport, p); 4486 err = cb(transport, p);
4488 sctp_association_put(transport->asoc); 4487 sctp_transport_put(transport);
4489 4488
4490out: 4489out:
4491 return err; 4490 return err;
diff --git a/net/socket.c b/net/socket.c
index 5a9bf5ee2464..73dc69f9681e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -341,8 +341,23 @@ static const struct xattr_handler sockfs_xattr_handler = {
341 .get = sockfs_xattr_get, 341 .get = sockfs_xattr_get,
342}; 342};
343 343
344static int sockfs_security_xattr_set(const struct xattr_handler *handler,
345 struct dentry *dentry, struct inode *inode,
346 const char *suffix, const void *value,
347 size_t size, int flags)
348{
349 /* Handled by LSM. */
350 return -EAGAIN;
351}
352
353static const struct xattr_handler sockfs_security_xattr_handler = {
354 .prefix = XATTR_SECURITY_PREFIX,
355 .set = sockfs_security_xattr_set,
356};
357
344static const struct xattr_handler *sockfs_xattr_handlers[] = { 358static const struct xattr_handler *sockfs_xattr_handlers[] = {
345 &sockfs_xattr_handler, 359 &sockfs_xattr_handler,
360 &sockfs_security_xattr_handler,
346 NULL 361 NULL
347}; 362};
348 363
@@ -2038,6 +2053,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2038 if (err) 2053 if (err)
2039 break; 2054 break;
2040 ++datagrams; 2055 ++datagrams;
2056 if (msg_data_left(&msg_sys))
2057 break;
2041 cond_resched(); 2058 cond_resched();
2042 } 2059 }
2043 2060
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c3f652395a80..3bc1d61694cb 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure)
1002void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) 1002void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
1003{ 1003{
1004 struct svc_xprt *xprt; 1004 struct svc_xprt *xprt;
1005 struct svc_sock *svsk;
1006 struct socket *sock;
1007 struct list_head *le, *next; 1005 struct list_head *le, *next;
1008 LIST_HEAD(to_be_closed); 1006 LIST_HEAD(to_be_closed);
1009 struct linger no_linger = {
1010 .l_onoff = 1,
1011 .l_linger = 0,
1012 };
1013 1007
1014 spin_lock_bh(&serv->sv_lock); 1008 spin_lock_bh(&serv->sv_lock);
1015 list_for_each_safe(le, next, &serv->sv_tempsocks) { 1009 list_for_each_safe(le, next, &serv->sv_tempsocks) {
@@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
1027 list_del_init(le); 1021 list_del_init(le);
1028 xprt = list_entry(le, struct svc_xprt, xpt_list); 1022 xprt = list_entry(le, struct svc_xprt, xpt_list);
1029 dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); 1023 dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
1030 svsk = container_of(xprt, struct svc_sock, sk_xprt); 1024 xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
1031 sock = svsk->sk_sock;
1032 kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
1033 (char *)&no_linger, sizeof(no_linger));
1034 svc_close_xprt(xprt); 1025 svc_close_xprt(xprt);
1035 } 1026 }
1036} 1027}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 57625f64efd5..a4bc98265d88 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
438 return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 438 return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
439} 439}
440 440
441static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt)
442{
443 struct svc_sock *svsk;
444 struct socket *sock;
445 struct linger no_linger = {
446 .l_onoff = 1,
447 .l_linger = 0,
448 };
449
450 svsk = container_of(xprt, struct svc_sock, sk_xprt);
451 sock = svsk->sk_sock;
452 kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
453 (char *)&no_linger, sizeof(no_linger));
454}
455
441/* 456/*
442 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo 457 * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo
443 */ 458 */
@@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
648 return NULL; 663 return NULL;
649} 664}
650 665
666static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt)
667{
668}
669
651static struct svc_xprt *svc_udp_create(struct svc_serv *serv, 670static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
652 struct net *net, 671 struct net *net,
653 struct sockaddr *sa, int salen, 672 struct sockaddr *sa, int salen,
@@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = {
667 .xpo_has_wspace = svc_udp_has_wspace, 686 .xpo_has_wspace = svc_udp_has_wspace,
668 .xpo_accept = svc_udp_accept, 687 .xpo_accept = svc_udp_accept,
669 .xpo_secure_port = svc_sock_secure_port, 688 .xpo_secure_port = svc_sock_secure_port,
689 .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
670}; 690};
671 691
672static struct svc_xprt_class svc_udp_class = { 692static struct svc_xprt_class svc_udp_class = {
@@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = {
1242 .xpo_has_wspace = svc_tcp_has_wspace, 1262 .xpo_has_wspace = svc_tcp_has_wspace,
1243 .xpo_accept = svc_tcp_accept, 1263 .xpo_accept = svc_tcp_accept,
1244 .xpo_secure_port = svc_sock_secure_port, 1264 .xpo_secure_port = svc_sock_secure_port,
1265 .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
1245}; 1266};
1246 1267
1247static struct svc_xprt_class svc_tcp_class = { 1268static struct svc_xprt_class svc_tcp_class = {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6864fb967038..1334de2715c2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt);
67static void svc_rdma_free(struct svc_xprt *xprt); 67static void svc_rdma_free(struct svc_xprt *xprt);
68static int svc_rdma_has_wspace(struct svc_xprt *xprt); 68static int svc_rdma_has_wspace(struct svc_xprt *xprt);
69static int svc_rdma_secure_port(struct svc_rqst *); 69static int svc_rdma_secure_port(struct svc_rqst *);
70static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
70 71
71static struct svc_xprt_ops svc_rdma_ops = { 72static struct svc_xprt_ops svc_rdma_ops = {
72 .xpo_create = svc_rdma_create, 73 .xpo_create = svc_rdma_create,
@@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = {
79 .xpo_has_wspace = svc_rdma_has_wspace, 80 .xpo_has_wspace = svc_rdma_has_wspace,
80 .xpo_accept = svc_rdma_accept, 81 .xpo_accept = svc_rdma_accept,
81 .xpo_secure_port = svc_rdma_secure_port, 82 .xpo_secure_port = svc_rdma_secure_port,
83 .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
82}; 84};
83 85
84struct svc_xprt_class svc_rdma_class = { 86struct svc_xprt_class svc_rdma_class = {
@@ -1317,6 +1319,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp)
1317 return 1; 1319 return 1;
1318} 1320}
1319 1321
1322static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
1323{
1324}
1325
1320int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) 1326int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1321{ 1327{
1322 struct ib_send_wr *bad_wr, *n_wr; 1328 struct ib_send_wr *bad_wr, *n_wr;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 975dbeb60ab0..52d74760fb68 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -421,6 +421,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
421 dev = dev_get_by_name(net, driver_name); 421 dev = dev_get_by_name(net, driver_name);
422 if (!dev) 422 if (!dev)
423 return -ENODEV; 423 return -ENODEV;
424 if (tipc_mtu_bad(dev, 0)) {
425 dev_put(dev);
426 return -EINVAL;
427 }
424 428
425 /* Associate TIPC bearer with L2 bearer */ 429 /* Associate TIPC bearer with L2 bearer */
426 rcu_assign_pointer(b->media_ptr, dev); 430 rcu_assign_pointer(b->media_ptr, dev);
@@ -610,8 +614,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
610 if (!b) 614 if (!b)
611 return NOTIFY_DONE; 615 return NOTIFY_DONE;
612 616
613 b->mtu = dev->mtu;
614
615 switch (evt) { 617 switch (evt) {
616 case NETDEV_CHANGE: 618 case NETDEV_CHANGE:
617 if (netif_carrier_ok(dev)) 619 if (netif_carrier_ok(dev))
@@ -624,6 +626,11 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
624 tipc_reset_bearer(net, b); 626 tipc_reset_bearer(net, b);
625 break; 627 break;
626 case NETDEV_CHANGEMTU: 628 case NETDEV_CHANGEMTU:
629 if (tipc_mtu_bad(dev, 0)) {
630 bearer_disable(net, b);
631 break;
632 }
633 b->mtu = dev->mtu;
627 tipc_reset_bearer(net, b); 634 tipc_reset_bearer(net, b);
628 break; 635 break;
629 case NETDEV_CHANGEADDR: 636 case NETDEV_CHANGEADDR:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 78892e2f53e3..278ff7f616f9 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,6 +39,7 @@
39 39
40#include "netlink.h" 40#include "netlink.h"
41#include "core.h" 41#include "core.h"
42#include "msg.h"
42#include <net/genetlink.h> 43#include <net/genetlink.h>
43 44
44#define MAX_MEDIA 3 45#define MAX_MEDIA 3
@@ -59,6 +60,9 @@
59#define TIPC_MEDIA_TYPE_IB 2 60#define TIPC_MEDIA_TYPE_IB 2
60#define TIPC_MEDIA_TYPE_UDP 3 61#define TIPC_MEDIA_TYPE_UDP 3
61 62
63/* minimum bearer MTU */
64#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
65
62/** 66/**
63 * struct tipc_media_addr - destination address used by TIPC bearers 67 * struct tipc_media_addr - destination address used by TIPC bearers
64 * @value: address info (format defined by media) 68 * @value: address info (format defined by media)
@@ -215,4 +219,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
215void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, 219void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
216 struct sk_buff_head *xmitq); 220 struct sk_buff_head *xmitq);
217 221
222/* check if device MTU is too low for tipc headers */
223static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
224{
225 if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
226 return false;
227 netdev_warn(dev, "MTU too low for tipc bearer\n");
228 return true;
229}
230
218#endif /* _TIPC_BEARER_H */ 231#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1055164c6232..bda89bf9f4ff 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -47,8 +47,8 @@
47#include <linux/pkt_sched.h> 47#include <linux/pkt_sched.h>
48 48
49struct tipc_stats { 49struct tipc_stats {
50 u32 sent_info; /* used in counting # sent packets */ 50 u32 sent_pkts;
51 u32 recv_info; /* used in counting # recv'd packets */ 51 u32 recv_pkts;
52 u32 sent_states; 52 u32 sent_states;
53 u32 recv_states; 53 u32 recv_states;
54 u32 sent_probes; 54 u32 sent_probes;
@@ -857,7 +857,6 @@ void tipc_link_reset(struct tipc_link *l)
857 l->acked = 0; 857 l->acked = 0;
858 l->silent_intv_cnt = 0; 858 l->silent_intv_cnt = 0;
859 l->rst_cnt = 0; 859 l->rst_cnt = 0;
860 l->stats.recv_info = 0;
861 l->stale_count = 0; 860 l->stale_count = 0;
862 l->bc_peer_is_up = false; 861 l->bc_peer_is_up = false;
863 memset(&l->mon_state, 0, sizeof(l->mon_state)); 862 memset(&l->mon_state, 0, sizeof(l->mon_state));
@@ -888,6 +887,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
888 struct sk_buff_head *transmq = &l->transmq; 887 struct sk_buff_head *transmq = &l->transmq;
889 struct sk_buff_head *backlogq = &l->backlogq; 888 struct sk_buff_head *backlogq = &l->backlogq;
890 struct sk_buff *skb, *_skb, *bskb; 889 struct sk_buff *skb, *_skb, *bskb;
890 int pkt_cnt = skb_queue_len(list);
891 891
892 /* Match msg importance against this and all higher backlog limits: */ 892 /* Match msg importance against this and all higher backlog limits: */
893 if (!skb_queue_empty(backlogq)) { 893 if (!skb_queue_empty(backlogq)) {
@@ -901,6 +901,11 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
901 return -EMSGSIZE; 901 return -EMSGSIZE;
902 } 902 }
903 903
904 if (pkt_cnt > 1) {
905 l->stats.sent_fragmented++;
906 l->stats.sent_fragments += pkt_cnt;
907 }
908
904 /* Prepare each packet for sending, and add to relevant queue: */ 909 /* Prepare each packet for sending, and add to relevant queue: */
905 while (skb_queue_len(list)) { 910 while (skb_queue_len(list)) {
906 skb = skb_peek(list); 911 skb = skb_peek(list);
@@ -920,6 +925,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
920 __skb_queue_tail(xmitq, _skb); 925 __skb_queue_tail(xmitq, _skb);
921 TIPC_SKB_CB(skb)->ackers = l->ackers; 926 TIPC_SKB_CB(skb)->ackers = l->ackers;
922 l->rcv_unacked = 0; 927 l->rcv_unacked = 0;
928 l->stats.sent_pkts++;
923 seqno++; 929 seqno++;
924 continue; 930 continue;
925 } 931 }
@@ -968,6 +974,7 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
968 msg_set_ack(hdr, ack); 974 msg_set_ack(hdr, ack);
969 msg_set_bcast_ack(hdr, bc_ack); 975 msg_set_bcast_ack(hdr, bc_ack);
970 l->rcv_unacked = 0; 976 l->rcv_unacked = 0;
977 l->stats.sent_pkts++;
971 seqno++; 978 seqno++;
972 } 979 }
973 l->snd_nxt = seqno; 980 l->snd_nxt = seqno;
@@ -1260,7 +1267,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1260 1267
1261 /* Deliver packet */ 1268 /* Deliver packet */
1262 l->rcv_nxt++; 1269 l->rcv_nxt++;
1263 l->stats.recv_info++; 1270 l->stats.recv_pkts++;
1264 if (!tipc_data_input(l, skb, l->inputq)) 1271 if (!tipc_data_input(l, skb, l->inputq))
1265 rc |= tipc_link_input(l, skb, l->inputq); 1272 rc |= tipc_link_input(l, skb, l->inputq);
1266 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) 1273 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
@@ -1492,8 +1499,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1492 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) 1499 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1493 l->tolerance = peers_tol; 1500 l->tolerance = peers_tol;
1494 1501
1495 if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI, 1502 /* Update own prio if peer indicates a different value */
1496 TIPC_MAX_LINK_PRI)) { 1503 if ((peers_prio != l->priority) &&
1504 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
1497 l->priority = peers_prio; 1505 l->priority = peers_prio;
1498 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1506 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1499 } 1507 }
@@ -1799,10 +1807,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1799void tipc_link_reset_stats(struct tipc_link *l) 1807void tipc_link_reset_stats(struct tipc_link *l)
1800{ 1808{
1801 memset(&l->stats, 0, sizeof(l->stats)); 1809 memset(&l->stats, 0, sizeof(l->stats));
1802 if (!link_is_bc_sndlink(l)) {
1803 l->stats.sent_info = l->snd_nxt;
1804 l->stats.recv_info = l->rcv_nxt;
1805 }
1806} 1810}
1807 1811
1808static void link_print(struct tipc_link *l, const char *str) 1812static void link_print(struct tipc_link *l, const char *str)
@@ -1866,12 +1870,12 @@ static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1866 }; 1870 };
1867 1871
1868 struct nla_map map[] = { 1872 struct nla_map map[] = {
1869 {TIPC_NLA_STATS_RX_INFO, s->recv_info}, 1873 {TIPC_NLA_STATS_RX_INFO, 0},
1870 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, 1874 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1871 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, 1875 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1872 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, 1876 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1873 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, 1877 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1874 {TIPC_NLA_STATS_TX_INFO, s->sent_info}, 1878 {TIPC_NLA_STATS_TX_INFO, 0},
1875 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, 1879 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1876 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, 1880 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1877 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, 1881 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
@@ -1946,9 +1950,9 @@ int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1946 goto attr_msg_full; 1950 goto attr_msg_full;
1947 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) 1951 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1948 goto attr_msg_full; 1952 goto attr_msg_full;
1949 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt)) 1953 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
1950 goto attr_msg_full; 1954 goto attr_msg_full;
1951 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt)) 1955 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
1952 goto attr_msg_full; 1956 goto attr_msg_full;
1953 1957
1954 if (tipc_link_is_up(link)) 1958 if (tipc_link_is_up(link))
@@ -2003,12 +2007,12 @@ static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2003 }; 2007 };
2004 2008
2005 struct nla_map map[] = { 2009 struct nla_map map[] = {
2006 {TIPC_NLA_STATS_RX_INFO, stats->recv_info}, 2010 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2007 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, 2011 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2008 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, 2012 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2009 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, 2013 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2010 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, 2014 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2011 {TIPC_NLA_STATS_TX_INFO, stats->sent_info}, 2015 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2012 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, 2016 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2013 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, 2017 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2014 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, 2018 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
@@ -2075,9 +2079,9 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2075 goto attr_msg_full; 2079 goto attr_msg_full;
2076 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) 2080 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2077 goto attr_msg_full; 2081 goto attr_msg_full;
2078 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt)) 2082 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2079 goto attr_msg_full; 2083 goto attr_msg_full;
2080 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt)) 2084 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2081 goto attr_msg_full; 2085 goto attr_msg_full;
2082 2086
2083 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); 2087 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index ed97a5876ebe..9e109bb1a207 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -455,14 +455,14 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
455 int i, applied_bef; 455 int i, applied_bef;
456 456
457 state->probing = false; 457 state->probing = false;
458 if (!dlen)
459 return;
460 458
461 /* Sanity check received domain record */ 459 /* Sanity check received domain record */
462 if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) { 460 if (dlen < dom_rec_len(arrv_dom, 0))
463 pr_warn_ratelimited("Received illegal domain record\n"); 461 return;
462 if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
463 return;
464 if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
464 return; 465 return;
465 }
466 466
467 /* Synch generation numbers with peer if link just came up */ 467 /* Synch generation numbers with peer if link just came up */
468 if (!state->synched) { 468 if (!state->synched) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f9f5f3c3dab5..41f013888f07 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB 4 * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -129,54 +129,8 @@ static const struct proto_ops packet_ops;
129static const struct proto_ops stream_ops; 129static const struct proto_ops stream_ops;
130static const struct proto_ops msg_ops; 130static const struct proto_ops msg_ops;
131static struct proto tipc_proto; 131static struct proto tipc_proto;
132
133static const struct rhashtable_params tsk_rht_params; 132static const struct rhashtable_params tsk_rht_params;
134 133
135/*
136 * Revised TIPC socket locking policy:
137 *
138 * Most socket operations take the standard socket lock when they start
139 * and hold it until they finish (or until they need to sleep). Acquiring
140 * this lock grants the owner exclusive access to the fields of the socket
141 * data structures, with the exception of the backlog queue. A few socket
142 * operations can be done without taking the socket lock because they only
143 * read socket information that never changes during the life of the socket.
144 *
145 * Socket operations may acquire the lock for the associated TIPC port if they
146 * need to perform an operation on the port. If any routine needs to acquire
147 * both the socket lock and the port lock it must take the socket lock first
148 * to avoid the risk of deadlock.
149 *
150 * The dispatcher handling incoming messages cannot grab the socket lock in
151 * the standard fashion, since invoked it runs at the BH level and cannot block.
152 * Instead, it checks to see if the socket lock is currently owned by someone,
153 * and either handles the message itself or adds it to the socket's backlog
154 * queue; in the latter case the queued message is processed once the process
155 * owning the socket lock releases it.
156 *
157 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
158 * the problem of a blocked socket operation preventing any other operations
159 * from occurring. However, applications must be careful if they have
160 * multiple threads trying to send (or receive) on the same socket, as these
161 * operations might interfere with each other. For example, doing a connect
162 * and a receive at the same time might allow the receive to consume the
163 * ACK message meant for the connect. While additional work could be done
164 * to try and overcome this, it doesn't seem to be worthwhile at the present.
165 *
166 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
167 * that another operation that must be performed in a non-blocking manner is
168 * not delayed for very long because the lock has already been taken.
169 *
170 * NOTE: This code assumes that certain fields of a port/socket pair are
171 * constant over its lifetime; such fields can be examined without taking
172 * the socket lock and/or port lock, and do not need to be re-read even
173 * after resuming processing after waiting. These fields include:
174 * - socket type
175 * - pointer to socket sk structure (aka tipc_sock structure)
176 * - pointer to port structure
177 * - port reference
178 */
179
180static u32 tsk_own_node(struct tipc_sock *tsk) 134static u32 tsk_own_node(struct tipc_sock *tsk)
181{ 135{
182 return msg_prevnode(&tsk->phdr); 136 return msg_prevnode(&tsk->phdr);
@@ -232,7 +186,7 @@ static struct tipc_sock *tipc_sk(const struct sock *sk)
232 186
233static bool tsk_conn_cong(struct tipc_sock *tsk) 187static bool tsk_conn_cong(struct tipc_sock *tsk)
234{ 188{
235 return tsk->snt_unacked >= tsk->snd_win; 189 return tsk->snt_unacked > tsk->snd_win;
236} 190}
237 191
238/* tsk_blocks(): translate a buffer size in bytes to number of 192/* tsk_blocks(): translate a buffer size in bytes to number of
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 78cab9c5a445..b58dc95f3d35 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -697,6 +697,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
697 udp_conf.local_ip.s_addr = htonl(INADDR_ANY); 697 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
698 udp_conf.use_udp_checksums = false; 698 udp_conf.use_udp_checksums = false;
699 ub->ifindex = dev->ifindex; 699 ub->ifindex = dev->ifindex;
700 if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
701 sizeof(struct udphdr))) {
702 err = -EINVAL;
703 goto err;
704 }
700 b->mtu = dev->mtu - sizeof(struct iphdr) 705 b->mtu = dev->mtu - sizeof(struct iphdr)
701 - sizeof(struct udphdr); 706 - sizeof(struct udphdr);
702#if IS_ENABLED(CONFIG_IPV6) 707#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 145082e2ba36..2358f2690ec5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2199,7 +2199,8 @@ out:
2199 * Sleep until more data has arrived. But check for races.. 2199 * Sleep until more data has arrived. But check for races..
2200 */ 2200 */
2201static long unix_stream_data_wait(struct sock *sk, long timeo, 2201static long unix_stream_data_wait(struct sock *sk, long timeo,
2202 struct sk_buff *last, unsigned int last_len) 2202 struct sk_buff *last, unsigned int last_len,
2203 bool freezable)
2203{ 2204{
2204 struct sk_buff *tail; 2205 struct sk_buff *tail;
2205 DEFINE_WAIT(wait); 2206 DEFINE_WAIT(wait);
@@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
2220 2221
2221 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 2222 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2222 unix_state_unlock(sk); 2223 unix_state_unlock(sk);
2223 timeo = freezable_schedule_timeout(timeo); 2224 if (freezable)
2225 timeo = freezable_schedule_timeout(timeo);
2226 else
2227 timeo = schedule_timeout(timeo);
2224 unix_state_lock(sk); 2228 unix_state_lock(sk);
2225 2229
2226 if (sock_flag(sk, SOCK_DEAD)) 2230 if (sock_flag(sk, SOCK_DEAD))
@@ -2250,7 +2254,8 @@ struct unix_stream_read_state {
2250 unsigned int splice_flags; 2254 unsigned int splice_flags;
2251}; 2255};
2252 2256
2253static int unix_stream_read_generic(struct unix_stream_read_state *state) 2257static int unix_stream_read_generic(struct unix_stream_read_state *state,
2258 bool freezable)
2254{ 2259{
2255 struct scm_cookie scm; 2260 struct scm_cookie scm;
2256 struct socket *sock = state->socket; 2261 struct socket *sock = state->socket;
@@ -2330,7 +2335,7 @@ again:
2330 mutex_unlock(&u->iolock); 2335 mutex_unlock(&u->iolock);
2331 2336
2332 timeo = unix_stream_data_wait(sk, timeo, last, 2337 timeo = unix_stream_data_wait(sk, timeo, last,
2333 last_len); 2338 last_len, freezable);
2334 2339
2335 if (signal_pending(current)) { 2340 if (signal_pending(current)) {
2336 err = sock_intr_errno(timeo); 2341 err = sock_intr_errno(timeo);
@@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
2472 .flags = flags 2477 .flags = flags
2473 }; 2478 };
2474 2479
2475 return unix_stream_read_generic(&state); 2480 return unix_stream_read_generic(&state, true);
2476} 2481}
2477 2482
2478static int unix_stream_splice_actor(struct sk_buff *skb, 2483static int unix_stream_splice_actor(struct sk_buff *skb,
@@ -2503,7 +2508,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
2503 flags & SPLICE_F_NONBLOCK) 2508 flags & SPLICE_F_NONBLOCK)
2504 state.flags = MSG_DONTWAIT; 2509 state.flags = MSG_DONTWAIT;
2505 2510
2506 return unix_stream_read_generic(&state); 2511 return unix_stream_read_generic(&state, false);
2507} 2512}
2508 2513
2509static int unix_shutdown(struct socket *sock, int mode) 2514static int unix_shutdown(struct socket *sock, int mode)
@@ -2812,7 +2817,8 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2812 i++; 2817 i++;
2813 } 2818 }
2814 for ( ; i < len; i++) 2819 for ( ; i < len; i++)
2815 seq_putc(seq, u->addr->name->sun_path[i]); 2820 seq_putc(seq, u->addr->name->sun_path[i] ?:
2821 '@');
2816 } 2822 }
2817 unix_state_unlock(s); 2823 unix_state_unlock(s);
2818 seq_putc(seq, '\n'); 2824 seq_putc(seq, '\n');
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 08d2e948c9ad..f0c0c8a48c92 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -71,6 +71,7 @@ struct cfg80211_registered_device {
71 struct list_head bss_list; 71 struct list_head bss_list;
72 struct rb_root bss_tree; 72 struct rb_root bss_tree;
73 u32 bss_generation; 73 u32 bss_generation;
74 u32 bss_entries;
74 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 75 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
75 struct sk_buff *scan_msg; 76 struct sk_buff *scan_msg;
76 struct cfg80211_sched_scan_request __rcu *sched_scan_req; 77 struct cfg80211_sched_scan_request __rcu *sched_scan_req;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b5bd58d0f731..35ad69fd0838 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -57,6 +57,19 @@
57 * also linked into the probe response struct. 57 * also linked into the probe response struct.
58 */ 58 */
59 59
60/*
61 * Limit the number of BSS entries stored in mac80211. Each one is
62 * a bit over 4k at most, so this limits to roughly 4-5M of memory.
63 * If somebody wants to really attack this though, they'd likely
64 * use small beacons, and only one type of frame, limiting each of
65 * the entries to a much smaller size (in order to generate more
66 * entries in total, so overhead is bigger.)
67 */
68static int bss_entries_limit = 1000;
69module_param(bss_entries_limit, int, 0644);
70MODULE_PARM_DESC(bss_entries_limit,
71 "limit to number of scan BSS entries (per wiphy, default 1000)");
72
60#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) 73#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
61 74
62static void bss_free(struct cfg80211_internal_bss *bss) 75static void bss_free(struct cfg80211_internal_bss *bss)
@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
137 150
138 list_del_init(&bss->list); 151 list_del_init(&bss->list);
139 rb_erase(&bss->rbn, &rdev->bss_tree); 152 rb_erase(&bss->rbn, &rdev->bss_tree);
153 rdev->bss_entries--;
154 WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
155 "rdev bss entries[%d]/list[empty:%d] corruption\n",
156 rdev->bss_entries, list_empty(&rdev->bss_list));
140 bss_ref_put(rdev, bss); 157 bss_ref_put(rdev, bss);
141 return true; 158 return true;
142} 159}
@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
163 rdev->bss_generation++; 180 rdev->bss_generation++;
164} 181}
165 182
183static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
184{
185 struct cfg80211_internal_bss *bss, *oldest = NULL;
186 bool ret;
187
188 lockdep_assert_held(&rdev->bss_lock);
189
190 list_for_each_entry(bss, &rdev->bss_list, list) {
191 if (atomic_read(&bss->hold))
192 continue;
193
194 if (!list_empty(&bss->hidden_list) &&
195 !bss->pub.hidden_beacon_bss)
196 continue;
197
198 if (oldest && time_before(oldest->ts, bss->ts))
199 continue;
200 oldest = bss;
201 }
202
203 if (WARN_ON(!oldest))
204 return false;
205
206 /*
207 * The callers make sure to increase rdev->bss_generation if anything
208 * gets removed (and a new entry added), so there's no need to also do
209 * it here.
210 */
211
212 ret = __cfg80211_unlink_bss(rdev, oldest);
213 WARN_ON(!ret);
214 return ret;
215}
216
166void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, 217void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
167 bool send_message) 218 bool send_message)
168{ 219{
@@ -689,6 +740,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
689 const u8 *ie; 740 const u8 *ie;
690 int i, ssidlen; 741 int i, ssidlen;
691 u8 fold = 0; 742 u8 fold = 0;
743 u32 n_entries = 0;
692 744
693 ies = rcu_access_pointer(new->pub.beacon_ies); 745 ies = rcu_access_pointer(new->pub.beacon_ies);
694 if (WARN_ON(!ies)) 746 if (WARN_ON(!ies))
@@ -712,6 +764,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
712 /* This is the bad part ... */ 764 /* This is the bad part ... */
713 765
714 list_for_each_entry(bss, &rdev->bss_list, list) { 766 list_for_each_entry(bss, &rdev->bss_list, list) {
767 /*
768 * we're iterating all the entries anyway, so take the
769 * opportunity to validate the list length accounting
770 */
771 n_entries++;
772
715 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) 773 if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
716 continue; 774 continue;
717 if (bss->pub.channel != new->pub.channel) 775 if (bss->pub.channel != new->pub.channel)
@@ -740,6 +798,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
740 new->pub.beacon_ies); 798 new->pub.beacon_ies);
741 } 799 }
742 800
801 WARN_ONCE(n_entries != rdev->bss_entries,
802 "rdev bss entries[%d]/list[len:%d] corruption\n",
803 rdev->bss_entries, n_entries);
804
743 return true; 805 return true;
744} 806}
745 807
@@ -894,7 +956,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
894 } 956 }
895 } 957 }
896 958
959 if (rdev->bss_entries >= bss_entries_limit &&
960 !cfg80211_bss_expire_oldest(rdev)) {
961 kfree(new);
962 goto drop;
963 }
964
897 list_add_tail(&new->list, &rdev->bss_list); 965 list_add_tail(&new->list, &rdev->bss_list);
966 rdev->bss_entries++;
898 rb_insert_bss(rdev, new); 967 rb_insert_bss(rdev, new);
899 found = new; 968 found = new;
900 } 969 }
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 5ea12afc7706..659b507b347d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1158,7 +1158,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
1158 58500000, 1158 58500000,
1159 65000000, 1159 65000000,
1160 78000000, 1160 78000000,
1161 0, 1161 /* not in the spec, but some devices use this: */
1162 86500000,
1162 }, 1163 },
1163 { 13500000, 1164 { 13500000,
1164 27000000, 1165 27000000,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fd6986634e6f..5bf7e1bfeac7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1268,12 +1268,14 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1268 err = security_xfrm_policy_lookup(pol->security, 1268 err = security_xfrm_policy_lookup(pol->security,
1269 fl->flowi_secid, 1269 fl->flowi_secid,
1270 policy_to_flow_dir(dir)); 1270 policy_to_flow_dir(dir));
1271 if (!err && !xfrm_pol_hold_rcu(pol)) 1271 if (!err) {
1272 goto again; 1272 if (!xfrm_pol_hold_rcu(pol))
1273 else if (err == -ESRCH) 1273 goto again;
1274 } else if (err == -ESRCH) {
1274 pol = NULL; 1275 pol = NULL;
1275 else 1276 } else {
1276 pol = ERR_PTR(err); 1277 pol = ERR_PTR(err);
1278 }
1277 } else 1279 } else
1278 pol = NULL; 1280 pol = NULL;
1279 } 1281 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 08892091cfe3..671a1d0333f0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2450,7 +2450,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2450 2450
2451#ifdef CONFIG_COMPAT 2451#ifdef CONFIG_COMPAT
2452 if (in_compat_syscall()) 2452 if (in_compat_syscall())
2453 return -ENOTSUPP; 2453 return -EOPNOTSUPP;
2454#endif 2454#endif
2455 2455
2456 type = nlh->nlmsg_type; 2456 type = nlh->nlmsg_type;
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 12b7304d55dc..72c58675973e 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -27,6 +27,7 @@ hostprogs-y += xdp2
27hostprogs-y += test_current_task_under_cgroup 27hostprogs-y += test_current_task_under_cgroup
28hostprogs-y += trace_event 28hostprogs-y += trace_event
29hostprogs-y += sampleip 29hostprogs-y += sampleip
30hostprogs-y += tc_l2_redirect
30 31
31test_verifier-objs := test_verifier.o libbpf.o 32test_verifier-objs := test_verifier.o libbpf.o
32test_maps-objs := test_maps.o libbpf.o 33test_maps-objs := test_maps.o libbpf.o
@@ -56,6 +57,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \
56 test_current_task_under_cgroup_user.o 57 test_current_task_under_cgroup_user.o
57trace_event-objs := bpf_load.o libbpf.o trace_event_user.o 58trace_event-objs := bpf_load.o libbpf.o trace_event_user.o
58sampleip-objs := bpf_load.o libbpf.o sampleip_user.o 59sampleip-objs := bpf_load.o libbpf.o sampleip_user.o
60tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o
59 61
60# Tell kbuild to always build the programs 62# Tell kbuild to always build the programs
61always := $(hostprogs-y) 63always := $(hostprogs-y)
@@ -72,6 +74,7 @@ always += test_probe_write_user_kern.o
72always += trace_output_kern.o 74always += trace_output_kern.o
73always += tcbpf1_kern.o 75always += tcbpf1_kern.o
74always += tcbpf2_kern.o 76always += tcbpf2_kern.o
77always += tc_l2_redirect_kern.o
75always += lathist_kern.o 78always += lathist_kern.o
76always += offwaketime_kern.o 79always += offwaketime_kern.o
77always += spintest_kern.o 80always += spintest_kern.o
@@ -111,6 +114,7 @@ HOSTLOADLIBES_xdp2 += -lelf
111HOSTLOADLIBES_test_current_task_under_cgroup += -lelf 114HOSTLOADLIBES_test_current_task_under_cgroup += -lelf
112HOSTLOADLIBES_trace_event += -lelf 115HOSTLOADLIBES_trace_event += -lelf
113HOSTLOADLIBES_sampleip += -lelf 116HOSTLOADLIBES_sampleip += -lelf
117HOSTLOADLIBES_tc_l2_redirect += -l elf
114 118
115# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: 119# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
116# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang 120# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 90f44bd2045e..dadd5161bd91 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -113,7 +113,7 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
113#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */ 113#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
114#define PT_REGS_RC(x) ((x)->gprs[2]) 114#define PT_REGS_RC(x) ((x)->gprs[2])
115#define PT_REGS_SP(x) ((x)->gprs[15]) 115#define PT_REGS_SP(x) ((x)->gprs[15])
116#define PT_REGS_IP(x) ((x)->ip) 116#define PT_REGS_IP(x) ((x)->psw.addr)
117 117
118#elif defined(__aarch64__) 118#elif defined(__aarch64__)
119 119
diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
index 774a681f374a..ceabf31079cf 100644
--- a/samples/bpf/sampleip_kern.c
+++ b/samples/bpf/sampleip_kern.c
@@ -25,7 +25,7 @@ int do_sample(struct bpf_perf_event_data *ctx)
25 u64 ip; 25 u64 ip;
26 u32 *value, init_val = 1; 26 u32 *value, init_val = 1;
27 27
28 ip = ctx->regs.ip; 28 ip = PT_REGS_IP(&ctx->regs);
29 value = bpf_map_lookup_elem(&ip_map, &ip); 29 value = bpf_map_lookup_elem(&ip_map, &ip);
30 if (value) 30 if (value)
31 *value += 1; 31 *value += 1;
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh
new file mode 100755
index 000000000000..80a05591a140
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect.sh
@@ -0,0 +1,173 @@
1#!/bin/bash
2
3[[ -z $TC ]] && TC='tc'
4[[ -z $IP ]] && IP='ip'
5
6REDIRECT_USER='./tc_l2_redirect'
7REDIRECT_BPF='./tc_l2_redirect_kern.o'
8
9RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter)
10IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding)
11
12function config_common {
13 local tun_type=$1
14
15 $IP netns add ns1
16 $IP netns add ns2
17 $IP link add ve1 type veth peer name vens1
18 $IP link add ve2 type veth peer name vens2
19 $IP link set dev ve1 up
20 $IP link set dev ve2 up
21 $IP link set dev ve1 mtu 1500
22 $IP link set dev ve2 mtu 1500
23 $IP link set dev vens1 netns ns1
24 $IP link set dev vens2 netns ns2
25
26 $IP -n ns1 link set dev lo up
27 $IP -n ns1 link set dev vens1 up
28 $IP -n ns1 addr add 10.1.1.101/24 dev vens1
29 $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad
30 $IP -n ns1 route add default via 10.1.1.1 dev vens1
31 $IP -n ns1 route add default via 2401:db01::1 dev vens1
32
33 $IP -n ns2 link set dev lo up
34 $IP -n ns2 link set dev vens2 up
35 $IP -n ns2 addr add 10.2.1.102/24 dev vens2
36 $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad
37 $IP -n ns2 addr add 10.10.1.102 dev lo
38 $IP -n ns2 addr add 2401:face::66/64 dev lo nodad
39 $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1
40 $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1
41 $IP -n ns2 link set dev ipt2 up
42 $IP -n ns2 link set dev ip6t2 up
43 $IP netns exec ns2 $TC qdisc add dev vens2 clsact
44 $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip
45 if [[ $tun_type == "ipip" ]]; then
46 $IP -n ns2 route add 10.1.1.0/24 dev ipt2
47 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
48 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0
49 else
50 $IP -n ns2 route add 10.1.1.0/24 dev ip6t2
51 $IP -n ns2 route add 2401:db01::/64 dev ip6t2
52 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0
53 $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0
54 fi
55
56 $IP addr add 10.1.1.1/24 dev ve1
57 $IP addr add 2401:db01::1/64 dev ve1 nodad
58 $IP addr add 10.2.1.1/24 dev ve2
59 $IP addr add 2401:db02::1/64 dev ve2 nodad
60
61 $TC qdisc add dev ve2 clsact
62 $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward
63
64 sysctl -q -w net.ipv4.conf.all.rp_filter=0
65 sysctl -q -w net.ipv6.conf.all.forwarding=1
66}
67
68function cleanup {
69 set +e
70 [[ -z $DEBUG ]] || set +x
71 $IP netns delete ns1 >& /dev/null
72 $IP netns delete ns2 >& /dev/null
73 $IP link del ve1 >& /dev/null
74 $IP link del ve2 >& /dev/null
75 $IP link del ipt >& /dev/null
76 $IP link del ip6t >& /dev/null
77 sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER
78 sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING
79 rm -f /sys/fs/bpf/tc/globals/tun_iface
80 [[ -z $DEBUG ]] || set -x
81 set -e
82}
83
84function l2_to_ipip {
85 echo -n "l2_to_ipip $1: "
86
87 local dir=$1
88
89 config_common ipip
90
91 $IP link add ipt type ipip external
92 $IP link set dev ipt up
93 sysctl -q -w net.ipv4.conf.ipt.rp_filter=0
94 sysctl -q -w net.ipv4.conf.ipt.forwarding=1
95
96 if [[ $dir == "egress" ]]; then
97 $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
98 $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
99 sysctl -q -w net.ipv4.conf.ve1.forwarding=1
100 else
101 $TC qdisc add dev ve1 clsact
102 $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect
103 fi
104
105 $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex)
106
107 $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
108
109 if [[ $dir == "egress" ]]; then
110 # test direct egress to ve2 (i.e. not forwarding from
111 # ve1 to ve2).
112 ping -c1 10.10.1.102 >& /dev/null
113 fi
114
115 cleanup
116
117 echo "OK"
118}
119
120function l2_to_ip6tnl {
121 echo -n "l2_to_ip6tnl $1: "
122
123 local dir=$1
124
125 config_common ip6tnl
126
127 $IP link add ip6t type ip6tnl mode any external
128 $IP link set dev ip6t up
129 sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0
130 sysctl -q -w net.ipv4.conf.ip6t.forwarding=1
131
132 if [[ $dir == "egress" ]]; then
133 $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2
134 $IP route add 2401:face::/64 via 2401:db02::66 dev ve2
135 $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
136 sysctl -q -w net.ipv4.conf.ve1.forwarding=1
137 else
138 $TC qdisc add dev ve1 clsact
139 $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect
140 fi
141
142 $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex)
143
144 $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null
145 $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null
146
147 if [[ $dir == "egress" ]]; then
148 # test direct egress to ve2 (i.e. not forwarding from
149 # ve1 to ve2).
150 ping -c1 10.10.1.102 >& /dev/null
151 ping -6 -c1 2401:face::66 >& /dev/null
152 fi
153
154 cleanup
155
156 echo "OK"
157}
158
159cleanup
160test_names="l2_to_ipip l2_to_ip6tnl"
161test_dirs="ingress egress"
162if [[ $# -ge 2 ]]; then
163 test_names=$1
164 test_dirs=$2
165elif [[ $# -ge 1 ]]; then
166 test_names=$1
167fi
168
169for t in $test_names; do
170 for d in $test_dirs; do
171 $t $d
172 done
173done
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
new file mode 100644
index 000000000000..92a44729dbe4
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -0,0 +1,236 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <uapi/linux/bpf.h>
8#include <uapi/linux/if_ether.h>
9#include <uapi/linux/if_packet.h>
10#include <uapi/linux/ip.h>
11#include <uapi/linux/ipv6.h>
12#include <uapi/linux/in.h>
13#include <uapi/linux/tcp.h>
14#include <uapi/linux/filter.h>
15#include <uapi/linux/pkt_cls.h>
16#include <net/ipv6.h>
17#include "bpf_helpers.h"
18
19#define _htonl __builtin_bswap32
20
21#define PIN_GLOBAL_NS 2
22struct bpf_elf_map {
23 __u32 type;
24 __u32 size_key;
25 __u32 size_value;
26 __u32 max_elem;
27 __u32 flags;
28 __u32 id;
29 __u32 pinning;
30};
31
32/* copy of 'struct ethhdr' without __packed */
33struct eth_hdr {
34 unsigned char h_dest[ETH_ALEN];
35 unsigned char h_source[ETH_ALEN];
36 unsigned short h_proto;
37};
38
39struct bpf_elf_map SEC("maps") tun_iface = {
40 .type = BPF_MAP_TYPE_ARRAY,
41 .size_key = sizeof(int),
42 .size_value = sizeof(int),
43 .pinning = PIN_GLOBAL_NS,
44 .max_elem = 1,
45};
46
47static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr)
48{
49 if (eth_proto == htons(ETH_P_IP))
50 return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100);
51 else if (eth_proto == htons(ETH_P_IPV6))
52 return (daddr == _htonl(0x2401face));
53
54 return false;
55}
56
57SEC("l2_to_iptun_ingress_forward")
58int _l2_to_iptun_ingress_forward(struct __sk_buff *skb)
59{
60 struct bpf_tunnel_key tkey = {};
61 void *data = (void *)(long)skb->data;
62 struct eth_hdr *eth = data;
63 void *data_end = (void *)(long)skb->data_end;
64 int key = 0, *ifindex;
65
66 int ret;
67
68 if (data + sizeof(*eth) > data_end)
69 return TC_ACT_OK;
70
71 ifindex = bpf_map_lookup_elem(&tun_iface, &key);
72 if (!ifindex)
73 return TC_ACT_OK;
74
75 if (eth->h_proto == htons(ETH_P_IP)) {
76 char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n";
77 struct iphdr *iph = data + sizeof(*eth);
78
79 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
80 return TC_ACT_OK;
81
82 if (iph->protocol != IPPROTO_IPIP)
83 return TC_ACT_OK;
84
85 bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex,
86 _htonl(iph->daddr));
87 return bpf_redirect(*ifindex, BPF_F_INGRESS);
88 } else if (eth->h_proto == htons(ETH_P_IPV6)) {
89 char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n";
90 struct ipv6hdr *ip6h = data + sizeof(*eth);
91
92 if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
93 return TC_ACT_OK;
94
95 if (ip6h->nexthdr != IPPROTO_IPIP &&
96 ip6h->nexthdr != IPPROTO_IPV6)
97 return TC_ACT_OK;
98
99 bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex,
100 _htonl(ip6h->daddr.s6_addr32[0]),
101 _htonl(ip6h->daddr.s6_addr32[3]));
102 return bpf_redirect(*ifindex, BPF_F_INGRESS);
103 }
104
105 return TC_ACT_OK;
106}
107
108SEC("l2_to_iptun_ingress_redirect")
109int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb)
110{
111 struct bpf_tunnel_key tkey = {};
112 void *data = (void *)(long)skb->data;
113 struct eth_hdr *eth = data;
114 void *data_end = (void *)(long)skb->data_end;
115 int key = 0, *ifindex;
116
117 int ret;
118
119 if (data + sizeof(*eth) > data_end)
120 return TC_ACT_OK;
121
122 ifindex = bpf_map_lookup_elem(&tun_iface, &key);
123 if (!ifindex)
124 return TC_ACT_OK;
125
126 if (eth->h_proto == htons(ETH_P_IP)) {
127 char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
128 struct iphdr *iph = data + sizeof(*eth);
129 __be32 daddr = iph->daddr;
130
131 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
132 return TC_ACT_OK;
133
134 if (!is_vip_addr(eth->h_proto, daddr))
135 return TC_ACT_OK;
136
137 bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex);
138 } else {
139 return TC_ACT_OK;
140 }
141
142 tkey.tunnel_id = 10000;
143 tkey.tunnel_ttl = 64;
144 tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */
145 bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0);
146 return bpf_redirect(*ifindex, 0);
147}
148
149SEC("l2_to_ip6tun_ingress_redirect")
150int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb)
151{
152 struct bpf_tunnel_key tkey = {};
153 void *data = (void *)(long)skb->data;
154 struct eth_hdr *eth = data;
155 void *data_end = (void *)(long)skb->data_end;
156 int key = 0, *ifindex;
157
158 if (data + sizeof(*eth) > data_end)
159 return TC_ACT_OK;
160
161 ifindex = bpf_map_lookup_elem(&tun_iface, &key);
162 if (!ifindex)
163 return TC_ACT_OK;
164
165 if (eth->h_proto == htons(ETH_P_IP)) {
166 char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n";
167 struct iphdr *iph = data + sizeof(*eth);
168
169 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
170 return TC_ACT_OK;
171
172 if (!is_vip_addr(eth->h_proto, iph->daddr))
173 return TC_ACT_OK;
174
175 bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr),
176 *ifindex);
177 } else if (eth->h_proto == htons(ETH_P_IPV6)) {
178 char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n";
179 struct ipv6hdr *ip6h = data + sizeof(*eth);
180
181 if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
182 return TC_ACT_OK;
183
184 if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
185 return TC_ACT_OK;
186
187 bpf_trace_printk(fmt6, sizeof(fmt6),
188 _htonl(ip6h->daddr.s6_addr32[0]), *ifindex);
189 } else {
190 return TC_ACT_OK;
191 }
192
193 tkey.tunnel_id = 10000;
194 tkey.tunnel_ttl = 64;
195 /* 2401:db02:0:0:0:0:0:66 */
196 tkey.remote_ipv6[0] = _htonl(0x2401db02);
197 tkey.remote_ipv6[1] = 0;
198 tkey.remote_ipv6[2] = 0;
199 tkey.remote_ipv6[3] = _htonl(0x00000066);
200 bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6);
201 return bpf_redirect(*ifindex, 0);
202}
203
204SEC("drop_non_tun_vip")
205int _drop_non_tun_vip(struct __sk_buff *skb)
206{
207 struct bpf_tunnel_key tkey = {};
208 void *data = (void *)(long)skb->data;
209 struct eth_hdr *eth = data;
210 void *data_end = (void *)(long)skb->data_end;
211
212 if (data + sizeof(*eth) > data_end)
213 return TC_ACT_OK;
214
215 if (eth->h_proto == htons(ETH_P_IP)) {
216 struct iphdr *iph = data + sizeof(*eth);
217
218 if (data + sizeof(*eth) + sizeof(*iph) > data_end)
219 return TC_ACT_OK;
220
221 if (is_vip_addr(eth->h_proto, iph->daddr))
222 return TC_ACT_SHOT;
223 } else if (eth->h_proto == htons(ETH_P_IPV6)) {
224 struct ipv6hdr *ip6h = data + sizeof(*eth);
225
226 if (data + sizeof(*eth) + sizeof(*ip6h) > data_end)
227 return TC_ACT_OK;
228
229 if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0]))
230 return TC_ACT_SHOT;
231 }
232
233 return TC_ACT_OK;
234}
235
236char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c
new file mode 100644
index 000000000000..4013c5337b91
--- /dev/null
+++ b/samples/bpf/tc_l2_redirect_user.c
@@ -0,0 +1,73 @@
1/* Copyright (c) 2016 Facebook
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#include <linux/unistd.h>
8#include <linux/bpf.h>
9
10#include <stdlib.h>
11#include <stdio.h>
12#include <unistd.h>
13#include <string.h>
14#include <errno.h>
15
16#include "libbpf.h"
17
/* Print command-line usage for this tool to stdout. */
18static void usage(void)
19{
20	printf("Usage: tc_l2_ipip_redirect [...]\n");
21	printf(" -U <file> Update an already pinned BPF array\n");
22	printf(" -i <ifindex> Interface index\n");
23	printf(" -h Display this help\n");
24}
25
/*
 * Userspace companion for tc_l2_redirect_kern.c: opens the pinned BPF
 * array given by -U and stores the -i ifindex at key 0, telling the
 * kernel program which interface to redirect to.
 *
 * Returns 0 on success, non-zero (the bpf_update_elem result or -1) on
 * any failure.
 *
 * NOTE(review): the optstring "F:U:i:" accepts -F with an argument, but
 * there is no case 'F' handler, so it falls into default/usage — confirm
 * whether 'F' is vestigial and should be removed from the optstring.
 */
26int main(int argc, char **argv)
27{
28	const char *pinned_file = NULL;
29	int ifindex = -1;
30	int array_key = 0;
31	int array_fd = -1;
32	int ret = -1;
33	int opt;
34
35	while ((opt = getopt(argc, argv, "F:U:i:")) != -1) {
36		switch (opt) {
37		/* General args */
38		case 'U':
39			pinned_file = optarg;
40			break;
41		case 'i':
42			ifindex = atoi(optarg);
43			break;
44		default:
45			usage();
46			goto out;
47		}
48	}
49
	/* Both the pinned map path and a valid ifindex are mandatory. */
50	if (ifindex < 0 || !pinned_file) {
51		usage();
52		goto out;
53	}
54
	/* Fetch an fd for the BPF map pinned at pinned_file (bpffs path). */
55	array_fd = bpf_obj_get(pinned_file);
56	if (array_fd < 0) {
57		fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n",
58			pinned_file, strerror(errno), errno);
59		goto out;
60	}
61
62	/* Store the redirect ifindex at slot 0 of the pinned array. */
63	ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0);
64	if (ret) {
65		perror("bpf_update_elem");
66		goto out;
67	}
68
69out:
	/* goto-based cleanup: close the map fd only if it was opened. */
70	if (array_fd != -1)
71		close(array_fd);
72	return ret;
73}
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
index 71a8ed32823e..41b6115a32eb 100644
--- a/samples/bpf/trace_event_kern.c
+++ b/samples/bpf/trace_event_kern.c
@@ -50,7 +50,7 @@ int bpf_prog1(struct bpf_perf_event_data *ctx)
50 key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS); 50 key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);
51 if ((int)key.kernstack < 0 && (int)key.userstack < 0) { 51 if ((int)key.kernstack < 0 && (int)key.userstack < 0) {
52 bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period, 52 bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
53 ctx->regs.ip); 53 PT_REGS_IP(&ctx->regs));
54 return 0; 54 return 0;
55 } 55 }
56 56
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index de46ab03f063..7675d11ee65e 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -159,7 +159,8 @@ cmd_cpp_i_c = $(CPP) $(c_flags) -o $@ $<
159$(obj)/%.i: $(src)/%.c FORCE 159$(obj)/%.i: $(src)/%.c FORCE
160 $(call if_changed_dep,cpp_i_c) 160 $(call if_changed_dep,cpp_i_c)
161 161
162cmd_gensymtypes = \ 162# These mirror gensymtypes_S and co below, keep them in synch.
163cmd_gensymtypes_c = \
163 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ 164 $(CPP) -D__GENKSYMS__ $(c_flags) $< | \
164 $(GENKSYMS) $(if $(1), -T $(2)) \ 165 $(GENKSYMS) $(if $(1), -T $(2)) \
165 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ 166 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
@@ -169,7 +170,7 @@ cmd_gensymtypes = \
169quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ 170quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
170cmd_cc_symtypes_c = \ 171cmd_cc_symtypes_c = \
171 set -e; \ 172 set -e; \
172 $(call cmd_gensymtypes,true,$@) >/dev/null; \ 173 $(call cmd_gensymtypes_c,true,$@) >/dev/null; \
173 test -s $@ || rm -f $@ 174 test -s $@ || rm -f $@
174 175
175$(obj)/%.symtypes : $(src)/%.c FORCE 176$(obj)/%.symtypes : $(src)/%.c FORCE
@@ -198,9 +199,10 @@ else
198# the actual value of the checksum generated by genksyms 199# the actual value of the checksum generated by genksyms
199 200
200cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< 201cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
201cmd_modversions = \ 202
203cmd_modversions_c = \
202 if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ 204 if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
203 $(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ 205 $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
204 > $(@D)/.tmp_$(@F:.o=.ver); \ 206 > $(@D)/.tmp_$(@F:.o=.ver); \
205 \ 207 \
206 $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ 208 $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
@@ -268,13 +270,14 @@ endif # CONFIG_STACK_VALIDATION
268define rule_cc_o_c 270define rule_cc_o_c
269 $(call echo-cmd,checksrc) $(cmd_checksrc) \ 271 $(call echo-cmd,checksrc) $(cmd_checksrc) \
270 $(call cmd_and_fixdep,cc_o_c) \ 272 $(call cmd_and_fixdep,cc_o_c) \
271 $(cmd_modversions) \ 273 $(cmd_modversions_c) \
272 $(cmd_objtool) \ 274 $(cmd_objtool) \
273 $(call echo-cmd,record_mcount) $(cmd_record_mcount) 275 $(call echo-cmd,record_mcount) $(cmd_record_mcount)
274endef 276endef
275 277
276define rule_as_o_S 278define rule_as_o_S
277 $(call cmd_and_fixdep,as_o_S) \ 279 $(call cmd_and_fixdep,as_o_S) \
280 $(cmd_modversions_S) \
278 $(cmd_objtool) 281 $(cmd_objtool)
279endef 282endef
280 283
@@ -314,6 +317,39 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL)
314$(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) 317$(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
315$(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) 318$(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
316 319
320# .S file exports must have their C prototypes defined in asm/asm-prototypes.h
321# or a file that it includes, in order to get versioned symbols. We build a
322# dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from
323# the .S file (with trailing ';'), and run genksyms on that, to extract vers.
324#
325# This is convoluted. The .S file must first be preprocessed to run guards and
326# expand names, then the resulting exports must be constructed into plain
327# EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed
328# to make the genksyms input.
329#
330# These mirror gensymtypes_c and co above, keep them in synch.
331cmd_gensymtypes_S = \
332 (echo "\#include <linux/kernel.h>" ; \
333 echo "\#include <asm/asm-prototypes.h>" ; \
334 $(CPP) $(a_flags) $< | \
335 grep "\<___EXPORT_SYMBOL\>" | \
336 sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \
337 $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
338 $(GENKSYMS) $(if $(1), -T $(2)) \
339 $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
340 $(if $(KBUILD_PRESERVE),-p) \
341 -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
342
343quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@
344cmd_cc_symtypes_S = \
345 set -e; \
346 $(call cmd_gensymtypes_S,true,$@) >/dev/null; \
347 test -s $@ || rm -f $@
348
349$(obj)/%.symtypes : $(src)/%.S FORCE
350 $(call cmd,cc_symtypes_S)
351
352
317quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@ 353quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@
318cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $< 354cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $<
319 355
@@ -321,7 +357,37 @@ $(obj)/%.s: $(src)/%.S FORCE
321 $(call if_changed_dep,cpp_s_S) 357 $(call if_changed_dep,cpp_s_S)
322 358
323quiet_cmd_as_o_S = AS $(quiet_modtag) $@ 359quiet_cmd_as_o_S = AS $(quiet_modtag) $@
324cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< 360
361ifndef CONFIG_MODVERSIONS
362cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
363
364else
365
366ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h)
367
368ifeq ($(ASM_PROTOTYPES),)
369cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
370
371else
372
373# versioning matches the C process described above, with difference that
374# we parse asm-prototypes.h C header to get function definitions.
375
376cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $<
377
378cmd_modversions_S = \
379 if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
380 $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \
381 > $(@D)/.tmp_$(@F:.o=.ver); \
382 \
383 $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
384 -T $(@D)/.tmp_$(@F:.o=.ver); \
385 rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \
386 else \
387 mv -f $(@D)/.tmp_$(@F) $@; \
388 fi;
389endif
390endif
325 391
326$(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE 392$(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE
327 $(call if_changed_rule,as_o_S) 393 $(call if_changed_rule,as_o_S)
@@ -430,6 +496,9 @@ cmd_export_list = $(OBJDUMP) -h $< | \
430 496
431$(obj)/lib-ksyms.o: $(lib-target) FORCE 497$(obj)/lib-ksyms.o: $(lib-target) FORCE
432 $(call if_changed,export_list) 498 $(call if_changed,export_list)
499
500targets += $(obj)/lib-ksyms.o
501
433endif 502endif
434 503
435# 504#
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
index 973e8c141567..17867e723a51 100755
--- a/scripts/gcc-x86_64-has-stack-protector.sh
+++ b/scripts/gcc-x86_64-has-stack-protector.sh
@@ -1,6 +1,6 @@
1#!/bin/sh 1#!/bin/sh
2 2
3echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" 3echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
4if [ "$?" -eq "0" ] ; then 4if [ "$?" -eq "0" ] ; then
5 echo y 5 echo y
6else 6else
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ebced77deb9c..90a091b6ae4d 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -35,6 +35,8 @@ nconfig: $(obj)/nconf
35 35
36silentoldconfig: $(obj)/conf 36silentoldconfig: $(obj)/conf
37 $(Q)mkdir -p include/config include/generated 37 $(Q)mkdir -p include/config include/generated
38 $(Q)test -e include/generated/autoksyms.h || \
39 touch include/generated/autoksyms.h
38 $< $(silent) --$@ $(Kconfig) 40 $< $(silent) --$@ $(Kconfig)
39 41
40localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf 42localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index fc3036b34e51..a4d90aa1045a 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
621 /* released below */ 621 /* released below */
622 cred = get_current_cred(); 622 cred = get_current_cred();
623 cxt = cred_cxt(cred); 623 cxt = cred_cxt(cred);
624 profile = aa_cred_profile(cred); 624 profile = aa_get_newest_profile(aa_cred_profile(cred));
625 previous_profile = cxt->previous; 625 previous_profile = aa_get_newest_profile(cxt->previous);
626 626
627 if (unconfined(profile)) { 627 if (unconfined(profile)) {
628 info = "unconfined"; 628 info = "unconfined";
@@ -718,6 +718,8 @@ audit:
718out: 718out:
719 aa_put_profile(hat); 719 aa_put_profile(hat);
720 kfree(name); 720 kfree(name);
721 aa_put_profile(profile);
722 aa_put_profile(previous_profile);
721 put_cred(cred); 723 put_cred(cred);
722 724
723 return error; 725 return error;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f909dd8b7b8..ea81c08ddc7a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6907,8 +6907,6 @@ static const struct hda_fixup alc662_fixups[] = {
6907 .v.pins = (const struct hda_pintbl[]) { 6907 .v.pins = (const struct hda_pintbl[]) {
6908 { 0x15, 0x40f000f0 }, /* disabled */ 6908 { 0x15, 0x40f000f0 }, /* disabled */
6909 { 0x16, 0x40f000f0 }, /* disabled */ 6909 { 0x16, 0x40f000f0 }, /* disabled */
6910 { 0x18, 0x01014011 }, /* LO */
6911 { 0x1a, 0x01014012 }, /* LO */
6912 { } 6910 { }
6913 } 6911 }
6914 }, 6912 },
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 6a23302297c9..4d9d320a7971 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int);
13static bool is_thinkpad(struct hda_codec *codec) 13static bool is_thinkpad(struct hda_codec *codec)
14{ 14{
15 return (codec->core.subsystem_id >> 16 == 0x17aa) && 15 return (codec->core.subsystem_id >> 16 == 0x17aa) &&
16 (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068")); 16 (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") ||
17 acpi_dev_found("IBM0068"));
17} 18}
18 19
19static void update_tpacpi_mute_led(void *private_data, int enabled) 20static void update_tpacpi_mute_led(void *private_data, int enabled)
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 07000f53db44..b392e51de94d 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -75,6 +75,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
75 data->i2s_port = cpu_dai->driver->id; 75 data->i2s_port = cpu_dai->driver->id;
76 runtime->private_data = data; 76 runtime->private_data = data;
77 77
78 dma_ch = 0;
78 if (v->alloc_dma_channel) 79 if (v->alloc_dma_channel)
79 dma_ch = v->alloc_dma_channel(drvdata, dir); 80 dma_ch = v->alloc_dma_channel(drvdata, dir);
80 if (dma_ch < 0) 81 if (dma_ch < 0)
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 0190cb6332f2..3fe4468ea2c5 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -304,7 +304,7 @@ struct snd_dbri {
304 spinlock_t lock; 304 spinlock_t lock;
305 305
306 struct dbri_dma *dma; /* Pointer to our DMA block */ 306 struct dbri_dma *dma; /* Pointer to our DMA block */
307 u32 dma_dvma; /* DBRI visible DMA address */ 307 dma_addr_t dma_dvma; /* DBRI visible DMA address */
308 308
309 void __iomem *regs; /* dbri HW regs */ 309 void __iomem *regs; /* dbri HW regs */
310 int dbri_irqp; /* intr queue pointer */ 310 int dbri_irqp; /* intr queue pointer */
@@ -657,12 +657,14 @@ static void dbri_cmdwait(struct snd_dbri *dbri)
657 */ 657 */
658static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len) 658static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len)
659{ 659{
660 u32 dvma_addr = (u32)dbri->dma_dvma;
661
660 /* Space for 2 WAIT cmds (replaced later by 1 JUMP cmd) */ 662 /* Space for 2 WAIT cmds (replaced later by 1 JUMP cmd) */
661 len += 2; 663 len += 2;
662 spin_lock(&dbri->cmdlock); 664 spin_lock(&dbri->cmdlock);
663 if (dbri->cmdptr - dbri->dma->cmd + len < DBRI_NO_CMDS - 2) 665 if (dbri->cmdptr - dbri->dma->cmd + len < DBRI_NO_CMDS - 2)
664 return dbri->cmdptr + 2; 666 return dbri->cmdptr + 2;
665 else if (len < sbus_readl(dbri->regs + REG8) - dbri->dma_dvma) 667 else if (len < sbus_readl(dbri->regs + REG8) - dvma_addr)
666 return dbri->dma->cmd; 668 return dbri->dma->cmd;
667 else 669 else
668 printk(KERN_ERR "DBRI: no space for commands."); 670 printk(KERN_ERR "DBRI: no space for commands.");
@@ -680,6 +682,7 @@ static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len)
680 */ 682 */
681static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len) 683static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len)
682{ 684{
685 u32 dvma_addr = (u32)dbri->dma_dvma;
683 s32 tmp, addr; 686 s32 tmp, addr;
684 static int wait_id = 0; 687 static int wait_id = 0;
685 688
@@ -689,7 +692,7 @@ static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len)
689 *(cmd+1) = DBRI_CMD(D_WAIT, 1, wait_id); 692 *(cmd+1) = DBRI_CMD(D_WAIT, 1, wait_id);
690 693
691 /* Replace the last command with JUMP */ 694 /* Replace the last command with JUMP */
692 addr = dbri->dma_dvma + (cmd - len - dbri->dma->cmd) * sizeof(s32); 695 addr = dvma_addr + (cmd - len - dbri->dma->cmd) * sizeof(s32);
693 *(dbri->cmdptr+1) = addr; 696 *(dbri->cmdptr+1) = addr;
694 *(dbri->cmdptr) = DBRI_CMD(D_JUMP, 0, 0); 697 *(dbri->cmdptr) = DBRI_CMD(D_JUMP, 0, 0);
695 698
@@ -747,6 +750,7 @@ static void dbri_reset(struct snd_dbri *dbri)
747/* Lock must not be held before calling this */ 750/* Lock must not be held before calling this */
748static void dbri_initialize(struct snd_dbri *dbri) 751static void dbri_initialize(struct snd_dbri *dbri)
749{ 752{
753 u32 dvma_addr = (u32)dbri->dma_dvma;
750 s32 *cmd; 754 s32 *cmd;
751 u32 dma_addr; 755 u32 dma_addr;
752 unsigned long flags; 756 unsigned long flags;
@@ -764,7 +768,7 @@ static void dbri_initialize(struct snd_dbri *dbri)
764 /* 768 /*
765 * Initialize the interrupt ring buffer. 769 * Initialize the interrupt ring buffer.
766 */ 770 */
767 dma_addr = dbri->dma_dvma + dbri_dma_off(intr, 0); 771 dma_addr = dvma_addr + dbri_dma_off(intr, 0);
768 dbri->dma->intr[0] = dma_addr; 772 dbri->dma->intr[0] = dma_addr;
769 dbri->dbri_irqp = 1; 773 dbri->dbri_irqp = 1;
770 /* 774 /*
@@ -778,7 +782,7 @@ static void dbri_initialize(struct snd_dbri *dbri)
778 dbri->cmdptr = cmd; 782 dbri->cmdptr = cmd;
779 *(cmd++) = DBRI_CMD(D_WAIT, 1, 0); 783 *(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
780 *(cmd++) = DBRI_CMD(D_WAIT, 1, 0); 784 *(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
781 dma_addr = dbri->dma_dvma + dbri_dma_off(cmd, 0); 785 dma_addr = dvma_addr + dbri_dma_off(cmd, 0);
782 sbus_writel(dma_addr, dbri->regs + REG8); 786 sbus_writel(dma_addr, dbri->regs + REG8);
783 spin_unlock(&dbri->cmdlock); 787 spin_unlock(&dbri->cmdlock);
784 788
@@ -1077,6 +1081,7 @@ static void recv_fixed(struct snd_dbri *dbri, int pipe, volatile __u32 *ptr)
1077static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period) 1081static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
1078{ 1082{
1079 struct dbri_streaminfo *info = &dbri->stream_info[streamno]; 1083 struct dbri_streaminfo *info = &dbri->stream_info[streamno];
1084 u32 dvma_addr = (u32)dbri->dma_dvma;
1080 __u32 dvma_buffer; 1085 __u32 dvma_buffer;
1081 int desc; 1086 int desc;
1082 int len; 1087 int len;
@@ -1177,7 +1182,7 @@ static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
1177 else { 1182 else {
1178 dbri->next_desc[last_desc] = desc; 1183 dbri->next_desc[last_desc] = desc;
1179 dbri->dma->desc[last_desc].nda = 1184 dbri->dma->desc[last_desc].nda =
1180 dbri->dma_dvma + dbri_dma_off(desc, desc); 1185 dvma_addr + dbri_dma_off(desc, desc);
1181 } 1186 }
1182 1187
1183 last_desc = desc; 1188 last_desc = desc;
@@ -1192,7 +1197,7 @@ static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
1192 } 1197 }
1193 1198
1194 dbri->dma->desc[last_desc].nda = 1199 dbri->dma->desc[last_desc].nda =
1195 dbri->dma_dvma + dbri_dma_off(desc, first_desc); 1200 dvma_addr + dbri_dma_off(desc, first_desc);
1196 dbri->next_desc[last_desc] = first_desc; 1201 dbri->next_desc[last_desc] = first_desc;
1197 dbri->pipes[info->pipe].first_desc = first_desc; 1202 dbri->pipes[info->pipe].first_desc = first_desc;
1198 dbri->pipes[info->pipe].desc = first_desc; 1203 dbri->pipes[info->pipe].desc = first_desc;
@@ -1697,6 +1702,7 @@ interrupts are disabled.
1697static void xmit_descs(struct snd_dbri *dbri) 1702static void xmit_descs(struct snd_dbri *dbri)
1698{ 1703{
1699 struct dbri_streaminfo *info; 1704 struct dbri_streaminfo *info;
1705 u32 dvma_addr = (u32)dbri->dma_dvma;
1700 s32 *cmd; 1706 s32 *cmd;
1701 unsigned long flags; 1707 unsigned long flags;
1702 int first_td; 1708 int first_td;
@@ -1718,7 +1724,7 @@ static void xmit_descs(struct snd_dbri *dbri)
1718 *(cmd++) = DBRI_CMD(D_SDP, 0, 1724 *(cmd++) = DBRI_CMD(D_SDP, 0,
1719 dbri->pipes[info->pipe].sdp 1725 dbri->pipes[info->pipe].sdp
1720 | D_SDP_P | D_SDP_EVERY | D_SDP_C); 1726 | D_SDP_P | D_SDP_EVERY | D_SDP_C);
1721 *(cmd++) = dbri->dma_dvma + 1727 *(cmd++) = dvma_addr +
1722 dbri_dma_off(desc, first_td); 1728 dbri_dma_off(desc, first_td);
1723 dbri_cmdsend(dbri, cmd, 2); 1729 dbri_cmdsend(dbri, cmd, 2);
1724 1730
@@ -1740,7 +1746,7 @@ static void xmit_descs(struct snd_dbri *dbri)
1740 *(cmd++) = DBRI_CMD(D_SDP, 0, 1746 *(cmd++) = DBRI_CMD(D_SDP, 0,
1741 dbri->pipes[info->pipe].sdp 1747 dbri->pipes[info->pipe].sdp
1742 | D_SDP_P | D_SDP_EVERY | D_SDP_C); 1748 | D_SDP_P | D_SDP_EVERY | D_SDP_C);
1743 *(cmd++) = dbri->dma_dvma + 1749 *(cmd++) = dvma_addr +
1744 dbri_dma_off(desc, first_td); 1750 dbri_dma_off(desc, first_td);
1745 dbri_cmdsend(dbri, cmd, 2); 1751 dbri_cmdsend(dbri, cmd, 2);
1746 1752
@@ -2539,7 +2545,7 @@ static int snd_dbri_create(struct snd_card *card,
2539 if (!dbri->dma) 2545 if (!dbri->dma)
2540 return -ENOMEM; 2546 return -ENOMEM;
2541 2547
2542 dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n", 2548 dprintk(D_GEN, "DMA Cmd Block 0x%p (%pad)\n",
2543 dbri->dma, dbri->dma_dvma); 2549 dbri->dma, dbri->dma_dvma);
2544 2550
2545 /* Map the registers into memory. */ 2551 /* Map the registers into memory. */
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 9e5276d6dda0..2ddc034673a8 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
315 snd_usb_endpoint_free(ep); 315 snd_usb_endpoint_free(ep);
316 316
317 mutex_destroy(&chip->mutex); 317 mutex_destroy(&chip->mutex);
318 dev_set_drvdata(&chip->dev->dev, NULL); 318 if (!atomic_read(&chip->shutdown))
319 dev_set_drvdata(&chip->dev->dev, NULL);
319 kfree(chip); 320 kfree(chip);
320 return 0; 321 return 0;
321} 322}
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 4ffff7be9299..a53fef0c673b 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1337,8 +1337,8 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
1337 } 1337 }
1338 1338
1339 if (first) { 1339 if (first) {
1340 ui_browser__printf(&browser->b, "%c", folded_sign); 1340 ui_browser__printf(&browser->b, "%c ", folded_sign);
1341 width--; 1341 width -= 2;
1342 first = false; 1342 first = false;
1343 } else { 1343 } else {
1344 ui_browser__printf(&browser->b, " "); 1344 ui_browser__printf(&browser->b, " ");
@@ -1361,8 +1361,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
1361 width -= hpp.buf - s; 1361 width -= hpp.buf - s;
1362 } 1362 }
1363 1363
1364 ui_browser__write_nstring(&browser->b, "", hierarchy_indent); 1364 if (!first) {
1365 width -= hierarchy_indent; 1365 ui_browser__write_nstring(&browser->b, "", hierarchy_indent);
1366 width -= hierarchy_indent;
1367 }
1366 1368
1367 if (column >= browser->b.horiz_scroll) { 1369 if (column >= browser->b.horiz_scroll) {
1368 char s[2048]; 1370 char s[2048];
@@ -1381,7 +1383,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
1381 } 1383 }
1382 1384
1383 perf_hpp_list__for_each_format(entry->hpp_list, fmt) { 1385 perf_hpp_list__for_each_format(entry->hpp_list, fmt) {
1384 ui_browser__write_nstring(&browser->b, "", 2); 1386 if (first) {
1387 ui_browser__printf(&browser->b, "%c ", folded_sign);
1388 first = false;
1389 } else {
1390 ui_browser__write_nstring(&browser->b, "", 2);
1391 }
1392
1385 width -= 2; 1393 width -= 2;
1386 1394
1387 /* 1395 /*
@@ -1555,10 +1563,11 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
1555 int indent = hists->nr_hpp_node - 2; 1563 int indent = hists->nr_hpp_node - 2;
1556 bool first_node, first_col; 1564 bool first_node, first_col;
1557 1565
1558 ret = scnprintf(buf, size, " "); 1566 ret = scnprintf(buf, size, " ");
1559 if (advance_hpp_check(&dummy_hpp, ret)) 1567 if (advance_hpp_check(&dummy_hpp, ret))
1560 return ret; 1568 return ret;
1561 1569
1570 first_node = true;
1562 /* the first hpp_list_node is for overhead columns */ 1571 /* the first hpp_list_node is for overhead columns */
1563 fmt_node = list_first_entry(&hists->hpp_formats, 1572 fmt_node = list_first_entry(&hists->hpp_formats,
1564 struct perf_hpp_list_node, list); 1573 struct perf_hpp_list_node, list);
@@ -1573,12 +1582,16 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
1573 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); 1582 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " ");
1574 if (advance_hpp_check(&dummy_hpp, ret)) 1583 if (advance_hpp_check(&dummy_hpp, ret))
1575 break; 1584 break;
1585
1586 first_node = false;
1576 } 1587 }
1577 1588
1578 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", 1589 if (!first_node) {
1579 indent * HIERARCHY_INDENT, ""); 1590 ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
1580 if (advance_hpp_check(&dummy_hpp, ret)) 1591 indent * HIERARCHY_INDENT, "");
1581 return ret; 1592 if (advance_hpp_check(&dummy_hpp, ret))
1593 return ret;
1594 }
1582 1595
1583 first_node = true; 1596 first_node = true;
1584 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { 1597 list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
@@ -2076,8 +2089,21 @@ void hist_browser__init(struct hist_browser *browser,
2076 browser->b.use_navkeypressed = true; 2089 browser->b.use_navkeypressed = true;
2077 browser->show_headers = symbol_conf.show_hist_headers; 2090 browser->show_headers = symbol_conf.show_hist_headers;
2078 2091
2079 hists__for_each_format(hists, fmt) 2092 if (symbol_conf.report_hierarchy) {
2093 struct perf_hpp_list_node *fmt_node;
2094
2095 /* count overhead columns (in the first node) */
2096 fmt_node = list_first_entry(&hists->hpp_formats,
2097 struct perf_hpp_list_node, list);
2098 perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
2099 ++browser->b.columns;
2100
2101 /* add a single column for whole hierarchy sort keys*/
2080 ++browser->b.columns; 2102 ++browser->b.columns;
2103 } else {
2104 hists__for_each_format(hists, fmt)
2105 ++browser->b.columns;
2106 }
2081 2107
2082 hists__reset_column_width(hists); 2108 hists__reset_column_width(hists);
2083} 2109}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index b02992efb513..a69f027368ef 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1600,18 +1600,18 @@ static void hists__hierarchy_output_resort(struct hists *hists,
1600 if (prog) 1600 if (prog)
1601 ui_progress__update(prog, 1); 1601 ui_progress__update(prog, 1);
1602 1602
1603 hists->nr_entries++;
1604 if (!he->filtered) {
1605 hists->nr_non_filtered_entries++;
1606 hists__calc_col_len(hists, he);
1607 }
1608
1603 if (!he->leaf) { 1609 if (!he->leaf) {
1604 hists__hierarchy_output_resort(hists, prog, 1610 hists__hierarchy_output_resort(hists, prog,
1605 &he->hroot_in, 1611 &he->hroot_in,
1606 &he->hroot_out, 1612 &he->hroot_out,
1607 min_callchain_hits, 1613 min_callchain_hits,
1608 use_callchain); 1614 use_callchain);
1609 hists->nr_entries++;
1610 if (!he->filtered) {
1611 hists->nr_non_filtered_entries++;
1612 hists__calc_col_len(hists, he);
1613 }
1614
1615 continue; 1615 continue;
1616 } 1616 }
1617 1617
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index a538ff44b108..a1883bbb0144 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -8,18 +8,19 @@
8# as published by the Free Software Foundation; version 2 8# as published by the Free Software Foundation; version 2
9# of the License. 9# of the License.
10 10
11include ../../../../scripts/Makefile.include 11ifeq ($(srctree),)
12 12srctree := $(patsubst %/,%,$(dir $(shell pwd)))
13OUTPUT=./ 13srctree := $(patsubst %/,%,$(dir $(srctree)))
14ifeq ("$(origin O)", "command line") 14#$(info Determined 'srctree' to be $(srctree))
15 OUTPUT := $(O)/
16endif 15endif
17 16
18ifneq ($(OUTPUT),) 17include $(srctree)/../../scripts/Makefile.include
19# check that the output directory actually exists 18
20OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) 19OUTPUT=$(srctree)/
21$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) 20ifeq ("$(origin O)", "command line")
21 OUTPUT := $(O)/power/acpi/
22endif 22endif
23#$(info Determined 'OUTPUT' to be $(OUTPUT))
23 24
24# --- CONFIGURATION BEGIN --- 25# --- CONFIGURATION BEGIN ---
25 26
@@ -70,8 +71,8 @@ WARNINGS := -Wall
70WARNINGS += $(call cc-supports,-Wstrict-prototypes) 71WARNINGS += $(call cc-supports,-Wstrict-prototypes)
71WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) 72WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
72 73
73KERNEL_INCLUDE := ../../../include 74KERNEL_INCLUDE := $(OUTPUT)include
74ACPICA_INCLUDE := ../../../drivers/acpi/acpica 75ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
75CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) 76CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
76CFLAGS += $(WARNINGS) 77CFLAGS += $(WARNINGS)
77 78
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
index ec87a9e562c0..373738338f51 100644
--- a/tools/power/acpi/Makefile.rules
+++ b/tools/power/acpi/Makefile.rules
@@ -8,28 +8,42 @@
8# as published by the Free Software Foundation; version 2 8# as published by the Free Software Foundation; version 2
9# of the License. 9# of the License.
10 10
11$(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE 11objdir := $(OUTPUT)tools/$(TOOL)/
12 $(ECHO) " LD " $@ 12toolobjs := $(addprefix $(objdir),$(TOOL_OBJS))
13 $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@ 13$(OUTPUT)$(TOOL): $(toolobjs) FORCE
14 $(ECHO) " LD " $(subst $(OUTPUT),,$@)
15 $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(toolobjs) -L$(OUTPUT) -o $@
16 $(ECHO) " STRIP " $(subst $(OUTPUT),,$@)
14 $(QUIET) $(STRIPCMD) $@ 17 $(QUIET) $(STRIPCMD) $@
15 18
16$(OUTPUT)%.o: %.c 19$(KERNEL_INCLUDE):
17 $(ECHO) " CC " $@ 20 $(ECHO) " MKDIR " $(subst $(OUTPUT),,$@)
21 $(QUIET) mkdir -p $(KERNEL_INCLUDE)
22 $(ECHO) " CP " $(subst $(OUTPUT),,$@)
23 $(QUIET) cp -rf $(srctree)/../../../include/acpi $(KERNEL_INCLUDE)/
24
25$(objdir)%.o: %.c $(KERNEL_INCLUDE)
26 $(ECHO) " CC " $(subst $(OUTPUT),,$@)
18 $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< 27 $(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
19 28
20all: $(OUTPUT)$(TOOL) 29all: $(OUTPUT)$(TOOL)
21clean: 30clean:
22 -find $(OUTPUT) \( -not -type d \) \ 31 $(ECHO) " RMOBJ " $(subst $(OUTPUT),,$(objdir))
23 -and \( -name '*~' -o -name '*.[oas]' \) \ 32 $(QUIET) find $(objdir) \( -not -type d \)\
24 -type f -print \ 33 -and \( -name '*~' -o -name '*.[oas]' \)\
25 | xargs rm -f 34 -type f -print | xargs rm -f
26 -rm -f $(OUTPUT)$(TOOL) 35 $(ECHO) " RM " $(TOOL)
36 $(QUIET) rm -f $(OUTPUT)$(TOOL)
37 $(ECHO) " RMINC " $(subst $(OUTPUT),,$(KERNEL_INCLUDE))
38 $(QUIET) rm -rf $(KERNEL_INCLUDE)
27 39
28install-tools: 40install-tools:
29 $(INSTALL) -d $(DESTDIR)${sbindir} 41 $(ECHO) " INST " $(TOOL)
30 $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir} 42 $(QUIET) $(INSTALL) -d $(DESTDIR)$(sbindir)
43 $(QUIET) $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)$(sbindir)
31uninstall-tools: 44uninstall-tools:
32 - rm -f $(DESTDIR)${sbindir}/$(TOOL) 45 $(ECHO) " UNINST " $(TOOL)
46 $(QUIET) rm -f $(DESTDIR)$(sbindir)/$(TOOL)
33 47
34install: all install-tools $(EXTRA_INSTALL) 48install: all install-tools $(EXTRA_INSTALL)
35uninstall: uninstall-tools $(EXTRA_UNINSTALL) 49uninstall: uninstall-tools $(EXTRA_UNINSTALL)
diff --git a/tools/power/acpi/tools/acpidbg/Makefile b/tools/power/acpi/tools/acpidbg/Makefile
index 352df4b41ae9..f2d06e773eb4 100644
--- a/tools/power/acpi/tools/acpidbg/Makefile
+++ b/tools/power/acpi/tools/acpidbg/Makefile
@@ -17,9 +17,7 @@ vpath %.c \
17 ../../os_specific/service_layers\ 17 ../../os_specific/service_layers\
18 . 18 .
19CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\ 19CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\
20 -I.\ 20 -I.
21 -I../../../../../drivers/acpi/acpica\
22 -I../../../../../include
23LDFLAGS += -lpthread 21LDFLAGS += -lpthread
24TOOL_OBJS = \ 22TOOL_OBJS = \
25 acpidbg.o 23 acpidbg.o
diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c
index a88ac45b7756..4308362d7068 100644
--- a/tools/power/acpi/tools/acpidbg/acpidbg.c
+++ b/tools/power/acpi/tools/acpidbg/acpidbg.c
@@ -12,10 +12,16 @@
12#include <acpi/acpi.h> 12#include <acpi/acpi.h>
13 13
14/* Headers not included by include/acpi/platform/aclinux.h */ 14/* Headers not included by include/acpi/platform/aclinux.h */
15#include <unistd.h>
16#include <stdio.h>
17#include <stdlib.h>
18#include <string.h>
19#include <error.h>
15#include <stdbool.h> 20#include <stdbool.h>
16#include <fcntl.h> 21#include <fcntl.h>
17#include <assert.h> 22#include <assert.h>
18#include <linux/circ_buf.h> 23#include <sys/select.h>
24#include "../../../../../include/linux/circ_buf.h"
19 25
20#define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg" 26#define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg"
21#define ACPI_AML_SEC_TICK 1 27#define ACPI_AML_SEC_TICK 1
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
index 04b5db7c7c0b..f7c7af1f9258 100644
--- a/tools/power/acpi/tools/acpidump/Makefile
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -19,9 +19,7 @@ vpath %.c \
19 ./\ 19 ./\
20 ../../common\ 20 ../../common\
21 ../../os_specific/service_layers 21 ../../os_specific/service_layers
22CFLAGS += -DACPI_DUMP_APP -I.\ 22CFLAGS += -DACPI_DUMP_APP -I.
23 -I../../../../../drivers/acpi/acpica\
24 -I../../../../../include
25TOOL_OBJS = \ 23TOOL_OBJS = \
26 apdump.o\ 24 apdump.o\
27 apfiles.o\ 25 apfiles.o\
@@ -49,7 +47,9 @@ TOOL_OBJS = \
49 47
50include ../../Makefile.rules 48include ../../Makefile.rules
51 49
52install-man: ../../man/acpidump.8 50install-man: $(srctree)/man/acpidump.8
53 $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8 51 $(ECHO) " INST " acpidump.8
52 $(QUIET) $(INSTALL_DATA) -D $< $(DESTDIR)$(mandir)/man8/acpidump.8
54uninstall-man: 53uninstall-man:
55 - rm -f $(DESTDIR)${mandir}/man8/acpidump.8 54 $(ECHO) " UNINST " acpidump.8
55 $(QUIET) rm -f $(DESTDIR)$(mandir)/man8/acpidump.8
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 6e9c40eea208..69ccce308458 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
305 continue; 305 continue;
306 type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) 306 type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
307 & ARMV8_PMU_EVTYPE_EVENT; 307 & ARMV8_PMU_EVTYPE_EVENT;
308 if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) 308 if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
309 && (enable & BIT(i))) { 309 && (enable & BIT(i))) {
310 reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; 310 reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
311 reg = lower_32_bits(reg); 311 reg = lower_32_bits(reg);
@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
379 eventsel = data & ARMV8_PMU_EVTYPE_EVENT; 379 eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
380 380
381 /* Software increment event does't need to be backed by a perf event */ 381 /* Software increment event does't need to be backed by a perf event */
382 if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) 382 if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
383 select_idx != ARMV8_PMU_CYCLE_IDX)
383 return; 384 return;
384 385
385 memset(&attr, 0, sizeof(struct perf_event_attr)); 386 memset(&attr, 0, sizeof(struct perf_event_attr));
@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
391 attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; 392 attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
392 attr.exclude_hv = 1; /* Don't count EL2 events */ 393 attr.exclude_hv = 1; /* Don't count EL2 events */
393 attr.exclude_host = 1; /* Don't count host events */ 394 attr.exclude_host = 1; /* Don't count host events */
394 attr.config = eventsel; 395 attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
396 ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
395 397
396 counter = kvm_pmu_get_counter_value(vcpu, select_idx); 398 counter = kvm_pmu_get_counter_value(vcpu, select_idx);
397 /* The initial sample period (overflow count) of an event. */ 399 /* The initial sample period (overflow count) of an event. */
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 0a063af40565..9bab86757fa4 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -50,8 +50,10 @@ void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
50 50
51 WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE); 51 WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
52 52
53 kvm_notify_acked_irq(vcpu->kvm, 0, 53 /* Only SPIs require notification */
54 intid - VGIC_NR_PRIVATE_IRQS); 54 if (vgic_valid_spi(vcpu->kvm, intid))
55 kvm_notify_acked_irq(vcpu->kvm, 0,
56 intid - VGIC_NR_PRIVATE_IRQS);
55 } 57 }
56 } 58 }
57 59
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9f0dae397d9c..5c9f9745e6ca 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -41,8 +41,10 @@ void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
41 41
42 WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE); 42 WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
43 43
44 kvm_notify_acked_irq(vcpu->kvm, 0, 44 /* Only SPIs require notification */
45 intid - VGIC_NR_PRIVATE_IRQS); 45 if (vgic_valid_spi(vcpu->kvm, intid))
46 kvm_notify_acked_irq(vcpu->kvm, 0,
47 intid - VGIC_NR_PRIVATE_IRQS);
46 } 48 }
47 49
48 /* 50 /*
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 8035cc1eb955..efeceb0a222d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work)
91 91
92 spin_lock(&vcpu->async_pf.lock); 92 spin_lock(&vcpu->async_pf.lock);
93 list_add_tail(&apf->link, &vcpu->async_pf.done); 93 list_add_tail(&apf->link, &vcpu->async_pf.done);
94 apf->vcpu = NULL;
94 spin_unlock(&vcpu->async_pf.lock); 95 spin_unlock(&vcpu->async_pf.lock);
95 96
96 /* 97 /*
@@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work)
113 114
114void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) 115void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
115{ 116{
117 spin_lock(&vcpu->async_pf.lock);
118
116 /* cancel outstanding work queue item */ 119 /* cancel outstanding work queue item */
117 while (!list_empty(&vcpu->async_pf.queue)) { 120 while (!list_empty(&vcpu->async_pf.queue)) {
118 struct kvm_async_pf *work = 121 struct kvm_async_pf *work =
@@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
120 typeof(*work), queue); 123 typeof(*work), queue);
121 list_del(&work->queue); 124 list_del(&work->queue);
122 125
126 /*
127 * We know it's present in vcpu->async_pf.done, do
128 * nothing here.
129 */
130 if (!work->vcpu)
131 continue;
132
133 spin_unlock(&vcpu->async_pf.lock);
123#ifdef CONFIG_KVM_ASYNC_PF_SYNC 134#ifdef CONFIG_KVM_ASYNC_PF_SYNC
124 flush_work(&work->work); 135 flush_work(&work->work);
125#else 136#else
@@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
129 kmem_cache_free(async_pf_cache, work); 140 kmem_cache_free(async_pf_cache, work);
130 } 141 }
131#endif 142#endif
143 spin_lock(&vcpu->async_pf.lock);
132 } 144 }
133 145
134 spin_lock(&vcpu->async_pf.lock);
135 while (!list_empty(&vcpu->async_pf.done)) { 146 while (!list_empty(&vcpu->async_pf.done)) {
136 struct kvm_async_pf *work = 147 struct kvm_async_pf *work =
137 list_first_entry(&vcpu->async_pf.done, 148 list_first_entry(&vcpu->async_pf.done,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5c360347a1e9..7f9ee2929cfe 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2889,10 +2889,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
2889 2889
2890 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); 2890 ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
2891 if (ret < 0) { 2891 if (ret < 0) {
2892 ops->destroy(dev);
2893 mutex_lock(&kvm->lock); 2892 mutex_lock(&kvm->lock);
2894 list_del(&dev->vm_node); 2893 list_del(&dev->vm_node);
2895 mutex_unlock(&kvm->lock); 2894 mutex_unlock(&kvm->lock);
2895 ops->destroy(dev);
2896 return ret; 2896 return ret;
2897 } 2897 }
2898 2898